import os
import random
import re
import uuid  # previously only reachable via a star import; made explicit
from functools import reduce

from dotenv import load_dotenv
from flask import Flask, request
from flask_jwt_extended import JWTManager, jwt_required

from helper.api_messages import *
from helper.constants import *
from helper.exercises import *
from helper.file_helper import delete_files_older_than_one_day
from helper.firebase_helper import *
from helper.heygen_api import create_video
from helper.openai_interface import make_openai_call, make_openai_instruct_call
from helper.speech_to_text_helper import *
from helper.token_counter import count_tokens
from templates.question_templates import *

load_dotenv()

app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = os.getenv("JWT_SECRET_KEY")
jwt = JWTManager(app)

# Initialize Firebase Admin SDK.
# NOTE(review): `credentials` and `firebase_admin` arrive via the
# `helper.firebase_helper` star import — confirm that module re-exports them.
cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS"))
firebase_admin.initialize_app(cred)


def _count_message_tokens(messages):
    """Sum the token counts of every message that carries a "content" field."""
    return sum(count_tokens(m["content"])['n_tokens']
               for m in messages if "content" in m)


def _save_listening_question(question, audio_key):
    """Synthesize audio for a listening question, upload it, and persist the question.

    :param question: listening-question dict containing an "audio" sub-dict.
    :param audio_key: key inside question["audio"] holding the text to synthesize.
    :returns: the question dict, with question["audio"]["source"] set to the
        public URL of the uploaded mp3.
    :raises Exception: if the database insert fails.
    """
    file_name = str(uuid.uuid4()) + ".mp3"
    sound_file_path = AUDIO_FILES_PATH + file_name
    firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
    text_to_speech(question["audio"][audio_key], sound_file_path)
    file_url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path,
                                            sound_file_path)
    question["audio"]["source"] = file_url
    if save_to_db("listening", question):
        return question
    # str() is required here: concatenating the dict directly raised TypeError
    # and masked the real failure.
    raise Exception("Failed to save question: " + str(question))


@app.route('/listening_section_1', methods=['GET'])
@jwt_required()
def get_listening_section_1_question():
    """Generate a new Listening Section 1 question via the chat model."""
    try:
        # Cleanup added for consistency with the section 2-4 endpoints.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_1)
        token_count = _count_message_tokens(messages)
        return make_openai_call(GPT_3_5_TURBO_16K, messages, token_count,
                                LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
    except Exception as e:
        # NOTE(review): errors are returned as a 200 text body here (and in the
        # other generator endpoints); consider an error status code.
        return str(e)


@app.route('/save_listening_section_1', methods=['POST'])
@jwt_required()
def save_listening_section_1_question():
    """Build a Listening Section 1 question from its template, attach audio, save it."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        question = getListening1Template()
        # TODO it's the conversation audio, still work to do on text-to-speech
        return _save_listening_question(question, "conversation")
    except Exception as e:
        return str(e)


@app.route('/listening_section_2', methods=['GET'])
@jwt_required()
def get_listening_section_2_question():
    """Generate a new Listening Section 2 question via the chat model."""
    try:
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_2)
        token_count = _count_message_tokens(messages)
        return make_openai_call(GPT_3_5_TURBO_16K, messages, token_count,
                                LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
    except Exception as e:
        return str(e)


@app.route('/save_listening_section_2', methods=['POST'])
@jwt_required()
def save_listening_section_2_question():
    """Build a Listening Section 2 question from its template, attach audio, save it."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        question = getListening2Template()
        return _save_listening_question(question, "text")
    except Exception as e:
        return str(e)


@app.route('/listening_section_3', methods=['GET'])
@jwt_required()
def get_listening_section_3_question():
    """Generate a new Listening Section 3 question via the chat model."""
    try:
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_3)
        token_count = _count_message_tokens(messages)
        return make_openai_call(GPT_3_5_TURBO_16K, messages, token_count,
                                LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
    except Exception as e:
        return str(e)


@app.route('/save_listening_section_3', methods=['POST'])
@jwt_required()
def save_listening_section_3_question():
    """Build a Listening Section 3 question, attach audio, save it."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        # NOTE(review): uses the Section 2 template — looks like a copy/paste
        # placeholder; confirm whether a section-3 template exists.
        question = getListening2Template()
        return _save_listening_question(question, "text")
    except Exception as e:
        return str(e)


@app.route('/listening_section_4', methods=['GET'])
@jwt_required()
def get_listening_section_4_question():
    """Generate a new Listening Section 4 question via the chat model."""
    try:
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_4)
        token_count = _count_message_tokens(messages)
        return make_openai_call(GPT_3_5_TURBO_16K, messages, token_count,
                                LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
    except Exception as e:
        return str(e)


@app.route('/save_listening_section_4', methods=['POST'])
@jwt_required()
def save_listening_section_4_question():
    """Build a Listening Section 4 question, attach audio, save it."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        # NOTE(review): uses the Section 2 template — looks like a copy/paste
        # placeholder; confirm whether a section-4 template exists.
        question = getListening2Template()
        return _save_listening_question(question, "text")
    except Exception as e:
        return str(e)


@app.route('/writing_task1', methods=['POST'])
@jwt_required()
def grade_writing_task_1():
    """Grade a Writing Task 1 answer.

    Expects JSON with 'question', 'context' and 'answer'. Returns the model's
    grading payload, or an all-zero score when the answer has no English words.
    """
    try:
        data = request.get_json()
        question = data.get('question')
        context = data.get('context')
        answer = data.get('answer')
        if has_words(answer):
            messages = get_grading_messages(QuestionType.WRITING_TASK_1,
                                            question, answer, context)
            token_count = _count_message_tokens(messages)
            return make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                    GRADING_FIELDS, GRADING_TEMPERATURE)
        return {
            'comment': "The answer does not contain any english words.",
            'overall': 0,
            'task_response': {
                'Coherence and Cohesion': 0,
                'Grammatical Range and Accuracy': 0,
                'Lexical Resource': 0,
                'Task Achievement': 0
            }
        }
    except Exception as e:
        return str(e)


@app.route('/writing_task1_general', methods=['GET'])
@jwt_required()
def get_writing_task_1_general_question():
    """Generate a Writing Task 1 (General Training) letter prompt."""
    try:
        gen_wt1_question = "Craft a prompt for an IELTS Writing Task 1 General Training exercise that instructs the " \
                           "student to compose a letter. The prompt should present a specific scenario or situation, " \
                           "requiring the student to provide information, advice, or instructions within the letter."
        token_count = count_tokens(gen_wt1_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_wt1_question,
                                             token_count, None, GEN_QUESTION_TEMPERATURE)
        return {
            "question": response.strip()
        }
    except Exception as e:
        return str(e)


@app.route('/save_writing_task_1', methods=['POST'])
@jwt_required()
def save_writing_task_1_question():
    """Persist a Writing Task 1 question to the database."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        # TODO ADD SAVE IMAGE TO DB
        # NOTE(review): getListening2Template() looks like a placeholder for a
        # writing-task template — confirm before relying on this endpoint.
        question = getListening2Template()
        if save_to_db("writing", question):
            return question
        raise Exception("Failed to save question: " + str(question))
    except Exception as e:
        return str(e)


@app.route('/writing_task2', methods=['POST'])
@jwt_required()
def grade_writing_task_2():
    """Grade a Writing Task 2 answer; zero scores when it has no English words."""
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        if has_words(answer):
            messages = get_grading_messages(QuestionType.WRITING_TASK_2,
                                            question, answer)
            token_count = _count_message_tokens(messages)
            return make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                    GRADING_FIELDS, GRADING_TEMPERATURE)
        return {
            'comment': "The answer does not contain any english words.",
            'overall': 0,
            'task_response': {
                'Coherence and Cohesion': 0,
                'Grammatical Range and Accuracy': 0,
                'Lexical Resource': 0,
                'Task Achievement': 0
            }
        }
    except Exception as e:
        return str(e)


@app.route('/writing_task2_general', methods=['GET'])
@jwt_required()
def get_writing_task_2_general_question():
    """Generate a Writing Task 2 (General Training) essay question."""
    try:
        gen_wt2_question = "Craft a comprehensive question for IELTS Writing Task 2 General Training that directs the candidate " \
                           "to delve into an in-depth analysis of contrasting perspectives on a specific topic. The candidate " \
                           "should be asked to discuss the strengths and weaknesses of both viewpoints, provide evidence or " \
                           "examples, and present a well-rounded argument before concluding with their personal opinion on the " \
                           "subject."
        token_count = count_tokens(gen_wt2_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_wt2_question,
                                             token_count, None, GEN_QUESTION_TEMPERATURE)
        return {
            "question": response.strip()
        }
    except Exception as e:
        return str(e)


@app.route('/save_writing_task_2', methods=['POST'])
@jwt_required()
def save_writing_task_2_question():
    """Persist a Writing Task 2 question to the database."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        # NOTE(review): getListening2Template() looks like a placeholder for a
        # writing-task template — confirm before relying on this endpoint.
        question = getListening2Template()
        if save_to_db("writing", question):
            return question
        raise Exception("Failed to save question: " + str(question))
    except Exception as e:
        return str(e)


@app.route('/speaking_task_1', methods=['POST'])
@jwt_required()
def grade_speaking_task_1():
    """Grade a Speaking Part 1 answer.

    Downloads the recorded answer from Firebase, transcribes it, and grades
    the transcript. Returns (error_text, 400) on failure; an all-zero score
    when the transcript has fewer than 10 English words.
    """
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
    try:
        data = request.get_json()
        question = data.get('question')
        answer_firebase_path = data.get('answer')
        download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
        answer = speech_to_text(sound_file_name)
        if has_10_words(answer):
            messages = get_grading_messages(QuestionType.SPEAKING_1, question, answer)
            token_count = _count_message_tokens(messages)
            return make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                    GRADING_FIELDS, GRADING_TEMPERATURE)
        return {
            "comment": "The audio recorded does not contain enough english words to be graded.",
            "overall": 0,
            "task_response": {
                "Fluency and Coherence": 0,
                "Lexical Resource": 0,
                "Grammatical Range and Accuracy": 0,
                "Pronunciation": 0
            }
        }
    except Exception as e:
        return str(e), 400
    finally:
        # Always remove the temp download (the old code leaked it on the
        # "not enough words" path). The existence guard prevents a failed
        # download from raising inside cleanup and masking the real error.
        if os.path.exists(sound_file_name):
            os.remove(sound_file_name)
def _build_speaking_prompts(question_texts):
    """Create an avatar video for each question text and upload it to Firebase.

    :param question_texts: iterable of question strings.
    :returns: list of prompt dicts ({"text", "video_path", "video_url"}).
        Questions whose video creation failed are skipped (and logged), so the
        result may be shorter than the input.
    """
    prompts = []
    for question in question_texts:
        result = create_video(question)
        if result is None:
            print("Failed to create video for question: " + question)
            continue
        sound_file_path = VIDEO_FILES_PATH + result
        firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
        url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path,
                                           sound_file_path)
        prompts.append({
            "text": question,
            "video_path": firebase_file_path,
            "video_url": url
        })
    return prompts


def _save_speaking_exercise(questions_json, part_type):
    """Build a speaking exercise document (with videos) and persist it.

    :param questions_json: dict with "topic" and "questions" (list of strings).
    :param part_type: exercise type tag, e.g. "speakingPart1".
    :returns: the inserted document.
    :raises Exception: if any video failed to upload or the DB insert failed.
    """
    prompts = _build_speaking_prompts(questions_json["questions"])
    if len(prompts) != len(questions_json["questions"]):
        raise Exception("Array sizes do not match. Video uploading failing is probably the cause.")
    to_insert = {
        "exercises": [
            {
                "id": str(uuid.uuid4()),
                "prompts": prompts,
                "text": "Listen carefully and respond.",
                "title": questions_json["topic"],
                "type": part_type
            }
        ],
        "isDiagnostic": True,
        "minTimer": 5,
        "module": "speaking"
    }
    if save_to_db("speaking", to_insert):
        return to_insert
    # str() is required: concatenating the dict directly raised TypeError
    # (part-1 endpoint previously did exactly that) and masked the failure.
    raise Exception("Failed to save question: " + str(to_insert))


@app.route('/speaking_task_1', methods=['GET'])
@jwt_required()
def get_speaking_task_1_question():
    """Generate a Speaking Part 1 question as {'topic', 'question'} JSON."""
    try:
        gen_sp1_question = "Craft a thought-provoking question for IELTS Speaking Part 1 that encourages candidates to delve deeply " \
                           "into personal experiences, preferences, or insights on diverse topics. Instruct the candidate to offer " \
                           "not only detailed descriptions but also provide nuanced explanations, examples, or anecdotes to enrich " \
                           "their response." \
                           "Provide your response in this json format: {'topic': 'topic','question': 'question'}"
        token_count = count_tokens(gen_sp1_question)["n_tokens"]
        return make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_sp1_question,
                                         token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
    except Exception as e:
        return str(e)


@app.route('/save_speaking_task_1', methods=['POST'])
@jwt_required()
def save_speaking_task_1_question():
    """Build a Speaking Part 1 exercise from its template, create videos, save it."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        return _save_speaking_exercise(getSpeaking1Template(), "speakingPart1")
    except Exception as e:
        return str(e)


@app.route('/speaking_task_2', methods=['POST'])
@jwt_required()
def grade_speaking_task_2():
    """Grade a Speaking Part 2 answer downloaded from Firebase.

    Returns (error_text, 400) on failure; an all-zero score when the
    transcript has fewer than 10 English words.
    """
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
    try:
        data = request.get_json()
        question = data.get('question')
        answer_firebase_path = data.get('answer')
        download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
        answer = speech_to_text(sound_file_name)
        if has_10_words(answer):
            messages = get_grading_messages(QuestionType.SPEAKING_2, question, answer)
            token_count = sum(count_tokens(m["content"])['n_tokens']
                              for m in messages if "content" in m)
            return make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                    GRADING_FIELDS, GRADING_TEMPERATURE)
        return {
            "comment": "The audio recorded does not contain enough english words to be graded.",
            "overall": 0,
            "task_response": {
                "Fluency and Coherence": 0,
                "Lexical Resource": 0,
                "Grammatical Range and Accuracy": 0,
                "Pronunciation": 0
            }
        }
    except Exception as e:
        return str(e), 400
    finally:
        # Always remove the temp download (old code leaked it on the
        # "not enough words" path); the guard keeps a failed download from
        # raising inside cleanup and masking the real error.
        if os.path.exists(sound_file_name):
            os.remove(sound_file_name)


@app.route('/speaking_task_2', methods=['GET'])
@jwt_required()
def get_speaking_task_2_question():
    """Generate a Speaking Part 2 question with three prompts as JSON."""
    try:
        gen_sp2_question = "Create a question for IELTS Speaking Part 2 that encourages candidates to narrate a personal experience " \
                           "or story related to a randomly selected topic. Include 3 prompts that guide the candidate to describe " \
                           "specific aspects of the experience, such as details about the situation, their actions, and the " \
                           "reasons it left a lasting impression." \
                           "Provide your response in this json format: {'topic': 'topic','question': 'question', " \
                           "'prompts': ['prompt_1', 'prompt_2', 'prompt_3']}"
        token_count = count_tokens(gen_sp2_question)["n_tokens"]
        return make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_sp2_question,
                                         token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
    except Exception as e:
        return str(e)


@app.route('/save_speaking_task_2', methods=['POST'])
@jwt_required()
def save_speaking_task_2_question():
    """Build a Speaking Part 2 exercise from its template, create videos, save it."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        return _save_speaking_exercise(getSpeaking2Template(), "speakingPart2")
    except Exception as e:
        return str(e)


@app.route('/speaking_task_3', methods=['GET'])
@jwt_required()
def get_speaking_task_3_question():
    """Generate three Speaking Part 3 discussion questions as JSON."""
    try:
        gen_sp3_question = "Formulate a set of 3 questions for IELTS Speaking Part 3 that encourage candidates to engage in a " \
                           "meaningful discussion on a particular topic. Provide inquiries, ensuring " \
                           "they explore various aspects, perspectives, and implications related to the topic." \
                           "Provide your response in this json format: {'topic': 'topic','questions': ['question', " \
                           "'question', 'question']}"
        token_count = count_tokens(gen_sp3_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_sp3_question,
                                             token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
        # Remove the numbers from the questions only if the string starts with a number
        response["questions"] = [re.sub(r"^\d+\.\s*", "", question)
                                 if re.match(r"^\d+\.", question) else question
                                 for question in response["questions"]]
        return response
    except Exception as e:
        return str(e)


@app.route('/speaking_task_3', methods=['POST'])
@jwt_required()
def grade_speaking_task_3():
    """Grade a batch of Speaking Part 3 answers.

    Expects JSON {'answers': [{'answer': <firebase path>, ...}, ...]}.
    Transcribes each recording; aborts with an all-zero score as soon as one
    transcript has fewer than 10 English words.
    """
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    try:
        data = request.get_json()
        answers = data.get('answers')
        for item in answers:
            sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
            download_firebase_file(FIREBASE_BUCKET, item["answer"], sound_file_name)
            try:
                answer_text = speech_to_text(sound_file_name)
            finally:
                # Remove the temp file even if transcription raises (old code
                # leaked it in that case); guard against a failed download.
                if os.path.exists(sound_file_name):
                    os.remove(sound_file_name)
            item["answer_text"] = answer_text
            if not has_10_words(answer_text):
                return {
                    "comment": "The audio recorded does not contain enough english words to be graded.",
                    "overall": 0,
                    "task_response": {
                        "Fluency and Coherence": 0,
                        "Lexical Resource": 0,
                        "Grammatical Range and Accuracy": 0,
                        "Pronunciation": 0
                    }
                }
        messages = get_speaking_grading_messages(answers)
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        return make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                GRADING_FIELDS, GRADING_TEMPERATURE)
    except Exception as e:
        return str(e), 400


@app.route('/save_speaking_task_3', methods=['POST'])
@jwt_required()
def save_speaking_task_3_question():
    """Build a Speaking Part 3 exercise from its template, create videos, save it."""
    try:
        # data = request.get_json()
        # question = data.get('question')
        return _save_speaking_exercise(getSpeaking3Template(), "speakingPart3")
    except Exception as e:
        return str(e)


@app.route('/reading_passage_1', methods=['GET'])
@jwt_required()
def get_reading_passage_1_question():
    """Generate a Reading Passage 1 with the requested exercise types.

    Query params: 'topic' (optional, random by default) and repeated
    'exercises' values selecting exercise types.
    """
    try:
        TOTAL_EXERCISES = 13
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(topics))
        req_exercises = request.args.getlist('exercises')
        # NOTE(review): behavior with zero 'exercises' params depends on
        # divide_number_into_parts(…, 0) — confirm it handles an empty list.
        number_of_exercises_q = divide_number_into_parts(TOTAL_EXERCISES,
                                                         len(req_exercises))
        passage = generate_reading_passage(QuestionType.READING_PASSAGE_1, topic)
        exercises = []
        for req_exercise in req_exercises:
            if req_exercise == "multiple_choice":
                # .get() suggests the parts come back in a queue-like object —
                # TODO confirm against divide_number_into_parts.
                mc_question = gen_multiple_choice_exercise(passage["text"],
                                                           number_of_exercises_q.get())
                exercises.append(mc_question)
        exercises = fix_exercise_ids(exercises)
        return {
            "exercises": exercises,
            "text": {
                "content": passage["text"],
                "title": passage["title"]
            },
        }
    except Exception as e:
        return str(e)


@app.route('/reading_passage_1', methods=['POST'])
@jwt_required()
def save_reading_passage_1_question():
    """Persist a reading passage question.

    NOTE(review): this body is a verbatim copy of the listening-section save
    (listening template, audio synthesis, "listening" collection) — it looks
    like a copy/paste placeholder for the real reading save; confirm intent.
    """
    try:
        # data = request.get_json()
        # question = data.get('question')
        question = getListening1Template()
        file_name = str(uuid.uuid4()) + ".mp3"
        sound_file_path = AUDIO_FILES_PATH + file_name
        firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
        # TODO it's the conversation audio, still work to do on text-to-speech
        text_to_speech(question["audio"]["conversation"], sound_file_path)
        file_url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path,
                                                sound_file_path)
        question["audio"]["source"] = file_url
        if save_to_db("listening", question):
            return question
        # str() is required: concatenating the dict directly raised TypeError.
        raise Exception("Failed to save question: " + str(question))
    except Exception as e:
        return str(e)


@app.route('/fetch_tips', methods=['POST'])
@jwt_required()
def fetch_answer_tips():
    """Return model-generated tips for an answered question.

    Expects JSON with 'context', 'question', 'answer' and 'correct_answer'.
    """
    try:
        data = request.get_json()
        context = data.get('context')
        question = data.get('question')
        answer = data.get('answer')
        correct_answer = data.get('correct_answer')
        messages = get_question_tips(question, answer, correct_answer, context)
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        response = make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                    None, TIPS_TEMPERATURE)
        if isinstance(response, str):
            # Strip a leading "label: " prefix the model sometimes emits.
            response = re.sub(r"^[a-zA-Z0-9_]+:\s*", "", response)
        return response
    except Exception as e:
        return str(e)


if __name__ == '__main__':
    app.run()