"""Flask API for generating and grading IELTS practice exercises.

Endpoints cover the four IELTS modules (listening, reading, writing,
speaking): question generation is delegated to OpenAI instruct calls,
audio/video assets are produced via text-to-speech / HeyGen and stored in
Firebase, and recorded answers are transcribed and graded.

NOTE(review): ``os``, ``logging``, ``random``, ``uuid``, ``re`` and the many
constants used below (AUDIO_FILES_PATH, GPT_3_5_TURBO_INSTRUCT, difficulties,
mti_topics, ...) are brought into scope by the ``helper.*`` star imports —
confirm before tightening those imports.
"""

import threading
from functools import reduce

import firebase_admin
from firebase_admin import credentials
from flask import Flask, request
from flask_jwt_extended import JWTManager, jwt_required

from helper.api_messages import *
from helper.exam_variant import ExamVariant
from helper.exercises import *
from helper.file_helper import delete_files_older_than_one_day
from helper.firebase_helper import *
from helper.heygen_api import create_video, create_videos_and_save_to_db
from helper.openai_interface import *
from helper.question_templates import *
from helper.speech_to_text_helper import *
from heygen.AvatarEnum import AvatarEnum

load_dotenv()

app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = os.getenv("JWT_SECRET_KEY")
jwt = JWTManager(app)

# Initialize Firebase Admin SDK
cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS"))
FIREBASE_BUCKET = os.getenv('FIREBASE_BUCKET')
firebase_admin.initialize_app(cred)

thread_event = threading.Event()

# Configure logging
logging.basicConfig(level=logging.DEBUG,  # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
                    format='%(asctime)s - %(levelname)s - %(message)s')

# The four speaking criteria, in the order the grading responses use them.
SPEAKING_CRITERIA = ("Fluency and Coherence", "Lexical Resource",
                     "Grammatical Range and Accuracy", "Pronunciation")


def _remove_if_exists(path):
    """Delete *path* if present; a failed download may never create it."""
    if os.path.exists(path):
        os.remove(path)


def _zero_writing_response(comment):
    """Zero-score writing grade returned when the answer cannot be graded."""
    return {
        'comment': comment,
        'overall': 0,
        'task_response': {
            'Coherence and Cohesion': 0,
            'Grammatical Range and Accuracy': 0,
            'Lexical Resource': 0,
            'Task Achievement': 0
        }
    }


def _zero_speaking_response():
    """Zero-score speaking grade returned when the audio has too few words."""
    return {
        "comment": "The audio recorded does not contain enough english words to be graded.",
        "overall": 0,
        "task_response": {criterion: 0 for criterion in SPEAKING_CRITERIA}
    }


def _fix_speaking_overall(response):
    """If the model returned an overall of 0, average the four criteria instead."""
    if response["overall"] == "0.0" or response["overall"] == 0.0:
        response["overall"] = round(
            sum(response["task_response"][criterion] for criterion in SPEAKING_CRITERIA) / 4, 1)
    return response


@app.route('/healthcheck', methods=['GET'])
def healthcheck():
    """Liveness probe; no authentication required."""
    return {"healthy": True}


@app.route('/listening_section_1', methods=['GET'])
@jwt_required()
def get_listening_section_1_question():
    """Generate a Listening Section 1 two-person conversation with exercises."""
    try:
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(two_people_scenarios))
        req_exercises = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not req_exercises:
            req_exercises = random.sample(LISTENING_EXERCISE_TYPES, 1)
        number_of_exercises_q = divide_number_into_parts(TOTAL_LISTENING_SECTION_1_EXERCISES, len(req_exercises))
        unprocessed_conversation, processed_conversation = generate_listening_1_conversation(topic)
        app.logger.info("Generated conversation: " + str(processed_conversation))
        # Exercise ids 1-10 belong to section 1.
        start_id = 1
        exercises = generate_listening_conversation_exercises(unprocessed_conversation, req_exercises,
                                                              number_of_exercises_q, start_id, difficulty)
        return {
            "exercises": exercises,
            "text": processed_conversation,
            "difficulty": difficulty
        }
    except Exception as e:
        return str(e)


@app.route('/listening_section_2', methods=['GET'])
@jwt_required()
def get_listening_section_2_question():
    """Generate a Listening Section 2 social monologue with exercises."""
    try:
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(social_monologue_contexts))
        req_exercises = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not req_exercises:
            req_exercises = random.sample(LISTENING_EXERCISE_TYPES, 2)
        number_of_exercises_q = divide_number_into_parts(TOTAL_LISTENING_SECTION_2_EXERCISES, len(req_exercises))
        monologue = generate_listening_2_monologue(topic)
        app.logger.info("Generated monologue: " + str(monologue))
        # Exercise ids 11-20 belong to section 2.
        start_id = 11
        exercises = generate_listening_monologue_exercises(monologue, req_exercises,
                                                           number_of_exercises_q, start_id, difficulty)
        return {
            "exercises": exercises,
            "text": monologue,
            "difficulty": difficulty
        }
    except Exception as e:
        return str(e)


@app.route('/listening_section_3', methods=['GET'])
@jwt_required()
def get_listening_section_3_question():
    """Generate a Listening Section 3 four-person conversation with exercises."""
    try:
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(four_people_scenarios))
        req_exercises = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not req_exercises:
            req_exercises = random.sample(LISTENING_EXERCISE_TYPES, 1)
        number_of_exercises_q = divide_number_into_parts(TOTAL_LISTENING_SECTION_3_EXERCISES, len(req_exercises))
        unprocessed_conversation, processed_conversation = generate_listening_3_conversation(topic)
        app.logger.info("Generated conversation: " + str(processed_conversation))
        # Exercise ids 21-30 belong to section 3.
        start_id = 21
        exercises = generate_listening_conversation_exercises(unprocessed_conversation, req_exercises,
                                                              number_of_exercises_q, start_id, difficulty)
        return {
            "exercises": exercises,
            "text": processed_conversation,
            "difficulty": difficulty
        }
    except Exception as e:
        return str(e)


@app.route('/listening_section_4', methods=['GET'])
@jwt_required()
def get_listening_section_4_question():
    """Generate a Listening Section 4 academic monologue with exercises."""
    try:
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(academic_subjects))
        req_exercises = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not req_exercises:
            req_exercises = random.sample(LISTENING_EXERCISE_TYPES, 2)
        number_of_exercises_q = divide_number_into_parts(TOTAL_LISTENING_SECTION_4_EXERCISES, len(req_exercises))
        monologue = generate_listening_4_monologue(topic)
        app.logger.info("Generated monologue: " + str(monologue))
        # Exercise ids 31-40 belong to section 4.
        start_id = 31
        exercises = generate_listening_monologue_exercises(monologue, req_exercises,
                                                           number_of_exercises_q, start_id, difficulty)
        return {
            "exercises": exercises,
            "text": monologue,
            "difficulty": difficulty
        }
    except Exception as e:
        return str(e)


@app.route('/listening', methods=['POST'])
@jwt_required()
def save_listening():
    """Synthesize audio for each listening part, upload it, and persist the exam.

    Expects JSON with ``parts`` (each carrying ``text`` and ``exercises``),
    an optional ``minTimer`` and ``difficulty``.
    """
    try:
        data = request.get_json()
        parts = data.get('parts')
        minTimer = data.get('minTimer', LISTENING_MIN_TIMER_DEFAULT)
        difficulty = data.get('difficulty', random.choice(difficulties))
        template = getListeningTemplate()
        template['difficulty'] = difficulty
        exam_id = str(uuid.uuid4())
        for part in parts:
            part_template = getListeningPartTemplate()
            file_name = str(uuid.uuid4()) + ".mp3"
            sound_file_path = AUDIO_FILES_PATH + file_name
            firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
            # Conversations need per-speaker synthesis; monologues use plain TTS.
            if "conversation" in part["text"]:
                conversation_text_to_speech(part["text"]["conversation"], sound_file_path)
            else:
                text_to_speech(part["text"], sound_file_path)
            file_url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
            part_template["audio"]["source"] = file_url
            part_template["exercises"] = part["exercises"]
            template['parts'].append(part_template)
        # A non-default timer marks a shortened (partial) exam variant.
        if minTimer != LISTENING_MIN_TIMER_DEFAULT:
            template["minTimer"] = minTimer
            template["variant"] = ExamVariant.PARTIAL.value
        else:
            template["variant"] = ExamVariant.FULL.value
        (result, exam_id) = save_to_db_with_id("listening", template, exam_id)
        if result:
            return {**template, "id": exam_id}
        else:
            # str() required: ``parts`` is a list and cannot be concatenated.
            raise Exception("Failed to save question: " + str(parts))
    except Exception as e:
        return str(e)


@app.route('/writing_task1', methods=['POST'])
@jwt_required()
def grade_writing_task_1():
    """Grade a Writing Task 1 answer; short/empty answers score zero."""
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        if not has_words(answer):
            return _zero_writing_response("The answer does not contain any english words.")
        elif not has_x_words(answer, 100):
            return _zero_writing_response("The answer is insufficient and too small to be graded.")
        else:
            message = ("Evaluate the given Writing Task 1 response based on the IELTS grading system, ensuring a "
                       "strict assessment that penalizes errors. Deduct points for deviations from the task, and "
                       "assign a score of 0 if the response fails to address the question. Additionally, provide an "
                       "exemplary answer with a minimum of 150 words, along with a detailed commentary highlighting "
                       "both strengths and weaknesses in the response. Present your evaluation in JSON format with "
                       "the following structure: {'perfect_answer': 'example perfect answer', 'comment': "
                       "'comment about answer quality', 'overall': 0.0, 'task_response': {'Task Achievement': 0.0, "
                       "'Coherence and Cohesion': 0.0, 'Lexical Resource': 0.0, 'Grammatical Range and Accuracy': "
                       "0.0}}\n Question: '" + question + "' \n Answer: '" + answer + "'")
            token_count = count_tokens(message)["n_tokens"]
            response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count, ["comment"],
                                                 GRADING_TEMPERATURE)
            response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
            response['fixed_text'] = get_fixed_text(answer)
            return response
    except Exception as e:
        return str(e)


@app.route('/writing_task1_general', methods=['GET'])
@jwt_required()
def get_writing_task_1_general_question():
    """Generate a Writing Task 1 (General Training) letter prompt."""
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        gen_wt1_question = "Craft a prompt for an IELTS Writing Task 1 General Training exercise that instructs the " \
                           "student to compose a letter. The prompt should present a specific scenario or situation, " \
                           "based on the topic of '" + topic + "', " \
                           "requiring the student to provide information, advice, or instructions within the letter. " \
                           "Make sure that the generated prompt is of " + difficulty + " difficulty and does not contain forbidden subjects in muslim countries."
        token_count = count_tokens(gen_wt1_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_wt1_question, token_count, None,
                                             GEN_QUESTION_TEMPERATURE)
        return {
            "question": response.strip(),
            "difficulty": difficulty,
            "topic": topic
        }
    except Exception as e:
        return str(e)


@app.route('/writing_task2', methods=['POST'])
@jwt_required()
def grade_writing_task_2():
    """Grade a Writing Task 2 answer; short/empty answers score zero."""
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        if not has_words(answer):
            return _zero_writing_response("The answer does not contain any english words.")
        elif not has_x_words(answer, 180):
            return _zero_writing_response("The answer is insufficient and too small to be graded.")
        else:
            message = ("Evaluate the given Writing Task 2 response based on the IELTS grading system, ensuring a "
                       "strict assessment that penalizes errors. Deduct points for deviations from the task, and "
                       "assign a score of 0 if the response fails to address the question. Additionally, provide an "
                       "exemplary answer with a minimum of 250 words, along with a detailed commentary highlighting "
                       "both strengths and weaknesses in the response. Present your evaluation in JSON format with "
                       "the following structure: {'perfect_answer': 'example perfect answer', 'comment': "
                       "'comment about answer quality', 'overall': 0.0, 'task_response': {'Task Achievement': 0.0, "
                       "'Coherence and Cohesion': 0.0, 'Lexical Resource': 0.0, 'Grammatical Range and Accuracy': "
                       "0.0}}\n Question: '" + question + "' \n Answer: '" + answer + "'")
            token_count = count_tokens(message)["n_tokens"]
            response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count, ["comment"],
                                                 GEN_QUESTION_TEMPERATURE)
            response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
            response['fixed_text'] = get_fixed_text(answer)
            return response
    except Exception as e:
        return str(e)


def fix_writing_overall(overall: float, task_response: dict):
    """Clamp an implausible overall grade to the rounded criteria average.

    The model sometimes returns an overall outside the [min, max] range of
    the individual criteria; replace it with the rounded mean in that case.
    """
    if overall > max(task_response.values()) or overall < min(task_response.values()):
        total_sum = sum(task_response.values())
        average = total_sum / len(task_response.values())
        rounded_average = round(average, 0)
        return rounded_average
    return overall


@app.route('/writing_task2_general', methods=['GET'])
@jwt_required()
def get_writing_task_2_general_question():
    """Generate a Writing Task 2 (General Training) essay question."""
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        gen_wt2_question = "Craft a comprehensive question of " + difficulty + " difficulty for IELTS Writing Task 2 General Training that directs the candidate " \
                           "to delve into an in-depth analysis of contrasting perspectives on the topic of '" + topic + "'. The candidate " \
                           "should be asked to discuss the strengths and weaknesses of both viewpoints, provide evidence or " \
                           "examples, and present a well-rounded argument before concluding with their personal opinion on the " \
                           "subject."
        token_count = count_tokens(gen_wt2_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_wt2_question, token_count, None,
                                             GEN_QUESTION_TEMPERATURE)
        return {
            "question": response.strip(),
            "difficulty": difficulty,
            "topic": topic
        }
    except Exception as e:
        return str(e)


@app.route('/speaking_task_1', methods=['POST'])
@jwt_required()
def grade_speaking_task_1():
    """Download, transcribe and grade a Speaking Part 1 answer from Firebase."""
    request_id = uuid.uuid4()
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
    logging.info("POST - speaking_task_1 - Received request to grade speaking task 1. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        question = data.get('question')
        answer_firebase_path = data.get('answer')
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Downloading file " + answer_firebase_path)
        download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
        logging.info("POST - speaking_task_1 - " + str(
            request_id) + " - Downloaded file " + answer_firebase_path + " to " + sound_file_name)
        answer = speech_to_text(sound_file_name)
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Transcripted answer: " + answer)
        if has_x_words(answer, 20):
            message = ("Evaluate the given Speaking Part 1 response based on the IELTS grading system, ensuring a "
                       "strict assessment that penalizes errors. Deduct points for deviations from the task, and "
                       "assign a score of 0 if the response fails to address the question. Additionally, provide "
                       "detailed commentary highlighting both strengths and weaknesses in the response. Present your "
                       "evaluation in JSON format with "
                       "the following structure: {'comment': 'comment about answer quality', 'overall': 0.0, "
                       "'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, 'Grammatical Range "
                       "and Accuracy': 0.0, 'Pronunciation': 0.0}}\n Question: '" + question + "' \n Answer: '" + answer + "'")
            token_count = count_tokens(message)["n_tokens"]
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Requesting grading of the answer.")
            response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count, ["comment"],
                                                 GRADING_TEMPERATURE)
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Answer graded: " + str(response))
            perfect_answer_message = ("Provide a perfect answer according to ielts grading system to the following "
                                      "Speaking Part 1 question: '" + question + "'")
            token_count = count_tokens(perfect_answer_message)["n_tokens"]
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Requesting perfect answer.")
            response['perfect_answer'] = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, perfect_answer_message,
                                                                   token_count, None, GEN_QUESTION_TEMPERATURE)
            logging.info("POST - speaking_task_1 - " + str(
                request_id) + " - Perfect answer: " + response['perfect_answer'])
            response['transcript'] = answer
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Requesting fixed_text.")
            response['fixed_text'] = get_speaking_corrections(answer)
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Fixed text: " + response['fixed_text'])
            response = _fix_speaking_overall(response)
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Final response: " + str(response))
            return response
        else:
            logging.info("POST - speaking_task_1 - " + str(
                request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer)
            return _zero_speaking_response()
    except Exception as e:
        # The download may have failed before the file was created.
        _remove_if_exists(sound_file_name)
        return str(e), 400


@app.route('/speaking_task_1', methods=['GET'])
@jwt_required()
def get_speaking_task_1_question():
    """Generate a Speaking Part 1 question as JSON with topic metadata."""
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        gen_sp1_question = "Craft a thought-provoking question of " + difficulty + " difficulty for IELTS Speaking Part 1 that encourages candidates to delve deeply " \
                           "into personal experiences, preferences, or insights on the topic of '" + topic + "'. Instruct the candidate to offer " \
                           "not only detailed descriptions but also provide nuanced explanations, examples, or anecdotes to enrich " \
                           "their response. Make sure that the generated question does not contain forbidden subjects in muslim countries." \
                           "Provide your response in this json format: {'topic': 'topic','question': 'question'}"
        token_count = count_tokens(gen_sp1_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_sp1_question, token_count, GEN_FIELDS,
                                             GEN_QUESTION_TEMPERATURE)
        response["type"] = 1
        response["difficulty"] = difficulty
        response["topic"] = topic
        return response
    except Exception as e:
        return str(e)


@app.route('/speaking_task_2', methods=['POST'])
@jwt_required()
def grade_speaking_task_2():
    """Download, transcribe and grade a Speaking Part 2 answer from Firebase."""
    request_id = uuid.uuid4()
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
    logging.info("POST - speaking_task_2 - Received request to grade speaking task 2. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        question = data.get('question')
        answer_firebase_path = data.get('answer')
        logging.info("POST - speaking_task_2 - " + str(request_id) + " - Downloading file " + answer_firebase_path)
        download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
        logging.info("POST - speaking_task_2 - " + str(
            request_id) + " - Downloaded file " + answer_firebase_path + " to " + sound_file_name)
        answer = speech_to_text(sound_file_name)
        logging.info("POST - speaking_task_2 - " + str(request_id) + " - Transcripted answer: " + answer)
        if has_x_words(answer, 20):
            message = ("Evaluate the given Speaking Part 2 response based on the IELTS grading system, ensuring a "
                       "strict assessment that penalizes errors. Deduct points for deviations from the task, and "
                       "assign a score of 0 if the response fails to address the question. Additionally, provide "
                       "detailed commentary highlighting both strengths and weaknesses in the response. Present your "
                       "evaluation in JSON format with "
                       "the following structure: {'comment': 'comment about answer quality', 'overall': 0.0, "
                       "'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, 'Grammatical Range "
                       "and Accuracy': 0.0, "
                       "'Pronunciation': 0.0}}\n Question: '" + question + "' \n Answer: '" + answer + "'")
            token_count = count_tokens(message)["n_tokens"]
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting grading of the answer.")
            response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count, ["comment"],
                                                 GRADING_TEMPERATURE)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Answer graded: " + str(response))
            perfect_answer_message = ("Provide a perfect answer according to ielts grading system to the following "
                                      "Speaking Part 2 question: '" + question + "'")
            token_count = count_tokens(perfect_answer_message)["n_tokens"]
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting perfect answer.")
            response['perfect_answer'] = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, perfect_answer_message,
                                                                   token_count, None, GEN_QUESTION_TEMPERATURE)
            logging.info("POST - speaking_task_2 - " + str(
                request_id) + " - Perfect answer: " + response['perfect_answer'])
            response['transcript'] = answer
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting fixed_text.")
            response['fixed_text'] = get_speaking_corrections(answer)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Fixed text: " + response['fixed_text'])
            response = _fix_speaking_overall(response)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Final response: " + str(response))
            return response
        else:
            logging.info("POST - speaking_task_2 - " + str(
                request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer)
            return _zero_speaking_response()
    except Exception as e:
        # The download may have failed before the file was created.
        _remove_if_exists(sound_file_name)
        return str(e), 400


@app.route('/speaking_task_2', methods=['GET'])
@jwt_required()
def get_speaking_task_2_question():
    """Generate a Speaking Part 2 cue-card question with three prompts."""
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        gen_sp2_question = "Create a question of " + difficulty + " difficulty for IELTS Speaking Part 2 that encourages candidates to narrate a personal experience " \
                           "or story related to the topic of '" + topic + "'. Include 3 prompts that guide the candidate to describe " \
                           "specific aspects of the experience, such as details about the situation, their actions, and the " \
                           "reasons it left a lasting impression. Make sure that the generated question does not contain forbidden subjects in muslim countries." \
                           "Provide your response in this json format: {'topic': 'topic','question': 'question', " \
                           "'prompts': ['prompt_1', 'prompt_2', 'prompt_3']}"
        token_count = count_tokens(gen_sp2_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_sp2_question, token_count, GEN_FIELDS,
                                             GEN_QUESTION_TEMPERATURE)
        response["type"] = 2
        response["difficulty"] = difficulty
        response["topic"] = topic
        return response
    except Exception as e:
        return str(e)


@app.route('/speaking_task_3', methods=['GET'])
@jwt_required()
def get_speaking_task_3_question():
    """Generate a set of three Speaking Part 3 discussion questions."""
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        gen_sp3_question = "Formulate a set of 3 questions of " + difficulty + " difficulty for IELTS Speaking Part 3 that encourage candidates to engage in a " \
                           "meaningful discussion on the topic of '" + topic + "'. Provide inquiries, ensuring " \
                           "they explore various aspects, perspectives, and implications related to the topic. " \
                           "Make sure that the generated question does not contain forbidden subjects in muslim countries." \
                           "Provide your response in this json format: {'topic': 'topic','questions': ['question', " \
                           "'question', 'question']}"
        token_count = count_tokens(gen_sp3_question)["n_tokens"]
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, gen_sp3_question, token_count, GEN_FIELDS,
                                             GEN_QUESTION_TEMPERATURE)
        # Remove the numbers from the questions only if the string starts with a number
        response["questions"] = [re.sub(r"^\d+\.\s*", "", question) if re.match(r"^\d+\.", question) else question
                                 for question in response["questions"]]
        response["type"] = 3
        response["difficulty"] = difficulty
        response["topic"] = topic
        return response
    except Exception as e:
        return str(e)


@app.route('/speaking_task_3', methods=['POST'])
@jwt_required()
def grade_speaking_task_3():
    """Transcribe and grade a batch of Speaking Part 3 answers as one response.

    Each entry of ``answers`` carries a ``question`` and a Firebase audio path
    in ``answer``; all transcripts are graded together with a single prompt.
    """
    request_id = uuid.uuid4()
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    logging.info("POST - speaking_task_3 - Received request to grade speaking task 3. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        answers = data.get('answers')
        text_answers = []
        perfect_answers = []
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Received " + str(len(answers)) + " total answers.")
        for item in answers:
            sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
            logging.info("POST - speaking_task_3 - " + str(request_id) + " - Downloading file " + item["answer"])
            download_firebase_file(FIREBASE_BUCKET, item["answer"], sound_file_name)
            logging.info("POST - speaking_task_3 - " + str(
                request_id) + " - Downloaded file " + item["answer"] + " to " + sound_file_name)
            answer_text = speech_to_text(sound_file_name)
            logging.info("POST - speaking_task_3 - " + str(request_id) + " - Transcripted answer: " + answer_text)
            text_answers.append(answer_text)
            # Replace the Firebase path with the transcript for prompt building.
            item["answer"] = answer_text
            os.remove(sound_file_name)
            if not has_x_words(answer_text, 20):
                logging.info("POST - speaking_task_3 - " + str(
                    request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer_text)
                return _zero_speaking_response()
            perfect_answer_message = ("Provide a perfect answer according to ielts grading system to the following "
                                      "Speaking Part 3 question: '" + item["question"] + "'")
            token_count = count_tokens(perfect_answer_message)["n_tokens"]
            logging.info("POST - speaking_task_3 - " + str(
                request_id) + " - Requesting perfect answer for question: " + item["question"])
            perfect_answers.append(make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, perfect_answer_message,
                                                             token_count, None, GEN_QUESTION_TEMPERATURE))
        message = (
            "Evaluate the given Speaking Part 3 response based on the IELTS grading system, ensuring a "
            "strict assessment that penalizes errors. Deduct points for deviations from the task, and "
            "assign a score of 0 if the response fails to address the question. Additionally, provide detailed "
            "commentary highlighting both strengths and weaknesses in the response."
            "\n\n The questions and answers are: \n\n'")
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Formatting answers and questions for prompt.")
        formatted_text = ""
        for i, entry in enumerate(answers, start=1):
            formatted_text += f"**Question {i}:**\n{entry['question']}\n\n"
            formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Formatted answers and questions for prompt: " + formatted_text)
        message += formatted_text
        message += (
            "'\n\nProvide your answer on the following json format: {'comment': 'comment about answer quality', "
            "'overall': 0.0, 'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, "
            "'Grammatical Range and Accuracy': 0.0, 'Pronunciation': 0.0}}")
        token_count = count_tokens(message)["n_tokens"]
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Requesting grading of the answers.")
        response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count, ["comment"],
                                             GRADING_TEMPERATURE)
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Answers graded: " + str(response))
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Adding perfect answers to response.")
        for i, answer in enumerate(perfect_answers, start=1):
            response['perfect_answer_' + str(i)] = answer
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Adding transcript and fixed texts to response.")
        for i, answer in enumerate(text_answers, start=1):
            response['transcript_' + str(i)] = answer
            response['fixed_text_' + str(i)] = get_speaking_corrections(answer)
        response = _fix_speaking_overall(response)
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Final response: " + str(response))
        return response
    except Exception as e:
        return str(e), 400


@app.route('/speaking', methods=['POST'])
@jwt_required()
def save_speaking():
    """Kick off background video generation and persist the speaking exam.

    Returns immediately with the template and id; the HeyGen video creation
    runs on a daemon-less worker thread.
    """
    try:
        data = request.get_json()
        exercises = data.get('exercises')
        minTimer = data.get('minTimer', SPEAKING_MIN_TIMER_DEFAULT)
        template = getSpeakingTemplate()
        template["minTimer"] = minTimer
        # A shorter-than-default timer marks a partial exam variant.
        if minTimer < SPEAKING_MIN_TIMER_DEFAULT:
            template["variant"] = ExamVariant.PARTIAL.value
        else:
            template["variant"] = ExamVariant.FULL.value
        exam_id = str(uuid.uuid4())
        app.logger.info('Received request to save speaking with id: ' + exam_id)
        thread_event.set()
        thread = threading.Thread(
            target=create_videos_and_save_to_db,
            args=(exercises, template, exam_id),
            name=("thread-save-speaking-" + exam_id)
        )
        thread.start()
        # ``Thread.getName()`` is deprecated; use the ``name`` attribute.
        app.logger.info('Started thread to save speaking. Thread: ' + thread.name)
        # Return response without waiting for create_videos_and_save_to_db to finish
        return {**template, "id": exam_id}
    except Exception as e:
        return str(e)


@app.route("/speaking/generate_speaking_video", methods=['POST'])
@jwt_required()
def generate_speaking_video():
    """Create an avatar video for one speaking question and upload it."""
    try:
        data = request.get_json()
        avatar = data.get("avatar", random.choice(list(AvatarEnum)).value)
        prompts = data.get("prompts", [])
        question = data.get("question")
        if len(prompts) > 0:
            question = question + " In your answer you should consider: " + " ".join(prompts)
        sp1_result = create_video(question, avatar)
        if sp1_result is not None:
            sound_file_path = VIDEO_FILES_PATH + sp1_result
            firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + sp1_result
            url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
            sp1_video_path = firebase_file_path
            sp1_video_url = url
            return {
                "text": data["question"],
                "prompts": prompts,
                "title": data["topic"],
                "video_url": sp1_video_url,
                "video_path": sp1_video_path,
                "type": "speaking",
                "id": uuid.uuid4()
            }
        else:
            app.logger.error("Failed to create video for part 1 question: " + data["question"])
            return str("Failed to create video for part 1 question: " + data["question"])
    except Exception as e:
        return str(e)


@app.route("/speaking/generate_interactive_video", methods=['POST'])
@jwt_required()
def generate_interactive_video():
    """Create avatar videos for all Speaking Part 3 questions and upload them."""
    try:
        data = request.get_json()
        sp3_questions = []
        avatar = data.get("avatar", random.choice(list(AvatarEnum)).value)
        app.logger.info('Creating videos for speaking part 3')
        for question in data["questions"]:
            result = create_video(question, avatar)
            if result is not None:
                sound_file_path = VIDEO_FILES_PATH + result
                firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
                url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
                video = {
                    "text": question,
                    "video_path": firebase_file_path,
                    "video_url": url
                }
                sp3_questions.append(video)
            else:
                # Fixed: was ``app.app.logger.error`` (AttributeError at runtime).
                app.logger.error("Failed to create video for part 3 question: " + question)
        return {
            "prompts": sp3_questions,
            "title": data["topic"],
            "type": "interactiveSpeaking",
            "id": uuid.uuid4()
        }
    except Exception as e:
        return str(e)


@app.route('/reading_passage_1', methods=['GET'])
@jwt_required()
def get_reading_passage_1_question():
    """Generate Reading Passage 1 with exercises."""
    try:
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(topics))
        req_exercises = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_1(topic, req_exercises, difficulty)
    except Exception as e:
        return str(e)


@app.route('/reading_passage_2', methods=['GET'])
@jwt_required()
def get_reading_passage_2_question():
    """Generate Reading Passage 2 with exercises."""
    try:
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(topics))
        req_exercises = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_2(topic, req_exercises, difficulty)
    except Exception as e:
        return str(e)


@app.route('/reading_passage_3', methods=['GET'])
@jwt_required()
def get_reading_passage_3_question():
    """Generate Reading Passage 3 with exercises."""
    try:
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(topics))
        req_exercises = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_3(topic, req_exercises, difficulty)
    except Exception as e:
        return str(e)


@app.route('/level', methods=['GET'])
@jwt_required()
def get_level_exam():
    """Generate a 25-question multiple-choice placement (level) exam."""
    try:
        number_of_exercises = 25
        exercises = gen_multiple_choice_level(number_of_exercises)
        return {
            "exercises": [exercises],
            "isDiagnostic": False,
            "minTimer": 25,
            "module": "level"
        }
    except Exception as e:
        return str(e)


@app.route('/fetch_tips', methods=['POST'])
@jwt_required()
def fetch_answer_tips():
    """Return study tips for a single answered question via a chat call."""
    try:
        data = request.get_json()
        context = data.get('context')
        question = data.get('question')
        answer = data.get('answer')
        correct_answer = data.get('correct_answer')
        messages = get_question_tips(question, answer, correct_answer, context)
        # Sum the token counts of every message that carries content.
        token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
                             map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
        response = make_openai_call(GPT_3_5_TURBO, messages, token_count, None, TIPS_TEMPERATURE)
        if isinstance(response, str):
            # Strip a leading "SpeakerName: " style prefix the model may add.
            response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response)
        return response
    except Exception as e:
        return str(e)


@app.route('/grading_summary', methods=['POST'])
@jwt_required()
def grading_summary():
    """Summarize per-section grades with evaluations and suggestions."""
    # Body Format
    # {'sections': Array of {'code': key, 'name': name, 'grade': grade}}
    # Output Format
    # {'sections': Array of {'code': key, 'name': name, 'grade': grade, 'evaluation': evaluation, 'suggestions': suggestions}}
    try:
        return calculate_grading_summary(request.get_json())
    except Exception as e:
        return str(e)


if __name__ == '__main__':
    app.run()