diff --git a/app.py b/app.py
index fef1f0b..5b60928 100644
--- a/app.py
+++ b/app.py
@@ -2,11 +2,12 @@ from flask import Flask, request
 from flask_jwt_extended import JWTManager, jwt_required
 from functools import reduce
 import firebase_admin
-from firebase_admin import credentials
+from firebase_admin import credentials, firestore
 from helper.api_messages import QuestionType, get_grading_messages, get_question_gen_messages, get_question_tips
 from helper.file_helper import delete_files_older_than_one_day
 from helper.firebase_helper import download_firebase_file, upload_file_firebase, upload_file_firebase_get_url, \
     save_to_db
+from helper.heygen_api import create_video
 from helper.speech_to_text_helper import speech_to_text, text_to_speech, has_words
 from helper.token_counter import count_tokens
 from helper.openai_interface import make_openai_call
@@ -16,7 +17,8 @@ import re
 
 from dotenv import load_dotenv
 
-from templates.question_templates import getListening1Template, getListening2Template
+from templates.question_templates import getListening1Template, getListening2Template, getSpeaking1Template, \
+    getSpeaking2Template, getSpeaking3Template
 
 load_dotenv()
 
@@ -42,6 +44,8 @@ FIREBASE_BUCKET = 'mti-ielts.appspot.com'
 AUDIO_FILES_PATH = 'download-audio/'
 FIREBASE_LISTENING_AUDIO_FILES_PATH = 'listening_recordings/'
+VIDEO_FILES_PATH = 'download-video/'
+FIREBASE_SPEAKING_VIDEO_FILES_PATH = 'speaking_videos/'
 
 
 @app.route('/listening_section_1', methods=['GET'])
 @jwt_required()
@@ -52,12 +56,6 @@ def get_listening_section_1_question():
                              map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
         response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
                                     GEN_QUESTION_TEMPERATURE)
-        # file_name = str(uuid.uuid4()) + ".mp3"
-        # sound_file_path = AUDIO_FILES_PATH + file_name
-        # firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
-        # text_to_speech(response["transcript"], sound_file_path)
-        # upload_file_firebase(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
-        # response["audio_file"] = firebase_file_path
         return response
     except Exception as e:
         return str(e)
@@ -94,13 +92,6 @@ def get_listening_section_2_question():
                              map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
         response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
                                     GEN_QUESTION_TEMPERATURE)
-
-        # file_name = str(uuid.uuid4()) + ".mp3"
-        # sound_file_path = AUDIO_FILES_PATH + file_name
-        # firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
-        # text_to_speech(response["transcript"], sound_file_path)
-        # upload_file_firebase(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
-        # response["audio_file"] = firebase_file_path
         return response
     except Exception as e:
         return str(e)
@@ -136,13 +127,6 @@ def get_listening_section_3_question():
                              map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
         response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
                                     GEN_QUESTION_TEMPERATURE)
-
-        # file_name = str(uuid.uuid4()) + ".mp3"
-        # sound_file_path = AUDIO_FILES_PATH + file_name
-        # firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
-        # text_to_speech(response["transcript"], sound_file_path)
-        # upload_file_firebase(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
-        # response["audio_file"] = firebase_file_path
         return response
     except Exception as e:
         return str(e)
@@ -178,13 +162,6 @@ def get_listening_section_4_question():
                              map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
         response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
                                     GEN_QUESTION_TEMPERATURE)
-
-        # file_name = str(uuid.uuid4()) + ".mp3"
-        # sound_file_path = AUDIO_FILES_PATH + file_name
-        # firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
-        # text_to_speech(response["transcript"], sound_file_path)
-        # upload_file_firebase(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
-        # response["audio_file"] = firebase_file_path
         return response
     except Exception as e:
         return str(e)
@@ -281,29 +258,6 @@ def grade_writing_task_2():
     except Exception as e:
         return str(e)
-
-@app.route('/fetch_tips', methods=['POST'])
-@jwt_required()
-def fetch_answer_tips():
-    try:
-        data = request.get_json()
-        context = data.get('context')
-        question = data.get('question')
-        answer = data.get('answer')
-        correct_answer = data.get('correct_answer')
-        messages = get_question_tips(question, answer, correct_answer, context)
-        token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
-                             map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
-        response = make_openai_call(GPT_3_5_TURBO, messages, token_count, None, TIPS_TEMPERATURE)
-
-        if isinstance(response, str):
-            response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response)
-
-        return response
-    except Exception as e:
-        return str(e)
-
-
 
 @app.route('/writing_task2', methods=['GET'])
 @jwt_required()
 def get_writing_task_2_question():
@@ -378,6 +332,52 @@ def get_speaking_task_1_question():
     except Exception as e:
         return str(e)
+@app.route('/save_speaking_task_1', methods=['POST'])
+@jwt_required()
+def save_speaking_task_1_question():
+    try:
+        # data = request.get_json()
+        # question = data.get('question')
+        questions_json = getSpeaking1Template()
+        questions = []
+        for question in questions_json["questions"]:
+            result = create_video(question)
+            if result is not None:
+                sound_file_path = VIDEO_FILES_PATH + result
+                firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
+                url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
+                video = {
+                    "text": question,
+                    "video_path": firebase_file_path,
+                    "video_url": url
+                }
+                questions.append(video)
+            else:
+                print("Failed to create video for question: " + question)
+
+        if len(questions) == len(questions_json["questions"]):
+            speaking_pt1_to_insert = {
+                "exercises": [
+                    {
+                        "id": str(uuid.uuid4()),
+                        "prompts": questions,
+                        "text": "Listen carefully and respond.",
+                        "title": questions_json["topic"],
+                        "type": "speakingPart1"
+                    }
+                ],
+                "isDiagnostic": True,
+                "minTimer": 5,
+                "module": "speaking"
+            }
+            if save_to_db("speaking", speaking_pt1_to_insert):
+                return speaking_pt1_to_insert
+            else:
+                raise Exception("Failed to save question: " + str(speaking_pt1_to_insert))
+        else:
+            raise Exception("Array sizes do not match. Video uploading failing is probably the cause.")
+    except Exception as e:
+        return str(e)
 
 
 @app.route('/speaking_task_2', methods=['POST'])
 @jwt_required()
@@ -426,6 +426,120 @@ def get_speaking_task_2_question():
     except Exception as e:
         return str(e)
+@app.route('/save_speaking_task_2', methods=['POST'])
+@jwt_required()
+def save_speaking_task_2_question():
+    try:
+        # data = request.get_json()
+        # question = data.get('question')
+        questions_json = getSpeaking2Template()
+        questions = []
+        for question in questions_json["questions"]:
+            result = create_video(question)
+            if result is not None:
+                sound_file_path = VIDEO_FILES_PATH + result
+                firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
+                url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
+                video = {
+                    "text": question,
+                    "video_path": firebase_file_path,
+                    "video_url": url
+                }
+                questions.append(video)
+            else:
+                print("Failed to create video for question: " + question)
+
+        if len(questions) == len(questions_json["questions"]):
+            speaking_pt2_to_insert = {
+                "exercises": [
+                    {
+                        "id": str(uuid.uuid4()),
+                        "prompts": questions,
+                        "text": "Listen carefully and respond.",
+                        "title": questions_json["topic"],
+                        "type": "speakingPart2"
+                    }
+                ],
+                "isDiagnostic": True,
+                "minTimer": 5,
+                "module": "speaking"
+            }
+            if save_to_db("speaking", speaking_pt2_to_insert):
+                return speaking_pt2_to_insert
+            else:
+                raise Exception("Failed to save question: " + str(speaking_pt2_to_insert))
+        else:
+            raise Exception("Array sizes do not match. Video uploading failing is probably the cause.")
+    except Exception as e:
+        return str(e)
+
+@app.route('/save_speaking_task_3', methods=['POST'])
+@jwt_required()
+def save_speaking_task_3_question():
+    try:
+        # data = request.get_json()
+        # question = data.get('question')
+        questions_json = getSpeaking3Template()
+        questions = []
+        for question in questions_json["questions"]:
+            result = create_video(question)
+            if result is not None:
+                sound_file_path = VIDEO_FILES_PATH + result
+                firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
+                url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
+                video = {
+                    "text": question,
+                    "video_path": firebase_file_path,
+                    "video_url": url
+                }
+                questions.append(video)
+            else:
+                print("Failed to create video for question: " + question)
+
+        if len(questions) == len(questions_json["questions"]):
+            speaking_pt3_to_insert = {
+                "exercises": [
+                    {
+                        "id": str(uuid.uuid4()),
+                        "prompts": questions,
+                        "text": "Listen carefully and respond.",
+                        "title": questions_json["topic"],
+                        "type": "speakingPart3"
+                    }
+                ],
+                "isDiagnostic": True,
+                "minTimer": 5,
+                "module": "speaking"
+            }
+            if save_to_db("speaking", speaking_pt3_to_insert):
+                return speaking_pt3_to_insert
+            else:
+                raise Exception("Failed to save question: " + str(speaking_pt3_to_insert))
+        else:
+            raise Exception("Array sizes do not match. Video uploading failing is probably the cause.")
+    except Exception as e:
+        return str(e)
+
+@app.route('/fetch_tips', methods=['POST'])
+@jwt_required()
+def fetch_answer_tips():
+    try:
+        data = request.get_json()
+        context = data.get('context')
+        question = data.get('question')
+        answer = data.get('answer')
+        correct_answer = data.get('correct_answer')
+        messages = get_question_tips(question, answer, correct_answer, context)
+        token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
+                             map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
+        response = make_openai_call(GPT_3_5_TURBO, messages, token_count, None, TIPS_TEMPERATURE)
+
+        if isinstance(response, str):
+            response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response)
+
+        return response
+    except Exception as e:
+        return str(e)
 
 
 if __name__ == '__main__':
     app.run()
diff --git a/templates/question_templates.py b/templates/question_templates.py
index 189b822..85ec5ee 100644
--- a/templates/question_templates.py
+++ b/templates/question_templates.py
@@ -133,7 +133,7 @@ def getReading1Template():
         "prompt": "Select the appropriate option.",
         "questions": [
             {
-                "id": 1,
+                "id": "1",
                 "options": [
                     {
                         "id": "A",
@@ -157,7 +157,7 @@ def getReading1Template():
                 "variant": "text",
             },
             {
-                "id": 2,
+                "id": "2",
                 "options": [
                     {
                         "id": "A",
@@ -181,7 +181,7 @@ def getReading1Template():
                 "variant": "text",
             },
             {
-                "id": 3,
+                "id": "3",
                 "options": [
                     {
                         "id": "A",
@@ -284,7 +284,7 @@ def getReading2Template():
         "prompt": "Select the appropriate option.",
         "questions": [
             {
-                "id": 1,
+                "id": "1",
                 "options": [
                     {
                         "id": "A",
@@ -308,7 +308,7 @@ def getReading2Template():
                 "variant": "text",
             },
             {
-                "id": 2,
+                "id": "2",
                 "options": [
                     {
                         "id": "A",
@@ -332,7 +332,7 @@ def getReading2Template():
                 "variant": "text",
             },
             {
-                "id": 3,
+                "id": "3",
                 "options": [
                     {
                         "id": "A",
@@ -356,7 +356,7 @@ def getReading2Template():
                 "variant": "text",
             },
             {
-                "id": 4,
+                "id": "4",
                 "options": [
                     {
                         "id": "A",
@@ -420,41 +420,34 @@ def getReading2Template():
 
 def getSpeaking1Template():
     return {
-        "exercises": [
-            {
-                "id": str(uuid.uuid4()),
-                "prompts": [],
-                "text": "Do you enjoy traveling?\\nWhat was the last place you visited for a holiday?\\nDo you prefer traveling "
-                        "by car or by plane?\\nWhat type of places do you like to visit when you travel?",
-                "title": "Travel",
-                "type": "speaking"
-            }
-        ],
-        "isDiagnostic": False,
-        "minTimer": 5,
-        "module": "speaking"
+        "topic": "Travelling",
+        "questions": [
+            "Do you enjoy traveling?\\nWhat was the last place you visited for a holiday?\\nDo you prefer traveling "
+            "by car or by plane?\\nWhat type of places do you like to visit when you travel?"
+        ]
     }
 
 
 def getSpeaking2Template():
     return {
-        "exercises": [
-            {
-                "id": str(uuid.uuid4()),
-                "prompts": [
-                    "Explain the circumstances that led to your need to adapt to a new environment.",
-                    "What were the major changes or differences in this new environment?",
-                    "How did you handle the process of adapting to these changes?",
-                    "Reflect on the impact this experience had on your adaptability and personal growth."
-                ],
-                "text": "Describe an occasion when you had to adapt to a new environment.",
-                "title": "New Environment",
-                "type": "speaking"
-            }
-        ],
-        "isDiagnostic": False,
-        "minTimer": 5,
-        "module": "speaking"
+        "topic": "New Environment",
+        "questions": [
+            "Explain the circumstances that led to your need to adapt to a new environment.\\n"
+            "What were the major changes or differences in this new environment?\\n"
+            "How did you handle the process of adapting to these changes?\\n"
+            "Reflect on the impact this experience had on your adaptability and personal growth."
+        ]
+    }
+
+
+def getSpeaking3Template():
+    return {
+        "topic": "Technology and Society",
+        "questions": [
+            "How do you think technology has affected the way people communicate with each other in today's society?",
+            "In what ways has the use of smartphones and social media platforms changed the dynamics of personal relationships?",
+            "Some argue that technology has made communication more convenient, while others worry that it has led to a decline in face-to-face interactions. What's your perspective on this matter, and how do you think it might impact future generations?"
+        ]
     }