Small bugfix to avoid calling OpenAI twice, plus a file reformat

This commit is contained in:
Pedro Fonseca
2023-09-03 15:13:41 +01:00
parent fcd7483fd9
commit cfff3ee6dd
3 changed files with 34 additions and 12 deletions

27
app.py
View File

@@ -27,6 +27,7 @@ cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS"))
firebase_admin.initialize_app(cred)
GRADING_TEMPERATURE = 0.1
TIPS_TEMPERATURE = 0.2
GEN_QUESTION_TEMPERATURE = 0.9
GPT_3_5_TURBO = "gpt-3.5-turbo"
GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k"
@@ -38,6 +39,7 @@ FIREBASE_BUCKET = 'mti-ielts.appspot.com'
AUDIO_FILES_PATH = 'download-audio/'
FIREBASE_LISTENING_AUDIO_FILES_PATH = 'listening_recordings/'
@app.route('/listening_section_1', methods=['GET'])
@jwt_required()
def get_listening_section_1_question():
@@ -45,11 +47,13 @@ def get_listening_section_1_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_1)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
return response
except Exception as e:
return str(e)
@app.route('/listening_section_2', methods=['GET'])
@jwt_required()
def get_listening_section_2_question():
@@ -58,7 +62,8 @@ def get_listening_section_2_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_2)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
# file_name = str(uuid.uuid4()) + ".mp3"
# sound_file_path = AUDIO_FILES_PATH + file_name
@@ -70,6 +75,7 @@ def get_listening_section_2_question():
except Exception as e:
return str(e)
@app.route('/listening_section_3', methods=['GET'])
@jwt_required()
def get_listening_section_3_question():
@@ -78,7 +84,8 @@ def get_listening_section_3_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_3)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
# file_name = str(uuid.uuid4()) + ".mp3"
# sound_file_path = AUDIO_FILES_PATH + file_name
@@ -90,6 +97,7 @@ def get_listening_section_3_question():
except Exception as e:
return str(e)
@app.route('/listening_section_4', methods=['GET'])
@jwt_required()
def get_listening_section_4_question():
@@ -98,7 +106,8 @@ def get_listening_section_4_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_4)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
# file_name = str(uuid.uuid4()) + ".mp3"
# sound_file_path = AUDIO_FILES_PATH + file_name
@@ -110,6 +119,7 @@ def get_listening_section_4_question():
except Exception as e:
return str(e)
@app.route('/writing_task1', methods=['POST'])
@jwt_required()
def grade_writing_task_1():
@@ -126,6 +136,7 @@ def grade_writing_task_1():
except Exception as e:
return str(e)
@app.route('/writing_task2', methods=['POST'])
@jwt_required()
def grade_writing_task_2():
@@ -141,6 +152,7 @@ def grade_writing_task_2():
except Exception as e:
return str(e)
@app.route('/fetch_tips', methods=['POST'])
@jwt_required()
def fetch_answer_tips():
@@ -153,7 +165,7 @@ def fetch_answer_tips():
messages = get_question_tips(question, answer, correct_answer, context)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, None, TIPS_TEMPERATURE)
if isinstance(response, str):
response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response)
@@ -175,6 +187,7 @@ def get_writing_task_2_question():
except Exception as e:
return str(e)
@app.route('/speaking_task_1', methods=['POST'])
@jwt_required()
def grade_speaking_task_1():
@@ -198,6 +211,7 @@ def grade_speaking_task_1():
os.remove(sound_file_name)
return str(e)
@app.route('/speaking_task_1', methods=['GET'])
@jwt_required()
def get_speaking_task_1_question():
@@ -210,6 +224,7 @@ def get_speaking_task_1_question():
except Exception as e:
return str(e)
@app.route('/speaking_task_2', methods=['POST'])
@jwt_required()
def grade_speaking_task_2():
@@ -233,6 +248,7 @@ def grade_speaking_task_2():
os.remove(sound_file_name)
return str(e)
@app.route('/speaking_task_2', methods=['GET'])
@jwt_required()
def get_speaking_task_2_question():
@@ -245,5 +261,6 @@ def get_speaking_task_2_question():
except Exception as e:
return str(e)
if __name__ == '__main__':
app.run()