Commit: "Add question db insert."
File changed: app.py (94 lines changed)
@@ -5,8 +5,8 @@ import firebase_admin
|
||||
from firebase_admin import credentials
|
||||
from helper.api_messages import QuestionType, get_grading_messages, get_question_gen_messages
|
||||
from helper.file_helper import delete_files_older_than_one_day
|
||||
from helper.firebase_helper import download_firebase_file
|
||||
from helper.speech_to_text_helper import speech_to_text
|
||||
from helper.firebase_helper import download_firebase_file, upload_file_firebase
|
||||
from helper.speech_to_text_helper import speech_to_text, text_to_speech
|
||||
from helper.token_counter import count_tokens
|
||||
from helper.openai_interface import make_openai_call
|
||||
import os
|
||||
@@ -27,11 +27,87 @@ firebase_admin.initialize_app(cred)
|
||||
|
||||
GRADING_TEMPERATURE = 0.1
|
||||
GEN_QUESTION_TEMPERATURE = 0.9
|
||||
GPT_3_5_TURBO = "gpt-3.5-turbo"
|
||||
GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k"
|
||||
GRADING_FIELDS = ['overall', 'comment', 'task_response']
|
||||
GEN_FIELDS = ['question']
|
||||
LISTENING_GEN_FIELDS = ['transcript', 'exercise']
|
||||
|
||||
FIREBASE_BUCKET = 'mti-ielts.appspot.com'
|
||||
AUDIO_FILES_PATH = 'download-audio/'
|
||||
FIREBASE_LISTENING_AUDIO_FILES_PATH = 'listening_recordings/'
|
||||
|
||||
@app.route('/listening_section_1', methods=['GET'])
|
||||
@jwt_required()
|
||||
def get_listening_section_1_question():
|
||||
try:
|
||||
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_1)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
|
||||
@app.route('/listening_section_2', methods=['GET'])
|
||||
@jwt_required()
|
||||
def get_listening_section_2_question():
|
||||
try:
|
||||
delete_files_older_than_one_day(AUDIO_FILES_PATH)
|
||||
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_2)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
|
||||
# file_name = str(uuid.uuid4()) + ".mp3"
|
||||
# sound_file_path = AUDIO_FILES_PATH + file_name
|
||||
# firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
|
||||
# text_to_speech(response["transcript"], sound_file_path)
|
||||
# upload_file_firebase(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
|
||||
# response["audio_file"] = firebase_file_path
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
|
||||
@app.route('/listening_section_3', methods=['GET'])
|
||||
@jwt_required()
|
||||
def get_listening_section_3_question():
|
||||
try:
|
||||
delete_files_older_than_one_day(AUDIO_FILES_PATH)
|
||||
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_3)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
|
||||
# file_name = str(uuid.uuid4()) + ".mp3"
|
||||
# sound_file_path = AUDIO_FILES_PATH + file_name
|
||||
# firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
|
||||
# text_to_speech(response["transcript"], sound_file_path)
|
||||
# upload_file_firebase(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
|
||||
# response["audio_file"] = firebase_file_path
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
|
||||
@app.route('/listening_section_4', methods=['GET'])
|
||||
@jwt_required()
|
||||
def get_listening_section_4_question():
|
||||
try:
|
||||
delete_files_older_than_one_day(AUDIO_FILES_PATH)
|
||||
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_4)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
|
||||
# file_name = str(uuid.uuid4()) + ".mp3"
|
||||
# sound_file_path = AUDIO_FILES_PATH + file_name
|
||||
# firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
|
||||
# text_to_speech(response["transcript"], sound_file_path)
|
||||
# upload_file_firebase(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
|
||||
# response["audio_file"] = firebase_file_path
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
|
||||
@app.route('/writing_task1', methods=['POST'])
|
||||
@jwt_required()
|
||||
@@ -44,7 +120,7 @@ def grade_writing_task_1():
|
||||
messages = get_grading_messages(QuestionType.WRITING_TASK_1, question, answer, context)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
@@ -59,7 +135,7 @@ def grade_writing_task_2():
|
||||
messages = get_grading_messages(QuestionType.WRITING_TASK_2, question, answer)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
@@ -72,7 +148,7 @@ def get_writing_task_2_question():
|
||||
messages = get_question_gen_messages(QuestionType.WRITING_TASK_2)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
@@ -93,7 +169,7 @@ def grade_speaking_task_1():
|
||||
messages = get_grading_messages(QuestionType.SPEAKING_1, question, answer)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
os.remove(sound_file_name)
|
||||
return response
|
||||
except Exception as e:
|
||||
@@ -107,7 +183,7 @@ def get_speaking_task_1_question():
|
||||
messages = get_question_gen_messages(QuestionType.SPEAKING_1)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
@@ -128,7 +204,7 @@ def grade_speaking_task_2():
|
||||
messages = get_grading_messages(QuestionType.SPEAKING_2, question, answer)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
|
||||
os.remove(sound_file_name)
|
||||
return response
|
||||
except Exception as e:
|
||||
@@ -142,7 +218,7 @@ def get_speaking_task_2_question():
|
||||
messages = get_question_gen_messages(QuestionType.SPEAKING_2)
|
||||
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
|
||||
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
|
||||
response = make_openai_call(messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
|
||||
return response
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
|
||||
Reference in New Issue
Block a user