Merged in feature/fetch-tips (pull request #3)

Added endpoint for /fetch_tips

Approved-by: Cristiano Ferreira
This commit is contained in:
Pedro Fonseca
2023-09-03 15:20:14 +00:00
committed by Cristiano Ferreira
4 changed files with 132 additions and 7 deletions

49
app.py
View File

@@ -3,7 +3,7 @@ from flask_jwt_extended import JWTManager, jwt_required
from functools import reduce from functools import reduce
import firebase_admin import firebase_admin
from firebase_admin import credentials from firebase_admin import credentials
from helper.api_messages import QuestionType, get_grading_messages, get_question_gen_messages from helper.api_messages import QuestionType, get_grading_messages, get_question_gen_messages, get_question_tips
from helper.file_helper import delete_files_older_than_one_day from helper.file_helper import delete_files_older_than_one_day
from helper.firebase_helper import download_firebase_file, upload_file_firebase from helper.firebase_helper import download_firebase_file, upload_file_firebase
from helper.speech_to_text_helper import speech_to_text, text_to_speech from helper.speech_to_text_helper import speech_to_text, text_to_speech
@@ -11,6 +11,7 @@ from helper.token_counter import count_tokens
from helper.openai_interface import make_openai_call from helper.openai_interface import make_openai_call
import os import os
import uuid import uuid
import re
from dotenv import load_dotenv from dotenv import load_dotenv
@@ -26,6 +27,7 @@ cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS"))
firebase_admin.initialize_app(cred) firebase_admin.initialize_app(cred)
GRADING_TEMPERATURE = 0.1 GRADING_TEMPERATURE = 0.1
TIPS_TEMPERATURE = 0.2
GEN_QUESTION_TEMPERATURE = 0.9 GEN_QUESTION_TEMPERATURE = 0.9
GPT_3_5_TURBO = "gpt-3.5-turbo" GPT_3_5_TURBO = "gpt-3.5-turbo"
GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k" GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k"
@@ -37,6 +39,7 @@ FIREBASE_BUCKET = 'mti-ielts.appspot.com'
AUDIO_FILES_PATH = 'download-audio/' AUDIO_FILES_PATH = 'download-audio/'
FIREBASE_LISTENING_AUDIO_FILES_PATH = 'listening_recordings/' FIREBASE_LISTENING_AUDIO_FILES_PATH = 'listening_recordings/'
@app.route('/listening_section_1', methods=['GET']) @app.route('/listening_section_1', methods=['GET'])
@jwt_required() @jwt_required()
def get_listening_section_1_question(): def get_listening_section_1_question():
@@ -44,11 +47,13 @@ def get_listening_section_1_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_1) messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_1)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE) response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
return response return response
except Exception as e: except Exception as e:
return str(e) return str(e)
@app.route('/listening_section_2', methods=['GET']) @app.route('/listening_section_2', methods=['GET'])
@jwt_required() @jwt_required()
def get_listening_section_2_question(): def get_listening_section_2_question():
@@ -57,7 +62,8 @@ def get_listening_section_2_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_2) messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_2)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE) response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
# file_name = str(uuid.uuid4()) + ".mp3" # file_name = str(uuid.uuid4()) + ".mp3"
# sound_file_path = AUDIO_FILES_PATH + file_name # sound_file_path = AUDIO_FILES_PATH + file_name
@@ -69,6 +75,7 @@ def get_listening_section_2_question():
except Exception as e: except Exception as e:
return str(e) return str(e)
@app.route('/listening_section_3', methods=['GET']) @app.route('/listening_section_3', methods=['GET'])
@jwt_required() @jwt_required()
def get_listening_section_3_question(): def get_listening_section_3_question():
@@ -77,7 +84,8 @@ def get_listening_section_3_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_3) messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_3)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE) response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
# file_name = str(uuid.uuid4()) + ".mp3" # file_name = str(uuid.uuid4()) + ".mp3"
# sound_file_path = AUDIO_FILES_PATH + file_name # sound_file_path = AUDIO_FILES_PATH + file_name
@@ -89,6 +97,7 @@ def get_listening_section_3_question():
except Exception as e: except Exception as e:
return str(e) return str(e)
@app.route('/listening_section_4', methods=['GET']) @app.route('/listening_section_4', methods=['GET'])
@jwt_required() @jwt_required()
def get_listening_section_4_question(): def get_listening_section_4_question():
@@ -97,7 +106,8 @@ def get_listening_section_4_question():
messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_4) messages = get_question_gen_messages(QuestionType.LISTENING_SECTION_4)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS, GEN_QUESTION_TEMPERATURE) response = make_openai_call(GPT_3_5_TURBO_16K, messages, token_count, LISTENING_GEN_FIELDS,
GEN_QUESTION_TEMPERATURE)
# file_name = str(uuid.uuid4()) + ".mp3" # file_name = str(uuid.uuid4()) + ".mp3"
# sound_file_path = AUDIO_FILES_PATH + file_name # sound_file_path = AUDIO_FILES_PATH + file_name
@@ -109,6 +119,7 @@ def get_listening_section_4_question():
except Exception as e: except Exception as e:
return str(e) return str(e)
@app.route('/writing_task1', methods=['POST']) @app.route('/writing_task1', methods=['POST'])
@jwt_required() @jwt_required()
def grade_writing_task_1(): def grade_writing_task_1():
@@ -125,6 +136,7 @@ def grade_writing_task_1():
except Exception as e: except Exception as e:
return str(e) return str(e)
@app.route('/writing_task2', methods=['POST']) @app.route('/writing_task2', methods=['POST'])
@jwt_required() @jwt_required()
def grade_writing_task_2(): def grade_writing_task_2():
@@ -141,6 +153,28 @@ def grade_writing_task_2():
return str(e) return str(e)
@app.route('/fetch_tips', methods=['POST'])
@jwt_required()
def fetch_answer_tips():
    """Return model-generated tips explaining why a submitted answer was wrong.

    Expects a JSON body with 'question', 'answer', 'correct_answer' and an
    optional 'context'; forwards them to the tips prompt builder and relays
    the OpenAI completion back to the caller.
    """
    try:
        payload = request.get_json()
        messages = get_question_tips(
            payload.get('question'),
            payload.get('answer'),
            payload.get('correct_answer'),
            payload.get('context'),
        )
        # Total token budget: sum over every message that carries content.
        token_count = sum(
            count_tokens(msg["content"])['n_tokens']
            for msg in messages
            if "content" in msg
        )
        tips = make_openai_call(GPT_3_5_TURBO, messages, token_count, None, TIPS_TEMPERATURE)
        if isinstance(tips, str):
            # Drop a leading "Label: " prefix the model sometimes prepends.
            tips = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", tips)
        return tips
    except Exception as e:
        # NOTE(review): mirrors the error style of the other endpoints in
        # this file — the exception text is returned with a 200 status.
        return str(e)
@app.route('/writing_task2', methods=['GET']) @app.route('/writing_task2', methods=['GET'])
@jwt_required() @jwt_required()
def get_writing_task_2_question(): def get_writing_task_2_question():
@@ -153,6 +187,7 @@ def get_writing_task_2_question():
except Exception as e: except Exception as e:
return str(e) return str(e)
@app.route('/speaking_task_1', methods=['POST']) @app.route('/speaking_task_1', methods=['POST'])
@jwt_required() @jwt_required()
def grade_speaking_task_1(): def grade_speaking_task_1():
@@ -176,6 +211,7 @@ def grade_speaking_task_1():
os.remove(sound_file_name) os.remove(sound_file_name)
return str(e) return str(e)
@app.route('/speaking_task_1', methods=['GET']) @app.route('/speaking_task_1', methods=['GET'])
@jwt_required() @jwt_required()
def get_speaking_task_1_question(): def get_speaking_task_1_question():
@@ -188,6 +224,7 @@ def get_speaking_task_1_question():
except Exception as e: except Exception as e:
return str(e) return str(e)
@app.route('/speaking_task_2', methods=['POST']) @app.route('/speaking_task_2', methods=['POST'])
@jwt_required() @jwt_required()
def grade_speaking_task_2(): def grade_speaking_task_2():
@@ -211,6 +248,7 @@ def grade_speaking_task_2():
os.remove(sound_file_name) os.remove(sound_file_name)
return str(e) return str(e)
@app.route('/speaking_task_2', methods=['GET']) @app.route('/speaking_task_2', methods=['GET'])
@jwt_required() @jwt_required()
def get_speaking_task_2_question(): def get_speaking_task_2_question():
@@ -223,5 +261,6 @@ def get_speaking_task_2_question():
except Exception as e: except Exception as e:
return str(e) return str(e)
if __name__ == '__main__': if __name__ == '__main__':
app.run() app.run()

View File

@@ -344,3 +344,37 @@ def get_question_gen_messages(question_type: QuestionType):
] ]
else: else:
raise Exception("Question type not implemented: " + question_type.value) raise Exception("Question type not implemented: " + question_type.value)
def get_question_tips(question: str, answer: str, correct_answer: str, context: str = None):
    """Build the chat-message list that asks the model for answer tips.

    Args:
        question: The exam question that was asked.
        answer: The (incorrect) answer the student submitted.
        correct_answer: The expected answer.
        context: Optional passage the question refers to; skipped when
            None or empty. (Effectively ``Optional[str]`` despite the
            ``str`` annotation, kept for backward compatibility.)

    Returns:
        A list of ``{"role": "user", "content": ...}`` dicts in the shape
        expected by the OpenAI chat-completion API.
    """
    messages = [
        {
            "role": "user",
            "content": "You are a IELTS exam program that analyzes incorrect answers to questions and gives tips to "
                       "help students understand why it was a wrong answer and gives helpful insight for the future. "
                       "The tip should refer to the context and question.",
        }
    ]
    # Truthiness covers both None and "" — the two cases the original
    # explicit test excluded.
    if context:
        messages.append({
            "role": "user",
            "content": f"This is the context for the question: {context}",
        })
    messages.extend([
        {
            "role": "user",
            "content": f"This is the question: {question}",
        },
        {
            "role": "user",
            "content": f"This is the answer: {answer}",
        },
        {
            "role": "user",
            "content": f"This is the correct answer: {correct_answer}",
        }
    ])
    return messages

View File

@@ -53,7 +53,12 @@ def make_openai_call(model, messages, token_count, fields_to_check, temperature)
frequency_penalty=float(FREQUENCY_PENALTY), frequency_penalty=float(FREQUENCY_PENALTY),
messages=messages messages=messages
) )
if fields_to_check is None:
return result["choices"][0]["message"]["content"]
processed_response = process_response(result["choices"][0]["message"]["content"], fields_to_check[0]) processed_response = process_response(result["choices"][0]["message"]["content"], fields_to_check[0])
if check_fields(processed_response, fields_to_check) is False and try_count < TRY_LIMIT: if check_fields(processed_response, fields_to_check) is False and try_count < TRY_LIMIT:
try_count = try_count + 1 try_count = try_count + 1
return make_openai_call(model, messages, token_count, fields_to_check, temperature) return make_openai_call(model, messages, token_count, fields_to_check, temperature)

View File

@@ -1,9 +1,9 @@
{ {
"info": { "info": {
"_postman_id": "2e0eed9d-6a6f-4785-9972-087d51ac0265", "_postman_id": "c3a09737-c624-4b32-9e9a-af8ee8084764",
"name": "ielts", "name": "ielts",
"schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json",
"_exporter_id": "26107457" "_exporter_id": "29491168"
}, },
"item": [ "item": [
{ {
@@ -257,6 +257,53 @@
} }
}, },
"response": [] "response": []
},
{
"name": "Fetch Answer Tips",
"request": {
"auth": {
"type": "bearer",
"bearer": [
{
"key": "token",
"value": "{{jwt_token}}",
"type": "string"
}
]
},
"method": "POST",
"header": [
{
"key": "Content-Type",
"value": "application/json",
"type": "text"
}
],
"body": {
"mode": "raw",
"raw": "{\n \"question\": \"When did Kendrick Lamar sign for TDE?\",\n \"answer\": \"Hello GPT.\",\n\t\t\"correct_answer\": \"2005\",\n \"context\": \"Kendrick Lamar Duckworth (born June 17, 1987) is an American rapper and songwriter. Known for his progressive musical styles and socially conscious songwriting, he is often considered one of the most influential hip hop artists of his generation. Born and raised in Compton, California, Lamar began his career as a teenager performing under the stage name K.Dot. He quickly garnered local attention which led to him signing a recording contract with Top Dawg Entertainment (TDE) in 2005.\"\n}\n",
"options": {
"raw": {
"language": "json"
}
}
},
"url": {
"raw": "http://127.0.0.1:5000/fetch_tips",
"protocol": "http",
"host": [
"127",
"0",
"0",
"1"
],
"port": "5000",
"path": [
"fetch_tips"
]
}
},
"response": []
} }
] ]
} }