Add speaking endpoints and clean code.
This commit is contained in:

app.py (141 changed lines)
@@ -1,9 +1,15 @@
|
||||
from flask import Flask, request
|
||||
from flask_jwt_extended import JWTManager, jwt_required
|
||||
from functools import reduce
|
||||
import firebase_admin
|
||||
from firebase_admin import credentials
|
||||
from helper.api_messages import QuestionType, get_grading_messages, get_question_gen_messages
|
||||
from helper.firebase_helper import download_firebase_file
|
||||
from helper.speech_to_text_helper import speech_to_text
|
||||
from helper.token_counter import count_tokens
|
||||
from helper.openai_interface import make_openai_call
|
||||
import os
|
||||
import uuid
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
@@ -14,91 +20,80 @@ app = Flask(__name__)
|
||||
# --- Auth setup ---------------------------------------------------------
# JWT secret comes from the environment; every endpoint below is protected
# with @jwt_required().
app.config['JWT_SECRET_KEY'] = os.getenv("JWT_SECRET_KEY")
jwt = JWTManager(app)

# Initialize Firebase Admin SDK
# Path to the service-account JSON is read from GOOGLE_APPLICATION_CREDENTIALS.
cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS"))
firebase_admin.initialize_app(cred)

# Sampling temperatures passed to make_openai_call: low for grading
# (consistent scores), higher for question generation (varied questions).
GRADING_TEMPERATURE = 0.1
GEN_QUESTION_TEMPERATURE = 0.7
# Keys expected in the model's JSON reply for the POST (grading) and
# GET (question generation) endpoints respectively.
WRITING_TASK_2_POST_FIELDS = ['overall', 'comment', 'task_response']
WRITING_TASK_2_GET_FIELDS = ['question']

# Firebase storage bucket holding uploaded speaking-answer recordings.
FIREBASE_BUCKET = 'mti-ielts.appspot.com'
# Local scratch directory where audio answers are downloaded before transcription.
AUDIO_FILES_PATH = 'download-audio/'
||||
@app.route('/writing_task2', methods=['POST'])
@jwt_required()
def grade_writing_task():
    """Grade an IELTS Writing Task 2 answer.

    Expects a JSON body with 'question' and 'answer' keys. Builds the grading
    prompt via helper.api_messages and returns whatever make_openai_call
    produces (overall grade, comment and breakdown fields).
    """
    # NOTE: the legacy inline-prompt implementation that used to sit above the
    # try block was dead code (unreachable after its own `return`) and has
    # been removed; prompt construction now lives in get_grading_messages.
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        messages = get_grading_messages(QuestionType.WRITING_TASK_2, question, answer)
        # Total prompt size; make_openai_call needs it to budget the reply.
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        response = make_openai_call(messages, token_count,
                                    WRITING_TASK_2_POST_FIELDS, GRADING_TEMPERATURE)
        return response
    except Exception as e:
        # NOTE(review): returns the error text with a 200 status and may leak
        # internals; consider (jsonify(error=...), 500) once clients can cope.
        return str(e)
|
||||
|
||||
|
||||
@app.route('/writing_task2', methods=['GET'])
@jwt_required()
def get_writing_task_question():
    """Generate a fresh IELTS Writing Task 2 question.

    Returns the model's reply containing a single 'question' field, produced
    by make_openai_call with the question-generation temperature.
    """
    # NOTE: the legacy inline-prompt implementation that used to precede the
    # try block was unreachable duplicate code and has been removed; prompts
    # are now built by get_question_gen_messages.
    try:
        messages = get_question_gen_messages(QuestionType.WRITING_TASK_2)
        # Total prompt size; make_openai_call needs it to budget the reply.
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        response = make_openai_call(messages, token_count,
                                    WRITING_TASK_2_GET_FIELDS, GEN_QUESTION_TEMPERATURE)
        return response
    except Exception as e:
        # NOTE(review): error text is returned with a 200 status — see the
        # matching note on grade_writing_task.
        return str(e)
|
||||
|
||||
@app.route('/speaking_task', methods=['POST'])
@jwt_required()
def grade_speaking_task():
    """Grade an IELTS Speaking answer supplied as audio.

    JSON body: 'question' (text) and 'answer' (Firebase storage path of the
    recording). The audio is downloaded from FIREBASE_BUCKET, transcribed
    with speech_to_text, and then graded exactly like a written answer.
    """
    # Unique local filename so concurrent requests never collide on disk.
    sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
    try:
        data = request.get_json()
        question = data.get('question')
        answer_firebase_path = data.get('answer')

        download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
        answer = speech_to_text(sound_file_name)

        messages = get_grading_messages(QuestionType.SPEAKING, question, answer)
        # Total prompt size; make_openai_call needs it to budget the reply.
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        response = make_openai_call(messages, token_count,
                                    WRITING_TASK_2_POST_FIELDS, GRADING_TEMPERATURE)
        return response
    except Exception as e:
        # NOTE(review): error text is returned with a 200 status — see the
        # matching note on grade_writing_task.
        return str(e)
    finally:
        # Bug fix: the original called os.remove unconditionally in both the
        # success and except paths; if the request failed BEFORE the download
        # (e.g. bad JSON), the file never existed and os.remove raised
        # FileNotFoundError, masking the real error. Guarded finally-cleanup
        # also removes the duplicated remove call.
        if os.path.exists(sound_file_name):
            os.remove(sound_file_name)
|
||||
|
||||
@app.route('/speaking_task', methods=['GET'])
@jwt_required()
def get_speaking_task_question():
    """Generate a fresh IELTS Speaking question.

    Returns the model's reply containing a single 'question' field, produced
    by make_openai_call with the question-generation temperature.
    """
    try:
        messages = get_question_gen_messages(QuestionType.SPEAKING)
        # Idiomatic replacement for the reduce/map/filter chain used elsewhere:
        # total token count across every message that carries content.
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        response = make_openai_call(messages, token_count,
                                    WRITING_TASK_2_GET_FIELDS, GEN_QUESTION_TEMPERATURE)
        return response
    except Exception as e:
        # NOTE(review): error text is returned with a 200 status — see the
        # matching note on grade_writing_task.
        return str(e)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
Reference in New Issue
Block a user