Calculate Grading Summary Logic

This commit is contained in:
Pedro Fonseca
2024-01-06 18:46:29 +00:00
parent f2e8497756
commit ac27239787
2 changed files with 28 additions and 22 deletions

42
app.py
View File

@@ -11,6 +11,7 @@ from helper.heygen_api import create_videos_and_save_to_db
from helper.speech_to_text_helper import *
from helper.token_counter import count_tokens
from helper.openai_interface import make_openai_call, make_openai_instruct_call
from grading_summary.grading_summary import calculate_grading_summary
import os
import re
import logging
@@ -37,6 +38,7 @@ thread_event = threading.Event()
logging.basicConfig(level=logging.DEBUG, # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
format='%(asctime)s - %(levelname)s - %(message)s')
@app.route('/healthcheck', methods=['GET'])
def healthcheck():
    """Liveness probe: always reports the service as up.

    Returns:
        dict: ``{"healthy": True}``, serialized to JSON by Flask.
    """
    status = {"healthy": True}
    return status
@@ -407,10 +409,10 @@ def grade_speaking_task_2():
"Speaking Part 2 question: '" + question + "'")
token_count = count_tokens(perfect_answer_message)["n_tokens"]
response['perfect_answer'] = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT,
perfect_answer_message,
token_count,
None,
GEN_QUESTION_TEMPERATURE)
perfect_answer_message,
token_count,
None,
GEN_QUESTION_TEMPERATURE)
return response
else:
return {
@@ -495,15 +497,15 @@ def grade_speaking_task_3():
"Speaking Part 3 question: '" + item["question"] + "'")
token_count = count_tokens(perfect_answer_message)["n_tokens"]
perfect_answers.append(make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT,
perfect_answer_message,
token_count,
None,
GEN_QUESTION_TEMPERATURE))
perfect_answer_message,
token_count,
None,
GEN_QUESTION_TEMPERATURE))
message = (
"Grade this Speaking Part 3 answer according to ielts grading system and provide "
"an elaborated comment where you deep dive into what is wrong and right about the answers."
"Please assign a grade of 0 if the answer provided does not address the question."
"\n\n The questions and answers are: \n\n'")
"Grade this Speaking Part 3 answer according to ielts grading system and provide "
"an elaborated comment where you deep dive into what is wrong and right about the answers."
"Please assign a grade of 0 if the answer provided does not address the question."
"\n\n The questions and answers are: \n\n'")
formatted_text = ""
for i, entry in enumerate(answers, start=1):
@@ -511,9 +513,10 @@ def grade_speaking_task_3():
formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
message += formatted_text
message += ("'\n\nProvide your answer on the following json format: {'comment': 'comment about answer quality', "
"'overall': 0.0, 'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, "
"'Grammatical Range and Accuracy': 0.0, 'Pronunciation': 0.0}}")
message += (
"'\n\nProvide your answer on the following json format: {'comment': 'comment about answer quality', "
"'overall': 0.0, 'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, "
"'Grammatical Range and Accuracy': 0.0, 'Pronunciation': 0.0}}")
token_count = count_tokens(message)["n_tokens"]
response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count,
@@ -690,5 +693,14 @@ def fetch_answer_tips():
return str(e)
@app.route('/grading_summary', methods=['POST'])
@jwt_required()
def grading_summary():
    """Compute and return the grading summary for the posted JSON payload.

    Delegates entirely to ``calculate_grading_summary``; the payload schema
    is defined by that helper (not visible here — see
    ``grading_summary/grading_summary.py``).

    Returns:
        The result of ``calculate_grading_summary`` on success, or the
        stringified exception on failure.
    """
    try:
        payload = request.get_json()
        return calculate_grading_summary(payload)
    # NOTE(review): broad catch that returns the error text to the caller —
    # mirrors the sibling endpoints' error style (e.g. fetch_answer_tips),
    # but it replies with HTTP 200 and may leak internals; consider an
    # error status + logging. Kept as-is to preserve behavior.
    except Exception as err:
        return str(err)
# Start the Flask development server only when this module is executed
# directly (not when imported by a WSGI server).
if __name__ == "__main__":
    app.run()