Calculate Grading Summary Logic

This commit is contained in:
Pedro Fonseca
2024-01-06 18:46:29 +00:00
parent f2e8497756
commit ac27239787
2 changed files with 28 additions and 22 deletions

42
app.py
View File

@@ -11,6 +11,7 @@ from helper.heygen_api import create_videos_and_save_to_db
from helper.speech_to_text_helper import * from helper.speech_to_text_helper import *
from helper.token_counter import count_tokens from helper.token_counter import count_tokens
from helper.openai_interface import make_openai_call, make_openai_instruct_call from helper.openai_interface import make_openai_call, make_openai_instruct_call
from grading_summary.grading_summary import calculate_grading_summary
import os import os
import re import re
import logging import logging
@@ -37,6 +38,7 @@ thread_event = threading.Event()
logging.basicConfig(level=logging.DEBUG, # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) logging.basicConfig(level=logging.DEBUG, # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
format='%(asctime)s - %(levelname)s - %(message)s') format='%(asctime)s - %(levelname)s - %(message)s')
@app.route('/healthcheck', methods=['GET']) @app.route('/healthcheck', methods=['GET'])
def healthcheck(): def healthcheck():
return {"healthy": True} return {"healthy": True}
@@ -407,10 +409,10 @@ def grade_speaking_task_2():
"Speaking Part 2 question: '" + question + "'") "Speaking Part 2 question: '" + question + "'")
token_count = count_tokens(perfect_answer_message)["n_tokens"] token_count = count_tokens(perfect_answer_message)["n_tokens"]
response['perfect_answer'] = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, response['perfect_answer'] = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT,
perfect_answer_message, perfect_answer_message,
token_count, token_count,
None, None,
GEN_QUESTION_TEMPERATURE) GEN_QUESTION_TEMPERATURE)
return response return response
else: else:
return { return {
@@ -495,15 +497,15 @@ def grade_speaking_task_3():
"Speaking Part 3 question: '" + item["question"] + "'") "Speaking Part 3 question: '" + item["question"] + "'")
token_count = count_tokens(perfect_answer_message)["n_tokens"] token_count = count_tokens(perfect_answer_message)["n_tokens"]
perfect_answers.append(make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, perfect_answers.append(make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT,
perfect_answer_message, perfect_answer_message,
token_count, token_count,
None, None,
GEN_QUESTION_TEMPERATURE)) GEN_QUESTION_TEMPERATURE))
message = ( message = (
"Grade this Speaking Part 3 answer according to ielts grading system and provide " "Grade this Speaking Part 3 answer according to ielts grading system and provide "
"an elaborated comment where you deep dive into what is wrong and right about the answers." "an elaborated comment where you deep dive into what is wrong and right about the answers."
"Please assign a grade of 0 if the answer provided does not address the question." "Please assign a grade of 0 if the answer provided does not address the question."
"\n\n The questions and answers are: \n\n'") "\n\n The questions and answers are: \n\n'")
formatted_text = "" formatted_text = ""
for i, entry in enumerate(answers, start=1): for i, entry in enumerate(answers, start=1):
@@ -511,9 +513,10 @@ def grade_speaking_task_3():
formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n" formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
message += formatted_text message += formatted_text
message += ("'\n\nProvide your answer on the following json format: {'comment': 'comment about answer quality', " message += (
"'overall': 0.0, 'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, " "'\n\nProvide your answer on the following json format: {'comment': 'comment about answer quality', "
"'Grammatical Range and Accuracy': 0.0, 'Pronunciation': 0.0}}") "'overall': 0.0, 'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, "
"'Grammatical Range and Accuracy': 0.0, 'Pronunciation': 0.0}}")
token_count = count_tokens(message)["n_tokens"] token_count = count_tokens(message)["n_tokens"]
response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count, response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count,
@@ -690,5 +693,14 @@ def fetch_answer_tips():
return str(e) return str(e)
@app.route('/grading_summary', methods=['POST'])
@jwt_required()
def grading_summary():
    """Compute a grading summary for the posted grading sections.

    Expects a JSON body (per the helper's contract, something like
    {'sections': [{'code': ..., 'name': ..., 'grade': ...}, ...]} —
    confirm against grading_summary/grading_summary.py) and delegates
    entirely to calculate_grading_summary.

    Returns:
        The helper's result on success, or the exception text on failure
        (same best-effort error style as the other endpoints in this file).
    """
    try:
        return calculate_grading_summary(request.get_json())
    except Exception as e:
        # NOTE(review): mirrors the file's existing endpoints, but this
        # returns the raw exception message with a 200 status — consider
        # logging and returning a 4xx/5xx JSON error instead.
        return str(e)
if __name__ == '__main__': if __name__ == '__main__':
app.run() app.run()

View File

@@ -3,7 +3,6 @@ import json
import openai import openai
import os import os
from dotenv import load_dotenv from dotenv import load_dotenv
from functools import reduce
load_dotenv() load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY") openai.api_key = os.getenv("OPENAI_API_KEY")
@@ -35,12 +34,7 @@ tools = [{
}] }]
# Input Format def calculate_grading_summary(body):
# {'sections': Array of {'code': key, 'name': name, 'grade': grade}}
# Output
# {'sections': Array of {'code': key, 'name': name, 'grade': grade, 'evaluation': evaluation, 'suggestions': suggestions}}
def grading_summary(body):
extracted_sections = extract_existing_sections_from_body(body, section_keys) extracted_sections = extract_existing_sections_from_body(body, section_keys)
ret = [] ret = []