Refactored the grading summary so it fits into the previously existing files.

This commit is contained in:
Cristiano Ferreira
2024-01-07 19:36:57 +00:00
parent 046606a8ec
commit 75df686cd1
4 changed files with 84 additions and 91 deletions

3
app.py
View File

@@ -10,8 +10,7 @@ from helper.firebase_helper import *
from helper.heygen_api import create_videos_and_save_to_db from helper.heygen_api import create_videos_and_save_to_db
from helper.speech_to_text_helper import * from helper.speech_to_text_helper import *
from helper.token_counter import count_tokens from helper.token_counter import count_tokens
from helper.openai_interface import make_openai_call, make_openai_instruct_call from helper.openai_interface import *
from grading_summary.grading_summary import calculate_grading_summary
import os import os
import re import re
import logging import logging

View File

@@ -1,88 +0,0 @@
import json
import openai
import os
from dotenv import load_dotenv
load_dotenv()  # Pull OPENAI_API_KEY (and any other env vars) from a local .env file.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Shared OpenAI chat parameters; low temperature keeps grading text consistent.
chat_config = {'max_tokens': 1000, 'temperature': 0.2}
# Section codes this module knows how to summarise.
section_keys = ['reading', 'listening', 'writing', 'speaking', 'level']
# Top IELTS band score.  NOTE(review): not referenced anywhere in this module — confirm before removing.
grade_top_limit = 9
# OpenAI tool (function-calling) schema: forces the model to answer with a
# structured {evaluation, suggestions} payload instead of free-form text.
tools = [{
    "type": "function",
    "function": {
        "name": "save_evaluation_and_suggestions",
        "description": "Saves the evaluation and suggestions requested by input.",
        "parameters": {
            "type": "object",
            "properties": {
                "evaluation": {
                    "type": "string",
                    "description": "A comment on the IELTS section grade obtained in the specific section and what it could mean without suggestions.",
                },
                "suggestions": {
                    "type": "string",
                    "description": "A small paragraph text with suggestions on how to possibly get a better grade than the one obtained.",
                },
            },
            "required": ["evaluation", "suggestions"],
        },
    }
}]
def calculate_grading_summary(body):
    """Build the grading-summary payload for every known section in *body*.

    Each entry pairs the section's code/name/grade with the AI-generated
    evaluation and suggestions text.
    """
    summaries = []
    for section in extract_existing_sections_from_body(body, section_keys):
        ai_feedback = calculate_section_grade_summary(section)
        summaries.append({
            'code': section['code'],
            'name': section['name'],
            'grade': section['grade'],
            'evaluation': ai_feedback['evaluation'],
            'suggestions': ai_feedback['suggestions'],
        })
    return {'sections': summaries}
def calculate_section_grade_summary(section):
    """Ask OpenAI for an {evaluation, suggestions} pair for one section grade.

    *section* must carry 'name' and 'grade' keys; the response is parsed by
    parse_openai_response, which falls back to empty strings on failure.
    """
    prompt_messages = [
        {
            "role": "user",
            "content": "You are a IELTS test section grade evaluator. You will receive a IELTS test section name and the grade obtained in the section. You should offer a comment on this grade with also suggestions on how to possibly get a better grade.",
        },
        {
            "role": "user",
            "content": "Section: " + str(section['name']) + " Grade: " + str(section['grade']),
        },
        {"role": "user", "content": "Speak in third person."},
        {"role": "user", "content": "Please save the evaluation and suggestions generated."},
    ]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=chat_config['max_tokens'],
        temperature=chat_config['temperature'],
        tools=tools,
        messages=prompt_messages,
    )
    return parse_openai_response(response)
def parse_openai_response(response):
    """Extract the tool-call arguments from an OpenAI chat response.

    Returns the parsed ``{'evaluation': ..., 'suggestions': ...}`` dict when
    the response carries a tool call with non-empty JSON arguments; otherwise
    a dict with empty strings so callers never have to null-check.
    """
    try:
        # EAFP: drill straight down to the arguments instead of the original
        # five-way chained membership/length check, which was unreadable.
        arguments = response['choices'][0]['message']['tool_calls'][0]['function']['arguments']
    except (KeyError, IndexError, TypeError):
        arguments = None
    if not arguments:
        return {'evaluation': "", 'suggestions': ""}
    return json.loads(arguments)
def extract_existing_sections_from_body(my_dict, keys_to_extract):
    """Return the sections of *my_dict* whose 'code' is in *keys_to_extract*.

    Only sections that carry 'code', 'grade' and 'name' keys are kept.

    Always returns a list: the original fell through and returned ``None``
    when 'sections' was missing, not a list, or empty, which made
    ``calculate_grading_summary`` crash with ``TypeError`` while iterating.
    """
    sections = my_dict.get('sections')
    if not isinstance(sections, list):
        return []
    return [
        item for item in sections
        if 'code' in item and item['code'] in keys_to_extract
        and 'grade' in item and 'name' in item
    ]

View File

@@ -16,6 +16,33 @@ TRY_LIMIT = 1
try_count = 0 try_count = 0
# GRADING SUMMARY
# Shared OpenAI chat parameters; low temperature keeps grading text consistent.
chat_config = {'max_tokens': 1000, 'temperature': 0.2}
# Section codes the grading summary knows how to summarise.
section_keys = ['reading', 'listening', 'writing', 'speaking', 'level']
# Top IELTS band score.  NOTE(review): not referenced in the visible code — confirm before removing.
grade_top_limit = 9
# OpenAI tool (function-calling) schema: forces the model to answer with a
# structured {evaluation, suggestions} payload instead of free-form text.
tools = [{
    "type": "function",
    "function": {
        "name": "save_evaluation_and_suggestions",
        "description": "Saves the evaluation and suggestions requested by input.",
        "parameters": {
            "type": "object",
            "properties": {
                "evaluation": {
                    "type": "string",
                    "description": "A comment on the IELTS section grade obtained in the specific section and what it could mean without suggestions.",
                },
                "suggestions": {
                    "type": "string",
                    "description": "A small paragraph text with suggestions on how to possibly get a better grade than the one obtained.",
                },
            },
            "required": ["evaluation", "suggestions"],
        },
    }
}]
###
def process_response(input_string, quotation_check_field): def process_response(input_string, quotation_check_field):
if '{' in input_string: if '{' in input_string:
try: try:
@@ -141,3 +168,58 @@ def make_openai_instruct_call(model, message: str, token_count, fields_to_check,
else: else:
try_count = 0 try_count = 0
return processed_response return processed_response
# GRADING SUMMARY
def calculate_grading_summary(body):
    """Build the grading-summary payload for every known section in *body*.

    Each entry pairs the section's code/name/grade with the AI-generated
    evaluation and suggestions text.
    """
    summaries = []
    for section in extract_existing_sections_from_body(body, section_keys):
        ai_feedback = calculate_section_grade_summary(section)
        summaries.append({
            'code': section['code'],
            'name': section['name'],
            'grade': section['grade'],
            'evaluation': ai_feedback['evaluation'],
            'suggestions': ai_feedback['suggestions'],
        })
    return {'sections': summaries}
def calculate_section_grade_summary(section):
    """Ask OpenAI for an {evaluation, suggestions} pair for one section grade.

    *section* must carry 'name' and 'grade' keys; the response is parsed by
    parse_openai_response, which falls back to empty strings on failure.
    """
    prompt_messages = [
        {
            "role": "user",
            "content": "You are a IELTS test section grade evaluator. You will receive a IELTS test section name and the grade obtained in the section. You should offer a comment on this grade with also suggestions on how to possibly get a better grade.",
        },
        {
            "role": "user",
            "content": "Section: " + str(section['name']) + " Grade: " + str(section['grade']),
        },
        {"role": "user", "content": "Speak in third person."},
        {"role": "user", "content": "Please save the evaluation and suggestions generated."},
    ]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=chat_config['max_tokens'],
        temperature=chat_config['temperature'],
        tools=tools,
        messages=prompt_messages,
    )
    return parse_openai_response(response)
def parse_openai_response(response):
    """Extract the tool-call arguments from an OpenAI chat response.

    Returns the parsed ``{'evaluation': ..., 'suggestions': ...}`` dict when
    the response carries a tool call with non-empty JSON arguments; otherwise
    a dict with empty strings so callers never have to null-check.
    """
    try:
        # EAFP: drill straight down to the arguments instead of the original
        # five-way chained membership/length check, which was unreadable.
        arguments = response['choices'][0]['message']['tool_calls'][0]['function']['arguments']
    except (KeyError, IndexError, TypeError):
        arguments = None
    if not arguments:
        return {'evaluation': "", 'suggestions': ""}
    return json.loads(arguments)
def extract_existing_sections_from_body(my_dict, keys_to_extract):
    """Return the sections of *my_dict* whose 'code' is in *keys_to_extract*.

    Only sections that carry 'code', 'grade' and 'name' keys are kept.

    Always returns a list: the original fell through and returned ``None``
    when 'sections' was missing, not a list, or empty, which made
    ``calculate_grading_summary`` crash with ``TypeError`` while iterating.
    """
    sections = my_dict.get('sections')
    if not isinstance(sections, list):
        return []
    return [
        item for item in sections
        if 'code' in item and item['code'] in keys_to_extract
        and 'grade' in item and 'name' in item
    ]

View File

@@ -1106,7 +1106,7 @@
"response": [] "response": []
}, },
{ {
"name": "Fetch Answer Tips Copy", "name": "Get Grading Summary",
"request": { "request": {
"auth": { "auth": {
"type": "bearer", "type": "bearer",