diff --git a/app.py b/app.py index 643e465..1eb1c57 100644 --- a/app.py +++ b/app.py @@ -10,7 +10,7 @@ from helper.firebase_helper import * from helper.heygen_api import create_videos_and_save_to_db from helper.speech_to_text_helper import * from helper.token_counter import count_tokens -from helper.openai_interface import make_openai_call, make_openai_instruct_call +from helper.openai_interface import * import os import re import logging @@ -37,6 +37,7 @@ thread_event = threading.Event() logging.basicConfig(level=logging.DEBUG, # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) format='%(asctime)s - %(levelname)s - %(message)s') + @app.route('/healthcheck', methods=['GET']) def healthcheck(): return {"healthy": True} @@ -407,10 +408,10 @@ def grade_speaking_task_2(): "Speaking Part 2 question: '" + question + "'") token_count = count_tokens(perfect_answer_message)["n_tokens"] response['perfect_answer'] = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, - perfect_answer_message, - token_count, - None, - GEN_QUESTION_TEMPERATURE) + perfect_answer_message, + token_count, + None, + GEN_QUESTION_TEMPERATURE) return response else: return { @@ -495,15 +496,15 @@ def grade_speaking_task_3(): "Speaking Part 3 question: '" + item["question"] + "'") token_count = count_tokens(perfect_answer_message)["n_tokens"] perfect_answers.append(make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, - perfect_answer_message, - token_count, - None, - GEN_QUESTION_TEMPERATURE)) + perfect_answer_message, + token_count, + None, + GEN_QUESTION_TEMPERATURE)) message = ( - "Grade this Speaking Part 3 answer according to ielts grading system and provide " - "an elaborated comment where you deep dive into what is wrong and right about the answers." - "Please assign a grade of 0 if the answer provided does not address the question." 
- "\n\n The questions and answers are: \n\n'") + "Grade this Speaking Part 3 answer according to ielts grading system and provide " + "an elaborated comment where you deep dive into what is wrong and right about the answers." + "Please assign a grade of 0 if the answer provided does not address the question." + "\n\n The questions and answers are: \n\n'") formatted_text = "" for i, entry in enumerate(answers, start=1): @@ -511,9 +512,10 @@ def grade_speaking_task_3(): formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n" message += formatted_text - message += ("'\n\nProvide your answer on the following json format: {'comment': 'comment about answer quality', " - "'overall': 0.0, 'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, " - "'Grammatical Range and Accuracy': 0.0, 'Pronunciation': 0.0}}") + message += ( + "'\n\nProvide your answer on the following json format: {'comment': 'comment about answer quality', " + "'overall': 0.0, 'task_response': {'Fluency and Coherence': 0.0, 'Lexical Resource': 0.0, " + "'Grammatical Range and Accuracy': 0.0, 'Pronunciation': 0.0}}") token_count = count_tokens(message)["n_tokens"] response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count, @@ -690,5 +692,18 @@ def fetch_answer_tips(): return str(e) +@app.route('/grading_summary', methods=['POST']) +@jwt_required() +def grading_summary(): + # Body Format + # {'sections': Array of {'code': key, 'name': name, 'grade': grade}} + # Output Format + # {'sections': Array of {'code': key, 'name': name, 'grade': grade, 'evaluation': evaluation, 'suggestions': suggestions}} + try: + return calculate_grading_summary(request.get_json()) + except Exception as e: + return str(e) + + if __name__ == '__main__': app.run() diff --git a/helper/openai_interface.py b/helper/openai_interface.py index 5c6bf99..f5d9f39 100644 --- a/helper/openai_interface.py +++ b/helper/openai_interface.py @@ -16,6 +16,36 @@ TRY_LIMIT = 1 try_count = 0 +# GRADING 
SUMMARY +chat_config = {'max_tokens': 1000, 'temperature': 0.2} +section_keys = ['reading', 'listening', 'writing', 'speaking', 'level'] +grade_top_limit = 9 + +tools = [{ + "type": "function", + "function": { + "name": "save_evaluation_and_suggestions", + "description": "Saves the evaluation and suggestions requested by input.", + "parameters": { + "type": "object", + "properties": { + "evaluation": { + "type": "string", + "description": "A comment on the IELTS section grade obtained in the specific section and what it could mean without suggestions.", + }, + "suggestions": { + "type": "string", + "description": "A small paragraph text with suggestions on how to possibly get a better grade than the one obtained.", + }, + }, + "required": ["evaluation", "suggestions"], + }, + } +}] + + +### + def process_response(input_string, quotation_check_field): if '{' in input_string: try: @@ -44,6 +74,7 @@ def process_response(input_string, quotation_check_field): else: return input_string + def parse_string(to_parse: str): parsed_string = to_parse.replace("\"", "\\\"") pattern = r"(? 
0 and 'message' in response['choices'][ + 0] and 'tool_calls' in response['choices'][0]['message'] and isinstance( + response['choices'][0]['message']['tool_calls'], list) and len( + response['choices'][0]['message']['tool_calls']) > 0 and \ + response['choices'][0]['message']['tool_calls'][0]['function']['arguments']: + return json.loads(response['choices'][0]['message']['tool_calls'][0]['function']['arguments']) + else: + return {'evaluation': "", 'suggestions': ""} + + +def extract_existing_sections_from_body(my_dict, keys_to_extract): + if 'sections' in my_dict and isinstance(my_dict['sections'], list) and len(my_dict['sections']) > 0: + return list(filter( + lambda item: 'code' in item and item['code'] in keys_to_extract and 'grade' in item and 'name' in item, + my_dict['sections'])) diff --git a/postman/ielts.postman_collection.json b/postman/ielts.postman_collection.json index e270b90..78a4003 100644 --- a/postman/ielts.postman_collection.json +++ b/postman/ielts.postman_collection.json @@ -1,9 +1,9 @@ { "info": { - "_postman_id": "1b901158-4228-426a-9c96-8cedc4df8470", + "_postman_id": "9905f8e4-f3b9-45e4-8ede-434c5de11eca", "name": "ielts", "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", - "_exporter_id": "26107457" + "_exporter_id": "29491168" }, "item": [ { @@ -1104,6 +1104,53 @@ } }, "response": [] + }, + { + "name": "Get Grading Summary", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"question\": \"When did Kendrick Lamar sign for TDE?\",\n \"answer\": \"Hello GPT.\",\n\t\t\"correct_answer\": \"2005\",\n \"context\": \"Kendrick Lamar Duckworth (born June 17, 1987) is an American rapper and songwriter. 
Known for his progressive musical styles and socially conscious songwriting, he is often considered one of the most influential hip hop artists of his generation. Born and raised in Compton, California, Lamar began his career as a teenager performing under the stage name K.Dot. He quickly garnered local attention which led to him signing a recording contract with Top Dawg Entertainment (TDE) in 2005.\"\n}\n", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/grading_summary", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "grading_summary" + ] + } + }, + "response": [] + } ] } \ No newline at end of file