Files
encoach_backend/app.py
2024-07-31 14:56:33 +01:00

1622 lines
69 KiB
Python

import threading
from functools import reduce
import firebase_admin
from firebase_admin import credentials
from flask import Flask, request
from flask_jwt_extended import JWTManager, jwt_required
from sentence_transformers import SentenceTransformer
from helper.api_messages import *
from helper.exam_variant import ExamVariant
from helper.exercises import *
from helper.file_helper import delete_files_older_than_one_day
from helper.firebase_helper import *
from helper.gpt_zero import GPTZero
from helper.heygen_api import create_video, create_videos_and_save_to_db
from helper.openai_interface import *
from helper.question_templates import *
from helper.speech_to_text_helper import *
from heygen.AvatarEnum import AvatarEnum
from training_content import TrainingContentService, TrainingContentKnowledgeBase, GPT
# --- Application bootstrap (runs once at import time; order matters) ---
load_dotenv()  # pull configuration from a local .env file into the environment
app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = os.getenv("JWT_SECRET_KEY")
jwt = JWTManager(app)  # enables the @jwt_required() guard used on routes below
# Initialize Firebase Admin SDK
cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS"))
FIREBASE_BUCKET = os.getenv('FIREBASE_BUCKET')  # bucket used for audio uploads/downloads
firebase_admin.initialize_app(cred)
# AI-content detector used by the writing graders
gpt_zero = GPTZero(os.getenv('GPT_ZERO_API_KEY'))
# Training Content Dependencies
embeddings = SentenceTransformer('all-MiniLM-L6-v2')  # sentence-embedding model backing the knowledge base
kb = TrainingContentKnowledgeBase(embeddings)
kb.load_indices_and_metadata()  # load persisted vector indices before serving requests
open_ai = GPT(OpenAI())
firestore_client = firestore.client()
tc_service = TrainingContentService(kb, open_ai, firestore_client)
# NOTE(review): not used in the visible routes — presumably coordinates background jobs; confirm.
thread_event = threading.Event()
# Configure logging
logging.basicConfig(level=logging.DEBUG,  # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
                    format='%(asctime)s - %(levelname)s - %(message)s')
@app.route('/healthcheck', methods=['GET'])
def healthcheck():
    """Liveness probe: always reports the service as healthy."""
    status = {"healthy": True}
    return status
@app.route('/listening_section_1', methods=['GET'])
@jwt_required()
def get_listening_section_1_question():
    """Generate a Listening Section 1 exercise set from a two-person conversation.

    Optional query params: ``topic``, repeated ``exercises``, ``difficulty``
    (random fallbacks when omitted). Returns exercises, the conversation text
    and the difficulty; on failure the exception text is returned.
    """
    try:
        # Housekeeping: purge stale generated audio before creating new content.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(two_people_scenarios))
        requested_types = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not requested_types:
            requested_types = random.sample(LISTENING_1_EXERCISE_TYPES, 1)
        # Spread the section's total question count across the chosen exercise types.
        per_type_counts = divide_number_into_parts(
            TOTAL_LISTENING_SECTION_1_EXERCISES, len(requested_types))
        conversation = generate_listening_1_conversation(topic)
        app.logger.info("Generated conversation: " + str(conversation))
        exercises = generate_listening_conversation_exercises(
            parse_conversation(conversation),
            requested_types,
            per_type_counts,
            1,  # section 1 question ids start at 1
            difficulty,
        )
        payload = {"exercises": exercises, "text": conversation, "difficulty": difficulty}
        return payload
    except Exception as e:
        return str(e)
@app.route('/listening_section_2', methods=['GET'])
@jwt_required()
def get_listening_section_2_question():
    """Generate a Listening Section 2 exercise set from a social monologue.

    Optional query params: ``topic``, repeated ``exercises``, ``difficulty``
    (random fallbacks when omitted). Returns exercises, the monologue text
    and the difficulty; on failure the exception text is returned.
    """
    try:
        # Housekeeping: purge stale generated audio before creating new content.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(social_monologue_contexts))
        requested_types = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not requested_types:
            requested_types = random.sample(LISTENING_2_EXERCISE_TYPES, 2)
        # Spread the section's total question count across the chosen exercise types.
        per_type_counts = divide_number_into_parts(
            TOTAL_LISTENING_SECTION_2_EXERCISES, len(requested_types))
        monologue = generate_listening_2_monologue(topic)
        app.logger.info("Generated monologue: " + str(monologue))
        exercises = generate_listening_monologue_exercises(
            str(monologue),
            requested_types,
            per_type_counts,
            11,  # section 2 question ids start at 11
            difficulty,
        )
        payload = {"exercises": exercises, "text": monologue, "difficulty": difficulty}
        return payload
    except Exception as e:
        return str(e)
@app.route('/listening_section_3', methods=['GET'])
@jwt_required()
def get_listening_section_3_question():
    """Generate a Listening Section 3 exercise set from a four-person conversation.

    Optional query params: ``topic``, repeated ``exercises``, ``difficulty``
    (random fallbacks when omitted). Returns exercises, the conversation text
    and the difficulty; on failure the exception text is returned.
    """
    try:
        # Housekeeping: purge stale generated audio before creating new content.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(four_people_scenarios))
        requested_types = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not requested_types:
            requested_types = random.sample(LISTENING_3_EXERCISE_TYPES, 1)
        # Spread the section's total question count across the chosen exercise types.
        per_type_counts = divide_number_into_parts(
            TOTAL_LISTENING_SECTION_3_EXERCISES, len(requested_types))
        conversation = generate_listening_3_conversation(topic)
        app.logger.info("Generated conversation: " + str(conversation))
        exercises = generate_listening_conversation_exercises(
            parse_conversation(conversation),
            requested_types,
            per_type_counts,
            21,  # section 3 question ids start at 21
            difficulty,
        )
        payload = {"exercises": exercises, "text": conversation, "difficulty": difficulty}
        return payload
    except Exception as e:
        return str(e)
@app.route('/listening_section_4', methods=['GET'])
@jwt_required()
def get_listening_section_4_question():
    """Generate a Listening Section 4 exercise set from an academic monologue.

    Optional query params: ``topic``, repeated ``exercises``, ``difficulty``
    (random fallbacks when omitted). Returns exercises, the monologue text
    and the difficulty; on failure the exception text is returned.
    """
    try:
        # Housekeeping: purge stale generated audio before creating new content.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Extract parameters from the URL query string
        topic = request.args.get('topic', default=random.choice(academic_subjects))
        requested_types = request.args.getlist('exercises')
        difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        if not requested_types:
            requested_types = random.sample(LISTENING_EXERCISE_TYPES, 2)
        # Spread the section's total question count across the chosen exercise types.
        per_type_counts = divide_number_into_parts(
            TOTAL_LISTENING_SECTION_4_EXERCISES, len(requested_types))
        monologue = generate_listening_4_monologue(topic)
        app.logger.info("Generated monologue: " + str(monologue))
        exercises = generate_listening_monologue_exercises(
            str(monologue),
            requested_types,
            per_type_counts,
            31,  # section 4 question ids start at 31
            difficulty,
        )
        payload = {"exercises": exercises, "text": monologue, "difficulty": difficulty}
        return payload
    except Exception as e:
        return str(e)
@app.route('/listening', methods=['POST'])
@jwt_required()
def save_listening():
    """Persist a listening exam: synthesise audio for each part, upload it to
    Firebase storage, and save the assembled exam template to the database.

    JSON body: ``parts`` (list of parts with "text" and "exercises"),
    optional ``minTimer`` and ``difficulty``. Returns the saved template
    with its id; on failure the exception text is returned.
    """
    try:
        data = request.get_json()
        parts = data.get('parts')
        minTimer = data.get('minTimer', LISTENING_MIN_TIMER_DEFAULT)
        difficulty = data.get('difficulty', random.choice(difficulties))
        template = getListeningTemplate()
        template['difficulty'] = difficulty
        exam_id = str(uuid.uuid4())  # renamed from `id` to avoid shadowing the builtin
        for part in parts:  # original enumerate() index was never used
            part_template = getListeningPartTemplate()
            file_name = str(uuid.uuid4()) + ".mp3"
            sound_file_path = AUDIO_FILES_PATH + file_name
            firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
            # Conversations go through the multi-voice synthesiser; plain text
            # (monologues) through the single-voice one.
            if "conversation" in part["text"]:
                conversation_text_to_speech(part["text"]["conversation"], sound_file_path)
            else:
                text_to_speech(part["text"], sound_file_path)
            file_url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
            part_template["audio"]["source"] = file_url
            part_template["exercises"] = part["exercises"]
            template['parts'].append(part_template)
        # A non-default timer marks a partial (practice) exam variant.
        if minTimer != LISTENING_MIN_TIMER_DEFAULT:
            template["minTimer"] = minTimer
            template["variant"] = ExamVariant.PARTIAL.value
        else:
            template["variant"] = ExamVariant.FULL.value
        (result, exam_id) = save_to_db_with_id("listening", template, exam_id)
        if result:
            return {**template, "id": exam_id}
        else:
            # BUG FIX: the original concatenated a str with a list
            # ("Failed to save question: " + parts), which raised TypeError
            # instead of the intended failure message.
            raise Exception("Failed to save question: " + str(parts))
    except Exception as e:
        return str(e)
@app.route('/writing_task1', methods=['POST'])
@jwt_required()
def grade_writing_task_1():
    """Grade an IELTS Writing Task 1 answer via the OpenAI API.

    JSON body: ``question`` and ``answer``. Short or non-English answers get a
    fixed zero-band response; otherwise the model returns per-criterion grades,
    to which a model-written perfect answer, a corrected text and (when
    available) a GPTZero AI-detection result are added. On failure the
    exception text is returned (with HTTP 200).
    """
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        if not has_words(answer):
            # No (or too few) English words detected: return a fixed zero grade
            # without spending an OpenAI call.
            return {
                'comment': "The answer does not contain enough english words.",
                'overall': 0,
                'task_response': {
                    'Task Achievement': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Coherence and Cohesion': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Lexical Resource': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Grammatical Range and Accuracy': {
                        "grade": 0.0,
                        "comment": ""
                    }
                }
            }
        elif not has_x_words(answer, 100):
            # Under the 100-word threshold for Task 1: too short to grade.
            return {
                'comment': "The answer is insufficient and too small to be graded.",
                'overall': 0,
                'task_response': {
                    'Task Achievement': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Coherence and Cohesion': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Lexical Resource': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Grammatical Range and Accuracy': {
                        "grade": 0.0,
                        "comment": ""
                    }
                }
            }
        else:
            # Shape the model's JSON output by embedding the schema in the
            # system prompt.
            json_format = {
                "comment": "comment about student's response quality",
                "overall": 0.0,
                "task_response": {
                    "Task Achievement": {
                        "grade": 0.0,
                        "comment": "comment about Task Achievement of the student's response"
                    },
                    "Coherence and Cohesion": {
                        "grade": 0.0,
                        "comment": "comment about Coherence and Cohesion of the student's response"
                    },
                    "Lexical Resource": {
                        "grade": 0.0,
                        "comment": "comment about Lexical Resource of the student's response"
                    },
                    "Grammatical Range and Accuracy": {
                        "grade": 0.0,
                        "comment": "comment about Grammatical Range and Accuracy of the student's response"
                    }
                }
            }
            messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: ' + str(
                        json_format))
                },
                {
                    "role": "user",
                    "content": ('Evaluate the given Writing Task 1 response based on the IELTS grading system, '
                                'ensuring a strict assessment that penalizes errors. Deduct points for deviations '
                                'from the task, and assign a score of 0 if the response fails to address the question. '
                                'Additionally, provide a detailed commentary highlighting both strengths and '
                                'weaknesses in the response. '
                                '\n Question: "' + question + '" \n Answer: "' + answer + '"')
                }
            ]
            token_count = count_total_tokens(messages)
            response = make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                        ["comment"],
                                        GRADING_TEMPERATURE)
            # Enrich the grading with a 150-word model answer and corrections.
            response["perfect_answer"] = get_perfect_answer(question, 150)["perfect_answer"]
            # Keep the overall band consistent with the per-criterion grades.
            response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
            response['fixed_text'] = get_fixed_text(answer)
            # AI-detection is best-effort: only attached when GPTZero answered.
            ai_detection = gpt_zero.run_detection(answer)
            if ai_detection is not None:
                response['ai_detection'] = ai_detection
            return response
    except Exception as e:
        return str(e)
@app.route('/writing_task1_general', methods=['GET'])
@jwt_required()
def get_writing_task_1_general_question():
    """Generate an IELTS Writing Task 1 (General Training) letter prompt.

    Query params: optional ``difficulty`` and ``topic`` (random when omitted).
    Returns a dict with "question", "difficulty" and "topic"; on failure the
    exception text is returned.
    """
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        messages = [
            {
                "role": "system",
                "content": ('You are a helpful assistant designed to output JSON on this format: '
                            '{"prompt": "prompt content"}')
            },
            {
                "role": "user",
                "content": ('Craft a prompt for an IELTS Writing Task 1 General Training exercise that instructs the '
                            'student to compose a letter. The prompt should present a specific scenario or situation, '
                            'based on the topic of "' + topic + '", requiring the student to provide information, '
                            'advice, or instructions within the letter. '
                            'Make sure that the generated prompt is '
                            # BUG FIX: original read "of " + difficulty + "difficulty",
                            # producing e.g. "of easydifficulty" in the model prompt.
                            'of ' + difficulty + ' difficulty and does not contain '
                            'forbidden subjects in muslim '
                            'countries.')
            },
            {
                "role": "user",
                "content": 'The prompt should end with "In the letter you should" followed by 3 bullet points of what '
                           'the answer should include.'
            }
        ]
        token_count = count_total_tokens(messages)
        response = make_openai_call(GPT_3_5_TURBO, messages, token_count, "prompt",
                                    GEN_QUESTION_TEMPERATURE)
        return {
            # Put each generated bullet point on its own line.
            "question": add_newline_before_hyphen(response["prompt"].strip()),
            "difficulty": difficulty,
            "topic": topic
        }
    except Exception as e:
        return str(e)
def add_newline_before_hyphen(s):
    """Put every " -" bullet onto its own line by replacing the space with a newline."""
    return "\n-".join(s.split(" -"))
@app.route('/writing_task2', methods=['POST'])
@jwt_required()
def grade_writing_task_2():
    """Grade an IELTS Writing Task 2 answer via the OpenAI API.

    JSON body: ``question`` and ``answer``. Short or non-English answers get a
    fixed zero-band response; otherwise the model returns per-criterion grades,
    to which a model-written perfect answer, a corrected text and (when
    available) a GPTZero AI-detection result are added. On failure the
    exception text is returned (with HTTP 200).
    """
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        if not has_words(answer):
            # No (or too few) English words detected: return a fixed zero grade
            # without spending an OpenAI call.
            return {
                'comment': "The answer does not contain enough english words.",
                'overall': 0,
                'task_response': {
                    'Task Achievement': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Coherence and Cohesion': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Lexical Resource': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Grammatical Range and Accuracy': {
                        "grade": 0.0,
                        "comment": ""
                    }
                }
            }
        elif not has_x_words(answer, 180):
            # Under the 180-word threshold for Task 2: too short to grade.
            return {
                'comment': "The answer is insufficient and too small to be graded.",
                'overall': 0,
                'task_response': {
                    'Task Achievement': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Coherence and Cohesion': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Lexical Resource': {
                        "grade": 0.0,
                        "comment": ""
                    },
                    'Grammatical Range and Accuracy': {
                        "grade": 0.0,
                        "comment": ""
                    }
                }
            }
        else:
            # Shape the model's JSON output by embedding the schema in the
            # system prompt.
            json_format = {
                "comment": "comment about student's response quality",
                "overall": 0.0,
                "task_response": {
                    "Task Achievement": {
                        "grade": 0.0,
                        "comment": "comment about Task Achievement of the student's response"
                    },
                    "Coherence and Cohesion": {
                        "grade": 0.0,
                        "comment": "comment about Coherence and Cohesion of the student's response"
                    },
                    "Lexical Resource": {
                        "grade": 0.0,
                        "comment": "comment about Lexical Resource of the student's response"
                    },
                    "Grammatical Range and Accuracy": {
                        "grade": 0.0,
                        "comment": "comment about Grammatical Range and Accuracy of the student's response"
                    }
                }
            }
            messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: ' + str(
                        json_format))
                },
                {
                    "role": "user",
                    "content": (
                        'Evaluate the given Writing Task 2 response based on the IELTS grading system, ensuring a '
                        'strict assessment that penalizes errors. Deduct points for deviations from the task, and '
                        'assign a score of 0 if the response fails to address the question. Additionally, provide'
                        ' a detailed commentary highlighting '
                        'both strengths and weaknesses in the response.'
                        '\n Question: "' + question + '" \n Answer: "' + answer + '"')
                }
            ]
            token_count = count_total_tokens(messages)
            # NOTE(review): this grader uses GEN_QUESTION_TEMPERATURE while the
            # Task 1 grader uses GRADING_TEMPERATURE — confirm this is intended.
            response = make_openai_call(GPT_4_O, messages, token_count, ["comment"],
                                        GEN_QUESTION_TEMPERATURE)
            # Enrich the grading with a 250-word model answer and corrections.
            response["perfect_answer"] = get_perfect_answer(question, 250)["perfect_answer"]
            # Keep the overall band consistent with the per-criterion grades.
            response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
            response['fixed_text'] = get_fixed_text(answer)
            # AI-detection is best-effort: only attached when GPTZero answered.
            ai_detection = gpt_zero.run_detection(answer)
            if ai_detection is not None:
                response['ai_detection'] = ai_detection
            return response
    except Exception as e:
        return str(e)
def fix_writing_overall(overall: float, task_response: dict):
    """Sanity-check the model's overall writing band against its criterion grades.

    If ``overall`` falls outside the [min, max] range of the per-criterion
    grades in ``task_response``, replace it with the grades' average rounded
    to zero decimal places; otherwise return it unchanged.
    """
    grades = [criterion["grade"] for criterion in task_response.values()]
    is_plausible = min(grades) <= overall <= max(grades)
    if is_plausible:
        return overall
    mean_grade = sum(grades) / len(grades)
    return round(mean_grade, 0)
@app.route('/writing_task2_general', methods=['GET'])
@jwt_required()
def get_writing_task_2_general_question():
    """Generate an IELTS Writing Task 2 (General Training) discussion question.

    Query params: optional ``difficulty`` and ``topic`` (random when omitted).
    Returns a dict with "question", "difficulty" and "topic"; on failure the
    exception text is returned.
    """
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        messages = [
            {
                "role": "system",
                "content": ('You are a helpful assistant designed to output JSON on this format: '
                            '{"prompt": "prompt content"}')
            },
            {
                "role": "user",
                "content": (
                    # BUG FIX: original read difficulty + 'difficulty like ...',
                    # producing e.g. "of easydifficulty like" in the model prompt.
                    'Craft a comprehensive question of ' + difficulty + ' difficulty like the ones for IELTS Writing Task 2 General Training that directs the candidate '
                    'to delve into an in-depth analysis of contrasting perspectives on the topic of "' + topic + '". '
                    'The candidate should be asked to discuss the strengths and weaknesses of both viewpoints.')
            },
            {
                "role": "user",
                "content": 'The question should lead to an answer with either "theories", "complicated information" or '
                           'be "very descriptive" on the topic.'
            }
        ]
        token_count = count_total_tokens(messages)
        response = make_openai_call(GPT_4_O, messages, token_count, "prompt", GEN_QUESTION_TEMPERATURE)
        return {
            "question": response["prompt"].strip(),
            "difficulty": difficulty,
            "topic": topic
        }
    except Exception as e:
        return str(e)
@app.route('/speaking_task_1', methods=['POST'])
@jwt_required()
def grade_speaking_task_1():
    """Grade a set of recorded IELTS Speaking Part 1 answers.

    JSON body: ``answers`` — a list of {"question", "answer"} items where
    "answer" is a Firebase storage path to an audio recording. Each recording
    is downloaded, transcribed, checked for a minimum length, and a model
    "perfect answer" is generated per question; all transcripts are then
    graded together in one OpenAI call. Returns the grading response enriched
    with perfect answers, transcripts and corrected texts, or (error text, 400).
    """
    request_id = uuid.uuid4()  # correlation id threaded through every log line
    delete_files_older_than_one_day(AUDIO_FILES_PATH)  # purge stale temp audio
    logging.info("POST - speaking_task_1 - Received request to grade speaking task 1. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        answers = data.get('answers')
        text_answers = []
        perfect_answers = []
        logging.info("POST - speaking_task_1 - " + str(
            request_id) + " - Received " + str(len(answers)) + " total answers.")
        for item in answers:
            # Download each recording to a unique temp file, transcribe it,
            # then delete the temp file.
            sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Downloading file " + item["answer"])
            download_firebase_file(FIREBASE_BUCKET, item["answer"], sound_file_name)
            logging.info("POST - speaking_task_1 - " + str(
                request_id) + " - Downloaded file " + item["answer"] + " to " + sound_file_name)
            answer_text = speech_to_text(sound_file_name)
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Transcripted answer: " + answer_text)
            text_answers.append(answer_text)
            # Replace the storage path with the transcript so the grading
            # prompt below can use item["answer"] directly.
            item["answer"] = answer_text
            os.remove(sound_file_name)
            if not has_x_words(answer_text, 20):
                # Any single too-short answer short-circuits the whole request
                # with a fixed zero grade.
                logging.info("POST - speaking_task_1 - " + str(
                    request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer_text)
                return {
                    "comment": "The audio recorded does not contain enough english words to be graded.",
                    "overall": 0,
                    "task_response": {
                        "Fluency and Coherence": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Lexical Resource": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Grammatical Range and Accuracy": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Pronunciation": {
                            "grade": 0.0,
                            "comment": ""
                        }
                    }
                }
            perfect_answer_messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: '
                                '{"answer": "perfect answer"}')
                },
                {
                    "role": "user",
                    "content": (
                        'Provide a perfect answer according to ielts grading system to the following '
                        'Speaking Part 1 question: "' + item["question"] + '"')
                },
                {
                    "role": "user",
                    "content": 'The answer must be 2 or 3 sentences long.'
                }
            ]
            token_count = count_total_tokens(perfect_answer_messages)
            logging.info("POST - speaking_task_1 - " + str(
                request_id) + " - Requesting perfect answer for question: " + item["question"])
            # NOTE(review): the full JSON dict is appended here (not ["answer"]),
            # unlike the Part 2 grader which extracts the "answer" field — confirm
            # clients expect {"answer": ...} objects in perfect_answer_N.
            perfect_answers.append(make_openai_call(GPT_4_O,
                                                    perfect_answer_messages,
                                                    token_count,
                                                    ["answer"],
                                                    GEN_QUESTION_TEMPERATURE))
        # Schema embedded in the system prompt to shape the model's JSON output.
        json_format = {
            "comment": "comment about answers quality",
            "overall": 0.0,
            "task_response": {
                "Fluency and Coherence": {
                    "grade": 0.0,
                    "comment": "comment about fluency and coherence"
                },
                "Lexical Resource": {
                    "grade": 0.0,
                    "comment": "comment about lexical resource"
                },
                "Grammatical Range and Accuracy": {
                    "grade": 0.0,
                    "comment": "comment about grammatical range and accuracy"
                },
                "Pronunciation": {
                    "grade": 0.0,
                    "comment": "comment about pronunciation on the transcribed answers"
                }
            }
        }
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Formatting answers and questions for prompt.")
        # Lay out all Q/A pairs as one markdown-style text block for the prompt.
        formatted_text = ""
        for i, entry in enumerate(answers, start=1):
            formatted_text += f"**Question {i}:**\n{entry['question']}\n\n"
            formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
        logging.info("POST - speaking_task_1 - " + str(
            request_id) + " - Formatted answers and questions for prompt: " + formatted_text)
        grade_message = (
            'Evaluate the given Speaking Part 1 response based on the IELTS grading system, ensuring a '
            'strict assessment that penalizes errors. Deduct points for deviations from the task, and '
            'assign a score of 0 if the response fails to address the question. Additionally, provide '
            'detailed commentary highlighting both strengths and weaknesses in the response.'
            "\n\n The questions and answers are: \n\n'" + formatted_text)
        messages = [
            {
                "role": "system",
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
            },
            {
                "role": "user",
                "content": grade_message
            },
            {
                "role": "user",
                "content": 'Address the student as "you". If the answers are not 2 or 3 sentences long, warn the '
                           'student that they should be.'
            },
            {
                "role": "user",
                "content": 'For pronunciations act as if you heard the answers and they were transcripted as you heard them.'
            },
            {
                "role": "user",
                "content": 'The comments must be long, detailed, justify the grading and suggest improvements.'
            }
        ]
        token_count = count_total_tokens(messages)
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Requesting grading of the answer.")
        response = make_openai_call(GPT_4_O, messages, token_count, ["comment"],
                                    GRADING_TEMPERATURE)
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Answers graded: " + str(response))
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Adding perfect answers to response.")
        for i, answer in enumerate(perfect_answers, start=1):
            response['perfect_answer_' + str(i)] = answer
        logging.info("POST - speaking_task_1 - " + str(
            request_id) + " - Adding transcript and fixed texts to response.")
        for i, answer in enumerate(text_answers, start=1):
            response['transcript_' + str(i)] = answer
            response['fixed_text_' + str(i)] = get_speaking_corrections(answer)
        # Keep the overall band consistent with the per-criterion grades.
        response["overall"] = fix_speaking_overall(response["overall"], response["task_response"])
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Final response: " + str(response))
        return response
    except Exception as e:
        return str(e), 400
@app.route('/speaking_task_1', methods=['GET'])
@jwt_required()
def get_speaking_task_1_question():
    """Generate five IELTS Speaking Part 1 questions across two topics.

    Query params: optional ``difficulty`` (default "easy"), ``first_topic``
    and ``second_topic`` (random when omitted). Returns the model's JSON
    response annotated with type=1 and the difficulty; on failure the
    exception text is returned.
    """
    difficulty = request.args.get("difficulty", default="easy")
    first_topic = request.args.get("first_topic", default=random.choice(mti_topics))
    second_topic = request.args.get("second_topic", default=random.choice(mti_topics))
    # Schema embedded in the system prompt to shape the model's JSON output.
    json_format = {
        "first_topic": "topic 1",
        "second_topic": "topic 2",
        "questions": [
            "Introductory question, should start with a greeting and introduce a question about the first topic, starting the topic with 'Let's talk about x' and then the question.",
            "Follow up question about the first topic",
            "Follow up question about the first topic",
            "Question about second topic",
            "Follow up question about the second topic",
        ]
    }
    try:
        system_message = {
            "role": "system",
            "content": 'You are a helpful assistant designed to output JSON on this format: ' + str(json_format)
        }
        generation_message = {
            "role": "user",
            "content": ('Craft 5 simple questions of easy difficulty for IELTS Speaking Part 1 that encourages '
                        'candidates to delve deeply into personal experiences, preferences, or insights on the topic '
                        'of "' + first_topic + '" and the topic of "' + second_topic + '". Instruct the candidate '
                        'to offer not only detailed descriptions but also provide nuanced explanations, examples, '
                        'or anecdotes to enrich their response. Make sure that the generated question '
                        'does not contain forbidden subjects in muslim countries.')
        }
        tense_message = {
            "role": "user",
            "content": 'The questions should lead to the usage of 4 verb tenses (present perfect, present, past and future).'
        }
        messages = [system_message, generation_message, tense_message]
        response = make_openai_call(GPT_4_O, messages, count_total_tokens(messages), ["first_topic"],
                                    GEN_QUESTION_TEMPERATURE)
        response["type"] = 1
        response["difficulty"] = difficulty
        return response
    except Exception as e:
        return str(e)
@app.route('/speaking_task_2', methods=['POST'])
@jwt_required()
def grade_speaking_task_2():
    """Grade a recorded IELTS Speaking Part 2 answer.

    JSON body: ``question`` and ``answer`` (a Firebase storage path to an
    audio recording). The recording is downloaded and transcribed; answers of
    20+ words are graded by the model and enriched with a perfect answer, the
    transcript and a corrected text, while shorter ones get a fixed zero
    grade. Returns the grading dict, or (error text, 400) on failure.
    """
    request_id = uuid.uuid4()  # correlation id threaded through every log line
    delete_files_older_than_one_day(AUDIO_FILES_PATH)  # purge stale temp audio
    sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())  # temp download target
    logging.info("POST - speaking_task_2 - Received request to grade speaking task 2. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        question = data.get('question')
        answer_firebase_path = data.get('answer')
        logging.info("POST - speaking_task_2 - " + str(request_id) + " - Downloading file " + answer_firebase_path)
        download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
        logging.info("POST - speaking_task_2 - " + str(
            request_id) + " - Downloaded file " + answer_firebase_path + " to " + sound_file_name)
        answer = speech_to_text(sound_file_name)
        logging.info("POST - speaking_task_2 - " + str(request_id) + " - Transcripted answer: " + answer)
        # Schema embedded in the system prompt to shape the model's JSON output.
        json_format = {
            "comment": "extensive comment about answer quality",
            "overall": 0.0,
            "task_response": {
                "Fluency and Coherence": {
                    "grade": 0.0,
                    "comment": "extensive comment about fluency and coherence, use examples to justify the grade awarded."
                },
                "Lexical Resource": {
                    "grade": 0.0,
                    "comment": "extensive comment about lexical resource, use examples to justify the grade awarded."
                },
                "Grammatical Range and Accuracy": {
                    "grade": 0.0,
                    "comment": "extensive comment about grammatical range and accuracy, use examples to justify the grade awarded."
                },
                "Pronunciation": {
                    "grade": 0.0,
                    "comment": "extensive comment about pronunciation on the transcribed answer, use examples to justify the grade awarded."
                }
            }
        }
        if has_x_words(answer, 20):
            messages = [
                {
                    "role": "system",
                    "content": (
                        'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
                },
                {
                    "role": "user",
                    "content": (
                        'Evaluate the given Speaking Part 2 response based on the IELTS grading system, ensuring a '
                        'strict assessment that penalizes errors. Deduct points for deviations from the task, and '
                        'assign a score of 0 if the response fails to address the question. Additionally, provide '
                        'detailed commentary highlighting both strengths and weaknesses in the response.'
                        '\n Question: "' + question + '" \n Answer: "' + answer + '"')
                },
                {
                    "role": "user",
                    "content": 'Address the student as "you"'
                }
            ]
            token_count = count_total_tokens(messages)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting grading of the answer.")
            response = make_openai_call(GPT_3_5_TURBO, messages, token_count, ["comment"],
                                        GRADING_TEMPERATURE)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Answer graded: " + str(response))
            perfect_answer_messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: '
                                '{"answer": "perfect answer"}')
                },
                {
                    "role": "user",
                    "content": (
                        'Provide a perfect answer according to ielts grading system to the following '
                        'Speaking Part 2 question: "' + question + '"')
                }
            ]
            token_count = count_total_tokens(perfect_answer_messages)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting perfect answer.")
            response['perfect_answer'] = make_openai_call(GPT_3_5_TURBO,
                                                          perfect_answer_messages,
                                                          token_count,
                                                          ["answer"],
                                                          GEN_QUESTION_TEMPERATURE)["answer"]
            logging.info("POST - speaking_task_2 - " + str(
                request_id) + " - Perfect answer: " + response['perfect_answer'])
            response['transcript'] = answer
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting fixed text.")
            response['fixed_text'] = get_speaking_corrections(answer)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Fixed text: " + response['fixed_text'])
            # Keep the overall band consistent with the per-criterion grades.
            response["overall"] = fix_speaking_overall(response["overall"], response["task_response"])
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Final response: " + str(response))
            # NOTE(review): the temp file is not removed on this success path —
            # cleanup relies on delete_files_older_than_one_day at the top.
            return response
        else:
            # Under the 20-word threshold: fixed zero grade, no OpenAI call.
            logging.info("POST - speaking_task_2 - " + str(
                request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer)
            return {
                "comment": "The audio recorded does not contain enough english words to be graded.",
                "overall": 0,
                "task_response": {
                    "Fluency and Coherence": {
                        "grade": 0.0,
                        "comment": ""
                    },
                    "Lexical Resource": {
                        "grade": 0.0,
                        "comment": ""
                    },
                    "Grammatical Range and Accuracy": {
                        "grade": 0.0,
                        "comment": ""
                    },
                    "Pronunciation": {
                        "grade": 0.0,
                        "comment": ""
                    }
                }
            }
    except Exception as e:
        # NOTE(review): if the download failed, the temp file may not exist and
        # this os.remove will raise, masking the original error — consider
        # guarding with os.path.exists or contextlib.suppress(FileNotFoundError).
        os.remove(sound_file_name)
        return str(e), 400
@app.route('/speaking_task_2', methods=['GET'])
@jwt_required()
def get_speaking_task_2_question():
    """Generate an IELTS Speaking Part 2 cue-card question with three prompts.

    Query params: optional ``difficulty`` and ``topic`` (random when omitted).
    Returns the model's JSON response annotated with type=2, the difficulty
    and the topic; on failure the exception text is returned.
    """
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    # Schema embedded in the system prompt to shape the model's JSON output.
    json_format = {
        "topic": "topic",
        "question": "question",
        "prompts": [
            "prompt_1",
            "prompt_2",
            "prompt_3"
        ],
        "suffix": "And explain why..."
    }
    try:
        system_message = {
            "role": "system",
            "content": 'You are a helpful assistant designed to output JSON on this format: ' + str(json_format)
        }
        generation_message = {
            "role": "user",
            "content": ('Create a question of medium difficulty for IELTS Speaking Part 2 that encourages candidates '
                        'to narrate a personal experience or story related to the topic of "' + topic + '". Include 3 '
                        'prompts that guide the candidate to describe specific aspects of the experience, such as '
                        'details about the situation, their actions, and the reasons it left a lasting impression. '
                        'Make sure that the generated question does not contain forbidden subjects in muslim countries.')
        }
        format_message = {
            "role": "user",
            "content": 'The prompts must not be questions. Also include a suffix like the ones in the IELTS exams '
                       'that start with "And explain why".'
        }
        messages = [system_message, generation_message, format_message]
        response = make_openai_call(GPT_4_O, messages, count_total_tokens(messages), GEN_FIELDS,
                                    GEN_QUESTION_TEMPERATURE)
        response["type"] = 2
        response["difficulty"] = difficulty
        response["topic"] = topic
        return response
    except Exception as e:
        return str(e)
@app.route('/speaking_task_3', methods=['GET'])
@jwt_required()
def get_speaking_task_3_question():
    """Generate five IELTS Speaking Part 3 discussion questions on one topic.

    Query params: optional ``difficulty`` and ``topic`` (random when omitted).
    Returns the model's JSON response annotated with type=3, the difficulty
    and the topic; on failure the exception text is returned.
    """
    difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    topic = request.args.get("topic", default=random.choice(mti_topics))
    # Schema embedded in the system prompt to shape the model's JSON output.
    json_format = {
        "topic": "topic",
        "questions": [
            "Introductory question about the topic.",
            "Follow up question about the topic",
            "Follow up question about the topic",
            "Follow up question about the topic",
            "Follow up question about the topic"
        ]
    }
    try:
        messages = [
            {
                "role": "system",
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
            },
            {
                "role": "user",
                "content": (
                    'Formulate a set of 5 questions of hard difficulty for IELTS Speaking Part 3 that encourage candidates to engage in a '
                    'meaningful discussion on the topic of "' + topic + '". Provide inquiries, ensuring '
                    # BUG FIX: the original adjacent literals produced
                    # "...related to the topic.Make sure..." with no space.
                    'they explore various aspects, perspectives, and implications related to the topic. '
                    'Make sure that the generated question does not contain forbidden subjects in muslim countries.')
            }
        ]
        token_count = count_total_tokens(messages)
        response = make_openai_call(GPT_4_O, messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
        # Strip any leading "1. " style numbering the model adds to questions.
        # The anchored re.sub is a no-op on unnumbered strings, so the original
        # redundant re.match guard was removed.
        response["questions"] = [re.sub(r"^\d+\.\s*", "", question) for question in response["questions"]]
        response["type"] = 3
        response["difficulty"] = difficulty
        response["topic"] = topic
        return response
    except Exception as e:
        return str(e)
@app.route('/speaking_task_3', methods=['POST'])
@jwt_required()
def grade_speaking_task_3():
    """Grade a set of IELTS Speaking Part 3 audio answers.

    Expects JSON: {"answers": [{"question": str, "answer": <firebase audio path>}, ...]}.

    For each item: downloads the recording, transcribes it, and generates a
    model-written "perfect answer" for the question. All transcripts are then
    graded together for coherence. Returns the grading JSON enriched with
    perfect answers, transcripts and corrected texts, or (error text, 400).

    Short-circuits with a zero-grade payload if any transcript has fewer
    than 20 words.
    """
    request_id = uuid.uuid4()
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    logging.info("POST - speaking_task_3 - Received request to grade speaking task 3. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        answers = data.get('answers')
        text_answers = []
        perfect_answers = []
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Received " + str(len(answers)) + " total answers.")
        for item in answers:
            # Download, transcribe and discard each recording; the transcript
            # replaces the audio path in `item` for later prompt formatting.
            sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
            logging.info("POST - speaking_task_3 - " + str(request_id) + " - Downloading file " + item["answer"])
            download_firebase_file(FIREBASE_BUCKET, item["answer"], sound_file_name)
            logging.info("POST - speaking_task_3 - " + str(
                request_id) + " - Downloaded file " + item["answer"] + " to " + sound_file_name)
            answer_text = speech_to_text(sound_file_name)
            logging.info("POST - speaking_task_3 - " + str(request_id) + " - Transcripted answer: " + answer_text)
            text_answers.append(answer_text)
            item["answer"] = answer_text
            os.remove(sound_file_name)
            if not has_x_words(answer_text, 20):
                # Too little speech to grade — return a zero-band payload.
                logging.info("POST - speaking_task_3 - " + str(
                    request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer_text)
                return {
                    "comment": "The audio recorded does not contain enough english words to be graded.",
                    "overall": 0,
                    "task_response": {
                        "Fluency and Coherence": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Lexical Resource": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Grammatical Range and Accuracy": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Pronunciation": {
                            "grade": 0.0,
                            "comment": ""
                        }
                    }
                }
            perfect_answer_messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: '
                                '{"answer": "perfect answer"}')
                },
                {
                    "role": "user",
                    "content": (
                        'Provide a perfect answer according to ielts grading system to the following '
                        'Speaking Part 3 question: "' + item["question"] + '"')
                }
            ]
            token_count = count_total_tokens(perfect_answer_messages)
            logging.info("POST - speaking_task_3 - " + str(
                request_id) + " - Requesting perfect answer for question: " + item["question"])
            perfect_answers.append(make_openai_call(GPT_3_5_TURBO,
                                                    perfect_answer_messages,
                                                    token_count,
                                                    ["answer"],
                                                    GEN_QUESTION_TEMPERATURE))
        json_format = {
            "comment": "extensive comment about answer quality",
            "overall": 0.0,
            "task_response": {
                "Fluency and Coherence": {
                    "grade": 0.0,
                    "comment": "extensive comment about fluency and coherence, use examples to justify the grade awarded."
                },
                "Lexical Resource": {
                    "grade": 0.0,
                    "comment": "extensive comment about lexical resource, use examples to justify the grade awarded."
                },
                "Grammatical Range and Accuracy": {
                    "grade": 0.0,
                    "comment": "extensive comment about grammatical range and accuracy, use examples to justify the grade awarded."
                },
                "Pronunciation": {
                    "grade": 0.0,
                    "comment": "extensive comment about pronunciation on the transcribed answer, use examples to justify the grade awarded."
                }
            }
        }
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Formatting answers and questions for prompt.")
        formatted_text = ""
        for i, entry in enumerate(answers, start=1):
            formatted_text += f"**Question {i}:**\n{entry['question']}\n\n"
            formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Formatted answers and questions for prompt: " + formatted_text)
        # BUG FIX: `formatted_text` was previously built and logged but never
        # appended to the prompt, so the grading model received no questions
        # or answers to evaluate.
        grade_message = (
            "Evaluate the given Speaking Part 3 response based on the IELTS grading system, ensuring a "
            "strict assessment that penalizes errors. Deduct points for deviations from the task, and "
            "assign a score of 0 if the response fails to address the question. Additionally, provide detailed "
            "commentary highlighting both strengths and weaknesses in the response."
            "\n\n The questions and answers are: \n\n'" + formatted_text + "'")
        messages = [
            {
                "role": "system",
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
            },
            {
                "role": "user",
                "content": grade_message
            },
            {
                "role": "user",
                "content": 'Address the student as "you" and pay special attention to coherence between the answers.'
            },
            {
                "role": "user",
                "content": 'For pronunciations act as if you heard the answers and they were transcripted as you heard them.'
            },
            {
                "role": "user",
                "content": 'The comments must be long, detailed, justify the grading and suggest improvements.'
            }
        ]
        token_count = count_total_tokens(messages)
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Requesting grading of the answers.")
        response = make_openai_call(GPT_4_O, messages, token_count, ["comment"], GRADING_TEMPERATURE)
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Answers graded: " + str(response))
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Adding perfect answers to response.")
        for i, answer in enumerate(perfect_answers, start=1):
            response['perfect_answer_' + str(i)] = answer
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Adding transcript and fixed texts to response.")
        for i, answer in enumerate(text_answers, start=1):
            response['transcript_' + str(i)] = answer
            response['fixed_text_' + str(i)] = get_speaking_corrections(answer)
        # Keep the overall band consistent with the per-category grades.
        response["overall"] = fix_speaking_overall(response["overall"], response["task_response"])
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Final response: " + str(response))
        return response
    except Exception as e:
        return str(e), 400
def fix_speaking_overall(overall: float, task_response: dict):
    """Sanity-check the model's overall speaking band score.

    If `overall` falls within [min, max] of the per-category grades it is
    returned unchanged; otherwise it is replaced by the category average
    rounded to the nearest whole band.
    """
    category_grades = [entry["grade"] for entry in task_response.values()]
    lowest, highest = min(category_grades), max(category_grades)
    if lowest <= overall <= highest:
        return overall
    # Out of range: fall back to the rounded mean of the categories.
    mean_grade = sum(category_grades) / len(category_grades)
    return round(mean_grade, 0)
@app.route('/speaking', methods=['POST'])
@jwt_required()
def save_speaking():
    """Kick off asynchronous video generation for a speaking exam.

    Expects JSON with "exercises" and an optional "minTimer". Returns the
    exam template (with its new id) immediately; videos are generated and
    persisted by a background thread.
    """
    try:
        data = request.get_json()
        exercises = data.get('exercises')
        minTimer = data.get('minTimer', SPEAKING_MIN_TIMER_DEFAULT)
        template = getSpeakingTemplate()
        template["minTimer"] = minTimer
        # A shortened timer marks the exam as the partial (practice) variant.
        if minTimer < SPEAKING_MIN_TIMER_DEFAULT:
            template["variant"] = ExamVariant.PARTIAL.value
        else:
            template["variant"] = ExamVariant.FULL.value
        exam_id = str(uuid.uuid4())  # renamed from `id`, which shadowed the builtin
        app.logger.info('Received request to save speaking with id: ' + exam_id)
        # NOTE(review): thread_event is set here but no matching wait()/clear()
        # is visible in this file — confirm it is still needed.
        thread_event.set()
        thread = threading.Thread(
            target=create_videos_and_save_to_db,
            args=(exercises, template, exam_id),
            name=("thread-save-speaking-" + exam_id)
        )
        thread.start()
        # Thread.name replaces the deprecated getName() accessor.
        app.logger.info('Started thread to save speaking. Thread: ' + thread.name)
        # Return response without waiting for create_videos_and_save_to_db to finish
        return {**template, "id": exam_id}
    except Exception as e:
        return str(e)
@app.route("/speaking/generate_video_1", methods=['POST'])
@jwt_required()
def generate_video_1():
try:
data = request.get_json()
sp3_questions = []
avatar = data.get("avatar", random.choice(list(AvatarEnum)).value)
request_id = str(uuid.uuid4())
logging.info("POST - generate_video_1 - Received request to generate video 1. "
"Use this id to track the logs: " + str(request_id) + " - Request data: " + str(
request.get_json()))
logging.info("POST - generate_video_1 - " + str(request_id) + " - Creating videos for speaking part 1.")
for question in data["questions"]:
logging.info("POST - generate_video_1 - " + str(request_id) + " - Creating video for question: " + question)
result = create_video(question, avatar)
logging.info("POST - generate_video_1 - " + str(request_id) + " - Video created: " + result)
if result is not None:
sound_file_path = VIDEO_FILES_PATH + result
firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
logging.info(
"POST - generate_video_1 - " + str(
request_id) + " - Uploading video to firebase: " + firebase_file_path)
url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
logging.info(
"POST - generate_video_1 - " + str(
request_id) + " - Uploaded video to firebase: " + url)
video = {
"text": question,
"video_path": firebase_file_path,
"video_url": url
}
sp3_questions.append(video)
else:
logging.error("POST - generate_video_1 - " + str(
request_id) + " - Failed to create video for part 1 question: " + question)
response = {
"prompts": sp3_questions,
"first_title": data["first_topic"],
"second_title": data["second_topic"],
"type": "interactiveSpeaking",
"id": uuid.uuid4()
}
logging.info(
"POST - generate_video_1 - " + str(
request_id) + " - Finished creating videos for speaking part 1: " + str(response))
return response
except Exception as e:
return str(e)
@app.route("/speaking/generate_video_2", methods=['POST'])
@jwt_required()
def generate_video_2():
try:
data = request.get_json()
avatar = data.get("avatar", random.choice(list(AvatarEnum)).value)
prompts = data.get("prompts", [])
question = data.get("question")
suffix = data.get("suffix", "")
# Removed as the examiner should not say what is on the card.
# question = question + " In your answer you should consider: " + " ".join(prompts) + suffix
question = question + "\nYou have 1 minute to take notes."
request_id = str(uuid.uuid4())
logging.info("POST - generate_video_2 - Received request to generate video 2. "
"Use this id to track the logs: " + str(request_id) + " - Request data: " + str(
request.get_json()))
logging.info("POST - generate_video_2 - " + str(request_id) + " - Creating video for speaking part 2.")
logging.info("POST - generate_video_2 - " + str(request_id) + " - Creating video for question: " + question)
result = create_video(question, avatar)
logging.info("POST - generate_video_2 - " + str(request_id) + " - Video created: " + result)
if result is not None:
sound_file_path = VIDEO_FILES_PATH + result
firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
logging.info(
"POST - generate_video_2 - " + str(
request_id) + " - Uploading video to firebase: " + firebase_file_path)
url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
logging.info(
"POST - generate_video_2 - " + str(
request_id) + " - Uploaded video to firebase: " + url)
sp1_video_path = firebase_file_path
sp1_video_url = url
return {
"text": data["question"],
"prompts": prompts,
"title": data["topic"],
"video_url": sp1_video_url,
"video_path": sp1_video_path,
"type": "speaking",
"id": uuid.uuid4(),
"suffix": suffix
}
else:
logging.error("POST - generate_video_2 - " + str(
request_id) + " - Failed to create video for part 2 question: " + question)
return str("Failed to create video for part 2 question: " + data["question"])
except Exception as e:
return str(e)
@app.route("/speaking/generate_video_3", methods=['POST'])
@jwt_required()
def generate_video_3():
try:
data = request.get_json()
sp3_questions = []
avatar = data.get("avatar", random.choice(list(AvatarEnum)).value)
request_id = str(uuid.uuid4())
logging.info("POST - generate_video_3 - Received request to generate video 3. "
"Use this id to track the logs: " + str(request_id) + " - Request data: " + str(
request.get_json()))
logging.info("POST - generate_video_3 - " + str(request_id) + " - Creating videos for speaking part 3.")
for question in data["questions"]:
logging.info("POST - generate_video_3 - " + str(request_id) + " - Creating video for question: " + question)
result = create_video(question, avatar)
logging.info("POST - generate_video_3 - " + str(request_id) + " - Video created: " + result)
if result is not None:
sound_file_path = VIDEO_FILES_PATH + result
firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
logging.info(
"POST - generate_video_3 - " + str(
request_id) + " - Uploading video to firebase: " + firebase_file_path)
url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
logging.info(
"POST - generate_video_3 - " + str(
request_id) + " - Uploaded video to firebase: " + url)
video = {
"text": question,
"video_path": firebase_file_path,
"video_url": url
}
sp3_questions.append(video)
else:
logging.error("POST - generate_video_3 - " + str(
request_id) + " - Failed to create video for part 3 question: " + question)
response = {
"prompts": sp3_questions,
"title": data["topic"],
"type": "interactiveSpeaking",
"id": uuid.uuid4()
}
logging.info(
"POST - generate_video_3 - " + str(
request_id) + " - Finished creating videos for speaking part 3: " + str(response))
return response
except Exception as e:
return str(e)
@app.route('/reading_passage_1', methods=['GET'])
@jwt_required()
def get_reading_passage_1_question():
    """Generate a reading passage 1 exercise set.

    Query params `topic`, `exercises` (repeatable) and `difficulty` each
    fall back to a random choice / empty list when absent.
    """
    try:
        args = request.args
        chosen_topic = args.get('topic', default=random.choice(topics))
        requested_exercises = args.getlist('exercises')
        chosen_difficulty = args.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_1(chosen_topic, requested_exercises, chosen_difficulty)
    except Exception as e:
        return str(e)
@app.route('/reading_passage_2', methods=['GET'])
@jwt_required()
def get_reading_passage_2_question():
    """Generate a reading passage 2 exercise set.

    Query params `topic`, `exercises` (repeatable) and `difficulty` each
    fall back to a random choice / empty list when absent.
    """
    try:
        args = request.args
        chosen_topic = args.get('topic', default=random.choice(topics))
        requested_exercises = args.getlist('exercises')
        chosen_difficulty = args.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_2(chosen_topic, requested_exercises, chosen_difficulty)
    except Exception as e:
        return str(e)
@app.route('/reading_passage_3', methods=['GET'])
@jwt_required()
def get_reading_passage_3_question():
    """Generate a reading passage 3 exercise set.

    Query params `topic`, `exercises` (repeatable) and `difficulty` each
    fall back to a random choice / empty list when absent.
    """
    try:
        args = request.args
        chosen_topic = args.get('topic', default=random.choice(topics))
        requested_exercises = args.getlist('exercises')
        chosen_difficulty = args.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_3(chosen_topic, requested_exercises, chosen_difficulty)
    except Exception as e:
        return str(e)
@app.route('/level', methods=['GET'])
@jwt_required()
def get_level_exam():
    """Return the standard level exam: 25 multiple-choice questions."""
    try:
        generated = gen_multiple_choice_level(25)
        return {
            "exercises": [generated],
            "isDiagnostic": False,
            "minTimer": 25,
            "module": "level"
        }
    except Exception as e:
        return str(e)
def _utas_section(prompt: str, section_type: str, part: int) -> dict:
    """Build the skeleton dict for one UTAS level-exam section."""
    return {
        "id": str(uuid.uuid4()),
        "prompt": prompt,
        "questions": None,
        "type": section_type,
        "part": part
    }
@app.route('/level_utas', methods=['GET'])
@jwt_required()
def get_level_utas():
    """Assemble the full UTAS placement exam (5 sections, 100 questions).

    Sections: 45 blank-space MC (parts 1-3 merged), 15 underlined MC,
    two blank-space texts, and a reading passage. Question ids are
    numbered consecutively across sections.
    """
    try:
        mc = _utas_section("Choose the correct word or group of words that completes the sentences.",
                           "multipleChoice", 1)
        umc = _utas_section("Choose the underlined word or group of words that is not correct.",
                            "multipleChoice", 2)
        bs_1 = _utas_section("Read the text and write the correct word for each space.",
                             "blankSpaceText", 3)
        bs_2 = _utas_section("Read the text and write the correct word for each space.",
                             "blankSpaceText", 4)
        reading = _utas_section("Read the text and answer the questions below.",
                                "readingExercises", 5)
        # Previously generated batches are passed along so the generator can
        # avoid repeating questions across the three MC parts.
        all_mc_questions = []
        # PART 1 (questions 1-15)
        mc_exercises1 = gen_multiple_choice_blank_space_utas(15, 1, all_mc_questions)
        # Debug prints replaced with logger calls so output goes through the
        # configured logging setup instead of stdout.
        app.logger.debug(json.dumps(mc_exercises1, indent=4))
        all_mc_questions.append(mc_exercises1)
        # PART 2 (questions 16-30)
        mc_exercises2 = gen_multiple_choice_blank_space_utas(15, 16, all_mc_questions)
        app.logger.debug(json.dumps(mc_exercises2, indent=4))
        all_mc_questions.append(mc_exercises2)
        # PART 3 (questions 31-45)
        mc_exercises3 = gen_multiple_choice_blank_space_utas(15, 31, all_mc_questions)
        app.logger.debug(json.dumps(mc_exercises3, indent=4))
        all_mc_questions.append(mc_exercises3)
        mc_exercises = mc_exercises1['questions'] + mc_exercises2['questions'] + mc_exercises3['questions']
        app.logger.debug(json.dumps(mc_exercises, indent=4))
        mc["questions"] = mc_exercises
        # Underlined mc (questions 46-60)
        underlined_mc = gen_multiple_choice_underlined_utas(15, 46)
        app.logger.debug(json.dumps(underlined_mc, indent=4))
        umc["questions"] = underlined_mc
        # Blank Space text 1 (questions 61-72, ~250 words)
        blank_space_text_1 = gen_blank_space_text_utas(12, 61, 250)
        app.logger.debug(json.dumps(blank_space_text_1, indent=4))
        bs_1["questions"] = blank_space_text_1
        # Blank Space text 2 (questions 73-86, ~350 words)
        blank_space_text_2 = gen_blank_space_text_utas(14, 73, 350)
        app.logger.debug(json.dumps(blank_space_text_2, indent=4))
        bs_2["questions"] = blank_space_text_2
        # Reading text (questions 87+; 10 short-answer, 4 multiple-choice)
        reading_text = gen_reading_passage_utas(87, 10, 4)
        app.logger.debug(json.dumps(reading_text, indent=4))
        reading["questions"] = reading_text
        return {
            "exercises": {
                "blankSpaceMultipleChoice": mc,
                "underlinedMultipleChoice": umc,
                "blankSpaceText1": bs_1,
                "blankSpaceText2": bs_2,
                "readingExercises": reading,
            },
            "isDiagnostic": False,
            "minTimer": 25,
            "module": "level"
        }
    except Exception as e:
        return str(e)
# NOTE(review): mid-file import — consider moving to the top-of-file import block.
from enum import Enum
class CustomLevelExerciseTypes(Enum):
    """Exercise type codes accepted by /custom_level's `exercise_<i>_type` query param."""
    MULTIPLE_CHOICE_4 = "multiple_choice_4"
    MULTIPLE_CHOICE_BLANK_SPACE = "multiple_choice_blank_space"
    MULTIPLE_CHOICE_UNDERLINED = "multiple_choice_underlined"
    BLANK_SPACE_TEXT = "blank_space_text"
    READING_PASSAGE_UTAS = "reading_passage_utas"
@app.route('/custom_level', methods=['GET'])
@jwt_required()
def get_custom_level():
    """Build a custom level exam from per-exercise query parameters.

    Query params: `nr_exercises` plus, for each i in 1..nr_exercises,
    `exercise_<i>_type` (a CustomLevelExerciseTypes value) and the
    type-specific sizing params `_qty`, `_topic`, `_text_size`,
    `_sa_qty`, `_mc_qty`. Question ids run consecutively across exercises.
    """
    # Robustness: default to 0 instead of raising TypeError (HTTP 500)
    # when the parameter is missing.
    nr_exercises = int(request.args.get('nr_exercises', 0))
    exercise_id = 1
    response = {
        "exercises": {},
        "module": "level"
    }
    for i in range(1, nr_exercises + 1):
        key = 'exercise_' + str(i)
        exercise_type = request.args.get(key + '_type')
        exercise_qty = int(request.args.get(key + '_qty', -1))
        exercise_topic = request.args.get(key + '_topic', random.choice(topics))
        exercise_text_size = int(request.args.get(key + '_text_size', -1))
        exercise_sa_qty = int(request.args.get(key + '_sa_qty', -1))
        exercise_mc_qty = int(request.args.get(key + '_mc_qty', -1))
        if exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_4.value:
            response["exercises"][key] = generate_level_mc(exercise_id, exercise_qty)
            response["exercises"][key]["type"] = "multipleChoice"
            exercise_id = exercise_id + exercise_qty
        elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_BLANK_SPACE.value:
            response["exercises"][key] = gen_multiple_choice_blank_space_utas(exercise_qty,
                                                                              exercise_id)
            response["exercises"][key]["type"] = "multipleChoice"
            exercise_id = exercise_id + exercise_qty
        elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_UNDERLINED.value:
            response["exercises"][key] = gen_multiple_choice_underlined_utas(exercise_qty, exercise_id)
            response["exercises"][key]["type"] = "multipleChoice"
            exercise_id = exercise_id + exercise_qty
        elif exercise_type == CustomLevelExerciseTypes.BLANK_SPACE_TEXT.value:
            response["exercises"][key] = gen_blank_space_text_utas(exercise_qty, exercise_id,
                                                                   exercise_text_size)
            response["exercises"][key]["type"] = "blankSpaceText"
            exercise_id = exercise_id + exercise_qty
        elif exercise_type == CustomLevelExerciseTypes.READING_PASSAGE_UTAS.value:
            response["exercises"][key] = gen_reading_passage_utas(exercise_id, exercise_sa_qty,
                                                                  exercise_mc_qty, exercise_topic)
            response["exercises"][key]["type"] = "readingExercises"
            # BUG FIX: previously advanced by exercise_qty, which defaults to
            # -1 for reading passages (they are sized by _sa_qty/_mc_qty, not
            # _qty), corrupting the ids of all subsequent exercises.
            exercise_id = exercise_id + exercise_sa_qty + exercise_mc_qty
    return response
@app.route('/fetch_tips', methods=['POST'])
@jwt_required()
def fetch_answer_tips():
    """Generate a tip explaining why a student's answer was wrong.

    Expects JSON with "context", "question", "answer" and "correct_answer".
    Returns the model's tip text (with any leading "label:" prefix stripped)
    or the exception text on failure.
    """
    try:
        data = request.get_json()
        context = data.get('context')
        question = data.get('question')
        answer = data.get('answer')
        correct_answer = data.get('correct_answer')
        messages = get_question_tips(question, answer, correct_answer, context)
        # Idiomatic sum over a generator replaces the former
        # reduce/map/filter/lambda chain (same result, far more readable).
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        response = make_openai_call(GPT_3_5_TURBO, messages, token_count, None, TIPS_TEMPERATURE)
        if isinstance(response, str):
            # Strip a leading "<label>: " prefix the model sometimes emits.
            response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response)
        return response
    except Exception as e:
        return str(e)
@app.route('/grading_summary', methods=['POST'])
@jwt_required()
def grading_summary():
    """Add evaluations and suggestions to a set of graded sections.

    Body:   {'sections': [{'code', 'name', 'grade'}, ...]}
    Output: {'sections': [{'code', 'name', 'grade', 'evaluation',
             'suggestions'}, ...]}
    """
    try:
        payload = request.get_json()
        return calculate_grading_summary(payload)
    except Exception as e:
        return str(e)
@app.route('/training_content', methods=['POST'])
@jwt_required()
def training_content():
    """Return training-content tips for the posted payload."""
    try:
        payload = request.get_json()
        return tc_service.get_tips(payload)
    except Exception as e:
        app.logger.error(str(e))
        return str(e)
# Run the Flask development server when executed directly;
# production deployments should serve the app via a WSGI server instead.
if __name__ == '__main__':
    app.run()