Files
encoach_backend/app.py
2024-10-01 18:12:56 +01:00

1743 lines
75 KiB
Python

import threading
from functools import reduce
import firebase_admin
from firebase_admin import credentials
from flask import Flask, request
from flask_jwt_extended import JWTManager, jwt_required
from pymongo import MongoClient
from sentence_transformers import SentenceTransformer
from helper.api_messages import *
from helper.exam_variant import ExamVariant
from helper.exercises import *
from helper.file_helper import delete_files_older_than_one_day
from helper.firebase_helper import *
from helper.gpt_zero import GPTZero
from helper.elai_api import create_video, create_videos_and_save_to_db
from helper.openai_interface import *
from helper.question_templates import *
from helper.speech_to_text_helper import *
from elai.AvatarEnum import AvatarEnum
from modules import GPT
from modules.training_content import TrainingContentService, TrainingContentKnowledgeBase
from modules.upload_level import UploadLevelService
from modules.batch_users import BatchUsers
# Load environment variables from a local .env file before any os.getenv() below.
load_dotenv()
app = Flask(__name__)
# Secret used by flask_jwt_extended to sign/verify the access tokens guarding the routes.
app.config['JWT_SECRET_KEY'] = os.getenv("JWT_SECRET_KEY")
jwt = JWTManager(app)
# Initialize Firebase Admin SDK (service-account credentials file path comes from env).
cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS"))
FIREBASE_BUCKET = os.getenv('FIREBASE_BUCKET')
firebase_admin.initialize_app(cred)
# Client for the GPTZero AI-generated-text detection service.
gpt_zero = GPTZero(os.getenv('GPT_ZERO_API_KEY'))
# Training Content Dependencies
# Sentence-embedding model backing the training-content knowledge base.
embeddings = SentenceTransformer('all-MiniLM-L6-v2')
kb = TrainingContentKnowledgeBase(embeddings)
kb.load_indices_and_metadata()
open_ai = GPT(OpenAI())
mongo_db = MongoClient(os.getenv('MONGODB_URI'))[os.getenv('MONGODB_DB')]
tc_service = TrainingContentService(kb, open_ai, mongo_db)
upload_level_service = UploadLevelService(open_ai)
batch_users_service = BatchUsers(mongo_db)
# Event used to signal background worker threads (set when a new job is spawned).
thread_event = threading.Event()
# Configure logging
logging.basicConfig(level=logging.DEBUG, # Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
format='%(asctime)s - %(levelname)s - %(message)s')
@app.route('/healthcheck', methods=['GET'])
def healthcheck():
    """Liveness probe; always reports the service as up."""
    status = {"healthy": True}
    return status
@app.route('/listening_section_1', methods=['GET'])
@jwt_required()
def get_listening_section_1_question():
    """Generate a Listening Section 1 exercise (two-person conversation scenario)."""
    try:
        # Purge stale generated audio before producing new files.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Query-string parameters, with random fallbacks when omitted.
        chosen_topic = request.args.get('topic', default=random.choice(two_people_scenarios))
        requested_exercises = request.args.getlist('exercises')
        chosen_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        return gen_listening_section_1(chosen_topic, chosen_difficulty, requested_exercises)
    except Exception as error:
        return str(error)
@app.route('/listening_section_2', methods=['GET'])
@jwt_required()
def get_listening_section_2_question():
    """Generate a Listening Section 2 exercise (social monologue context)."""
    try:
        # Purge stale generated audio before producing new files.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Query-string parameters, with random fallbacks when omitted.
        chosen_topic = request.args.get('topic', default=random.choice(social_monologue_contexts))
        requested_exercises = request.args.getlist('exercises')
        chosen_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        return gen_listening_section_2(chosen_topic, chosen_difficulty, requested_exercises)
    except Exception as error:
        return str(error)
@app.route('/listening_section_3', methods=['GET'])
@jwt_required()
def get_listening_section_3_question():
    """Generate a Listening Section 3 exercise (four-person conversation scenario)."""
    try:
        # Purge stale generated audio before producing new files.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Query-string parameters, with random fallbacks when omitted.
        chosen_topic = request.args.get('topic', default=random.choice(four_people_scenarios))
        requested_exercises = request.args.getlist('exercises')
        chosen_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        return gen_listening_section_3(chosen_topic, chosen_difficulty, requested_exercises)
    except Exception as error:
        return str(error)
@app.route('/listening_section_4', methods=['GET'])
@jwt_required()
def get_listening_section_4_question():
    """Generate a Listening Section 4 exercise (academic subject lecture)."""
    try:
        # Purge stale generated audio before producing new files.
        delete_files_older_than_one_day(AUDIO_FILES_PATH)
        # Query-string parameters, with random fallbacks when omitted.
        chosen_topic = request.args.get('topic', default=random.choice(academic_subjects))
        requested_exercises = request.args.getlist('exercises')
        chosen_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
        return gen_listening_section_4(chosen_topic, chosen_difficulty, requested_exercises)
    except Exception as error:
        return str(error)
@app.route('/listening', methods=['POST'])
@jwt_required()
def save_listening():
    """Synthesize audio for each listening part, upload it to Firebase, and persist the exam.

    Expects JSON with ``parts`` (each carrying ``text`` and ``exercises``), plus
    optional ``minTimer``, ``difficulty`` and ``id``. Returns the stored template
    with its id, or the error message as a string on failure.
    """
    try:
        data = request.get_json()
        parts = data.get('parts')
        min_timer = data.get('minTimer', LISTENING_MIN_TIMER_DEFAULT)
        difficulty = data.get('difficulty', random.choice(difficulties))
        template = getListeningTemplate()
        template['difficulty'] = difficulty
        # Renamed from `id` to avoid shadowing the builtin.
        doc_id = data.get('id', str(uuid.uuid4()))
        for part in parts:
            part_template = getListeningPartTemplate()
            file_name = str(uuid.uuid4()) + ".mp3"
            sound_file_path = AUDIO_FILES_PATH + file_name
            firebase_file_path = FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name
            # A part is either a multi-speaker conversation or a plain monologue.
            if "conversation" in part["text"]:
                conversation_text_to_speech(part["text"]["conversation"], sound_file_path)
            else:
                text_to_speech(part["text"], sound_file_path)
            file_url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
            part_template["audio"]["source"] = file_url
            part_template["exercises"] = part["exercises"]
            template['parts'].append(part_template)
        # A non-default timer marks a shortened (partial) exam variant.
        if min_timer != LISTENING_MIN_TIMER_DEFAULT:
            template["minTimer"] = min_timer
            template["variant"] = ExamVariant.PARTIAL.value
        else:
            template["variant"] = ExamVariant.FULL.value
        (result, doc_id) = save_to_db_with_id(mongo_db, "listening", template, doc_id)
        if result:
            return {**template, "id": doc_id}
        else:
            # BUG FIX: original concatenated a list onto a str here (TypeError).
            raise Exception("Failed to save question: " + str(parts))
    except Exception as e:
        return str(e)
def _zero_writing_task1_result(comment):
    """Build a zero-band Writing Task 1 grading payload with the given top-level comment."""
    return {
        'comment': comment,
        'overall': 0,
        'task_response': {
            'Task Achievement': {"grade": 0.0, "comment": ""},
            'Coherence and Cohesion': {"grade": 0.0, "comment": ""},
            'Lexical Resource': {"grade": 0.0, "comment": ""},
            'Grammatical Range and Accuracy': {"grade": 0.0, "comment": ""}
        }
    }


@app.route('/writing_task1', methods=['POST'])
@jwt_required()
def grade_writing_task_1():
    """Grade a Writing Task 1 (letter) answer against the IELTS rubric via OpenAI.

    Expects JSON with ``question`` and ``answer``. Returns the per-category grades,
    an overall band, a model "perfect answer", a corrected text, and (when available)
    a GPTZero AI-detection result. Answers with too few English words get zero bands
    without calling the model.
    """
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        # Short-circuit: empty or too-short answers score 0 without an API call.
        if not has_words(answer):
            return _zero_writing_task1_result("The answer does not contain enough english words.")
        elif not has_x_words(answer, 100):
            return _zero_writing_task1_result("The answer is insufficient and too small to be graded.")
        else:
            # Shape the model is instructed to fill in (shown verbatim in the prompt).
            json_format = {
                "comment": "comment about student's response quality",
                "overall": 0.0,
                "task_response": {
                    "Task Achievement": {
                        "grade": 0.0,
                        "comment": "comment about Task Achievement of the student's response"
                    },
                    "Coherence and Cohesion": {
                        "grade": 0.0,
                        "comment": "comment about Coherence and Cohesion of the student's response"
                    },
                    "Lexical Resource": {
                        "grade": 0.0,
                        "comment": "comment about Lexical Resource of the student's response"
                    },
                    "Grammatical Range and Accuracy": {
                        "grade": 0.0,
                        "comment": "comment about Grammatical Range and Accuracy of the student's response"
                    }
                }
            }
            messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: ' + str(
                        json_format))
                },
                {
                    "role": "user",
                    "content": ('Evaluate the given Writing Task 1 response based on the IELTS grading system, '
                                'ensuring a strict assessment that penalizes errors. Deduct points for deviations '
                                'from the task, and assign a score of 0 if the response fails to address the question. '
                                'Additionally, provide a detailed commentary highlighting both strengths and '
                                'weaknesses in the response. '
                                '\n Question: "' + question + '" \n Answer: "' + answer + '"')
                },
                {
                    "role": "user",
                    "content": ('Refer to the parts of the letter as: "Greeting Opener", "bullet 1", "bullet 2", '
                                '"bullet 3", "closer (restate the purpose of the letter)", "closing greeting"')
                }
            ]
            token_count = count_total_tokens(messages)
            response = make_openai_call(GPT_3_5_TURBO, messages, token_count,
                                        ["comment"],
                                        GRADING_TEMPERATURE)
            # 150 words is the Task 1 minimum length for the model answer.
            response["perfect_answer"] = get_perfect_answer(question, 150)["perfect_answer"]
            # Keep the overall band consistent with the per-category grades.
            response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
            response['fixed_text'] = get_fixed_text(answer)
            ai_detection = gpt_zero.run_detection(answer)
            if ai_detection is not None:
                response['ai_detection'] = ai_detection
            return response
    except Exception as e:
        return str(e)
@app.route('/writing_task1_general', methods=['GET'])
@jwt_required()
def get_writing_task_1_general_question():
    """Generate a General Training Writing Task 1 (letter) question."""
    requested_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    requested_topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        return gen_writing_task_1(requested_topic, requested_difficulty)
    except Exception as error:
        return str(error)
def add_newline_before_hyphen(s):
    """Put each " -" bullet onto its own line by inserting a newline before the hyphen."""
    return "\n-".join(s.split(" -"))
def _zero_writing_task2_result(comment):
    """Build a zero-band Writing Task 2 grading payload with the given top-level comment."""
    return {
        'comment': comment,
        'overall': 0,
        'task_response': {
            'Task Achievement': {"grade": 0.0, "comment": ""},
            'Coherence and Cohesion': {"grade": 0.0, "comment": ""},
            'Lexical Resource': {"grade": 0.0, "comment": ""},
            'Grammatical Range and Accuracy': {"grade": 0.0, "comment": ""}
        }
    }


@app.route('/writing_task2', methods=['POST'])
@jwt_required()
def grade_writing_task_2():
    """Grade a Writing Task 2 (essay) answer against the IELTS rubric via OpenAI.

    Expects JSON with ``question`` and ``answer``. Returns per-category grades,
    an overall band, a model "perfect answer", a corrected text, and (when available)
    a GPTZero AI-detection result. Answers with too few English words get zero bands
    without calling the model.
    """
    try:
        data = request.get_json()
        question = data.get('question')
        answer = data.get('answer')
        # Short-circuit: empty or too-short answers score 0 without an API call.
        if not has_words(answer):
            return _zero_writing_task2_result("The answer does not contain enough english words.")
        elif not has_x_words(answer, 180):
            return _zero_writing_task2_result("The answer is insufficient and too small to be graded.")
        else:
            # Shape the model is instructed to fill in (shown verbatim in the prompt).
            json_format = {
                "comment": "comment about student's response quality",
                "overall": 0.0,
                "task_response": {
                    "Task Achievement": {
                        "grade": 0.0,
                        "comment": "comment about Task Achievement of the student's response"
                    },
                    "Coherence and Cohesion": {
                        "grade": 0.0,
                        "comment": "comment about Coherence and Cohesion of the student's response"
                    },
                    "Lexical Resource": {
                        "grade": 0.0,
                        "comment": "comment about Lexical Resource of the student's response"
                    },
                    "Grammatical Range and Accuracy": {
                        "grade": 0.0,
                        "comment": "comment about Grammatical Range and Accuracy of the student's response"
                    }
                }
            }
            messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: ' + str(
                        json_format))
                },
                {
                    "role": "user",
                    "content": (
                        'Evaluate the given Writing Task 2 response based on the IELTS grading system, ensuring a '
                        'strict assessment that penalizes errors. Deduct points for deviations from the task, and '
                        'assign a score of 0 if the response fails to address the question. Additionally, provide'
                        ' a detailed commentary highlighting '
                        'both strengths and weaknesses in the response.'
                        '\n Question: "' + question + '" \n Answer: "' + answer + '"')
                }
            ]
            token_count = count_total_tokens(messages)
            # CONSISTENCY FIX: grading calls elsewhere in this file use
            # GRADING_TEMPERATURE; the original used GEN_QUESTION_TEMPERATURE here.
            response = make_openai_call(GPT_4_O, messages, token_count, ["comment"],
                                        GRADING_TEMPERATURE)
            # 250 words is the Task 2 minimum length for the model answer.
            response["perfect_answer"] = get_perfect_answer(question, 250)["perfect_answer"]
            # Keep the overall band consistent with the per-category grades.
            response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
            response['fixed_text'] = get_fixed_text(answer)
            ai_detection = gpt_zero.run_detection(answer)
            if ai_detection is not None:
                response['ai_detection'] = ai_detection
            return response
    except Exception as e:
        return str(e)
def fix_writing_overall(overall: float, task_response: dict) -> float:
    """Clamp the overall writing band so it stays inside the per-category range.

    If the model's proposed overall grade lies outside the [min, max] range of
    the individual category grades, replace it with the rounded average of those
    grades (Python banker's rounding, as before).

    Args:
        overall: Overall band proposed by the model.
        task_response: Mapping of category name -> {"grade": float, "comment": str}.

    Returns:
        The original overall when consistent, otherwise the rounded category average.
    """
    grades = [category["grade"] for category in task_response.values()]
    if not grades:
        # Robustness fix: the original raised ValueError (max()/min() of empty
        # sequence) on malformed model output with no categories.
        return overall
    if not (min(grades) <= overall <= max(grades)):
        return round(sum(grades) / len(grades), 0)
    return overall
@app.route('/writing_task2_general', methods=['GET'])
@jwt_required()
def get_writing_task_2_general_question():
    """Generate a General Training Writing Task 2 (essay) question."""
    requested_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    requested_topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        return gen_writing_task_2(requested_topic, requested_difficulty)
    except Exception as error:
        return str(error)
@app.route('/speaking_task_1', methods=['POST'])
@jwt_required()
def grade_speaking_task_1():
    """Grade a batch of Speaking Part 1 answers (audio files stored in Firebase).

    Expects JSON with ``answers``: a list of {"question": str, "answer": <firebase path>}.
    For every item the audio is downloaded, transcribed, and a model "perfect answer"
    is generated; then all question/answer pairs are graded together in one OpenAI
    call. Returns the grading JSON enriched with ``perfect_answer_N``, ``transcript_N``
    and ``fixed_text_N`` keys. On error returns (message, 400).
    """
    request_id = uuid.uuid4()
    # Purge stale downloaded audio from previous requests.
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    logging.info("POST - speaking_task_1 - Received request to grade speaking task 1. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        answers = data.get('answers')
        text_answers = []
        perfect_answers = []
        logging.info("POST - speaking_task_1 - " + str(
            request_id) + " - Received " + str(len(answers)) + " total answers.")
        for item in answers:
            # Download the recording to a unique temp file, transcribe, then delete it.
            sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Downloading file " + item["answer"])
            download_firebase_file(FIREBASE_BUCKET, item["answer"], sound_file_name)
            logging.info("POST - speaking_task_1 - " + str(
                request_id) + " - Downloaded file " + item["answer"] + " to " + sound_file_name)
            answer_text = speech_to_text(sound_file_name)
            logging.info("POST - speaking_task_1 - " + str(request_id) + " - Transcripted answer: " + answer_text)
            text_answers.append(answer_text)
            # Replace the Firebase path with the transcript so the grading prompt can use it.
            item["answer"] = answer_text
            os.remove(sound_file_name)
            # Any single too-short answer short-circuits the whole request to zero bands.
            if not has_x_words(answer_text, 20):
                logging.info("POST - speaking_task_1 - " + str(
                    request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer_text)
                return {
                    "comment": "The audio recorded does not contain enough english words to be graded.",
                    "overall": 0,
                    "task_response": {
                        "Fluency and Coherence": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Lexical Resource": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Grammatical Range and Accuracy": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Pronunciation": {
                            "grade": 0.0,
                            "comment": ""
                        }
                    }
                }
            # Ask the model for an ideal 2-3 sentence answer to the same question.
            perfect_answer_messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: '
                                '{"answer": "perfect answer"}')
                },
                {
                    "role": "user",
                    "content": (
                        'Provide a perfect answer according to ielts grading system to the following '
                        'Speaking Part 1 question: "' + item["question"] + '"')
                },
                {
                    "role": "user",
                    "content": 'The answer must be 2 or 3 sentences long.'
                }
            ]
            token_count = count_total_tokens(perfect_answer_messages)
            logging.info("POST - speaking_task_1 - " + str(
                request_id) + " - Requesting perfect answer for question: " + item["question"])
            perfect_answers.append(make_openai_call(GPT_4_O,
                                                    perfect_answer_messages,
                                                    token_count,
                                                    ["answer"],
                                                    GEN_QUESTION_TEMPERATURE))
        # Shape the grading model is instructed to fill in (shown verbatim in the prompt).
        json_format = {
            "comment": "comment about answers quality",
            "overall": 0.0,
            "task_response": {
                "Fluency and Coherence": {
                    "grade": 0.0,
                    "comment": "comment about fluency and coherence"
                },
                "Lexical Resource": {
                    "grade": 0.0,
                    "comment": "comment about lexical resource"
                },
                "Grammatical Range and Accuracy": {
                    "grade": 0.0,
                    "comment": "comment about grammatical range and accuracy"
                },
                "Pronunciation": {
                    "grade": 0.0,
                    "comment": "comment about pronunciation on the transcribed answers"
                }
            }
        }
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Formatting answers and questions for prompt.")
        # Render all Q/A pairs as one markdown-ish blob for the grading prompt.
        formatted_text = ""
        for i, entry in enumerate(answers, start=1):
            formatted_text += f"**Question {i}:**\n{entry['question']}\n\n"
            formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
        logging.info("POST - speaking_task_1 - " + str(
            request_id) + " - Formatted answers and questions for prompt: " + formatted_text)
        grade_message = (
            'Evaluate the given Speaking Part 1 response based on the IELTS grading system, ensuring a '
            'strict assessment that penalizes errors. Deduct points for deviations from the task, and '
            'assign a score of 0 if the response fails to address the question. Additionally, provide '
            'detailed commentary highlighting both strengths and weaknesses in the response.'
            "\n\n The questions and answers are: \n\n'" + formatted_text)
        messages = [
            {
                "role": "system",
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
            },
            {
                "role": "user",
                "content": grade_message
            },
            {
                "role": "user",
                "content": 'Address the student as "you". If the answers are not 2 or 3 sentences long, warn the '
                           'student that they should be.'
            },
            {
                "role": "user",
                "content": 'For pronunciations act as if you heard the answers and they were transcripted as you heard them.'
            },
            {
                "role": "user",
                "content": 'The comments must be long, detailed, justify the grading and suggest improvements.'
            }
        ]
        token_count = count_total_tokens(messages)
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Requesting grading of the answer.")
        response = make_openai_call(GPT_4_O, messages, token_count, ["comment"],
                                    GRADING_TEMPERATURE)
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Answers graded: " + str(response))
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Adding perfect answers to response.")
        # NOTE(review): appends the full make_openai_call dict, while /speaking_task_2
        # extracts ["answer"] — presumably intentional, but worth confirming.
        for i, answer in enumerate(perfect_answers, start=1):
            response['perfect_answer_' + str(i)] = answer
        logging.info("POST - speaking_task_1 - " + str(
            request_id) + " - Adding transcript and fixed texts to response.")
        for i, answer in enumerate(text_answers, start=1):
            response['transcript_' + str(i)] = answer
            response['fixed_text_' + str(i)] = get_speaking_corrections(answer)
        # Keep the overall band consistent with the per-category grades.
        response["overall"] = fix_speaking_overall(response["overall"], response["task_response"])
        logging.info("POST - speaking_task_1 - " + str(request_id) + " - Final response: " + str(response))
        return response
    except Exception as e:
        return str(e), 400
@app.route('/speaking_task_1', methods=['GET'])
@jwt_required()
def get_speaking_task_1_question():
    """Generate a Speaking Part 1 question set over two topics (difficulty defaults to "easy")."""
    requested_difficulty = request.args.get("difficulty", default="easy")
    topic_one = request.args.get("first_topic", default=random.choice(mti_topics))
    topic_two = request.args.get("second_topic", default=random.choice(mti_topics))
    try:
        return gen_speaking_part_1(topic_one, topic_two, requested_difficulty)
    except Exception as error:
        return str(error)
@app.route('/speaking_task_2', methods=['POST'])
@jwt_required()
def grade_speaking_task_2():
    """Grade a Speaking Part 2 (long turn) answer stored as audio in Firebase.

    Expects JSON with ``question`` and ``answer`` (a Firebase file path). The audio
    is downloaded, transcribed and graded via OpenAI; the response also carries a
    model "perfect answer", the transcript and a corrected text. Too-short answers
    get zero bands without a grading call. On error returns (message, 400).
    """
    request_id = uuid.uuid4()
    # Purge stale downloaded audio from previous requests.
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
    logging.info("POST - speaking_task_2 - Received request to grade speaking task 2. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        question = data.get('question')
        answer_firebase_path = data.get('answer')
        logging.info("POST - speaking_task_2 - " + str(request_id) + " - Downloading file " + answer_firebase_path)
        download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
        logging.info("POST - speaking_task_2 - " + str(
            request_id) + " - Downloaded file " + answer_firebase_path + " to " + sound_file_name)
        answer = speech_to_text(sound_file_name)
        logging.info("POST - speaking_task_2 - " + str(request_id) + " - Transcripted answer: " + answer)
        # Shape the grading model is instructed to fill in (shown verbatim in the prompt).
        json_format = {
            "comment": "extensive comment about answer quality",
            "overall": 0.0,
            "task_response": {
                "Fluency and Coherence": {
                    "grade": 0.0,
                    "comment": "extensive comment about fluency and coherence, use examples to justify the grade "
                               "awarded."
                },
                "Lexical Resource": {
                    "grade": 0.0,
                    "comment": "extensive comment about lexical resource, use examples to justify the grade awarded."
                },
                "Grammatical Range and Accuracy": {
                    "grade": 0.0,
                    "comment": "extensive comment about grammatical range and accuracy, use examples to justify the "
                               "grade awarded."
                },
                "Pronunciation": {
                    "grade": 0.0,
                    "comment": "extensive comment about pronunciation on the transcribed answer, use examples to "
                               "justify the grade awarded."
                }
            }
        }
        if has_x_words(answer, 20):
            messages = [
                {
                    "role": "system",
                    "content": (
                        'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
                },
                {
                    "role": "user",
                    "content": (
                        'Evaluate the given Speaking Part 2 response based on the IELTS grading system, ensuring a '
                        'strict assessment that penalizes errors. Deduct points for deviations from the task, and '
                        'assign a score of 0 if the response fails to address the question. Additionally, provide '
                        'detailed commentary highlighting both strengths and weaknesses in the response.'
                        '\n Question: "' + question + '" \n Answer: "' + answer + '"')
                },
                {
                    "role": "user",
                    "content": 'Address the student as "you"'
                }
            ]
            token_count = count_total_tokens(messages)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting grading of the answer.")
            response = make_openai_call(GPT_4_O, messages, token_count, ["comment"],
                                        GRADING_TEMPERATURE)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Answer graded: " + str(response))
            # Ask the model for an ideal answer to the same question.
            perfect_answer_messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: '
                                '{"answer": "perfect answer"}')
                },
                {
                    "role": "user",
                    "content": (
                        'Provide a perfect answer according to ielts grading system to the following '
                        'Speaking Part 2 question: "' + question + '"')
                }
            ]
            token_count = count_total_tokens(perfect_answer_messages)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting perfect answer.")
            response['perfect_answer'] = make_openai_call(GPT_3_5_TURBO,
                                                          perfect_answer_messages,
                                                          token_count,
                                                          ["answer"],
                                                          GEN_QUESTION_TEMPERATURE)["answer"]
            logging.info("POST - speaking_task_2 - " + str(
                request_id) + " - Perfect answer: " + response['perfect_answer'])
            response['transcript'] = answer
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Requesting fixed text.")
            response['fixed_text'] = get_speaking_corrections(answer)
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Fixed text: " + response['fixed_text'])
            # Keep the overall band consistent with the per-category grades.
            response["overall"] = fix_speaking_overall(response["overall"], response["task_response"])
            logging.info("POST - speaking_task_2 - " + str(request_id) + " - Final response: " + str(response))
            return response
        else:
            logging.info("POST - speaking_task_2 - " + str(
                request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer)
            return {
                "comment": "The audio recorded does not contain enough english words to be graded.",
                "overall": 0,
                "task_response": {
                    "Fluency and Coherence": {
                        "grade": 0.0,
                        "comment": ""
                    },
                    "Lexical Resource": {
                        "grade": 0.0,
                        "comment": ""
                    },
                    "Grammatical Range and Accuracy": {
                        "grade": 0.0,
                        "comment": ""
                    },
                    "Pronunciation": {
                        "grade": 0.0,
                        "comment": ""
                    }
                }
            }
    except Exception as e:
        return str(e), 400
    finally:
        # BUG FIX: the original removed the temp file only in the except path —
        # leaking it on success — and that os.remove could itself raise (e.g. when
        # the download had failed and no file exists), masking the real error.
        # Clean up unconditionally and tolerate a missing file.
        if os.path.exists(sound_file_name):
            os.remove(sound_file_name)
@app.route('/speaking_task_2', methods=['GET'])
@jwt_required()
def get_speaking_task_2_question():
    """Generate a Speaking Part 2 (cue card) question."""
    requested_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    requested_topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        return gen_speaking_part_2(requested_topic, requested_difficulty)
    except Exception as error:
        return str(error)
@app.route('/speaking_task_3', methods=['GET'])
@jwt_required()
def get_speaking_task_3_question():
    """Generate a Speaking Part 3 (discussion) question set."""
    requested_difficulty = request.args.get("difficulty", default=random.choice(difficulties))
    requested_topic = request.args.get("topic", default=random.choice(mti_topics))
    try:
        return gen_speaking_part_3(requested_topic, requested_difficulty)
    except Exception as error:
        return str(error)
@app.route('/speaking_task_3', methods=['POST'])
@jwt_required()
def grade_speaking_task_3():
    """Grade a batch of Speaking Part 3 answers (audio files stored in Firebase).

    Expects JSON with ``answers``: a list of {"question": str, "answer": <firebase path>}.
    Each recording is downloaded, transcribed and paired with a model "perfect
    answer"; all Q/A pairs are then graded together in one OpenAI call. Returns the
    grading JSON enriched with ``perfect_answer_N``, ``transcript_N`` and
    ``fixed_text_N`` keys. On error returns (message, 400).
    """
    request_id = uuid.uuid4()
    # Purge stale downloaded audio from previous requests.
    delete_files_older_than_one_day(AUDIO_FILES_PATH)
    logging.info("POST - speaking_task_3 - Received request to grade speaking task 3. "
                 "Use this id to track the logs: " + str(request_id) + " - Request data: " + str(request.get_json()))
    try:
        data = request.get_json()
        answers = data.get('answers')
        text_answers = []
        perfect_answers = []
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Received " + str(len(answers)) + " total answers.")
        for item in answers:
            # Download the recording to a unique temp file, transcribe, then delete it.
            sound_file_name = AUDIO_FILES_PATH + str(uuid.uuid4())
            logging.info("POST - speaking_task_3 - " + str(request_id) + " - Downloading file " + item["answer"])
            download_firebase_file(FIREBASE_BUCKET, item["answer"], sound_file_name)
            logging.info("POST - speaking_task_3 - " + str(
                request_id) + " - Downloaded file " + item["answer"] + " to " + sound_file_name)
            answer_text = speech_to_text(sound_file_name)
            logging.info("POST - speaking_task_3 - " + str(request_id) + " - Transcripted answer: " + answer_text)
            text_answers.append(answer_text)
            # Replace the Firebase path with the transcript so the grading prompt can use it.
            item["answer"] = answer_text
            os.remove(sound_file_name)
            # Any single too-short answer short-circuits the whole request to zero bands.
            if not has_x_words(answer_text, 20):
                logging.info("POST - speaking_task_3 - " + str(
                    request_id) + " - The answer had less words than threshold 20 to be graded. Answer: " + answer_text)
                return {
                    "comment": "The audio recorded does not contain enough english words to be graded.",
                    "overall": 0,
                    "task_response": {
                        "Fluency and Coherence": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Lexical Resource": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Grammatical Range and Accuracy": {
                            "grade": 0.0,
                            "comment": ""
                        },
                        "Pronunciation": {
                            "grade": 0.0,
                            "comment": ""
                        }
                    }
                }
            # Ask the model for an ideal answer to the same question.
            perfect_answer_messages = [
                {
                    "role": "system",
                    "content": ('You are a helpful assistant designed to output JSON on this format: '
                                '{"answer": "perfect answer"}')
                },
                {
                    "role": "user",
                    "content": (
                        'Provide a perfect answer according to ielts grading system to the following '
                        'Speaking Part 3 question: "' + item["question"] + '"')
                }
            ]
            token_count = count_total_tokens(perfect_answer_messages)
            logging.info("POST - speaking_task_3 - " + str(
                request_id) + " - Requesting perfect answer for question: " + item["question"])
            perfect_answers.append(make_openai_call(GPT_3_5_TURBO,
                                                    perfect_answer_messages,
                                                    token_count,
                                                    ["answer"],
                                                    GEN_QUESTION_TEMPERATURE))
        # Shape the grading model is instructed to fill in (shown verbatim in the prompt).
        json_format = {
            "comment": "extensive comment about answer quality",
            "overall": 0.0,
            "task_response": {
                "Fluency and Coherence": {
                    "grade": 0.0,
                    "comment": "extensive comment about fluency and coherence, use examples to justify the grade awarded."
                },
                "Lexical Resource": {
                    "grade": 0.0,
                    "comment": "extensive comment about lexical resource, use examples to justify the grade awarded."
                },
                "Grammatical Range and Accuracy": {
                    "grade": 0.0,
                    "comment": "extensive comment about grammatical range and accuracy, use examples to justify the grade awarded."
                },
                "Pronunciation": {
                    "grade": 0.0,
                    "comment": "extensive comment about pronunciation on the transcribed answer, use examples to justify the grade awarded."
                }
            }
        }
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Formatting answers and questions for prompt.")
        # Render all Q/A pairs as one markdown-ish blob for the grading prompt.
        formatted_text = ""
        for i, entry in enumerate(answers, start=1):
            formatted_text += f"**Question {i}:**\n{entry['question']}\n\n"
            formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Formatted answers and questions for prompt: " + formatted_text)
        # BUG FIX: the original built formatted_text but never appended it to the
        # grading message, so the model was asked to grade answers it never saw
        # (compare the equivalent prompt in grade_speaking_task_1).
        grade_message = (
            "Evaluate the given Speaking Part 3 response based on the IELTS grading system, ensuring a "
            "strict assessment that penalizes errors. Deduct points for deviations from the task, and "
            "assign a score of 0 if the response fails to address the question. Additionally, provide detailed "
            "commentary highlighting both strengths and weaknesses in the response."
            "\n\n The questions and answers are: \n\n'" + formatted_text)
        messages = [
            {
                "role": "system",
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
            },
            {
                "role": "user",
                "content": grade_message
            },
            {
                "role": "user",
                "content": 'Address the student as "you" and pay special attention to coherence between the answers.'
            },
            {
                "role": "user",
                "content": 'For pronunciations act as if you heard the answers and they were transcripted as you heard them.'
            },
            {
                "role": "user",
                "content": 'The comments must be long, detailed, justify the grading and suggest improvements.'
            }
        ]
        token_count = count_total_tokens(messages)
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Requesting grading of the answers.")
        response = make_openai_call(GPT_4_O, messages, token_count, ["comment"], GRADING_TEMPERATURE)
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Answers graded: " + str(response))
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Adding perfect answers to response.")
        for i, answer in enumerate(perfect_answers, start=1):
            response['perfect_answer_' + str(i)] = answer
        logging.info("POST - speaking_task_3 - " + str(
            request_id) + " - Adding transcript and fixed texts to response.")
        for i, answer in enumerate(text_answers, start=1):
            response['transcript_' + str(i)] = answer
            response['fixed_text_' + str(i)] = get_speaking_corrections(answer)
        # Keep the overall band consistent with the per-category grades.
        response["overall"] = fix_speaking_overall(response["overall"], response["task_response"])
        logging.info("POST - speaking_task_3 - " + str(request_id) + " - Final response: " + str(response))
        return response
    except Exception as e:
        return str(e), 400
def fix_speaking_overall(overall: float, task_response: dict):
    """Sanity-check the overall speaking band against the category grades.

    Returns the proposed overall unchanged when it lies within the range spanned
    by the individual category grades; otherwise returns the rounded mean of
    those grades.
    """
    grades = [entry["grade"] for entry in task_response.values()]
    lowest, highest = min(grades), max(grades)
    if lowest <= overall <= highest:
        return overall
    return round(sum(grades) / len(grades), 0)
@app.route('/speaking', methods=['POST'])
@jwt_required()
def save_speaking():
    """Kick off background video generation for a speaking exam and respond immediately."""
    try:
        payload = request.get_json()
        exercises = payload.get('exercises')
        min_timer = payload.get('minTimer', SPEAKING_MIN_TIMER_DEFAULT)
        template = getSpeakingTemplate()
        template["minTimer"] = min_timer
        # A shortened timer marks a partial exam variant.
        is_partial = min_timer < SPEAKING_MIN_TIMER_DEFAULT
        template["variant"] = ExamVariant.PARTIAL.value if is_partial else ExamVariant.FULL.value
        exam_id = str(uuid.uuid4())
        app.logger.info('Received request to save speaking with id: ' + exam_id)
        thread_event.set()
        worker = threading.Thread(
            target=create_videos_and_save_to_db,
            args=(exercises, template, exam_id),
            name=("thread-save-speaking-" + exam_id)
        )
        worker.start()
        app.logger.info('Started thread to save speaking. Thread: ' + worker.name)
        # Respond without waiting for the background job to finish.
        return {**template, "id": exam_id}
    except Exception as error:
        return str(error)
@app.route("/speaking/generate_video_1", methods=['POST'])
@jwt_required()
def generate_video_1():
try:
data = request.get_json()
sp1_questions = []
avatar = data.get("avatar", random.choice(list(AvatarEnum)).name)
request_id = str(uuid.uuid4())
logging.info("POST - generate_video_1 - Received request to generate video 1. "
"Use this id to track the logs: " + str(request_id) + " - Request data: " + str(
request.get_json()))
id_to_name = {
"VADIM_BUSINESS": "MATTHEW",
"GIA_BUSINESS": "VERA",
"ORHAN_BUSINESS": "EDWARD",
"FLORA_BUSINESS": "TANYA",
"SCARLETT_BUSINESS": "KAYLA",
"ETHAN_BUSINESS": "JEROME",
"PARKER_CASUAL": "TYLER",
}
standard_questions = [
"Hello my name is " + id_to_name.get(avatar) + ", what is yours?",
"Do you work or do you study?"
]
questions = standard_questions + data["questions"]
logging.info("POST - generate_video_1 - " + str(request_id) + " - Creating videos for speaking part 1.")
for question in questions:
logging.info("POST - generate_video_1 - " + str(request_id) + " - Creating video for question: " + question)
result = create_video(question, avatar)
logging.info("POST - generate_video_1 - " + str(request_id) + " - Video created: " + result)
if result is not None:
sound_file_path = VIDEO_FILES_PATH + result
firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
logging.info(
"POST - generate_video_1 - " + str(
request_id) + " - Uploading video to firebase: " + firebase_file_path)
url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
logging.info(
"POST - generate_video_1 - " + str(
request_id) + " - Uploaded video to firebase: " + url)
video = {
"text": question,
"video_path": firebase_file_path,
"video_url": url
}
sp1_questions.append(video)
else:
logging.error("POST - generate_video_1 - " + str(
request_id) + " - Failed to create video for part 1 question: " + question)
response = {
"prompts": sp1_questions,
"first_title": data["first_topic"],
"second_title": data["second_topic"],
"type": "interactiveSpeaking",
"id": uuid.uuid4()
}
logging.info(
"POST - generate_video_1 - " + str(
request_id) + " - Finished creating videos for speaking part 1: " + str(response))
return response
except Exception as e:
return str(e)
@app.route("/speaking/generate_video_2", methods=['POST'])
@jwt_required()
def generate_video_2():
    """Create the examiner video for IELTS speaking part 2 (cue-card task).

    Expects a JSON body with "question" and "topic", plus optional "avatar"
    (defaults to a random AvatarEnum member name), "prompts" and "suffix".
    Renders an avatar video for the question, uploads it to Firebase storage
    and returns the speaking-exercise payload. On failure the error message
    is returned as a plain string (HTTP 200 — matching sibling endpoints).
    """
    try:
        data = request.get_json()
        avatar = data.get("avatar", random.choice(list(AvatarEnum)).name)
        prompts = data.get("prompts", [])
        question = data.get("question")
        suffix = data.get("suffix", "")
        # Removed as the examiner should not say what is on the card.
        # question = question + " In your answer you should consider: " + " ".join(prompts) + suffix
        question = question + "\nYou have 1 minute to take notes."
        request_id = str(uuid.uuid4())
        logging.info("POST - generate_video_2 - Received request to generate video 2. "
                     "Use this id to track the logs: " + request_id + " - Request data: " + str(data))
        logging.info("POST - generate_video_2 - " + request_id + " - Creating video for speaking part 2.")
        logging.info("POST - generate_video_2 - " + request_id + " - Creating video for question: " + question)
        result = create_video(question, avatar)
        # BUG FIX: wrap result in str(). create_video may return None, and
        # concatenating None raised a TypeError here, jumping straight to the
        # except handler and making the `result is not None` branch below
        # unreachable.
        logging.info("POST - generate_video_2 - " + request_id + " - Video created: " + str(result))
        if result is not None:
            sound_file_path = VIDEO_FILES_PATH + result
            firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
            logging.info("POST - generate_video_2 - " + request_id
                         + " - Uploading video to firebase: " + firebase_file_path)
            url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
            logging.info("POST - generate_video_2 - " + request_id
                         + " - Uploaded video to firebase: " + url)
            return {
                # The original question text, without the note-taking suffix.
                "text": data["question"],
                "prompts": prompts,
                "title": data["topic"],
                "video_url": url,
                "video_path": firebase_file_path,
                "type": "speaking",
                # str() for consistency with the other exercise generators
                # (and older Flask versions that cannot serialize UUID).
                "id": str(uuid.uuid4()),
                "suffix": suffix
            }
        logging.error("POST - generate_video_2 - " + request_id
                      + " - Failed to create video for part 2 question: " + question)
        return "Failed to create video for part 2 question: " + data["question"]
    except Exception as e:
        return str(e)
@app.route("/speaking/generate_video_3", methods=['POST'])
@jwt_required()
def generate_video_3():
    """Create examiner videos for IELTS speaking part 3 (discussion questions).

    Expects a JSON body with "questions" (list of strings) and "topic", plus an
    optional "avatar". Each question is rendered to an avatar video and
    uploaded to Firebase; a question whose video generation fails is logged and
    skipped. Returns the interactive-speaking exercise payload, or the error
    message as a plain string on failure.
    """
    try:
        data = request.get_json()
        sp3_questions = []
        avatar = data.get("avatar", random.choice(list(AvatarEnum)).name)
        request_id = str(uuid.uuid4())
        logging.info("POST - generate_video_3 - Received request to generate video 3. "
                     "Use this id to track the logs: " + request_id + " - Request data: " + str(data))
        logging.info("POST - generate_video_3 - " + request_id + " - Creating videos for speaking part 3.")
        for question in data["questions"]:
            logging.info("POST - generate_video_3 - " + request_id + " - Creating video for question: " + question)
            result = create_video(question, avatar)
            # BUG FIX: wrap result in str(). create_video may return None, and
            # concatenating None raised a TypeError that aborted the whole
            # request instead of reaching the per-question error branch below.
            logging.info("POST - generate_video_3 - " + request_id + " - Video created: " + str(result))
            if result is not None:
                sound_file_path = VIDEO_FILES_PATH + result
                firebase_file_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + result
                logging.info("POST - generate_video_3 - " + request_id
                             + " - Uploading video to firebase: " + firebase_file_path)
                url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_file_path, sound_file_path)
                logging.info("POST - generate_video_3 - " + request_id
                             + " - Uploaded video to firebase: " + url)
                sp3_questions.append({
                    "text": question,
                    "video_path": firebase_file_path,
                    "video_url": url
                })
            else:
                logging.error("POST - generate_video_3 - " + request_id
                              + " - Failed to create video for part 3 question: " + question)
        response = {
            "prompts": sp3_questions,
            "title": data["topic"],
            "type": "interactiveSpeaking",
            # str() for consistency with the other exercise generators
            # (and older Flask versions that cannot serialize UUID).
            "id": str(uuid.uuid4())
        }
        logging.info("POST - generate_video_3 - " + request_id
                     + " - Finished creating videos for speaking part 3: " + str(response))
        return response
    except Exception as e:
        return str(e)
@app.route('/reading_passage_1', methods=['GET'])
@jwt_required()
def get_reading_passage_1_question():
    """Generate an IELTS reading passage 1 exercise.

    Query parameters: ``topic`` and ``difficulty`` (each defaults to a random
    choice) plus a repeatable ``exercises`` parameter selecting question kinds.
    Errors are returned as a plain string.
    """
    try:
        query = request.args
        chosen_topic = query.get('topic', default=random.choice(topics))
        chosen_difficulty = query.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_1(chosen_topic, chosen_difficulty, query.getlist('exercises'))
    except Exception as err:
        return str(err)
@app.route('/reading_passage_2', methods=['GET'])
@jwt_required()
def get_reading_passage_2_question():
    """Generate an IELTS reading passage 2 exercise.

    Query parameters: ``topic`` and ``difficulty`` (each defaults to a random
    choice) plus a repeatable ``exercises`` parameter selecting question kinds.
    Errors are returned as a plain string.
    """
    try:
        query = request.args
        chosen_topic = query.get('topic', default=random.choice(topics))
        chosen_difficulty = query.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_2(chosen_topic, chosen_difficulty, query.getlist('exercises'))
    except Exception as err:
        return str(err)
@app.route('/reading_passage_3', methods=['GET'])
@jwt_required()
def get_reading_passage_3_question():
    """Generate an IELTS reading passage 3 exercise.

    Query parameters: ``topic`` and ``difficulty`` (each defaults to a random
    choice) plus a repeatable ``exercises`` parameter selecting question kinds.
    Errors are returned as a plain string.
    """
    try:
        query = request.args
        chosen_topic = query.get('topic', default=random.choice(topics))
        chosen_difficulty = query.get("difficulty", default=random.choice(difficulties))
        return gen_reading_passage_3(chosen_topic, chosen_difficulty, query.getlist('exercises'))
    except Exception as err:
        return str(err)
@app.route('/level', methods=['GET'])
@jwt_required()
def get_level_exam():
    """Generate the standard level exam: 25 multiple-choice questions.

    Returns the exam payload expected by the client (a single exercise group,
    25-minute minimum timer). Errors are returned as a plain string.
    """
    try:
        question_count = 25
        generated = gen_multiple_choice_level(mongo_db, question_count)
        return {
            "exercises": [generated],
            "isDiagnostic": False,
            "minTimer": 25,
            "module": "level"
        }
    except Exception as err:
        return str(err)
@app.route('/level_utas', methods=['GET'])
@jwt_required()
def get_level_utas():
    """Generate a UTAS-style level exam with five fixed parts.

    Parts and question-id ranges: 45 blank-space multiple-choice questions
    (ids 1-45, built in three batches of 15), 15 underlined multiple-choice
    questions (46-60), two blank-space texts (12 questions from 61, 14 from
    73) and a reading passage (from 87). Errors are returned as a string.
    """
    try:
        # Formats
        # Section scaffolds; each "questions" slot is filled in below.
        mc = {
            "id": str(uuid.uuid4()),
            "prompt": "Choose the correct word or group of words that completes the sentences.",
            "questions": None,
            "type": "multipleChoice",
            "part": 1
        }
        umc = {
            "id": str(uuid.uuid4()),
            "prompt": "Choose the underlined word or group of words that is not correct.",
            "questions": None,
            "type": "multipleChoice",
            "part": 2
        }
        bs_1 = {
            "id": str(uuid.uuid4()),
            "prompt": "Read the text and write the correct word for each space.",
            "questions": None,
            "type": "blankSpaceText",
            "part": 3
        }
        bs_2 = {
            "id": str(uuid.uuid4()),
            "prompt": "Read the text and write the correct word for each space.",
            "questions": None,
            "type": "blankSpaceText",
            "part": 4
        }
        reading = {
            "id": str(uuid.uuid4()),
            "prompt": "Read the text and answer the questions below.",
            "questions": None,
            "type": "readingExercises",
            "part": 5
        }
        # Accumulates each generated batch and is passed back into the next
        # call — presumably so the generator avoids repeating questions;
        # TODO confirm against gen_multiple_choice_blank_space_utas.
        all_mc_questions = []
        # PART 1
        mc_exercises1 = gen_multiple_choice_blank_space_utas(15, 1, all_mc_questions)
        print(json.dumps(mc_exercises1, indent=4))
        all_mc_questions.append(mc_exercises1)
        # PART 2
        mc_exercises2 = gen_multiple_choice_blank_space_utas(15, 16, all_mc_questions)
        print(json.dumps(mc_exercises2, indent=4))
        all_mc_questions.append(mc_exercises2)
        # PART 3
        mc_exercises3 = gen_multiple_choice_blank_space_utas(15, 31, all_mc_questions)
        print(json.dumps(mc_exercises3, indent=4))
        all_mc_questions.append(mc_exercises3)
        # Flatten the three 15-question batches into one 45-question part.
        mc_exercises = mc_exercises1['questions'] + mc_exercises2['questions'] + mc_exercises3['questions']
        print(json.dumps(mc_exercises, indent=4))
        mc["questions"] = mc_exercises
        # Underlined mc
        underlined_mc = gen_multiple_choice_underlined_utas(15, 46)
        print(json.dumps(underlined_mc, indent=4))
        umc["questions"] = underlined_mc
        # Blank Space text 1
        blank_space_text_1 = gen_blank_space_text_utas(12, 61, 250)
        print(json.dumps(blank_space_text_1, indent=4))
        bs_1["questions"] = blank_space_text_1
        # Blank Space text 2
        blank_space_text_2 = gen_blank_space_text_utas(14, 73, 350)
        print(json.dumps(blank_space_text_2, indent=4))
        bs_2["questions"] = blank_space_text_2
        # Reading text
        # Arguments look like (db, first question id, short-answer qty,
        # multiple-choice qty) — TODO confirm against the helper's signature;
        # /custom_level calls the same helper without mongo_db.
        reading_text = gen_reading_passage_utas(mongo_db, 87, 10, 4)
        print(json.dumps(reading_text, indent=4))
        reading["questions"] = reading_text
        return {
            "exercises": {
                "blankSpaceMultipleChoice": mc,
                "underlinedMultipleChoice": umc,
                "blankSpaceText1": bs_1,
                "blankSpaceText2": bs_2,
                "readingExercises": reading,
            },
            "isDiagnostic": False,
            "minTimer": 25,
            "module": "level"
        }
    except Exception as e:
        return str(e)
from enum import Enum
class CustomLevelExerciseTypes(Enum):
    """Exercise-type codes accepted by /custom_level's ``exercise_<i>_type``
    query parameter; each value selects a generator branch in
    get_custom_level."""
    MULTIPLE_CHOICE_4 = "multiple_choice_4"
    MULTIPLE_CHOICE_BLANK_SPACE = "multiple_choice_blank_space"
    MULTIPLE_CHOICE_UNDERLINED = "multiple_choice_underlined"
    FILL_BLANKS_MC = "fill_blanks_mc"
    BLANK_SPACE_TEXT = "blank_space_text"
    READING_PASSAGE_UTAS = "reading_passage_utas"
    WRITING_LETTER = "writing_letter"
    WRITING_2 = "writing_2"
    SPEAKING_1 = "speaking_1"
    SPEAKING_2 = "speaking_2"
    SPEAKING_3 = "speaking_3"
    READING_1 = "reading_1"
    READING_2 = "reading_2"
    READING_3 = "reading_3"
    LISTENING_1 = "listening_1"
    LISTENING_2 = "listening_2"
    LISTENING_3 = "listening_3"
    LISTENING_4 = "listening_4"
@app.route('/custom_level', methods=['GET'])
@jwt_required()
def get_custom_level():
    """Build a fully custom level exam from URL query parameters.

    Query-parameter scheme (exercise indices are 1-based):
      * ``nr_exercises`` — number of exercises to generate (required).
      * ``exercise_<i>_type`` — a CustomLevelExerciseTypes value.
      * ``exercise_<i>_difficulty`` / ``_topic`` / ``_topic_2`` /
        ``_text_size`` — generation knobs with randomised / fixed defaults.
      * ``exercise_<i>_<kind>_qty`` — per-question-kind quantities; the
        default ``-1`` means "this kind was not requested".

    NOTE(review): unlike the sibling endpoints this view has no try/except,
    so any generator failure surfaces as an HTTP 500.
    """
    nr_exercises = int(request.args.get('nr_exercises'))
    # Running question-id counter shared across all exercises so ids are
    # unique within the generated exam.
    exercise_id = 1
    response = {
        "exercises": {},
        "module": "level"
    }
    for i in range(1, nr_exercises + 1, 1):
        # Read every possible knob up front; -1 marks "not requested".
        exercise_type = request.args.get('exercise_' + str(i) + '_type')
        exercise_difficulty = request.args.get('exercise_' + str(i) + '_difficulty',
                                               random.choice(['easy', 'medium', 'hard']))
        exercise_qty = int(request.args.get('exercise_' + str(i) + '_qty', -1))
        exercise_topic = request.args.get('exercise_' + str(i) + '_topic', random.choice(topics))
        exercise_topic_2 = request.args.get('exercise_' + str(i) + '_topic_2', random.choice(topics))
        exercise_text_size = int(request.args.get('exercise_' + str(i) + '_text_size', 700))
        exercise_sa_qty = int(request.args.get('exercise_' + str(i) + '_sa_qty', -1))
        exercise_mc_qty = int(request.args.get('exercise_' + str(i) + '_mc_qty', -1))
        exercise_mc3_qty = int(request.args.get('exercise_' + str(i) + '_mc3_qty', -1))
        exercise_fillblanks_qty = int(request.args.get('exercise_' + str(i) + '_fillblanks_qty', -1))
        exercise_writeblanks_qty = int(request.args.get('exercise_' + str(i) + '_writeblanks_qty', -1))
        exercise_writeblanksquestions_qty = int(
            request.args.get('exercise_' + str(i) + '_writeblanksquestions_qty', -1))
        exercise_writeblanksfill_qty = int(request.args.get('exercise_' + str(i) + '_writeblanksfill_qty', -1))
        exercise_writeblanksform_qty = int(request.args.get('exercise_' + str(i) + '_writeblanksform_qty', -1))
        exercise_truefalse_qty = int(request.args.get('exercise_' + str(i) + '_truefalse_qty', -1))
        exercise_paragraphmatch_qty = int(request.args.get('exercise_' + str(i) + '_paragraphmatch_qty', -1))
        exercise_ideamatch_qty = int(request.args.get('exercise_' + str(i) + '_ideamatch_qty', -1))
        # Multiple-choice variants are generated in batches of at most 15
        # questions, feeding previously generated questions back in —
        # presumably to avoid duplicates; TODO confirm with the generators.
        if exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_4.value:
            response["exercises"]["exercise_" + str(i)] = {}
            response["exercises"]["exercise_" + str(i)]["questions"] = []
            response["exercises"]["exercise_" + str(i)]["type"] = "multipleChoice"
            while exercise_qty > 0:
                if exercise_qty - 15 > 0:
                    qty = 15
                else:
                    qty = exercise_qty
                response["exercises"]["exercise_" + str(i)]["questions"].extend(
                    generate_level_mc(exercise_id, qty,
                                      response["exercises"]["exercise_" + str(i)]["questions"])["questions"])
                exercise_id = exercise_id + qty
                exercise_qty = exercise_qty - qty
        elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_BLANK_SPACE.value:
            response["exercises"]["exercise_" + str(i)] = {}
            response["exercises"]["exercise_" + str(i)]["questions"] = []
            response["exercises"]["exercise_" + str(i)]["type"] = "multipleChoice"
            while exercise_qty > 0:
                if exercise_qty - 15 > 0:
                    qty = 15
                else:
                    qty = exercise_qty
                response["exercises"]["exercise_" + str(i)]["questions"].extend(
                    gen_multiple_choice_blank_space_utas(qty, exercise_id,
                                                         response["exercises"]["exercise_" + str(i)]["questions"])[
                        "questions"])
                exercise_id = exercise_id + qty
                exercise_qty = exercise_qty - qty
        elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_UNDERLINED.value:
            response["exercises"]["exercise_" + str(i)] = {}
            response["exercises"]["exercise_" + str(i)]["questions"] = []
            response["exercises"]["exercise_" + str(i)]["type"] = "multipleChoice"
            while exercise_qty > 0:
                if exercise_qty - 15 > 0:
                    qty = 15
                else:
                    qty = exercise_qty
                response["exercises"]["exercise_" + str(i)]["questions"].extend(
                    gen_multiple_choice_underlined_utas(qty, exercise_id,
                                                        response["exercises"]["exercise_" + str(i)]["questions"])[
                        "questions"])
                exercise_id = exercise_id + qty
                exercise_qty = exercise_qty - qty
        elif exercise_type == CustomLevelExerciseTypes.FILL_BLANKS_MC.value:
            response["exercises"]["exercise_" + str(i)] = gen_fill_blanks_mc_utas(
                exercise_qty, exercise_id, exercise_text_size
            )
            response["exercises"]["exercise_" + str(i)]["type"] = "fillBlanks"
            response["exercises"]["exercise_" + str(i)]["variant"] = "mc"
            exercise_id = exercise_id + exercise_qty
        elif exercise_type == CustomLevelExerciseTypes.BLANK_SPACE_TEXT.value:
            response["exercises"]["exercise_" + str(i)] = gen_blank_space_text_utas(exercise_qty, exercise_id,
                                                                                   exercise_text_size)
            response["exercises"]["exercise_" + str(i)]["type"] = "blankSpaceText"
            exercise_id = exercise_id + exercise_qty
        elif exercise_type == CustomLevelExerciseTypes.READING_PASSAGE_UTAS.value:
            # NOTE(review): get_level_utas calls this helper with mongo_db as
            # the first argument; here it is omitted — confirm the intended
            # signature. Also, exercise_id advances by exercise_qty (default
            # -1) even though generation is driven by sa/mc quantities —
            # verify this is deliberate.
            response["exercises"]["exercise_" + str(i)] = gen_reading_passage_utas(exercise_id, exercise_sa_qty,
                                                                                  exercise_mc_qty, exercise_topic)
            response["exercises"]["exercise_" + str(i)]["type"] = "readingExercises"
            exercise_id = exercise_id + exercise_qty
        elif exercise_type == CustomLevelExerciseTypes.WRITING_LETTER.value:
            response["exercises"]["exercise_" + str(i)] = gen_writing_task_1(exercise_topic, exercise_difficulty)
            response["exercises"]["exercise_" + str(i)]["type"] = "writing"
            exercise_id = exercise_id + 1
        elif exercise_type == CustomLevelExerciseTypes.WRITING_2.value:
            response["exercises"]["exercise_" + str(i)] = gen_writing_task_2(exercise_topic, exercise_difficulty)
            response["exercises"]["exercise_" + str(i)]["type"] = "writing"
            exercise_id = exercise_id + 1
        elif exercise_type == CustomLevelExerciseTypes.SPEAKING_1.value:
            response["exercises"]["exercise_" + str(i)] = (
                gen_speaking_part_1(exercise_topic, exercise_topic_2, exercise_difficulty))
            response["exercises"]["exercise_" + str(i)]["type"] = "interactiveSpeaking"
            exercise_id = exercise_id + 1
        elif exercise_type == CustomLevelExerciseTypes.SPEAKING_2.value:
            response["exercises"]["exercise_" + str(i)] = gen_speaking_part_2(exercise_topic, exercise_difficulty)
            response["exercises"]["exercise_" + str(i)]["type"] = "speaking"
            exercise_id = exercise_id + 1
        elif exercise_type == CustomLevelExerciseTypes.SPEAKING_3.value:
            response["exercises"]["exercise_" + str(i)] = gen_speaking_part_3(exercise_topic, exercise_difficulty)
            response["exercises"]["exercise_" + str(i)]["type"] = "interactiveSpeaking"
            exercise_id = exercise_id + 1
        # For reading/listening sections the requested question kinds and
        # their quantities are collected into a list plus a FIFO queue (one
        # quantity per kind, in the same order) and handed to the generator.
        elif exercise_type == CustomLevelExerciseTypes.READING_1.value:
            exercises = []
            exercise_qty_q = queue.Queue()
            total_qty = 0
            if exercise_fillblanks_qty != -1:
                exercises.append('fillBlanks')
                exercise_qty_q.put(exercise_fillblanks_qty)
                total_qty = total_qty + exercise_fillblanks_qty
            if exercise_writeblanks_qty != -1:
                exercises.append('writeBlanks')
                exercise_qty_q.put(exercise_writeblanks_qty)
                total_qty = total_qty + exercise_writeblanks_qty
            if exercise_truefalse_qty != -1:
                exercises.append('trueFalse')
                exercise_qty_q.put(exercise_truefalse_qty)
                total_qty = total_qty + exercise_truefalse_qty
            if exercise_paragraphmatch_qty != -1:
                exercises.append('paragraphMatch')
                exercise_qty_q.put(exercise_paragraphmatch_qty)
                total_qty = total_qty + exercise_paragraphmatch_qty
            response["exercises"]["exercise_" + str(i)] = gen_reading_passage_1(exercise_topic, exercise_difficulty,
                                                                               exercises, exercise_qty_q, exercise_id)
            response["exercises"]["exercise_" + str(i)]["type"] = "reading"
            exercise_id = exercise_id + total_qty
        elif exercise_type == CustomLevelExerciseTypes.READING_2.value:
            exercises = []
            exercise_qty_q = queue.Queue()
            total_qty = 0
            if exercise_fillblanks_qty != -1:
                exercises.append('fillBlanks')
                exercise_qty_q.put(exercise_fillblanks_qty)
                total_qty = total_qty + exercise_fillblanks_qty
            if exercise_writeblanks_qty != -1:
                exercises.append('writeBlanks')
                exercise_qty_q.put(exercise_writeblanks_qty)
                total_qty = total_qty + exercise_writeblanks_qty
            if exercise_truefalse_qty != -1:
                exercises.append('trueFalse')
                exercise_qty_q.put(exercise_truefalse_qty)
                total_qty = total_qty + exercise_truefalse_qty
            if exercise_paragraphmatch_qty != -1:
                exercises.append('paragraphMatch')
                exercise_qty_q.put(exercise_paragraphmatch_qty)
                total_qty = total_qty + exercise_paragraphmatch_qty
            response["exercises"]["exercise_" + str(i)] = gen_reading_passage_2(exercise_topic, exercise_difficulty,
                                                                               exercises, exercise_qty_q, exercise_id)
            response["exercises"]["exercise_" + str(i)]["type"] = "reading"
            exercise_id = exercise_id + total_qty
        elif exercise_type == CustomLevelExerciseTypes.READING_3.value:
            exercises = []
            exercise_qty_q = queue.Queue()
            total_qty = 0
            if exercise_fillblanks_qty != -1:
                exercises.append('fillBlanks')
                exercise_qty_q.put(exercise_fillblanks_qty)
                total_qty = total_qty + exercise_fillblanks_qty
            if exercise_writeblanks_qty != -1:
                exercises.append('writeBlanks')
                exercise_qty_q.put(exercise_writeblanks_qty)
                total_qty = total_qty + exercise_writeblanks_qty
            if exercise_truefalse_qty != -1:
                exercises.append('trueFalse')
                exercise_qty_q.put(exercise_truefalse_qty)
                total_qty = total_qty + exercise_truefalse_qty
            if exercise_paragraphmatch_qty != -1:
                exercises.append('paragraphMatch')
                exercise_qty_q.put(exercise_paragraphmatch_qty)
                total_qty = total_qty + exercise_paragraphmatch_qty
            # Reading 3 additionally supports idea-match questions.
            if exercise_ideamatch_qty != -1:
                exercises.append('ideaMatch')
                exercise_qty_q.put(exercise_ideamatch_qty)
                total_qty = total_qty + exercise_ideamatch_qty
            response["exercises"]["exercise_" + str(i)] = gen_reading_passage_3(exercise_topic, exercise_difficulty,
                                                                               exercises, exercise_qty_q, exercise_id)
            response["exercises"]["exercise_" + str(i)]["type"] = "reading"
            exercise_id = exercise_id + total_qty
        elif exercise_type == CustomLevelExerciseTypes.LISTENING_1.value:
            exercises = []
            exercise_qty_q = queue.Queue()
            total_qty = 0
            if exercise_mc_qty != -1:
                exercises.append('multipleChoice')
                exercise_qty_q.put(exercise_mc_qty)
                total_qty = total_qty + exercise_mc_qty
            if exercise_writeblanksquestions_qty != -1:
                exercises.append('writeBlanksQuestions')
                exercise_qty_q.put(exercise_writeblanksquestions_qty)
                total_qty = total_qty + exercise_writeblanksquestions_qty
            if exercise_writeblanksfill_qty != -1:
                exercises.append('writeBlanksFill')
                exercise_qty_q.put(exercise_writeblanksfill_qty)
                total_qty = total_qty + exercise_writeblanksfill_qty
            if exercise_writeblanksform_qty != -1:
                exercises.append('writeBlanksForm')
                exercise_qty_q.put(exercise_writeblanksform_qty)
                total_qty = total_qty + exercise_writeblanksform_qty
            response["exercises"]["exercise_" + str(i)] = gen_listening_section_1(exercise_topic, exercise_difficulty,
                                                                                 exercises, exercise_qty_q,
                                                                                 exercise_id)
            response["exercises"]["exercise_" + str(i)]["type"] = "listening"
            exercise_id = exercise_id + total_qty
        elif exercise_type == CustomLevelExerciseTypes.LISTENING_2.value:
            exercises = []
            exercise_qty_q = queue.Queue()
            total_qty = 0
            if exercise_mc_qty != -1:
                exercises.append('multipleChoice')
                exercise_qty_q.put(exercise_mc_qty)
                total_qty = total_qty + exercise_mc_qty
            if exercise_writeblanksquestions_qty != -1:
                exercises.append('writeBlanksQuestions')
                exercise_qty_q.put(exercise_writeblanksquestions_qty)
                total_qty = total_qty + exercise_writeblanksquestions_qty
            response["exercises"]["exercise_" + str(i)] = gen_listening_section_2(exercise_topic, exercise_difficulty,
                                                                                 exercises, exercise_qty_q,
                                                                                 exercise_id)
            response["exercises"]["exercise_" + str(i)]["type"] = "listening"
            exercise_id = exercise_id + total_qty
        elif exercise_type == CustomLevelExerciseTypes.LISTENING_3.value:
            exercises = []
            exercise_qty_q = queue.Queue()
            total_qty = 0
            # Listening 3 uses 3-option multiple choice instead of 4-option.
            if exercise_mc3_qty != -1:
                exercises.append('multipleChoice3Options')
                exercise_qty_q.put(exercise_mc3_qty)
                total_qty = total_qty + exercise_mc3_qty
            if exercise_writeblanksquestions_qty != -1:
                exercises.append('writeBlanksQuestions')
                exercise_qty_q.put(exercise_writeblanksquestions_qty)
                total_qty = total_qty + exercise_writeblanksquestions_qty
            response["exercises"]["exercise_" + str(i)] = gen_listening_section_3(exercise_topic, exercise_difficulty,
                                                                                 exercises, exercise_qty_q,
                                                                                 exercise_id)
            response["exercises"]["exercise_" + str(i)]["type"] = "listening"
            exercise_id = exercise_id + total_qty
        elif exercise_type == CustomLevelExerciseTypes.LISTENING_4.value:
            exercises = []
            exercise_qty_q = queue.Queue()
            total_qty = 0
            if exercise_mc_qty != -1:
                exercises.append('multipleChoice')
                exercise_qty_q.put(exercise_mc_qty)
                total_qty = total_qty + exercise_mc_qty
            if exercise_writeblanksquestions_qty != -1:
                exercises.append('writeBlanksQuestions')
                exercise_qty_q.put(exercise_writeblanksquestions_qty)
                total_qty = total_qty + exercise_writeblanksquestions_qty
            if exercise_writeblanksfill_qty != -1:
                exercises.append('writeBlanksFill')
                exercise_qty_q.put(exercise_writeblanksfill_qty)
                total_qty = total_qty + exercise_writeblanksfill_qty
            if exercise_writeblanksform_qty != -1:
                exercises.append('writeBlanksForm')
                exercise_qty_q.put(exercise_writeblanksform_qty)
                total_qty = total_qty + exercise_writeblanksform_qty
            response["exercises"]["exercise_" + str(i)] = gen_listening_section_4(exercise_topic, exercise_difficulty,
                                                                                 exercises, exercise_qty_q,
                                                                                 exercise_id)
            response["exercises"]["exercise_" + str(i)]["type"] = "listening"
            exercise_id = exercise_id + total_qty
    return response
@app.route('/grade_short_answers', methods=['POST'])
@jwt_required()
def grade_short_answers():
    """Grade short answers against their source text with GPT-4o.

    The posted body (text, questions and answers) is forwarded verbatim to
    the model, which replies as JSON in the shape shown below, marking each
    answer correct/incorrect and supplying the correct answer when wrong.
    Errors are returned as a plain string.
    """
    data = request.get_json()
    # Example shape the model is instructed to emit.
    json_format = {
        "exercises": [
            {
                "id": 1,
                "correct": True,
                "correct_answer": " correct answer if wrong"
            }
        ]
    }
    try:
        system_message = {
            "role": "system",
            "content": (
                'You are a helpful assistant designed to output JSON on this format: ' + str(json_format))
        }
        user_message = {
            "role": "user",
            "content": 'Grade these answers according to the text content and write a correct answer if they are '
                       'wrong. Text, questions and answers:\n ' + str(data)
        }
        messages = [system_message, user_message]
        token_count = count_total_tokens(messages)
        return make_openai_call(GPT_4_O, messages, token_count, GEN_FIELDS, GEN_QUESTION_TEMPERATURE)
    except Exception as err:
        return str(err)
@app.route('/fetch_tips', methods=['POST'])
@jwt_required()
def fetch_answer_tips():
    """Return GPT-generated tips for an answered question.

    Body fields: "context", "question", "answer", "correct_answer".
    Builds the tip prompt, counts its tokens, and asks GPT-3.5-turbo for
    feedback. Errors are returned as a plain string.
    """
    try:
        data = request.get_json()
        context = data.get('context')
        question = data.get('question')
        answer = data.get('answer')
        correct_answer = data.get('correct_answer')
        messages = get_question_tips(question, answer, correct_answer, context)
        # Sum token counts over every message carrying textual content.
        # (Replaces an opaque reduce/map/filter chain with the equivalent,
        # idiomatic generator expression.)
        token_count = sum(count_tokens(m["content"])['n_tokens']
                          for m in messages if "content" in m)
        response = make_openai_call(GPT_3_5_TURBO, messages, token_count, None, TIPS_TEMPERATURE)
        if isinstance(response, str):
            # Strip a leading "label:" prefix the model sometimes prepends.
            response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response)
        return response
    except Exception as e:
        return str(e)
@app.route('/grading_summary', methods=['POST'])
@jwt_required()
def grading_summary():
    """Compute an overall grading summary for a set of graded sections.

    Body: {'sections': [{'code', 'name', 'grade'}, ...]}.
    Response: the same sections, each enriched with 'evaluation' and
    'suggestions'. Errors are returned as a plain string.
    """
    try:
        body = request.get_json()
        return calculate_grading_summary(body)
    except Exception as err:
        return str(err)
@app.route('/training_content', methods=['POST'])
@jwt_required()
def training_content():
    """Return knowledge-base-backed training tips for the posted payload.

    Errors are logged and returned as a plain string.
    """
    try:
        payload = request.get_json()
        return tc_service.get_tips(payload)
    except Exception as err:
        app.logger.error(str(err))
        return str(err)
# TODO: create a doc in firestore with a status and get its id, run this in a thread and modify the doc in firestore,
# return the id right away, in generation view poll for the id
@app.route('/upload_level', methods=['POST'])
def upload_file():
    """Generate a level exam from an uploaded file.

    Expects a multipart form with a "file" field. Returns the generated
    level (200), or a plain-text error with status 400 when the file is
    missing or no file was selected.

    NOTE(review): unlike most endpoints here, this route has no
    @jwt_required() — confirm that is intentional.
    """
    if 'file' not in request.files:
        return 'File wasn\'t uploaded', 400
    file = request.files['file']
    # An empty filename means the form was submitted without choosing a file.
    if file.filename == '':
        return 'No selected file', 400
    # BUG FIX: the original guarded the success path with `if file:` and had
    # no else, so a falsy file object made the view return None and Flask
    # raise a TypeError. Handle that case explicitly instead.
    if not file:
        return 'No selected file', 400
    return upload_level_service.generate_level_from_file(file), 200
@app.route('/batch_users', methods=['POST'])
def create_users_batch():
    """Create a batch of users from the posted JSON payload.

    Errors are logged and returned as a plain string.
    """
    try:
        payload = request.get_json()
        return batch_users_service.batch_users(payload)
    except Exception as err:
        app.logger.error(str(err))
        return str(err)
# Local development entry point; in production the app should be served by a
# WSGI server instead of Flask's built-in one.
if __name__ == '__main__':
    app.run()