Writing and speaking rework, some changes to module upload
This commit is contained in:
108
app/services/impl/exam/evaluation.py
Normal file
108
app/services/impl/exam/evaluation.py
Normal file
@@ -0,0 +1,108 @@
|
||||
import logging
|
||||
from typing import Union, Dict, List
|
||||
|
||||
from fastapi import BackgroundTasks
|
||||
|
||||
from app.dtos.evaluation import EvaluationType
|
||||
from app.dtos.speaking import GradeSpeakingItem
|
||||
from app.dtos.writing import WritingGradeTaskDTO
|
||||
from app.repositories.abc import IDocumentStore
|
||||
from app.services.abc import IWritingService, ISpeakingService, IEvaluationService
|
||||
|
||||
|
||||
class EvaluationService(IEvaluationService):
    """Coordinates exam evaluations.

    Persists evaluation records in the document store and runs writing /
    speaking grading as FastAPI background tasks, updating the stored record
    with either the grading result or the error text.
    """

    def __init__(self, db: IDocumentStore, writing_service: IWritingService, speaking_service: ISpeakingService):
        self._db = db
        self._writing_service = writing_service
        self._speaking_service = speaking_service
        self._logger = logging.getLogger(__name__)

    async def create_evaluation(
            self,
            session_id: str,
            exercise_id: str,
            eval_type: EvaluationType,
            task: int
    ):
        """Persist a new evaluation record in the 'pending' state."""
        await self._db.save_to_db(
            "evaluation",
            {
                "session_id": session_id,
                "exercise_id": exercise_id,
                "type": eval_type,
                "task": task,
                "status": "pending"
            }
        )

    async def begin_evaluation(
            self,
            session_id: str, task: int,
            exercise_id: str, exercise_type: str,
            solution: Union[WritingGradeTaskDTO, List[GradeSpeakingItem]],
            background_tasks: BackgroundTasks
    ):
        """Schedule grading of ``solution`` as a background task.

        Returns immediately; the outcome lands in the evaluation record and
        can be polled via :meth:`get_evaluations`.
        """
        background_tasks.add_task(
            self._begin_evaluation,
            session_id, task,
            exercise_id, exercise_type,
            solution
        )

    async def _begin_evaluation(
            self, session_id: str, task: int,
            exercise_id: str, exercise_type: str,
            solution: Union[WritingGradeTaskDTO, List[GradeSpeakingItem]]
    ):
        """Grade ``solution`` with the matching service and record the outcome.

        On success the evaluation record is marked 'completed' with the
        grading result; on any failure it is marked 'error' with the
        exception text, so pollers never hang on a silently failed task.
        """
        try:
            if exercise_type == EvaluationType.WRITING:
                result = await self._writing_service.grade_writing_task(
                    task,
                    solution.question,
                    solution.answer
                )
            else:
                # Anything that is not WRITING is treated as a speaking task.
                result = await self._speaking_service.grade_speaking_task(
                    task,
                    solution
                )

            await self._db.update(
                "evaluation",
                {
                    "exercise_id": exercise_id,
                    "session_id": session_id,
                },
                {
                    "$set": {
                        "status": "completed",
                        "result": result,
                    }
                }
            )

        except Exception as e:
            # logger.exception preserves the traceback of the failure;
            # lazy %-args avoid building the message if the level is filtered.
            self._logger.exception(
                "Error processing evaluation %s - %s: %s",
                session_id, exercise_id, e
            )
            await self._db.update(
                "evaluation",
                {
                    "exercise_id": exercise_id,
                    "session_id": session_id
                },
                {
                    "$set": {
                        "status": "error",
                        "error": str(e),
                    }
                }
            )

    async def get_evaluations(self, session_id: str, status: str) -> List[Dict]:
        """Return all evaluation records for ``session_id`` with the given status."""
        return await self._db.find(
            "evaluation",
            {
                "session_id": session_id,
                "status": status
            }
        )
|
||||
@@ -1,8 +1,10 @@
|
||||
from uuid import uuid4
|
||||
|
||||
import aiofiles
|
||||
import os
|
||||
from logging import getLogger
|
||||
|
||||
from typing import Dict, Any, Coroutine, Optional
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
import pdfplumber
|
||||
from fastapi import UploadFile
|
||||
@@ -21,20 +23,19 @@ class UploadLevelModule:
|
||||
self._logger = getLogger(__name__)
|
||||
self._llm = openai
|
||||
|
||||
async def generate_level_from_file(self, file: UploadFile, solutions: Optional[UploadFile]) -> Dict[str, Any] | None:
|
||||
ext, path_id = await FileHelper.save_upload(file)
|
||||
FileHelper.convert_file_to_pdf(
|
||||
f'./tmp/{path_id}/upload.{ext}', f'./tmp/{path_id}/exercises.pdf'
|
||||
)
|
||||
file_has_images = self._check_pdf_for_images(f'./tmp/{path_id}/exercises.pdf')
|
||||
async def generate_level_from_file(self, exercises: UploadFile, solutions: Optional[UploadFile]) -> Dict[str, Any] | None:
|
||||
path_id = str(uuid4())
|
||||
ext, _ = await FileHelper.save_upload(exercises, "exercises", path_id)
|
||||
FileHelper.convert_file_to_html(f'./tmp/{path_id}/exercises.{ext}', f'./tmp/{path_id}/exercises.html')
|
||||
|
||||
if not file_has_images:
|
||||
FileHelper.convert_file_to_html(f'./tmp/{path_id}/upload.{ext}', f'./tmp/{path_id}/exercises.html')
|
||||
if solutions:
|
||||
ext, _ = await FileHelper.save_upload(solutions, "solutions", path_id)
|
||||
FileHelper.convert_file_to_html(f'./tmp/{path_id}/solutions.{ext}', f'./tmp/{path_id}/solutions.html')
|
||||
|
||||
completion: Coroutine[Any, Any, Exam] = (
|
||||
self._png_completion(path_id) if file_has_images else self._html_completion(path_id)
|
||||
)
|
||||
response = await completion
|
||||
#completion: Coroutine[Any, Any, Exam] = (
|
||||
# self._png_completion(path_id) if file_has_images else self._html_completion(path_id)
|
||||
#)
|
||||
response = await self._html_completion(path_id)
|
||||
|
||||
FileHelper.remove_directory(f'./tmp/{path_id}')
|
||||
|
||||
@@ -42,6 +43,7 @@ class UploadLevelModule:
|
||||
return self.fix_ids(response.model_dump(exclude_none=True))
|
||||
return None
|
||||
|
||||
|
||||
@staticmethod
|
||||
@suppress_loggers()
|
||||
def _check_pdf_for_images(pdf_path: str) -> bool:
|
||||
|
||||
@@ -98,7 +98,7 @@ class ImportReadingModule:
|
||||
]
|
||||
}
|
||||
],
|
||||
"text": "<numbered questions with format: <question text>{{<question number>}}\\n>",
|
||||
"text": "<numbered questions with format in square brackets: [<question text>{{<question number>}}\\\\n] notice how there is a double backslash before the n -> I want an escaped newline in your output> ",
|
||||
"type": "writeBlanks",
|
||||
"prompt": "<specific instructions for this exercise section>"
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import aiofiles
|
||||
import re
|
||||
import uuid
|
||||
from typing import Dict, List, Optional
|
||||
@@ -9,6 +10,7 @@ from app.configs.constants import (
|
||||
FieldsAndExercises, GPTModels, TemperatureSettings,
|
||||
FilePaths
|
||||
)
|
||||
from app.dtos.speaking import GradeSpeakingItem
|
||||
from app.helpers import TextHelper
|
||||
from app.repositories.abc import IFileStorage, IDocumentStore
|
||||
from app.services.abc import ISpeakingService, ILLMService, IVideoGeneratorService, ISpeechToTextService
|
||||
@@ -165,105 +167,110 @@ class SpeakingService(ISpeakingService):
|
||||
|
||||
return response
|
||||
|
||||
async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict:
|
||||
request_id = uuid.uuid4()
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - Received request to grade speaking task {task}. '
|
||||
f'Use this id to track the logs: {str(request_id)} - Request data: {str(answers)}'
|
||||
)
|
||||
|
||||
text_answers = []
|
||||
perfect_answers = []
|
||||
async def grade_speaking_task(self, task: int, items: List[GradeSpeakingItem]) -> Dict:
|
||||
request_id = str(uuid.uuid4())
|
||||
self._log(task, request_id, f"Received request to grade speaking task {task}.")
|
||||
|
||||
if task != 2:
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {str(request_id)} - Received {str(len(answers))} total answers.'
|
||||
)
|
||||
self._log(task, request_id, f'Received {len(items)} total answers.')
|
||||
|
||||
for item in answers:
|
||||
sound_file_name = FilePaths.AUDIO_FILES_PATH + str(uuid.uuid4())
|
||||
temp_files = []
|
||||
try:
|
||||
# Save all files first
|
||||
temp_files = await asyncio.gather(*[
|
||||
self.save_file(item) for item in items
|
||||
])
|
||||
|
||||
self._logger.info(f'POST - speaking_task_{task} - {request_id} - Downloading file {item["answer"]}')
|
||||
# Process all transcriptions concurrently (up to 4)
|
||||
self._log(task, request_id, 'Starting batch transcription')
|
||||
text_answers = await asyncio.gather(*[
|
||||
self._stt.speech_to_text(file_path)
|
||||
for file_path in temp_files
|
||||
])
|
||||
|
||||
await self._file_storage.download_firebase_file(item["answer"], sound_file_name)
|
||||
for answer in text_answers:
|
||||
self._log(task, request_id, f'Transcribed answer: {answer}')
|
||||
if not TextHelper.has_x_words(answer, 20):
|
||||
self._log(
|
||||
task, request_id,
|
||||
f'The answer had less words than threshold 20 to be graded. Answer: {answer}'
|
||||
)
|
||||
return self._zero_rating("The audio recorded does not contain enough english words to be graded.")
|
||||
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {request_id} - '
|
||||
f'Downloaded file {item["answer"]} to {sound_file_name}'
|
||||
)
|
||||
# Get perfect answers
|
||||
self._log(task, request_id, 'Requesting perfect answers')
|
||||
perfect_answers = await asyncio.gather(*[
|
||||
self._get_perfect_answer(task, item.question)
|
||||
for item in items
|
||||
])
|
||||
|
||||
answer_text = await self._stt.speech_to_text(sound_file_name)
|
||||
self._logger.info(f'POST - speaking_task_{task} - {request_id} - Transcripted answer: {answer_text}')
|
||||
# Format the responses
|
||||
if task in {1, 3}:
|
||||
self._log(task, request_id, 'Formatting answers and questions for prompt.')
|
||||
|
||||
text_answers.append(answer_text)
|
||||
item["answer"] = answer_text
|
||||
os.remove(sound_file_name)
|
||||
formatted_text = ""
|
||||
for i, (item, transcribed_answer) in enumerate(zip(items, text_answers), start=1):
|
||||
formatted_text += f"**Question {i}:**\n{item.question}\n\n"
|
||||
formatted_text += f"**Answer {i}:**\n{transcribed_answer}\n\n"
|
||||
|
||||
# TODO: This will end the grading of all answers if a single one does not have enough words
|
||||
# don't know if this is intended
|
||||
if not TextHelper.has_x_words(answer_text, 20):
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {request_id} - '
|
||||
f'The answer had less words than threshold 20 to be graded. Answer: {answer_text}'
|
||||
)
|
||||
return self._zero_rating("The audio recorded does not contain enough english words to be graded.")
|
||||
self._log(task, request_id, f'Formatted answers and questions for prompt: {formatted_text}')
|
||||
questions_and_answers = f'\n\n The questions and answers are: \n\n{formatted_text}'
|
||||
else:
|
||||
questions_and_answers = f'\n Question: "{items[0].question}" \n Answer: "{text_answers[0]}"'
|
||||
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {request_id} - '
|
||||
f'Requesting perfect answer for question: {item["question"]}'
|
||||
)
|
||||
perfect_answers.append(await self._get_perfect_answer(task, item["question"]))
|
||||
self._log(task, request_id, 'Requesting grading of the answer(s).')
|
||||
response = await self._grade_task(task, questions_and_answers)
|
||||
self._log(task, request_id, f'Answer(s) graded: {response}')
|
||||
|
||||
if task in {1, 3}:
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {request_id} - Formatting answers and questions for prompt.'
|
||||
)
|
||||
if task in {1, 3}:
|
||||
self._log(task, request_id, 'Adding perfect answer(s) to response.')
|
||||
|
||||
formatted_text = ""
|
||||
for i, entry in enumerate(answers, start=1):
|
||||
formatted_text += f"**Question {i}:**\n{entry['question']}\n\n"
|
||||
formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n"
|
||||
# TODO: check if it is answer["answer"] instead
|
||||
for i, answer in enumerate(perfect_answers, start=1):
|
||||
response['perfect_answer_' + str(i)] = answer
|
||||
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {request_id} - '
|
||||
f'Formatted answers and questions for prompt: {formatted_text}'
|
||||
)
|
||||
questions_and_answers = f'\n\n The questions and answers are: \n\n{formatted_text}'
|
||||
else:
|
||||
questions_and_answers = f'\n Question: "{answers[0]["question"]}" \n Answer: "{answers[0]["answer"]}"'
|
||||
self._log(task, request_id, 'Getting speaking corrections in parallel')
|
||||
# Get all corrections in parallel
|
||||
fixed_texts = await asyncio.gather(*[
|
||||
self._get_speaking_corrections(answer)
|
||||
for answer in text_answers
|
||||
])
|
||||
|
||||
self._logger.info(f'POST - speaking_task_{task} - {request_id} - Requesting grading of the answer(s).')
|
||||
response = await self._grade_task(task, questions_and_answers)
|
||||
self._log(task, request_id, 'Adding transcript and fixed texts to response.')
|
||||
for i, (answer, fixed) in enumerate(zip(text_answers, fixed_texts), start=1):
|
||||
response['transcript_' + str(i)] = answer
|
||||
response['fixed_text_' + str(i)] = fixed
|
||||
else:
|
||||
response['transcript'] = text_answers[0]
|
||||
|
||||
self._logger.info(f'POST - speaking_task_{task} - {request_id} - Answer(s) graded: {response}')
|
||||
self._log(task, request_id, 'Requesting fixed text.')
|
||||
response['fixed_text'] = await self._get_speaking_corrections(text_answers[0])
|
||||
self._log(task, request_id, f'Fixed text: {response["fixed_text"]}')
|
||||
|
||||
if task in {1, 3}:
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {request_id} - Adding perfect answer(s) to response.')
|
||||
response['perfect_answer'] = perfect_answers[0]["answer"]
|
||||
|
||||
# TODO: check if it is answer["answer"] instead
|
||||
for i, answer in enumerate(perfect_answers, start=1):
|
||||
response['perfect_answer_' + str(i)] = answer
|
||||
response["overall"] = self._fix_speaking_overall(response["overall"], response["task_response"])
|
||||
self._log(task, request_id, f'Final response: {response}')
|
||||
return response
|
||||
|
||||
self._logger.info(
|
||||
f'POST - speaking_task_{task} - {request_id} - Adding transcript and fixed texts to response.'
|
||||
)
|
||||
finally:
|
||||
for file_path in temp_files:
|
||||
try:
|
||||
if os.path.exists(file_path):
|
||||
os.remove(file_path)
|
||||
except Exception as e:
|
||||
self._log(task, request_id, f'Error cleaning up temp file {file_path}: {str(e)}')
|
||||
|
||||
for i, answer in enumerate(text_answers, start=1):
|
||||
response['transcript_' + str(i)] = answer
|
||||
response['fixed_text_' + str(i)] = await self._get_speaking_corrections(answer)
|
||||
else:
|
||||
response['transcript'] = answers[0]["answer"]
|
||||
def _log(self, task: int, request_id: str, message: str):
    """Write an info-level log entry tagged with the speaking task and request id."""
    prefix = f'POST - speaking_task_{task} - {request_id}'
    self._logger.info(f'{prefix} - {message}')
|
||||
|
||||
self._logger.info(f'POST - speaking_task_{task} - {request_id} - Requesting fixed text.')
|
||||
response['fixed_text'] = await self._get_speaking_corrections(answers[0]["answer"])
|
||||
self._logger.info(f'POST - speaking_task_{task} - {request_id} - Fixed text: {response["fixed_text"]}')
|
||||
|
||||
response['perfect_answer'] = perfect_answers[0]["answer"]
|
||||
|
||||
response["overall"] = self._fix_speaking_overall(response["overall"], response["task_response"])
|
||||
self._logger.info(f'POST - speaking_task_{task} - {request_id} - Final response: {response}')
|
||||
return response
|
||||
@staticmethod
async def save_file(item: GradeSpeakingItem) -> str:
    """Persist the uploaded answer audio to a uniquely named temp file.

    Returns the path the audio was written to; the caller owns cleanup.
    """
    target_path = FilePaths.AUDIO_FILES_PATH + str(uuid.uuid4())
    audio_bytes = await item.answer.read()
    async with aiofiles.open(target_path, 'wb') as audio_file:
        await audio_file.write(audio_bytes)
    return target_path
|
||||
|
||||
# ==================================================================================================================
|
||||
# grade_speaking_task helpers
|
||||
@@ -336,7 +343,7 @@ class SpeakingService(ISpeakingService):
|
||||
{
|
||||
"role": "user",
|
||||
"content": (
|
||||
'For pronunciations act as if you heard the answers and they were transcripted '
|
||||
'For pronunciations act as if you heard the answers and they were transcribed '
|
||||
'as you heard them.'
|
||||
)
|
||||
},
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import asyncio
|
||||
from typing import List, Dict
|
||||
|
||||
from app.services.abc import IWritingService, ILLMService, IAIDetectorService
|
||||
@@ -126,7 +127,7 @@ class WritingService(IWritingService):
|
||||
TemperatureSettings.GEN_QUESTION_TEMPERATURE
|
||||
)
|
||||
|
||||
response = await self._llm.prediction(
|
||||
evaluation_promise = self._llm.prediction(
|
||||
llm_model,
|
||||
messages,
|
||||
["comment"],
|
||||
@@ -134,15 +135,27 @@ class WritingService(IWritingService):
|
||||
)
|
||||
|
||||
perfect_answer_minimum = 150 if task == 1 else 250
|
||||
perfect_answer = await self._get_perfect_answer(question, perfect_answer_minimum)
|
||||
perfect_answer_promise = self._get_perfect_answer(question, perfect_answer_minimum)
|
||||
fixed_text_promise = self._get_fixed_text(answer)
|
||||
ai_detection_promise = self._ai_detector.run_detection(answer)
|
||||
|
||||
response["perfect_answer"] = perfect_answer["perfect_answer"]
|
||||
response["overall"] = ExercisesHelper.fix_writing_overall(response["overall"], response["task_response"])
|
||||
response['fixed_text'] = await self._get_fixed_text(answer)
|
||||
prediction_result, perfect_answer_result, fixed_text_result, ai_detection_result = await asyncio.gather(
|
||||
evaluation_promise,
|
||||
perfect_answer_promise,
|
||||
fixed_text_promise,
|
||||
ai_detection_promise
|
||||
)
|
||||
|
||||
ai_detection = await self._ai_detector.run_detection(answer)
|
||||
if ai_detection is not None:
|
||||
response['ai_detection'] = ai_detection
|
||||
response = prediction_result
|
||||
response["perfect_answer"] = perfect_answer_result["perfect_answer"]
|
||||
response["overall"] = ExercisesHelper.fix_writing_overall(
|
||||
response["overall"],
|
||||
response["task_response"]
|
||||
)
|
||||
response['fixed_text'] = fixed_text_result
|
||||
|
||||
if ai_detection_result is not None:
|
||||
response['ai_detection'] = ai_detection_result
|
||||
|
||||
return response
|
||||
|
||||
|
||||
@@ -1,22 +1,66 @@
|
||||
import os
|
||||
|
||||
from fastapi.concurrency import run_in_threadpool
|
||||
|
||||
import threading
|
||||
import whisper
|
||||
import asyncio
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from typing import Dict
|
||||
from whisper import Whisper
|
||||
|
||||
from app.services.abc import ISpeechToTextService
|
||||
|
||||
|
||||
"""
|
||||
The whisper model is not thread safe, a thread pool
|
||||
with 4 whisper models will be created so it can
|
||||
process up to 4 transcriptions at a time.
|
||||
|
||||
The base model requires ~1GB so 4 instances is the safe bet:
|
||||
https://github.com/openai/whisper?tab=readme-ov-file#available-models-and-languages
|
||||
"""
|
||||
class OpenAIWhisper(ISpeechToTextService):
    """Speech-to-text service backed by a pool of Whisper models.

    Whisper models are not thread safe, so ``num_models`` independent model
    instances are loaded and handed out round-robin, and a dedicated thread
    pool of the same size bounds the number of concurrent transcriptions.
    """

    def __init__(self, model_name: str = "base", num_models: int = 4):
        self._model_name = model_name
        self._num_models = num_models
        self._models: Dict[int, 'Whisper'] = {}
        self._lock = threading.Lock()
        self._next_model_id = 0
        self._is_closed = False

        # Eagerly load one model per worker (~1GB each for "base").
        for i in range(num_models):
            self._models[i] = whisper.load_model(self._model_name)

        self._executor = ThreadPoolExecutor(
            max_workers=num_models,
            thread_name_prefix="whisper_worker"
        )

    def get_model(self) -> 'Whisper':
        """Return the next model in round-robin order (thread safe)."""
        with self._lock:
            model_id = self._next_model_id
            self._next_model_id = (self._next_model_id + 1) % self._num_models
            return self._models[model_id]

    async def speech_to_text(self, file_path: str) -> str:
        """Transcribe the audio file at ``file_path`` and return the text.

        Raises FileNotFoundError when the file does not exist. The blocking
        transcription runs on the dedicated whisper worker pool so the event
        loop stays responsive.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File {file_path} not found.")

        def transcribe():
            model = self.get_model()
            return model.transcribe(
                file_path,
                fp16=False,
                language='English',
                verbose=False
            )["text"]

        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(self._executor, transcribe)

    def close(self):
        """Shut down the worker pool exactly once.

        The lock only guards the closed flag; shutdown(wait=True) must run
        OUTSIDE the lock, otherwise a worker blocked in get_model() (which
        needs the same lock) would deadlock the shutdown.
        """
        with self._lock:
            if self._is_closed:
                return
            self._is_closed = True
        if self._executor:
            self._executor.shutdown(wait=True, cancel_futures=True)

    def __del__(self):
        # __del__ may run on a partially constructed instance (e.g. if
        # model loading failed) or during interpreter teardown; tolerate
        # missing attributes instead of raising.
        try:
            self.close()
        except Exception:
            pass
|
||||
|
||||
Reference in New Issue
Block a user