211 lines
9.0 KiB
Python
211 lines
9.0 KiB
Python
from asyncio import gather
|
|
from typing import Dict, Optional
|
|
from uuid import uuid4
|
|
|
|
from fastapi import UploadFile
|
|
|
|
import random
|
|
|
|
from app.configs.constants import EducationalContent
|
|
from app.dtos.level import LevelExercisesDTO
|
|
from app.repositories.abc import IDocumentStore
|
|
from app.services.abc import (
|
|
ILevelService, ILLMService, IReadingService,
|
|
IWritingService, IListeningService, ISpeakingService
|
|
)
|
|
from .exercises import MultipleChoice, BlankSpace, PassageUtas, FillBlanks
|
|
from .full_exams import CustomLevelModule, LevelUtas
|
|
from .upload import UploadLevelModule
|
|
|
|
|
|
class LevelService(ILevelService):
    """Facade that wires together the exercise and full-exam generation sub-modules."""

    def __init__(
        self,
        llm: ILLMService,
        document_store: IDocumentStore,
        mc_variants: Dict,
        reading_service: IReadingService,
        writing_service: IWritingService,
        speaking_service: ISpeakingService,
        listening_service: IListeningService
    ):
        # Core collaborators kept for direct use and for the sub-modules below.
        self._llm = llm
        self._document_store = document_store
        self._reading_service = reading_service
        self._mc_variants = mc_variants
        self._upload_module = UploadLevelModule(llm)

        # Per-exercise generators.
        self._mc = MultipleChoice(llm, mc_variants)
        self._blank_space = BlankSpace(llm, mc_variants)
        self._passage_utas = PassageUtas(llm, reading_service, mc_variants)
        self._fill_blanks = FillBlanks(llm)

        # Full-exam builders; they receive `self` so they can reuse the generators above.
        self._level_utas = LevelUtas(llm, self, mc_variants)
        self._custom = CustomLevelModule(
            llm, self, reading_service, listening_service, writing_service, speaking_service
        )
|
|
|
|
|
|
async def upload_level(self, upload: UploadFile, solutions: Optional[UploadFile] = None) -> Dict:
|
|
return await self._upload_module.generate_level_from_file(upload, solutions)
|
|
|
|
async def _generate_exercise(self, req_exercise, start_id):
|
|
if req_exercise.type == "mcBlank":
|
|
questions = await self._mc.gen_multiple_choice("blank_space", req_exercise.quantity, start_id)
|
|
questions["variant"] = "mcBlank"
|
|
questions["type"] = "multipleChoice"
|
|
questions["prompt"] = "Choose the correct word or group of words that completes the sentences."
|
|
return questions
|
|
|
|
elif req_exercise.type == "mcUnderline":
|
|
questions = await self._mc.gen_multiple_choice("underline", req_exercise.quantity, start_id)
|
|
questions["variant"] = "mcUnderline"
|
|
questions["type"] = "multipleChoice"
|
|
questions["prompt"] = "Choose the underlined word or group of words that is not correct."
|
|
return questions
|
|
|
|
elif req_exercise.type == "passageUtas":
|
|
topic = req_exercise.topic if req_exercise.topic else random.choice(EducationalContent.TOPICS)
|
|
exercise = await self._passage_utas.gen_reading_passage_utas(
|
|
start_id,
|
|
req_exercise.quantity,
|
|
topic,
|
|
req_exercise.text_size
|
|
)
|
|
exercise["prompt"] = "Read the text and answer the questions below."
|
|
|
|
return exercise
|
|
|
|
elif req_exercise.type == "fillBlanksMC":
|
|
exercise = await self._fill_blanks.gen_fill_blanks(
|
|
start_id,
|
|
req_exercise.quantity,
|
|
req_exercise.text_size,
|
|
req_exercise.topic
|
|
)
|
|
exercise["prompt"] = "Read the text below and choose the correct word for each space."
|
|
return exercise
|
|
|
|
async def generate_exercises(self, dto: LevelExercisesDTO):
|
|
start_ids = []
|
|
current_id = 1
|
|
for req_exercise in dto.exercises:
|
|
start_ids.append(current_id)
|
|
current_id += req_exercise.quantity
|
|
|
|
tasks = [
|
|
self._generate_exercise(req_exercise, start_id)
|
|
for req_exercise, start_id in zip(dto.exercises, start_ids)
|
|
]
|
|
questions = await gather(*tasks)
|
|
questions = [{'id': str(uuid4()), **exercise} for exercise in questions]
|
|
|
|
return {"exercises": questions}
|
|
|
|
# Just here to support other modules that I don't know if they are supposed to still be used
|
|
async def gen_multiple_choice(self, mc_variant: str, quantity: int, start_id: int = 1):
|
|
return await self._mc.gen_multiple_choice(mc_variant, quantity, start_id)
|
|
|
|
async def gen_reading_passage_utas(self, start_id, mc_quantity: int, topic=Optional[str]): # sa_quantity: int,
|
|
return await self._passage_utas.gen_reading_passage_utas(start_id, mc_quantity, topic)
|
|
|
|
async def gen_blank_space_text_utas(self, quantity: int, start_id: int, size: int, topic: str):
|
|
return await self._blank_space.gen_blank_space_text_utas(quantity, start_id, size, topic)
|
|
|
|
async def get_level_exam(
|
|
self, number_of_exercises: int = 25, min_timer: int = 25, diagnostic: bool = False
|
|
) -> Dict:
|
|
pass
|
|
|
|
async def get_level_utas(self):
|
|
return await self._level_utas.get_level_utas()
|
|
|
|
async def get_custom_level(self, data: Dict):
|
|
return await self._custom.get_custom_level(data)
|
|
"""
|
|
async def _generate_single_multiple_choice(self, mc_variant: str = "normal"):
|
|
mc_template = self._mc_variants[mc_variant]["questions"][0]
|
|
blank_mod = " blank space " if mc_variant == "blank_space" else " "
|
|
|
|
messages = [
|
|
{
|
|
"role": "system",
|
|
"content": (
|
|
f'You are a helpful assistant designed to output JSON on this format: {mc_template}'
|
|
)
|
|
},
|
|
{
|
|
"role": "user",
|
|
"content": (
|
|
f'Generate 1 multiple choice {blank_mod} question of 4 options for an english level exam, '
|
|
f'it can be easy, intermediate or advanced.'
|
|
)
|
|
|
|
}
|
|
]
|
|
|
|
if mc_variant == "underline":
|
|
messages.append({
|
|
"role": "user",
|
|
"content": (
|
|
'The type of multiple choice in the prompt has wrong words or group of words and the options '
|
|
'are to find the wrong word or group of words that are underlined in the prompt. \nExample:\n'
|
|
'Prompt: "I <u>complain</u> about my boss <u>all the time</u>, but my colleagues <u>thinks</u> '
|
|
'the boss <u>is</u> nice."\n'
|
|
'Options:\na: "complain"\nb: "all the time"\nc: "thinks"\nd: "is"'
|
|
)
|
|
})
|
|
|
|
question = await self._llm.prediction(
|
|
GPTModels.GPT_4_O, messages, ["options"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
|
|
)
|
|
|
|
return question
|
|
"""
|
|
"""
|
|
async def _replace_exercise_if_exists(
|
|
self, all_exams, current_exercise, current_exam, seen_keys, mc_variant: str, utas: bool = False
|
|
):
|
|
# Extracting relevant fields for comparison
|
|
key = (current_exercise['prompt'], tuple(sorted(option['text'] for option in current_exercise['options'])))
|
|
# Check if the key is in the set
|
|
if key in seen_keys:
|
|
return await self._replace_exercise_if_exists(
|
|
all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam, seen_keys,
|
|
mc_variant, utas
|
|
)
|
|
else:
|
|
seen_keys.add(key)
|
|
|
|
if not utas:
|
|
for exam in all_exams:
|
|
exam_dict = exam.to_dict()
|
|
if len(exam_dict.get("parts", [])) > 0:
|
|
exercise_dict = exam_dict.get("parts", [])[0]
|
|
if len(exercise_dict.get("exercises", [])) > 0:
|
|
if any(
|
|
exercise["prompt"] == current_exercise["prompt"] and
|
|
any(exercise["options"][0]["text"] == current_option["text"] for current_option in
|
|
current_exercise["options"])
|
|
for exercise in exercise_dict.get("exercises", [])[0]["questions"]
|
|
):
|
|
return await self._replace_exercise_if_exists(
|
|
all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam,
|
|
seen_keys, mc_variant, utas
|
|
)
|
|
else:
|
|
for exam in all_exams:
|
|
if any(
|
|
exercise["prompt"] == current_exercise["prompt"] and
|
|
any(exercise["options"][0]["text"] == current_option["text"] for current_option in
|
|
current_exercise["options"])
|
|
for exercise in exam.get("questions", [])
|
|
):
|
|
return await self._replace_exercise_if_exists(
|
|
all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam,
|
|
seen_keys, mc_variant, utas
|
|
)
|
|
return current_exercise, seen_keys
|
|
"""
|