from typing import Optional

from app.configs.constants import GPTModels, TemperatureSettings
from app.helpers import ExercisesHelper
from app.services.abc import ILLMService, IReadingService


class PassageUtas:
    """Generates reading passages with LLM-produced comprehension exercises.

    Collaborators (injected):
        llm: async LLM gateway exposing ``prediction(model, messages, keys, temperature)``.
        reading_service: exposes ``generate_reading_passage(count, topic, word_size)``.
        mc_variants: dict of JSON prompt templates; key ``"text_mc_utas"`` is used here.
    """

    def __init__(self, llm: ILLMService, reading_service: IReadingService, mc_variants: dict):
        self._llm = llm
        self._reading_service = reading_service
        self._mc_variants = mc_variants

    async def gen_reading_passage_utas(
        self, start_id: int, mc_quantity: int, topic: Optional[str], word_size: Optional[int]
    ) -> dict:
        """Generate one reading passage plus its multiple-choice exercises.

        Args:
            start_id: first exercise id to assign.
            mc_quantity: number of multiple-choice questions to generate.
            topic: optional passage topic; service chooses when ``None``.
            word_size: optional target passage length in words.

        Returns:
            dict merging the generated questions with the passage
            (``passage.content`` / ``passage.title``) and the
            ``type``/``mcVariant`` markers callers expect.
        """
        passage = await self._reading_service.generate_reading_passage(1, topic, word_size)

        mc_exercises = await self._gen_text_multiple_choice_utas(passage["text"], start_id, mc_quantity)
        mc_exercises["type"] = "multipleChoice"

        return {
            **mc_exercises,
            "passage": {
                "content": passage["text"],
                "title": passage["title"]
            },
            "mcVariant": "passageUtas"
        }

    async def _gen_short_answer_utas(self, text: str, start_id: int, sa_quantity: int) -> list:
        """Generate short-answer questions (max 3 words per answer) for ``text``.

        Args:
            text: source passage the questions must be about.
            start_id: id assigned to the first question (ids ascend from here).
            sa_quantity: number of questions requested.

        Returns:
            The LLM's ``questions`` list, each entry following ``json_format``.
        """
        # Example shape the model is instructed to emit; embedded into the system prompt.
        json_format = {"questions": [{"id": 1, "question": "question", "possible_answers": ["answer_1", "answer_2"]}]}

        messages = [
            {
                "role": "system",
                "content": f'You are a helpful assistant designed to output JSON on this format: {json_format}'
            },
            {
                "role": "user",
                "content": (
                    f'Generate {sa_quantity} short answer questions, and the possible answers, must have '
                    f'maximum 3 words per answer, about this text:\n"{text}"'
                )
            },
            {
                "role": "user",
                "content": f'The id starts at {start_id}.'
            }
        ]

        question = await self._llm.prediction(
            GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
        )

        return question["questions"]

    async def _gen_text_multiple_choice_utas(
        self, text: str, start_id: int, mc_quantity: int, max_attempts: int = 3
    ) -> dict:
        """Generate multiple-choice questions (4 options each) for ``text``.

        Retries the LLM call when it returns the wrong number of questions.
        BUG FIX: the previous recursive retry transposed ``start_id`` and
        ``mc_quantity`` (``(text, mc_quantity, start_id)``), so every retry
        requested the wrong quantity and numbered ids from the wrong base;
        it was also unbounded recursion. Retries are now a bounded loop.

        Args:
            text: source passage for the questions.
            start_id: id assigned to the first question after renumbering.
            mc_quantity: number of questions requested.
            max_attempts: LLM attempts before accepting the last result as-is.

        Returns:
            The LLM response with ids fixed via ``ExercisesHelper.fix_exercise_ids``
            and option order randomized.
        """
        json_template = self._mc_variants["text_mc_utas"]

        messages = [
            {
                "role": "system",
                "content": f'You are a helpful assistant designed to output JSON on this format: {json_template}'
            },
            {
                "role": "user",
                "content": f'Generate {mc_quantity} multiple choice questions of 4 options for this text:\n{text}'
            },
            {
                "role": "user",
                "content": 'Make sure every question only has 1 correct answer.'
            }
        ]

        question = None
        for _ in range(max(1, max_attempts)):
            question = await self._llm.prediction(
                GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
            )
            if len(question["questions"]) == mc_quantity:
                break
        # After max_attempts, proceed with the last response rather than recurse forever.

        response = ExercisesHelper.fix_exercise_ids(question, start_id)
        response["questions"] = ExercisesHelper.randomize_mc_options_order(response["questions"])
        return response