Update endpoints so clients can fetch only the passage context, and then the exercises separately; also general tidy-up
This commit is contained in:
93
app/services/impl/exam/level/exercises/passage_utas.py
Normal file
93
app/services/impl/exam/level/exercises/passage_utas.py
Normal file
@@ -0,0 +1,93 @@
|
||||
from typing import Optional
|
||||
|
||||
from app.configs.constants import GPTModels, TemperatureSettings
|
||||
from app.helpers import ExercisesHelper
|
||||
from app.services.abc import ILLMService, IReadingService
|
||||
|
||||
|
||||
class PassageUtas:
    """Generates UTAS reading passages together with their exercise questions.

    The passage text itself comes from ``reading_service``; the exercise
    questions are produced by prompting ``llm`` to emit JSON matching a
    supplied template.
    """

    def __init__(self, llm: ILLMService, reading_service: IReadingService, mc_variants: dict):
        # llm: chat-completion backend used to generate questions as JSON.
        # reading_service: supplies the base reading passage (text + title).
        # mc_variants: JSON templates keyed by variant name (this class uses "text_mc_utas").
        self._llm = llm
        self._reading_service = reading_service
        self._mc_variants = mc_variants

    async def gen_reading_passage_utas(
        self, start_id, mc_quantity: int, topic: Optional[str]
    ):
        """Generate a reading passage plus multiple-choice exercises about it.

        :param start_id: id assigned to the first generated question.
        :param mc_quantity: number of multiple-choice questions to generate.
        :param topic: optional passage topic; ``None`` lets the reading
            service pick one.
        :return: dict with ``"exercises"`` (list of MC questions) and
            ``"text"`` (dict with ``"content"`` and ``"title"``).
        """
        passage = await self._reading_service.generate_reading_passage(1, topic)
        mc_exercises = await self._gen_text_multiple_choice_utas(passage["text"], start_id, mc_quantity)

        # NOTE: short-answer generation (_gen_short_answer_utas) is currently
        # disabled; when re-enabled, "exercises" should become a dict holding
        # both "shortAnswer" and "multipleChoice" lists.
        return {
            "exercises": mc_exercises,
            "text": {
                "content": passage["text"],
                "title": passage["title"]
            }
        }

    async def _gen_short_answer_utas(self, text: str, start_id: int, sa_quantity: int):
        """Generate short-answer questions (with possible answers) about *text*.

        Currently not called by ``gen_reading_passage_utas``; kept so the
        short-answer flow can be re-enabled.

        :param text: passage the questions must be about.
        :param start_id: id assigned to the first question.
        :param sa_quantity: number of questions to request.
        :return: list of question dicts (id, question, possible_answers).
        """
        json_format = {"questions": [{"id": 1, "question": "question", "possible_answers": ["answer_1", "answer_2"]}]}

        messages = [
            {
                "role": "system",
                "content": f'You are a helpful assistant designed to output JSON on this format: {json_format}'
            },
            {
                "role": "user",
                "content": (
                    f'Generate {sa_quantity} short answer questions, and the possible answers, must have '
                    f'maximum 3 words per answer, about this text:\n"{text}"'
                )
            },
            {
                "role": "user",
                "content": f'The id starts at {start_id}.'
            }
        ]

        question = await self._llm.prediction(
            GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
        )

        return question["questions"]

    async def _gen_text_multiple_choice_utas(self, text: str, start_id: int, mc_quantity: int, _retries: int = 3):
        """Generate multiple-choice questions (4 options each) about *text*.

        Retries when the model returns the wrong number of questions.

        :param text: passage the questions must be about.
        :param start_id: id assigned to the first question.
        :param mc_quantity: number of questions to request.
        :param _retries: remaining retry attempts on a count mismatch; after
            exhaustion the last response is used best-effort.
        :return: response dict whose "questions" have sequential ids starting
            at ``start_id`` and shuffled option order.
        """
        json_template = self._mc_variants["text_mc_utas"]

        messages = [
            {
                "role": "system",
                "content": f'You are a helpful assistant designed to output JSON on this format: {json_template}'
            },
            {
                "role": "user",
                "content": f'Generate {mc_quantity} multiple choice questions of 4 options for this text:\n{text}'
            },
            {
                "role": "user",
                "content": 'Make sure every question only has 1 correct answer.'
            }
        ]

        question = await self._llm.prediction(
            GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
        )

        if len(question["questions"]) != mc_quantity and _retries > 0:
            # BUG FIX: the original retry passed (text, mc_quantity, start_id),
            # swapping the start id and the question count. Also cap retries to
            # avoid unbounded recursion (and LLM cost) on a persistent mismatch.
            return await self._gen_text_multiple_choice_utas(text, start_id, mc_quantity, _retries - 1)

        response = ExercisesHelper.fix_exercise_ids(question, start_id)
        response["questions"] = ExercisesHelper.randomize_mc_options_order(response["questions"])
        return response
|
||||
Reference in New Issue
Block a user