trueFalse added to listening

This commit is contained in:
Carlos-Mesquita
2024-11-13 20:36:12 +00:00
parent 6daab0d9a7
commit 229dbe3e29
5 changed files with 25 additions and 4 deletions

View File

@@ -10,7 +10,7 @@ from app.services.abc import IReadingService, ILLMService
from .fill_blanks import FillBlanks
from .idea_match import IdeaMatch
from .paragraph_match import ParagraphMatch
from .true_false import TrueFalse
from ..shared import TrueFalse
from .import_reading import ImportReadingModule
from .write_blanks import WriteBlanks
@@ -88,7 +88,7 @@ class ReadingService(IReadingService):
elif req_exercise.type == "trueFalse":
question = await self._true_false.gen_true_false_not_given_exercise(
text, req_exercise.quantity, start_id, difficulty
text, req_exercise.quantity, start_id, difficulty, "reading"
)
self._logger.info(f"Added trueFalse: {question}")
return question

View File

@@ -1,49 +0,0 @@
import uuid
from app.configs.constants import GPTModels, TemperatureSettings
from app.helpers import ExercisesHelper
from app.services.abc import ILLMService
class TrueFalse:
    """Generator for True/False/Not Given exercises backed by an LLM service."""

    def __init__(self, llm: ILLMService):
        # LLM backend used to produce the statement prompts.
        self._llm = llm

    async def gen_true_false_not_given_exercise(self, text: str, quantity: int, start_id: int, difficulty: str):
        """Build a "trueFalse" exercise from a reference passage.

        Asks the LLM for ``quantity`` statements of the requested
        ``difficulty`` about ``text``, trims any surplus, assigns
        sequential string ids starting at ``start_id``, and wraps the
        result in the exercise envelope expected by callers.
        """
        system_msg = {
            "role": "system",
            "content": (
                'You are a helpful assistant designed to output JSON on this format: '
                '{"prompts":[{"prompt": "statement_1", "solution": "true/false/not_given"}, '
                '{"prompt": "statement_2", "solution": "true/false/not_given"}]}'
            ),
        }
        user_msg = {
            "role": "user",
            "content": (
                f'Generate {str(quantity)} {difficulty} difficulty statements based on the provided text. '
                'Ensure that your statements accurately represent information or inferences from the text, and '
                'provide a variety of responses, including, at least one of each True, False, and Not Given, '
                f'as appropriate.\n\nReference text:\n\n {text}'
            ),
        }

        llm_reply = await self._llm.prediction(
            GPTModels.GPT_4_O,
            [system_msg, user_msg],
            ["prompts"],
            TemperatureSettings.GEN_QUESTION_TEMPERATURE,
        )

        items = llm_reply["prompts"]
        # The model sometimes over-generates; drop the surplus entries.
        surplus = len(items) - quantity
        if surplus > 0:
            items = ExercisesHelper.remove_excess_questions(items, surplus)

        # Assign sequential string ids so questions line up with the caller's numbering.
        for seq, item in enumerate(items, start=start_id):
            item["id"] = str(seq)

        return {
            "id": str(uuid.uuid4()),
            "prompt": "Do the following statements agree with the information given in the Reading Passage?",
            "questions": items,
            "type": "trueFalse",
        }