import uuid

from ielts_be.configs.constants import GPTModels, TemperatureSettings
from ielts_be.helpers import ExercisesHelper
from ielts_be.services import ILLMService


class TrueFalse:
    """Generates IELTS-style True/False/Not Given exercises via an LLM service."""

    def __init__(self, llm: ILLMService):
        # LLM backend used to generate the statements.
        self._llm = llm

    async def gen_true_false_not_given_exercise(
        self, text: str, quantity: int, start_id: int, difficulty: str, module: str
    ) -> dict:
        """Generate a True/False/Not Given exercise from a reference text.

        Args:
            text: Reference passage the statements are based on.
            quantity: Number of statements requested from the LLM.
            start_id: First sequential numeric id assigned to the questions.
            difficulty: CEFR difficulty level to embed in the prompt.
            module: Exercise module; "reading" changes the prompt wording,
                anything else is treated as listening.

        Returns:
            A dict with a fresh UUID ``id``, the exercise ``prompt``, the
            generated ``questions`` list, and ``type`` == "trueFalse".
        """
        messages = [
            {
                "role": "system",
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: '
                    '{"prompts":[{"prompt": "statement_1", "solution": "true/false/not_given"}, '
                    '{"prompt": "statement_2", "solution": "true/false/not_given"}]}'
                ),
            },
            {
                "role": "user",
                "content": (
                    f'Generate {quantity} {difficulty} CEFR level difficulty statements based on the provided text. '
                    'Ensure that your statements accurately represent information or inferences from the text, and '
                    'provide a variety of responses, including, at least one of each True, False, and Not Given, '
                    f'as appropriate.\n\nReference text:\n\n {text}'
                ),
            },
        ]
        response = await self._llm.prediction(
            GPTModels.GPT_4_O,
            messages,
            ["prompts"],
            TemperatureSettings.GEN_QUESTION_TEMPERATURE,
        )
        questions = response["prompts"]
        # The LLM may over-generate; trim back to the requested count.
        if len(questions) > quantity:
            questions = ExercisesHelper.remove_excess_questions(
                questions, len(questions) - quantity
            )
        # Assign sequential string ids starting at start_id.
        for i, question in enumerate(questions, start=start_id):
            question["id"] = str(i)
        # Reading exercises reference the passage; otherwise assume listening.
        tail = (
            "the information given in the Reading Passage"
            if module == "reading"
            else "what you've heard"
        )
        return {
            "id": str(uuid.uuid4()),
            "prompt": f"Do the following statements agree with {tail}?",
            "questions": questions,
            "type": "trueFalse",
        }