import uuid

from app.configs.constants import GPTModels, TemperatureSettings
from app.helpers import ExercisesHelper
from app.services.abc import ILLMService


class TrueFalse:
    """Generates True/False/Not Given reading-comprehension exercises via an LLM."""

    def __init__(self, llm: ILLMService):
        # LLM backend used for all generation calls.
        self._llm = llm

    async def gen_true_false_not_given_exercise(
        self, text: str, quantity: int, start_id: int, difficulty: str
    ) -> dict:
        """Generate a True/False/Not Given exercise from a reference text.

        Args:
            text: Reference passage the statements must be based on.
            quantity: Number of statements requested from the model.
            start_id: First numeric id to assign to the generated questions
                (ids are assigned sequentially as strings).
            difficulty: Difficulty label interpolated into the prompt
                (e.g. "easy", "hard").

        Returns:
            A dict with a fresh UUID ``id``, the exercise ``prompt``, the
            list of ``questions`` (each with ``prompt``, ``solution`` and
            assigned ``id``), and ``type`` set to ``"trueFalse"``.
        """
        messages = [
            {
                "role": "system",
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: '
                    '{"prompts":[{"prompt": "statement_1", "solution": "true/false/not_given"}, '
                    '{"prompt": "statement_2", "solution": "true/false/not_given"}]}'
                ),
            },
            {
                "role": "user",
                "content": (
                    # f-strings stringify automatically; no explicit str() needed.
                    f'Generate {quantity} {difficulty} difficulty statements based on the provided text. '
                    'Ensure that your statements accurately represent information or inferences from the text, and '
                    'provide a variety of responses, including, at least one of each True, False, and Not Given, '
                    f'as appropriate.\n\nReference text:\n\n {text}'
                ),
            },
        ]

        # "prompts" is the required key the LLM response is validated against.
        response = await self._llm.prediction(
            GPTModels.GPT_4_O,
            messages,
            ["prompts"],
            TemperatureSettings.GEN_QUESTION_TEMPERATURE,
        )

        questions = response["prompts"]
        # The model may over-generate; trim back down to the requested count.
        # (If it under-generates, the shorter list is returned as-is.)
        if len(questions) > quantity:
            questions = ExercisesHelper.remove_excess_questions(
                questions, len(questions) - quantity
            )

        # Assign sequential string ids starting at start_id.
        for i, question in enumerate(questions, start=start_id):
            question["id"] = str(i)

        return {
            "id": str(uuid.uuid4()),
            "prompt": "Do the following statements agree with the information given in the Reading Passage?",
            "questions": questions,
            "type": "trueFalse",
        }