Brushed up the backend; added Writing Task 1 (Academic) prompt generation and grading (ENCOA-274)
This commit is contained in:
7
ielts_be/services/impl/exam/shared/__init__.py
Normal file
7
ielts_be/services/impl/exam/shared/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
from .true_false import TrueFalse
|
||||
from .multiple_choice import MultipleChoice
|
||||
|
||||
# Public API of the shared exam-exercise package.
__all__ = ["TrueFalse", "MultipleChoice"]
46
ielts_be/services/impl/exam/shared/multiple_choice.py
Normal file
46
ielts_be/services/impl/exam/shared/multiple_choice.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import uuid
|
||||
|
||||
from ielts_be.configs.constants import GPTModels, TemperatureSettings
|
||||
from ielts_be.helpers import ExercisesHelper
|
||||
from ielts_be.services import ILLMService
|
||||
|
||||
|
||||
class MultipleChoice:
    """Builds multiple-choice exercises for a passage via the injected LLM service."""

    def __init__(self, llm: ILLMService):
        # LLM backend used for question generation.
        self._llm = llm

    async def gen_multiple_choice(
        self, text: str, quantity: int, start_id: int, difficulty: str, n_options: int = 4
    ):
        """Generate a multiple-choice exercise block from *text*.

        Args:
            text: The passage the questions are based on.
            quantity: Number of questions to request from the model.
            start_id: First sequential question id; ids are renumbered from here.
            difficulty: Difficulty label passed verbatim into the prompt.
            n_options: Options per question (defaults to 4: A-D).

        Returns:
            A dict with a fresh UUID ``id``, a fixed instruction ``prompt``,
            the renumbered ``questions``, and ``type`` ``"multipleChoice"``.
        """
        # Few-shot system instruction pinning the exact JSON shape expected back.
        system_prompt = (
            'You are a helpful assistant designed to output JSON on this format: '
            '{"questions": [{"id": "9", "options": [{"id": "A", "text": "Economic benefits"}, {"id": "B", "text": '
            '"Government regulations"}, {"id": "C", "text": "Concerns about climate change"}, {"id": "D", "text": '
            '"Technological advancement"}], "prompt": "What is the main reason for the shift towards renewable '
            'energy sources?", "solution": "C", "variant": "text"}]}'
        )
        user_prompt = (
            f'Generate {quantity} {difficulty} difficulty multiple choice questions of {n_options} '
            f'options for this text:\n"{text}"'
        )
        chat = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        raw = await self._llm.prediction(
            GPTModels.GPT_4_O, chat, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
        )
        # Re-number the model's ids so they run sequentially from start_id.
        renumbered = ExercisesHelper.fix_exercise_ids(raw, start_id)["questions"]
        return {
            "id": str(uuid.uuid4()),
            "prompt": "Select the appropriate option.",
            "questions": renumbered,
            "type": "multipleChoice",
        }
|
||||
55
ielts_be/services/impl/exam/shared/true_false.py
Normal file
55
ielts_be/services/impl/exam/shared/true_false.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import uuid
|
||||
|
||||
from ielts_be.configs.constants import GPTModels, TemperatureSettings
|
||||
from ielts_be.helpers import ExercisesHelper
|
||||
from ielts_be.services import ILLMService
|
||||
|
||||
|
||||
class TrueFalse:
    """Builds True/False/Not Given exercises for a passage via the injected LLM service."""

    def __init__(self, llm: ILLMService):
        # LLM backend used for statement generation.
        self._llm = llm

    async def gen_true_false_not_given_exercise(self, text: str, quantity: int, start_id: int, difficulty: str, module: str):
        """Generate a True/False/Not Given exercise block from *text*.

        Args:
            text: The reference passage (or transcript) the statements are based on.
            quantity: Number of statements to request; excess model output is trimmed.
            start_id: First sequential question id; ids are renumbered from here.
            difficulty: Difficulty label passed verbatim into the prompt.
            module: Either ``"reading"`` or a listening module; picks the prompt wording.

        Returns:
            A dict with a fresh UUID ``id``, a module-appropriate ``prompt``,
            the renumbered ``questions``, and ``type`` ``"trueFalse"``.
        """
        messages = [
            {
                "role": "system",
                # Pins the exact JSON shape expected back from the model.
                "content": (
                    'You are a helpful assistant designed to output JSON on this format: '
                    '{"prompts":[{"prompt": "statement_1", "solution": "true/false/not_given"}, '
                    '{"prompt": "statement_2", "solution": "true/false/not_given"}]}')
            },
            {
                "role": "user",
                "content": (
                    # f-strings stringify interpolated values; the explicit str() the
                    # original wrapped around quantity was redundant and is removed.
                    f'Generate {quantity} {difficulty} difficulty statements based on the provided text. '
                    'Ensure that your statements accurately represent information or inferences from the text, and '
                    'provide a variety of responses, including, at least one of each True, False, and Not Given, '
                    f'as appropriate.\n\nReference text:\n\n {text}'
                )
            }
        ]

        response = await self._llm.prediction(
            GPTModels.GPT_4_O, messages, ["prompts"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
        )
        questions = response["prompts"]

        # The model may over-produce; trim back down to the requested quantity.
        if len(questions) > quantity:
            questions = ExercisesHelper.remove_excess_questions(questions, len(questions) - quantity)

        # Renumber ids sequentially starting at start_id (ids are strings downstream).
        for i, question in enumerate(questions, start=start_id):
            question["id"] = str(i)

        tail = (
            "the information given in the Reading Passage"
            if module == "reading" else
            "what you've heard"
        )

        return {
            "id": str(uuid.uuid4()),
            "prompt": f"Do the following statements agree with {tail}?",
            "questions": questions,
            "type": "trueFalse"
        }
|
||||
Reference in New Issue
Block a user