Fixed level issues

This commit is contained in:
Carlos-Mesquita
2024-11-10 04:21:36 +00:00
parent cf1b676312
commit 6909d75eb6
15 changed files with 101 additions and 84 deletions

View File

@@ -1,6 +1,12 @@
from asyncio import gather
from typing import Dict, Optional
from uuid import uuid4
from fastapi import UploadFile
import random
from app.configs.constants import EducationalContent
from app.dtos.level import LevelExercisesDTO
from app.repositories.abc import IDocumentStore
from app.services.abc import (
@@ -41,48 +47,61 @@ class LevelService(ILevelService):
)
async def upload_level(self, upload: UploadFile) -> Dict:
return await self._upload_module.generate_level_from_file(upload)
async def upload_level(self, upload: UploadFile, solutions: Optional[UploadFile] = None) -> Dict:
return await self._upload_module.generate_level_from_file(upload, solutions)
async def _generate_exercise(self, req_exercise, start_id):
if req_exercise.type == "mcBlank":
questions = await self._mc.gen_multiple_choice("blank_space", req_exercise.quantity, start_id)
questions["variant"] = "mcBlank"
questions["type"] = "multipleChoice"
questions["prompt"] = "Choose the correct word or group of words that completes the sentences."
return questions
elif req_exercise.type == "mcUnderline":
questions = await self._mc.gen_multiple_choice("underline", req_exercise.quantity, start_id)
questions["variant"] = "mcUnderline"
questions["type"] = "multipleChoice"
questions["prompt"] = "Choose the underlined word or group of words that is not correct."
return questions
elif req_exercise.type == "passageUtas":
topic = req_exercise.topic if req_exercise.topic else random.choice(EducationalContent.TOPICS)
exercise = await self._passage_utas.gen_reading_passage_utas(
start_id,
req_exercise.quantity,
topic,
req_exercise.text_size
)
exercise["prompt"] = "Read the text and answer the questions below."
return exercise
elif req_exercise.type == "fillBlanksMC":
exercise = await self._fill_blanks.gen_fill_blanks(
start_id,
req_exercise.quantity,
req_exercise.text_size,
req_exercise.topic
)
exercise["prompt"] = "Read the text below and choose the correct word for each space."
return exercise
# NOTE(review): the lines below are a diff hunk in which the removed
# (pre-refactor) and added (post-refactor) bodies of generate_exercises are
# interleaved; the +/- markers and indentation were lost in extraction, so
# this span is not valid Python as it stands. The comments tag which version
# each run of lines APPEARS to belong to — verify against the original
# commit before editing.
async def generate_exercises(self, dto: LevelExercisesDTO):
# old: sequential accumulation into `exercises` with a running start_id
exercises = []
start_id = 1
# new: a first pass precomputes each exercise's starting question id
start_ids = []
current_id = 1
for req_exercise in dto.exercises:
# old: inline per-type generation (apparently moved into _generate_exercise)
if req_exercise.type == "multipleChoice":
questions = await self._mc.gen_multiple_choice("normal", req_exercise.quantity, start_id)
exercises.append(questions)
# new: the loop body now only records id offsets per exercise
start_ids.append(current_id)
current_id += req_exercise.quantity
elif req_exercise.type == "mcBlank":
questions = await self._mc.gen_multiple_choice("blank_space", req_exercise.quantity, start_id)
questions["variant"] = "mc"
exercises.append(questions)
# new: exercises are generated concurrently via gather, then each result is
# given a fresh uuid4 id
tasks = [
self._generate_exercise(req_exercise, start_id)
for req_exercise, start_id in zip(dto.exercises, start_ids)
]
questions = await gather(*tasks)
questions = [{'id': str(uuid4()), **exercise} for exercise in questions]
elif req_exercise.type == "mcUnderline":
questions = await self._mc.gen_multiple_choice("underline", req_exercise.quantity, start_id)
exercises.append(questions)
elif req_exercise.type == "blankSpaceText":
questions = await self._blank_space.gen_blank_space_text_utas(
req_exercise.quantity, start_id, req_exercise.text_size, req_exercise.topic
)
exercises.append(questions)
elif req_exercise.type == "passageUtas":
questions = await self._passage_utas.gen_reading_passage_utas(
start_id, req_exercise.mc_qty, req_exercise.text_size
)
exercises.append(questions)
elif req_exercise.type == "fillBlanksMC":
# NOTE(review): old code called _passage_utas here too — presumably a
# copy-paste bug the refactor fixed by routing to _fill_blanks
questions = await self._passage_utas.gen_reading_passage_utas(
start_id, req_exercise.mc_qty, req_exercise.text_size
)
exercises.append(questions)
start_id = start_id + req_exercise.quantity
# old: returned the bare accumulated list
return exercises
# new: returns a dict payload keyed by "exercises"
return {"exercises": questions}
# Kept only for backward compatibility with other modules; confirm whether those call sites are still in use before removing.
async def gen_multiple_choice(self, mc_variant: str, quantity: int, start_id: int = 1):

View File

@@ -11,11 +11,12 @@ class FillBlanks:
async def gen_fill_blanks(
self, quantity: int, start_id: int, size: int, topic=None
self, start_id: int, quantity: int, size: int = 300, topic=None
):
if not topic:
topic = random.choice(EducationalContent.MTI_TOPICS)
print(quantity)
print(start_id)
messages = [
{
"role": "system",
@@ -28,19 +29,18 @@ class FillBlanks:
{
"role": "user",
"content": (
f'From the generated text choose {quantity} words (cannot be sequential words) to replace '
'once with {{id}} where id starts on ' + str(start_id) + ' and is incremented for each word. '
'The ids must be ordered throughout the text and the words must be replaced only once. '
'For each removed word you will place it in the solutions array and assign a letter from A to D,'
' then you will place that removed word and the chosen letter on the words array along with '
' other 3 other words for the remaining letter. This is a fill blanks question for an english '
'exam, so don\'t choose words completely at random.'
f'From the generated text choose exactly {quantity} words (cannot be sequential words) replace '
'each with {{id}} (starting from ' + str(start_id) + ' and incrementing), then generate a '
'JSON object containing: the modified text, a solutions array with each word\'s correct '
'letter (A-D), and a words array containing each id with four options where one is '
'the original word (matching the solution) and three are plausible but incorrect '
'alternatives that maintain grammatical consistency. '
'You cannot use repeated words!' #TODO: Solve this after
)
}
]
question = await self._llm.prediction(
GPTModels.GPT_4_O, messages, ["question"], TemperatureSettings.GEN_QUESTION_TEMPERATURE
GPTModels.GPT_4_O, messages, [], TemperatureSettings.GEN_QUESTION_TEMPERATURE
)
return {
**question,
@@ -56,7 +56,7 @@ class FillBlanks:
"solutions": [
{
"id": "",
"solution": ""
"solution": "<A,B,C or D>"
}
],
"words": [

View File

@@ -13,15 +13,12 @@ class PassageUtas:
self._mc_variants = mc_variants
async def gen_reading_passage_utas(
self, start_id, mc_quantity: int, topic: Optional[str] # sa_quantity: int,
self, start_id, mc_quantity: int, topic: Optional[str], word_size: Optional[int] # sa_quantity: int,
):
passage = await self._reading_service.generate_reading_passage(1, topic)
passage = await self._reading_service.generate_reading_passage(1, topic, word_size)
mc_exercises = await self._gen_text_multiple_choice_utas(passage["text"], start_id, mc_quantity)
#short_answer = await self._gen_short_answer_utas(passage["text"], start_id, sa_quantity)
# + sa_quantity, mc_quantity)
mc_exercises["type"] = "multipleChoice"
"""
exercises: {
"shortAnswer": short_answer,
@@ -29,11 +26,12 @@ class PassageUtas:
},
"""
return {
"exercises": mc_exercises,
"text": {
**mc_exercises,
"passage": {
"content": passage["text"],
"title": passage["title"]
}
},
"mcVariant": "passageUtas"
}
async def _gen_short_answer_utas(self, text: str, start_id: int, sa_quantity: int):

View File

@@ -2,7 +2,7 @@ import aiofiles
import os
from logging import getLogger
from typing import Dict, Any, Coroutine
from typing import Dict, Any, Coroutine, Optional
import pdfplumber
from fastapi import UploadFile
@@ -21,7 +21,7 @@ class UploadLevelModule:
self._logger = getLogger(__name__)
self._llm = openai
async def generate_level_from_file(self, file: UploadFile) -> Dict[str, Any] | None:
async def generate_level_from_file(self, file: UploadFile, solutions: Optional[UploadFile]) -> Dict[str, Any] | None:
ext, path_id = await FileHelper.save_upload(file)
FileHelper.convert_file_to_pdf(
f'./tmp/{path_id}/upload.{ext}', f'./tmp/{path_id}/exercises.pdf'

View File

@@ -57,7 +57,7 @@ class Heygen(IVideoGeneratorService):
)
async def pool_status(self, video_id: str) -> Task:
async def poll_status(self, video_id: str) -> Task:
response = await self._http_client.get(self._GET_VIDEO_URL, headers=self._get_header, params={
'video_id': video_id
})
@@ -65,7 +65,6 @@ class Heygen(IVideoGeneratorService):
status = response_data["data"]["status"]
error = response_data["data"]["error"]
if status != "completed" and error is None:
self._logger.info(f"Status: {status}")
return Task(

View File

@@ -73,7 +73,7 @@ class OpenAI(ILLMService):
return await self._prediction(
model, messages, token_count, fields_to_check, temperature, (try_count + 1), check_blacklisted
)
print(result)
return json.loads(result)
async def prediction_override(self, **kwargs):