diff --git a/.dockerignore b/.dockerignore index 37ee14f..fb69dfa 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,8 @@ -Dockerfile -README.md -*.pyc -*.pyo -*.pyd -__pycache__ -.pytest_cache -postman +Dockerfile +README.md +*.pyc +*.pyo +*.pyd +__pycache__ +.pytest_cache +postman diff --git a/.env b/.env index 8c214ae..e3ff363 100644 --- a/.env +++ b/.env @@ -1,8 +1,30 @@ -ENV=local -OPENAI_API_KEY=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 -JWT_SECRET_KEY=6e9c124ba92e8814719dcb0f21200c8aa4d0f119a994ac5e06eb90a366c83ab2 -JWT_TEST_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0In0.Emrs2D3BmMP4b3zMjw0fJTPeyMwWEBDbxx2vvaWguO0 -GOOGLE_APPLICATION_CREDENTIALS=firebase-configs/encoach-staging.json -HEY_GEN_TOKEN=MjY4MDE0MjdjZmNhNDFmYTlhZGRkNmI3MGFlMzYwZDItMTY5NTExNzY3MA== - -GPT_ZERO_API_KEY=0195b9bb24c5439899f71230809c74af +OPENAI_API_KEY=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 +JWT_SECRET_KEY=6e9c124ba92e8814719dcb0f21200c8aa4d0f119a994ac5e06eb90a366c83ab2 +JWT_TEST_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0In0.Emrs2D3BmMP4b3zMjw0fJTPeyMwWEBDbxx2vvaWguO0 +HEY_GEN_TOKEN=MjY4MDE0MjdjZmNhNDFmYTlhZGRkNmI3MGFlMzYwZDItMTY5NTExNzY3MA== +GPT_ZERO_API_KEY=0195b9bb24c5439899f71230809c74af +MONGODB_URI=mongodb+srv://user:JKpFBymv0WLv3STj@encoach.lz18a.mongodb.net/?retryWrites=true&w=majority&appName=EnCoach +GOOGLE_APPLICATION_CREDENTIALS=firebase-configs/encoach-staging.json + +# Staging +ENV=staging + +# +#FIREBASE_SCRYPT_B64_SIGNER_KEY="qjo/b5U5oNxA8o+PHFMZx/ZfG8ZQ7688zYmwMOcfZvVjOM6aHe4Jf270xgyrVArqLIQwFi7VkFnbysBjueMbVw==" +#FIREBASE_SCRYPT_B64_SALT_SEPARATOR="Bw==" +#FIREBASE_SCRYPT_ROUNDS=8 +#FIREBASE_SCRYPT_MEM_COST=14 +#FIREBASE_PROJECT_ID=encoach-staging +#MONGODB_DB=staging + +# Prod +#ENV=production + +#GOOGLE_APPLICATION_CREDENTIALS=firebase-configs/storied-phalanx-349916.json +#FIREBASE_SCRYPT_B64_SIGNER_KEY="vbO3Xii2lajSeSkCstq3s/dCwpXP7J2YN9rP/KRreU2vGOT1fg+wzSuy1kIhBECqJHG82tmwAilSxLFFtNKVMA==" 
+#FIREBASE_SCRYPT_B64_SALT_SEPARATOR="Bw==" +#FIREBASE_SCRYPT_ROUNDS=8 +#FIREBASE_SCRYPT_MEM_COST=14 +#FIREBASE_PROJECT_ID=storied-phalanx-349916 +MONGODB_DB=staging + diff --git a/.gitignore b/.gitignore index aecdd8d..254769a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ -__pycache__ -.idea -.env -.DS_Store -.venv -scripts +__pycache__ +.idea +.env +.DS_Store +.venv +_scripts diff --git a/.idea/.gitignore b/.idea/.gitignore index 13566b8..1c2fda5 100644 --- a/.idea/.gitignore +++ b/.idea/.gitignore @@ -1,8 +1,8 @@ -# Default ignored files -/shelf/ -/workspace.xml -# Editor-based HTTP Client requests -/httpRequests/ -# Datasource local storage ignored files -/dataSources/ -/dataSources.local.xml +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml diff --git a/.idea/ielts-be.iml b/.idea/ielts-be.iml index a9631c9..550fe89 100644 --- a/.idea/ielts-be.iml +++ b/.idea/ielts-be.iml @@ -1,25 +1,26 @@ - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml index 6601cfb..7d41fa7 100644 --- a/.idea/misc.xml +++ b/.idea/misc.xml @@ -1,10 +1,10 @@ - - - - - - - + + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml index 31940e2..e63d696 100644 --- a/.idea/modules.xml +++ b/.idea/modules.xml @@ -1,8 +1,8 @@ - - - - - - - + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml index 94a25f7..9661ac7 100644 --- a/.idea/vcs.xml +++ b/.idea/vcs.xml @@ -1,6 +1,6 @@ - - - - - + + + + + \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 64e8726..cb60b4f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,41 +1,41 @@ -FROM python:3.11-slim as requirements-stage -WORKDIR /tmp -RUN pip install poetry -COPY pyproject.toml ./poetry.lock* /tmp/ -RUN poetry export -f 
requirements.txt --output requirements.txt --without-hashes - - -FROM python:3.11-slim - -# Allow statements and log messages to immediately appear in the logs -ENV PYTHONUNBUFFERED True - -# Copy local code to the container image. -ENV APP_HOME /app -WORKDIR $APP_HOME - -COPY . ./ - -COPY --from=requirements-stage /tmp/requirements.txt /app/requirements.txt - -RUN apt update && apt install -y \ - ffmpeg \ - poppler-utils \ - texlive-latex-base \ - texlive-fonts-recommended \ - texlive-latex-extra \ - texlive-xetex \ - pandoc \ - librsvg2-bin \ - && rm -rf /var/lib/apt/lists/* - -RUN pip install --no-cache-dir -r /app/requirements.txt - -EXPOSE 8000 - -# Run the web service on container startup. Here we use the gunicorn -# webserver, with one worker process and 8 threads. -# For environments with multiple CPU cores, increase the number of workers -# to be equal to the cores available. -# Timeout is set to 0 to disable the timeouts of the workers to allow Cloud Run to handle instance scaling. -CMD exec uvicorn --bind 0.0.0.0:8000 --workers 1 --threads 8 --timeout 0 app.server:app +FROM python:3.11-slim as requirements-stage +WORKDIR /tmp +RUN pip install poetry +COPY pyproject.toml ./poetry.lock* /tmp/ +RUN poetry export -f requirements.txt --output requirements.txt --without-hashes + + +FROM python:3.11-slim + +# Allow statements and log messages to immediately appear in the logs +ENV PYTHONUNBUFFERED True + +# Copy local code to the container image. +ENV APP_HOME /app +WORKDIR $APP_HOME + +COPY . ./ + +COPY --from=requirements-stage /tmp/requirements.txt /app/requirements.txt + +RUN apt update && apt install -y \ + ffmpeg \ + poppler-utils \ + texlive-latex-base \ + texlive-fonts-recommended \ + texlive-latex-extra \ + texlive-xetex \ + pandoc \ + librsvg2-bin \ + && rm -rf /var/lib/apt/lists/* + +RUN pip install --no-cache-dir -r /app/requirements.txt + +EXPOSE 8000 + +# Run the web service on container startup. 
Here we use the gunicorn +# webserver, with one worker process and 8 threads. +# For environments with multiple CPU cores, increase the number of workers +# to be equal to the cores available. +# Timeout is set to 0 to disable the timeouts of the workers to allow Cloud Run to handle instance scaling. +CMD exec uvicorn --bind 0.0.0.0:8000 --workers 1 --threads 8 --timeout 0 app.server:app diff --git a/README.md b/README.md index e22b313..027cd08 100644 --- a/README.md +++ b/README.md @@ -1,52 +1,49 @@ -Latest refactor from develop's branch commit 5d5cd21 2024-08-28 - - -# Endpoints - -In ielts-ui I've added a wrapper to every backend request in '/src/utils/translate.backend.endpoints.ts' to use the -new endpoints if the "BACKEND_TYPE" environment variable is set to "async", if the env variable is not present or -with another value, the wrapper will return the old endpoint. - -| Method | ielts-be | This one | -|--------|--------------------------------------|---------------------------------------------| -| GET | /healthcheck | /api/healthcheck | -| GET | /listening_section_1 | /api/listening/section/1 | -| GET | /listening_section_2 | /api/listening/section/2 | -| GET | /listening_section_3 | /api/listening/section/3 | -| GET | /listening_section_4 | /api/listening/section/4 | -| POST | /listening | /api/listening | -| POST | /writing_task1 | /api/grade/writing/1 | -| POST | /writing_task2 | /api/grade/writing/2 | -| GET | /writing_task1_general | /api/writing/1 | -| GET | /writing_task2_general | /api/writing/2 | -| POST | /speaking_task_1 | /api/grade/speaking/1 | -| POST | /speaking_task_2 | /api/grade/speaking/2 | -| POST | /speaking_task_3 | /api/grade/speaking/3 | -| GET | /speaking_task_1 | /api/speaking/1 | -| GET | /speaking_task_2 | /api/speaking/2 | -| GET | /speaking_task_3 | /api/speaking/3 | -| POST | /speaking | /api/speaking | -| POST | /speaking/generate_speaking_video | /api/speaking/generate_speaking_video | -| POST | 
/speaking/generate_interactive_video | /api/speaking/generate_interactive_video | -| GET | /reading_passage_1 | /api/reading/passage/1 | -| GET | /reading_passage_2 | /api/reading/passage/2 | -| GET | /reading_passage_3 | /api/reading/passage/3 | -| GET | /level | /api/level | -| GET | /level_utas | /api/level/utas | -| POST | /fetch_tips | /api/training/tips | -| POST | /grading_summary | /api/grade/summary | -| POST | /grade_short_answers | /api/grade/short_answers | -| POST | /upload_level | /api/level/upload | -| POST | /training_content | /api/training/ | -| POST | /custom_level | /api/level/custom | - -# Run the app - -This is for Windows, creating venv and activating it may differ based on your OS - -1. python -m venv env -2. env\Scripts\activate -3. pip install poetry -4. poetry install -5. python app.py - +Latest refactor from develop's branch commit 5d5cd21 2024-08-28 + + +# Endpoints + + +| Method | ielts-be | This one | +|--------|--------------------------------------|---------------------------------------------| +| GET | /healthcheck | /api/healthcheck | +| GET | /listening_section_1 | /api/listening/section/1 | +| GET | /listening_section_2 | /api/listening/section/2 | +| GET | /listening_section_3 | /api/listening/section/3 | +| GET | /listening_section_4 | /api/listening/section/4 | +| POST | /listening | /api/listening | +| POST | /writing_task1 | /api/grade/writing/1 | +| POST | /writing_task2 | /api/grade/writing/2 | +| GET | /writing_task1_general | /api/writing/1 | +| GET | /writing_task2_general | /api/writing/2 | +| POST | /speaking_task_1 | /api/grade/speaking/1 | +| POST | /speaking_task_2 | /api/grade/speaking/2 | +| POST | /speaking_task_3 | /api/grade/speaking/3 | +| GET | /speaking_task_1 | /api/speaking/1 | +| GET | /speaking_task_2 | /api/speaking/2 | +| GET | /speaking_task_3 | /api/speaking/3 | +| POST | /speaking | /api/speaking | +| POST | /speaking/generate_speaking_video | /api/speaking/generate_speaking_video | +| POST | 
/speaking/generate_interactive_video | /api/speaking/generate_interactive_video | +| GET | /reading_passage_1 | /api/reading/passage/1 | +| GET | /reading_passage_2 | /api/reading/passage/2 | +| GET | /reading_passage_3 | /api/reading/passage/3 | +| GET | /level | /api/level | +| GET | /level_utas | /api/level/utas | +| POST | /fetch_tips | /api/training/tips | +| POST | /grading_summary | /api/grade/summary | +| POST | /grade_short_answers | /api/grade/short_answers | +| POST | /upload_level | /api/level/upload | +| POST | /training_content | /api/training/ | +| POST | /custom_level | /api/level/custom | + +# Run the app + +This is for Windows, creating venv and activating it may differ based on your OS + +1. python -m venv env +2. env\Scripts\activate +3. pip install poetry +4. poetry install +5. python app.py + diff --git a/app.py b/app.py index 80feec4..ec5f581 100644 --- a/app.py +++ b/app.py @@ -1,30 +1,25 @@ -import os - -import click -import uvicorn -from dotenv import load_dotenv - - -@click.command() -@click.option( - "--env", - type=click.Choice(["local", "dev", "prod"], case_sensitive=False), - default="local", -) -def main(env: str): - load_dotenv() - os.environ["ENV"] = env - if env == "prod": - raise Exception("Production environment not supported yet!") - - uvicorn.run( - app="app.server:app", - host="localhost", - port=8000, - reload=True if env != "prod" else False, - workers=1, - ) - - -if __name__ == "__main__": - main() +import click +import uvicorn +from dotenv import load_dotenv + +load_dotenv() + + +@click.command() +@click.option( + "--env", + type=click.Choice(["local", "staging", "production"], case_sensitive=False), + default="staging", +) +def main(env: str): + uvicorn.run( + app="app.server:app", + host="localhost", + port=8000, + reload=True if env != "production" else False, + workers=1, + ) + + +if __name__ == "__main__": + main() diff --git a/app/api/__init__.py b/app/api/__init__.py index 9d05fff..622f981 100644 --- 
a/app/api/__init__.py +++ b/app/api/__init__.py @@ -1,18 +1,20 @@ -from fastapi import APIRouter - -from .home import home_router -from .listening import listening_router -from .reading import reading_router -from .speaking import speaking_router -from .training import training_router -from .writing import writing_router -from .grade import grade_router - -router = APIRouter() -router.include_router(home_router, prefix="/api", tags=["Home"]) -router.include_router(listening_router, prefix="/api/listening", tags=["Listening"]) -router.include_router(reading_router, prefix="/api/reading", tags=["Reading"]) -router.include_router(speaking_router, prefix="/api/speaking", tags=["Speaking"]) -router.include_router(writing_router, prefix="/api/writing", tags=["Writing"]) -router.include_router(grade_router, prefix="/api/grade", tags=["Grade"]) -router.include_router(training_router, prefix="/api/training", tags=["Training"]) +from fastapi import APIRouter + +from .home import home_router +from .listening import listening_router +from .reading import reading_router +from .speaking import speaking_router +from .training import training_router +from .writing import writing_router +from .grade import grade_router +from .user import user_router + +router = APIRouter() +router.include_router(home_router, prefix="/api", tags=["Home"]) +router.include_router(listening_router, prefix="/api/listening", tags=["Listening"]) +router.include_router(reading_router, prefix="/api/reading", tags=["Reading"]) +router.include_router(speaking_router, prefix="/api/speaking", tags=["Speaking"]) +router.include_router(writing_router, prefix="/api/writing", tags=["Writing"]) +router.include_router(grade_router, prefix="/api/grade", tags=["Grade"]) +router.include_router(training_router, prefix="/api/training", tags=["Training"]) +router.include_router(user_router, prefix="/api/user", tags=["Users"]) diff --git a/app/api/grade.py b/app/api/grade.py index 7f054e7..84a1a0b 100644 --- 
a/app/api/grade.py +++ b/app/api/grade.py @@ -1,74 +1,74 @@ -from dependency_injector.wiring import inject, Provide -from fastapi import APIRouter, Depends, Path, Request - -from app.controllers.abc import IGradeController -from app.dtos.writing import WritingGradeTaskDTO -from app.dtos.speaking import GradeSpeakingAnswersDTO, GradeSpeakingDTO -from app.middlewares import Authorized, IsAuthenticatedViaBearerToken - -controller = "grade_controller" -grade_router = APIRouter() - - -@grade_router.post( - '/writing/{task}', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def grade_writing_task( - data: WritingGradeTaskDTO, - task: int = Path(..., ge=1, le=2), - grade_controller: IGradeController = Depends(Provide[controller]) -): - return await grade_controller.grade_writing_task(task, data) - - -@grade_router.post( - '/speaking/2', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def grade_speaking_task_2( - data: GradeSpeakingDTO, - grade_controller: IGradeController = Depends(Provide[controller]) -): - return await grade_controller.grade_speaking_task(2, [data.dict()]) - - -@grade_router.post( - '/speaking/{task}', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def grade_speaking_task_1_and_3( - data: GradeSpeakingAnswersDTO, - task: int = Path(..., ge=1, le=3), - grade_controller: IGradeController = Depends(Provide[controller]) -): - return await grade_controller.grade_speaking_task(task, data.answers) - - -@grade_router.post( - '/summary', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def grading_summary( - request: Request, - grade_controller: IGradeController = Depends(Provide[controller]) -): - data = await request.json() - return await grade_controller.grading_summary(data) - - -@grade_router.post( - '/short_answers', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) 
-@inject -async def grade_short_answers( - request: Request, - grade_controller: IGradeController = Depends(Provide[controller]) -): - data = await request.json() - return await grade_controller.grade_short_answers(data) +from dependency_injector.wiring import inject, Provide +from fastapi import APIRouter, Depends, Path, Request + +from app.controllers.abc import IGradeController +from app.dtos.writing import WritingGradeTaskDTO +from app.dtos.speaking import GradeSpeakingAnswersDTO, GradeSpeakingDTO +from app.middlewares import Authorized, IsAuthenticatedViaBearerToken + +controller = "grade_controller" +grade_router = APIRouter() + + +@grade_router.post( + '/writing/{task}', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def grade_writing_task( + data: WritingGradeTaskDTO, + task: int = Path(..., ge=1, le=2), + grade_controller: IGradeController = Depends(Provide[controller]) +): + return await grade_controller.grade_writing_task(task, data) + + +@grade_router.post( + '/speaking/2', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def grade_speaking_task_2( + data: GradeSpeakingDTO, + grade_controller: IGradeController = Depends(Provide[controller]) +): + return await grade_controller.grade_speaking_task(2, [data.dict()]) + + +@grade_router.post( + '/speaking/{task}', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def grade_speaking_task_1_and_3( + data: GradeSpeakingAnswersDTO, + task: int = Path(..., ge=1, le=3), + grade_controller: IGradeController = Depends(Provide[controller]) +): + return await grade_controller.grade_speaking_task(task, data.answers) + + +@grade_router.post( + '/summary', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def grading_summary( + request: Request, + grade_controller: IGradeController = Depends(Provide[controller]) +): + data = await request.json() + return await 
grade_controller.grading_summary(data) + + +@grade_router.post( + '/short_answers', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def grade_short_answers( + request: Request, + grade_controller: IGradeController = Depends(Provide[controller]) +): + data = await request.json() + return await grade_controller.grade_short_answers(data) diff --git a/app/api/home.py b/app/api/home.py index 4c23c75..84e0209 100644 --- a/app/api/home.py +++ b/app/api/home.py @@ -1,9 +1,9 @@ -from fastapi import APIRouter -home_router = APIRouter() - - -@home_router.get( - '/healthcheck' -) -async def healthcheck(): - return {"healthy": True} +from fastapi import APIRouter +home_router = APIRouter() + + +@home_router.get( + '/healthcheck' +) +async def healthcheck(): + return {"healthy": True} diff --git a/app/api/level.py b/app/api/level.py index 0c9a791..e550eb5 100644 --- a/app/api/level.py +++ b/app/api/level.py @@ -1,55 +1,55 @@ -from dependency_injector.wiring import Provide, inject -from fastapi import APIRouter, Depends, UploadFile, Request - -from app.middlewares import Authorized, IsAuthenticatedViaBearerToken -from app.controllers.abc import ILevelController - -controller = "level_controller" -level_router = APIRouter() - - -@level_router.get( - '/', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_level_exam( - level_controller: ILevelController = Depends(Provide[controller]) -): - return await level_controller.get_level_exam() - - -@level_router.get( - '/utas', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_level_utas( - level_controller: ILevelController = Depends(Provide[controller]) -): - return await level_controller.get_level_utas() - - -@level_router.post( - '/upload', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def upload( - file: UploadFile, - level_controller: ILevelController = 
Depends(Provide[controller]) -): - return await level_controller.upload_level(file) - - -@level_router.post( - '/custom', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def custom_level( - request: Request, - level_controller: ILevelController = Depends(Provide[controller]) -): - data = await request.json() - return await level_controller.get_custom_level(data) +from dependency_injector.wiring import Provide, inject +from fastapi import APIRouter, Depends, UploadFile, Request + +from app.middlewares import Authorized, IsAuthenticatedViaBearerToken +from app.controllers.abc import ILevelController + +controller = "level_controller" +level_router = APIRouter() + + +@level_router.get( + '/', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_level_exam( + level_controller: ILevelController = Depends(Provide[controller]) +): + return await level_controller.get_level_exam() + + +@level_router.get( + '/utas', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_level_utas( + level_controller: ILevelController = Depends(Provide[controller]) +): + return await level_controller.get_level_utas() + + +@level_router.post( + '/upload', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def upload( + file: UploadFile, + level_controller: ILevelController = Depends(Provide[controller]) +): + return await level_controller.upload_level(file) + + +@level_router.post( + '/custom', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def custom_level( + request: Request, + level_controller: ILevelController = Depends(Provide[controller]) +): + data = await request.json() + return await level_controller.get_custom_level(data) diff --git a/app/api/listening.py b/app/api/listening.py index 3fd15d4..b29c34d 100644 --- a/app/api/listening.py +++ b/app/api/listening.py @@ -1,40 +1,40 @@ 
-import random - -from dependency_injector.wiring import Provide, inject -from fastapi import APIRouter, Depends, Path - -from app.middlewares import Authorized, IsAuthenticatedViaBearerToken -from app.controllers.abc import IListeningController -from app.configs.constants import EducationalContent -from app.dtos.listening import SaveListeningDTO - - -controller = "listening_controller" -listening_router = APIRouter() - - -@listening_router.get( - '/section/{section}', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_listening_question( - exercises: list[str], - section: int = Path(..., ge=1, le=4), - topic: str | None = None, - difficulty: str = random.choice(EducationalContent.DIFFICULTIES), - listening_controller: IListeningController = Depends(Provide[controller]) -): - return await listening_controller.get_listening_question(section, topic, exercises, difficulty) - - -@listening_router.post( - '/', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def save_listening( - data: SaveListeningDTO, - listening_controller: IListeningController = Depends(Provide[controller]) -): - return await listening_controller.save_listening(data) +import random + +from dependency_injector.wiring import Provide, inject +from fastapi import APIRouter, Depends, Path + +from app.middlewares import Authorized, IsAuthenticatedViaBearerToken +from app.controllers.abc import IListeningController +from app.configs.constants import EducationalContent +from app.dtos.listening import SaveListeningDTO + + +controller = "listening_controller" +listening_router = APIRouter() + + +@listening_router.get( + '/section/{section}', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_listening_question( + exercises: list[str], + section: int = Path(..., ge=1, le=4), + topic: str | None = None, + difficulty: str = random.choice(EducationalContent.DIFFICULTIES), + 
listening_controller: IListeningController = Depends(Provide[controller]) +): + return await listening_controller.get_listening_question(section, topic, exercises, difficulty) + + +@listening_router.post( + '/', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def save_listening( + data: SaveListeningDTO, + listening_controller: IListeningController = Depends(Provide[controller]) +): + return await listening_controller.save_listening(data) diff --git a/app/api/reading.py b/app/api/reading.py index 6089cd7..ba5090e 100644 --- a/app/api/reading.py +++ b/app/api/reading.py @@ -1,28 +1,28 @@ -import random - -from dependency_injector.wiring import Provide, inject -from fastapi import APIRouter, Depends, Path, Query - -from app.middlewares import Authorized, IsAuthenticatedViaBearerToken -from app.configs.constants import EducationalContent -from app.controllers.abc import IReadingController - -controller = "reading_controller" -reading_router = APIRouter() - - -@reading_router.get( - '/passage/{passage}', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_reading_passage( - passage: int = Path(..., ge=1, le=3), - topic: str = Query(default=random.choice(EducationalContent.TOPICS)), - exercises: list[str] = Query(default=[]), - difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), - reading_controller: IReadingController = Depends(Provide[controller]) -): - return await reading_controller.get_reading_passage(passage, topic, exercises, difficulty) - - +import random + +from dependency_injector.wiring import Provide, inject +from fastapi import APIRouter, Depends, Path, Query + +from app.middlewares import Authorized, IsAuthenticatedViaBearerToken +from app.configs.constants import EducationalContent +from app.controllers.abc import IReadingController + +controller = "reading_controller" +reading_router = APIRouter() + + +@reading_router.get( + 
'/passage/{passage}', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_reading_passage( + passage: int = Path(..., ge=1, le=3), + topic: str = Query(default=random.choice(EducationalContent.TOPICS)), + exercises: list[str] = Query(default=[]), + difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), + reading_controller: IReadingController = Depends(Provide[controller]) +): + return await reading_controller.get_reading_passage(passage, topic, exercises, difficulty) + + diff --git a/app/api/speaking.py b/app/api/speaking.py index ee32422..41a11bd 100644 --- a/app/api/speaking.py +++ b/app/api/speaking.py @@ -1,97 +1,97 @@ -import random - -from dependency_injector.wiring import inject, Provide -from fastapi import APIRouter, Path, Query, Depends, BackgroundTasks - -from app.middlewares import Authorized, IsAuthenticatedViaBearerToken -from app.configs.constants import EducationalContent -from app.controllers.abc import ISpeakingController -from app.dtos.speaking import ( - SaveSpeakingDTO, GenerateVideo1DTO, GenerateVideo2DTO, GenerateVideo3DTO -) - -controller = "speaking_controller" -speaking_router = APIRouter() - - -@speaking_router.get( - '/1', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_speaking_task( - first_topic: str = Query(default=random.choice(EducationalContent.MTI_TOPICS)), - second_topic: str = Query(default=random.choice(EducationalContent.MTI_TOPICS)), - difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), - speaking_controller: ISpeakingController = Depends(Provide[controller]) -): - return await speaking_controller.get_speaking_part(1, first_topic, difficulty, second_topic) - - -@speaking_router.get( - '/{task}', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_speaking_task( - task: int = Path(..., ge=2, le=3), - topic: str = 
Query(default=random.choice(EducationalContent.MTI_TOPICS)), - difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), - speaking_controller: ISpeakingController = Depends(Provide[controller]) -): - return await speaking_controller.get_speaking_part(task, topic, difficulty) - - -@speaking_router.post( - '/', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def save_speaking( - data: SaveSpeakingDTO, - background_tasks: BackgroundTasks, - speaking_controller: ISpeakingController = Depends(Provide[controller]) -): - return await speaking_controller.save_speaking(data, background_tasks) - - -@speaking_router.post( - '/generate_video/1', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def generate_video_1( - data: GenerateVideo1DTO, - speaking_controller: ISpeakingController = Depends(Provide[controller]) -): - return await speaking_controller.generate_video( - 1, data.avatar, data.first_topic, data.questions, second_topic=data.second_topic - ) - - -@speaking_router.post( - '/generate_video/2', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def generate_video_2( - data: GenerateVideo2DTO, - speaking_controller: ISpeakingController = Depends(Provide[controller]) -): - return await speaking_controller.generate_video( - 2, data.avatar, data.topic, [data.question], prompts=data.prompts, suffix=data.suffix - ) - - -@speaking_router.post( - '/generate_video/3', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def generate_video_3( - data: GenerateVideo3DTO, - speaking_controller: ISpeakingController = Depends(Provide[controller]) -): - return await speaking_controller.generate_video( - 3, data.avatar, data.topic, data.questions - ) +import random + +from dependency_injector.wiring import inject, Provide +from fastapi import APIRouter, Path, Query, Depends, BackgroundTasks + +from 
app.middlewares import Authorized, IsAuthenticatedViaBearerToken +from app.configs.constants import EducationalContent +from app.controllers.abc import ISpeakingController +from app.dtos.speaking import ( + SaveSpeakingDTO, GenerateVideo1DTO, GenerateVideo2DTO, GenerateVideo3DTO +) + +controller = "speaking_controller" +speaking_router = APIRouter() + + +@speaking_router.get( + '/1', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_speaking_task( + first_topic: str = Query(default=random.choice(EducationalContent.MTI_TOPICS)), + second_topic: str = Query(default=random.choice(EducationalContent.MTI_TOPICS)), + difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), + speaking_controller: ISpeakingController = Depends(Provide[controller]) +): + return await speaking_controller.get_speaking_part(1, first_topic, difficulty, second_topic) + + +@speaking_router.get( + '/{task}', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_speaking_task( + task: int = Path(..., ge=2, le=3), + topic: str = Query(default=random.choice(EducationalContent.MTI_TOPICS)), + difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), + speaking_controller: ISpeakingController = Depends(Provide[controller]) +): + return await speaking_controller.get_speaking_part(task, topic, difficulty) + + +@speaking_router.post( + '/', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def save_speaking( + data: SaveSpeakingDTO, + background_tasks: BackgroundTasks, + speaking_controller: ISpeakingController = Depends(Provide[controller]) +): + return await speaking_controller.save_speaking(data, background_tasks) + + +@speaking_router.post( + '/generate_video/1', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def generate_video_1( + data: GenerateVideo1DTO, + speaking_controller: 
ISpeakingController = Depends(Provide[controller]) +): + return await speaking_controller.generate_video( + 1, data.avatar, data.first_topic, data.questions, second_topic=data.second_topic + ) + + +@speaking_router.post( + '/generate_video/2', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def generate_video_2( + data: GenerateVideo2DTO, + speaking_controller: ISpeakingController = Depends(Provide[controller]) +): + return await speaking_controller.generate_video( + 2, data.avatar, data.topic, [data.question], prompts=data.prompts, suffix=data.suffix + ) + + +@speaking_router.post( + '/generate_video/3', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def generate_video_3( + data: GenerateVideo3DTO, + speaking_controller: ISpeakingController = Depends(Provide[controller]) +): + return await speaking_controller.generate_video( + 3, data.avatar, data.topic, data.questions + ) diff --git a/app/api/training.py b/app/api/training.py index 8c4e44e..739876e 100644 --- a/app/api/training.py +++ b/app/api/training.py @@ -1,34 +1,34 @@ -from dependency_injector.wiring import Provide, inject -from fastapi import APIRouter, Depends, Request - -from app.dtos.training import FetchTipsDTO -from app.middlewares import Authorized, IsAuthenticatedViaBearerToken -from app.controllers.abc import ITrainingController - -controller = "training_controller" -training_router = APIRouter() - - -@training_router.post( - '/tips', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_reading_passage( - data: FetchTipsDTO, - training_controller: ITrainingController = Depends(Provide[controller]) -): - return await training_controller.fetch_tips(data) - - -@training_router.post( - '/', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def training_content( - request: Request, - training_controller: ITrainingController = 
Depends(Provide[controller]) -): - data = await request.json() - return await training_controller.get_training_content(data) +from dependency_injector.wiring import Provide, inject +from fastapi import APIRouter, Depends, Request + +from app.dtos.training import FetchTipsDTO +from app.middlewares import Authorized, IsAuthenticatedViaBearerToken +from app.controllers.abc import ITrainingController + +controller = "training_controller" +training_router = APIRouter() + + +@training_router.post( + '/tips', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_reading_passage( + data: FetchTipsDTO, + training_controller: ITrainingController = Depends(Provide[controller]) +): + return await training_controller.fetch_tips(data) + + +@training_router.post( + '/', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def training_content( + request: Request, + training_controller: ITrainingController = Depends(Provide[controller]) +): + data = await request.json() + return await training_controller.get_training_content(data) diff --git a/app/api/user.py b/app/api/user.py new file mode 100644 index 0000000..2275680 --- /dev/null +++ b/app/api/user.py @@ -0,0 +1,21 @@ +from dependency_injector.wiring import Provide, inject +from fastapi import APIRouter, Depends + +from app.dtos.user_batch import BatchUsersDTO +from app.middlewares import Authorized, IsAuthenticatedViaBearerToken +from app.controllers.abc import IUserController + +controller = "user_controller" +user_router = APIRouter() + + +@user_router.post( + '/import', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def batch_import( + batch: BatchUsersDTO, + user_controller: IUserController = Depends(Provide[controller]) +): + return await user_controller.batch_import(batch) diff --git a/app/api/writing.py b/app/api/writing.py index 010208b..3bd3b8e 100644 --- a/app/api/writing.py +++ 
b/app/api/writing.py @@ -1,25 +1,25 @@ -import random - -from dependency_injector.wiring import inject, Provide -from fastapi import APIRouter, Path, Query, Depends - -from app.middlewares import Authorized, IsAuthenticatedViaBearerToken -from app.configs.constants import EducationalContent -from app.controllers.abc import IWritingController - -controller = "writing_controller" -writing_router = APIRouter() - - -@writing_router.get( - '/{task}', - dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] -) -@inject -async def get_writing_task_general_question( - task: int = Path(..., ge=1, le=2), - topic: str = Query(default=random.choice(EducationalContent.MTI_TOPICS)), - difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), - writing_controller: IWritingController = Depends(Provide[controller]) -): - return await writing_controller.get_writing_task_general_question(task, topic, difficulty) +import random + +from dependency_injector.wiring import inject, Provide +from fastapi import APIRouter, Path, Query, Depends + +from app.middlewares import Authorized, IsAuthenticatedViaBearerToken +from app.configs.constants import EducationalContent +from app.controllers.abc import IWritingController + +controller = "writing_controller" +writing_router = APIRouter() + + +@writing_router.get( + '/{task}', + dependencies=[Depends(Authorized([IsAuthenticatedViaBearerToken]))] +) +@inject +async def get_writing_task_general_question( + task: int = Path(..., ge=1, le=2), + topic: str = Query(default=random.choice(EducationalContent.MTI_TOPICS)), + difficulty: str = Query(default=random.choice(EducationalContent.DIFFICULTIES)), + writing_controller: IWritingController = Depends(Provide[controller]) +): + return await writing_controller.get_writing_task_general_question(task, topic, difficulty) diff --git a/app/configs/__init__.py b/app/configs/__init__.py index ba5088c..05952b8 100644 --- a/app/configs/__init__.py +++ b/app/configs/__init__.py 
@@ -1,5 +1,5 @@ -from .dependency_injection import config_di - -__all__ = [ - "config_di" -] +from .dependency_injection import DependencyInjector + +__all__ = [ + "DependencyInjector" +] diff --git a/app/configs/constants.py b/app/configs/constants.py index bb58dba..eac0de8 100644 --- a/app/configs/constants.py +++ b/app/configs/constants.py @@ -1,732 +1,762 @@ -from enum import Enum - -BLACKLISTED_WORDS = ["jesus", "sex", "gay", "lesbian", "homosexual", "god", "angel", "pornography", "beer", "wine", - "cocaine", "alcohol", "nudity", "lgbt", "casino", "gambling", "catholicism", - "discrimination", "politic", "christianity", "islam", "christian", "christians", - "jews", "jew", "discrimination", "discriminatory"] - - -class ExamVariant(Enum): - FULL = "full" - PARTIAL = "partial" - - -class CustomLevelExerciseTypes(Enum): - MULTIPLE_CHOICE_4 = "multiple_choice_4" - MULTIPLE_CHOICE_BLANK_SPACE = "multiple_choice_blank_space" - MULTIPLE_CHOICE_UNDERLINED = "multiple_choice_underlined" - BLANK_SPACE_TEXT = "blank_space_text" - READING_PASSAGE_UTAS = "reading_passage_utas" - WRITING_LETTER = "writing_letter" - WRITING_2 = "writing_2" - SPEAKING_1 = "speaking_1" - SPEAKING_2 = "speaking_2" - SPEAKING_3 = "speaking_3" - READING_1 = "reading_1" - READING_2 = "reading_2" - READING_3 = "reading_3" - LISTENING_1 = "listening_1" - LISTENING_2 = "listening_2" - LISTENING_3 = "listening_3" - LISTENING_4 = "listening_4" - - -class QuestionType(Enum): - LISTENING_SECTION_1 = "Listening Section 1" - LISTENING_SECTION_2 = "Listening Section 2" - LISTENING_SECTION_3 = "Listening Section 3" - LISTENING_SECTION_4 = "Listening Section 4" - WRITING_TASK_1 = "Writing Task 1" - WRITING_TASK_2 = "Writing Task 2" - SPEAKING_1 = "Speaking Task Part 1" - SPEAKING_2 = "Speaking Task Part 2" - READING_PASSAGE_1 = "Reading Passage 1" - READING_PASSAGE_2 = "Reading Passage 2" - READING_PASSAGE_3 = "Reading Passage 3" - - -class AvatarEnum(Enum): - MATTHEW_NOAH = "5912afa7c77c47d3883af3d874047aaf" 
- VERA_CERISE = "9e58d96a383e4568a7f1e49df549e0e4" - EDWARD_TONY = "d2cdd9c0379a4d06ae2afb6e5039bd0c" - TANYA_MOLLY = "045cb5dcd00042b3a1e4f3bc1c12176b" - KAYLA_ABBI = "1ae1e5396cc444bfad332155fdb7a934" - JEROME_RYAN = "0ee6aa7cc1084063a630ae514fccaa31" - TYLER_CHRISTOPHER = "5772cff935844516ad7eeff21f839e43" - - -class FilePaths: - AUDIO_FILES_PATH = 'download-audio/' - FIREBASE_LISTENING_AUDIO_FILES_PATH = 'listening_recordings/' - VIDEO_FILES_PATH = 'download-video/' - FIREBASE_SPEAKING_VIDEO_FILES_PATH = 'speaking_videos/' - - -class TemperatureSettings: - GRADING_TEMPERATURE = 0.1 - TIPS_TEMPERATURE = 0.2 - GEN_QUESTION_TEMPERATURE = 0.7 - - -class GPTModels: - GPT_3_5_TURBO = "gpt-3.5-turbo" - GPT_4_TURBO = "gpt-4-turbo" - GPT_4_O = "gpt-4o" - GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k" - GPT_3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct" - GPT_4_PREVIEW = "gpt-4-turbo-preview" - - -class FieldsAndExercises: - GRADING_FIELDS = ['comment', 'overall', 'task_response'] - GEN_FIELDS = ['topic'] - GEN_TEXT_FIELDS = ['title'] - LISTENING_GEN_FIELDS = ['transcript', 'exercise'] - READING_EXERCISE_TYPES = ['fillBlanks', 'writeBlanks', 'trueFalse', 'paragraphMatch'] - READING_3_EXERCISE_TYPES = ['fillBlanks', 'writeBlanks', 'trueFalse', 'paragraphMatch', 'ideaMatch'] - - LISTENING_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions', 'writeBlanksFill', 'writeBlanksForm'] - LISTENING_1_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions', 'writeBlanksFill', 'writeBlanksFill', - 'writeBlanksForm', 'writeBlanksForm', 'writeBlanksForm', 'writeBlanksForm'] - LISTENING_2_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions'] - LISTENING_3_EXERCISE_TYPES = ['multipleChoice3Options', 'writeBlanksQuestions'] - LISTENING_4_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions', 'writeBlanksFill', 'writeBlanksForm'] - - TOTAL_READING_PASSAGE_1_EXERCISES = 13 - TOTAL_READING_PASSAGE_2_EXERCISES = 13 - TOTAL_READING_PASSAGE_3_EXERCISES = 14 - - 
TOTAL_LISTENING_SECTION_1_EXERCISES = 10 - TOTAL_LISTENING_SECTION_2_EXERCISES = 10 - TOTAL_LISTENING_SECTION_3_EXERCISES = 10 - TOTAL_LISTENING_SECTION_4_EXERCISES = 10 - - -class MinTimers: - LISTENING_MIN_TIMER_DEFAULT = 30 - WRITING_MIN_TIMER_DEFAULT = 60 - SPEAKING_MIN_TIMER_DEFAULT = 14 - - -class Voices: - EN_US_VOICES = [ - {'Gender': 'Female', 'Id': 'Salli', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Salli', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Male', 'Id': 'Matthew', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Matthew', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Female', 'Id': 'Kimberly', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Kimberly', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Female', 'Id': 'Kendra', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Kendra', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Male', 'Id': 'Justin', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Justin', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Male', 'Id': 'Joey', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Joey', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Female', 'Id': 'Joanna', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Joanna', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Female', 'Id': 'Ivy', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Ivy', - 'SupportedEngines': ['neural', 'standard']}] - EN_GB_VOICES = [ - {'Gender': 'Female', 'Id': 'Emma', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Name': 'Emma', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Male', 'Id': 'Brian', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Name': 'Brian', - 'SupportedEngines': ['neural', 'standard']}, - {'Gender': 'Female', 'Id': 'Amy', 'LanguageCode': 'en-GB', 
'LanguageName': 'British English', 'Name': 'Amy', - 'SupportedEngines': ['neural', 'standard']}] - EN_GB_WLS_VOICES = [ - {'Gender': 'Male', 'Id': 'Geraint', 'LanguageCode': 'en-GB-WLS', 'LanguageName': 'Welsh English', 'Name': 'Geraint', - 'SupportedEngines': ['standard']}] - EN_AU_VOICES = [{'Gender': 'Male', 'Id': 'Russell', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', - 'Name': 'Russell', 'SupportedEngines': ['standard']}, - {'Gender': 'Female', 'Id': 'Nicole', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', - 'Name': 'Nicole', 'SupportedEngines': ['standard']}] - - ALL_VOICES = EN_US_VOICES + EN_GB_VOICES + EN_GB_WLS_VOICES + EN_AU_VOICES - - MALE_VOICES = [item for item in ALL_VOICES if item.get('Gender') == 'Male'] - FEMALE_VOICES = [item for item in ALL_VOICES if item.get('Gender') == 'Female'] - - -class NeuralVoices: - NEURAL_EN_US_VOICES = [ - {'Gender': 'Female', 'Id': 'Danielle', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Danielle', - 'SupportedEngines': ['neural']}, - {'Gender': 'Male', 'Id': 'Gregory', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Gregory', - 'SupportedEngines': ['neural']}, - {'Gender': 'Male', 'Id': 'Kevin', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Kevin', - 'SupportedEngines': ['neural']}, - {'Gender': 'Female', 'Id': 'Ruth', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Ruth', - 'SupportedEngines': ['neural']}, - {'Gender': 'Male', 'Id': 'Stephen', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Stephen', - 'SupportedEngines': ['neural']}] - NEURAL_EN_GB_VOICES = [ - {'Gender': 'Male', 'Id': 'Arthur', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Name': 'Arthur', - 'SupportedEngines': ['neural']}] - NEURAL_EN_AU_VOICES = [ - {'Gender': 'Female', 'Id': 'Olivia', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', - 'Name': 'Olivia', 'SupportedEngines': ['neural']}] - 
NEURAL_EN_ZA_VOICES = [ - {'Gender': 'Female', 'Id': 'Ayanda', 'LanguageCode': 'en-ZA', 'LanguageName': 'South African English', - 'Name': 'Ayanda', 'SupportedEngines': ['neural']}] - NEURAL_EN_NZ_VOICES = [ - {'Gender': 'Female', 'Id': 'Aria', 'LanguageCode': 'en-NZ', 'LanguageName': 'New Zealand English', 'Name': 'Aria', - 'SupportedEngines': ['neural']}] - NEURAL_EN_IN_VOICES = [ - {'Gender': 'Female', 'Id': 'Kajal', 'LanguageCode': 'en-IN', 'LanguageName': 'Indian English', 'Name': 'Kajal', - 'SupportedEngines': ['neural']}] - NEURAL_EN_IE_VOICES = [ - {'Gender': 'Female', 'Id': 'Niamh', 'LanguageCode': 'en-IE', 'LanguageName': 'Irish English', 'Name': 'Niamh', - 'SupportedEngines': ['neural']}] - - ALL_NEURAL_VOICES = NEURAL_EN_US_VOICES + NEURAL_EN_GB_VOICES + NEURAL_EN_AU_VOICES + NEURAL_EN_ZA_VOICES + NEURAL_EN_NZ_VOICES + NEURAL_EN_IE_VOICES - - MALE_NEURAL_VOICES = [item for item in ALL_NEURAL_VOICES if item.get('Gender') == 'Male'] - FEMALE_NEURAL_VOICES = [item for item in ALL_NEURAL_VOICES if item.get('Gender') == 'Female'] - - -class EducationalContent: - DIFFICULTIES = ["easy", "medium", "hard"] - - MTI_TOPICS = [ - "Education", - "Technology", - "Environment", - "Health and Fitness", - "Engineering", - "Work and Careers", - "Travel and Tourism", - "Culture and Traditions", - "Social Issues", - "Arts and Entertainment", - "Climate Change", - "Social Media", - "Sustainable Development", - "Health Care", - "Immigration", - "Artificial Intelligence", - "Consumerism", - "Online Shopping", - "Energy", - "Oil and Gas", - "Poverty and Inequality", - "Cultural Diversity", - "Democracy and Governance", - "Mental Health", - "Ethics and Morality", - "Population Growth", - "Science and Innovation", - "Poverty Alleviation", - "Cybersecurity and Privacy", - "Human Rights", - "Food and Agriculture", - "Cyberbullying and Online Safety", - "Linguistic Diversity", - "Urbanization", - "Artificial Intelligence in Education", - "Youth Empowerment", - "Disaster 
Management", - "Mental Health Stigma", - "Internet Censorship", - "Sustainable Fashion", - "Indigenous Rights", - "Water Scarcity", - "Social Entrepreneurship", - "Privacy in the Digital Age", - "Sustainable Transportation", - "Gender Equality", - "Automation and Job Displacement", - "Digital Divide", - "Education Inequality" - ] - TOPICS = [ - "Art and Creativity", - "History of Ancient Civilizations", - "Environmental Conservation", - "Space Exploration", - "Artificial Intelligence", - "Climate Change", - "The Human Brain", - "Renewable Energy", - "Cultural Diversity", - "Modern Technology Trends", - "Sustainable Agriculture", - "Natural Disasters", - "Cybersecurity", - "Philosophy of Ethics", - "Robotics", - "Health and Wellness", - "Literature and Classics", - "World Geography", - "Social Media Impact", - "Food Sustainability", - "Economics and Markets", - "Human Evolution", - "Political Systems", - "Mental Health Awareness", - "Quantum Physics", - "Biodiversity", - "Education Reform", - "Animal Rights", - "The Industrial Revolution", - "Future of Work", - "Film and Cinema", - "Genetic Engineering", - "Climate Policy", - "Space Travel", - "Renewable Energy Sources", - "Cultural Heritage Preservation", - "Modern Art Movements", - "Sustainable Transportation", - "The History of Medicine", - "Artificial Neural Networks", - "Climate Adaptation", - "Philosophy of Existence", - "Augmented Reality", - "Yoga and Meditation", - "Literary Genres", - "World Oceans", - "Social Networking", - "Sustainable Fashion", - "Prehistoric Era", - "Democracy and Governance", - "Postcolonial Literature", - "Geopolitics", - "Psychology and Behavior", - "Nanotechnology", - "Endangered Species", - "Education Technology", - "Renaissance Art", - "Renewable Energy Policy", - "Modern Architecture", - "Climate Resilience", - "Artificial Life", - "Fitness and Nutrition", - "Classic Literature Adaptations", - "Ethical Dilemmas", - "Internet of Things (IoT)", - "Meditation Practices", - 
"Literary Symbolism", - "Marine Conservation", - "Sustainable Tourism", - "Ancient Philosophy", - "Cold War Era", - "Behavioral Economics", - "Space Colonization", - "Clean Energy Initiatives", - "Cultural Exchange", - "Modern Sculpture", - "Climate Mitigation", - "Mindfulness", - "Literary Criticism", - "Wildlife Conservation", - "Renewable Energy Innovations", - "History of Mathematics", - "Human-Computer Interaction", - "Global Health", - "Cultural Appropriation", - "Traditional cuisine and culinary arts", - "Local music and dance traditions", - "History of the region and historical landmarks", - "Traditional crafts and artisanal skills", - "Wildlife and conservation efforts", - "Local sports and athletic competitions", - "Fashion trends and clothing styles", - "Education systems and advancements", - "Healthcare services and medical innovations", - "Family values and social dynamics", - "Travel destinations and tourist attractions", - "Environmental sustainability projects", - "Technological developments and innovations", - "Entrepreneurship and business ventures", - "Youth empowerment initiatives", - "Art exhibitions and cultural events", - "Philanthropy and community development projects" - ] - - TWO_PEOPLE_SCENARIOS = [ - "Booking a table at a restaurant", - "Making a doctor's appointment", - "Asking for directions to a tourist attraction", - "Inquiring about public transportation options", - "Discussing weekend plans with a friend", - "Ordering food at a café", - "Renting a bicycle for a day", - "Arranging a meeting with a colleague", - "Talking to a real estate agent about renting an apartment", - "Discussing travel plans for an upcoming vacation", - "Checking the availability of a hotel room", - "Talking to a car rental service", - "Asking for recommendations at a library", - "Inquiring about opening hours at a museum", - "Discussing the weather forecast", - "Shopping for groceries", - "Renting a movie from a video store", - "Booking a flight ticket", - 
"Discussing a school assignment with a classmate", - "Making a reservation for a spa appointment", - "Talking to a customer service representative about a product issue", - "Discussing household chores with a family member", - "Planning a surprise party for a friend", - "Talking to a coworker about a project deadline", - "Inquiring about a gym membership", - "Discussing the menu options at a fast-food restaurant", - "Talking to a neighbor about a community event", - "Asking for help with computer problems", - "Discussing a recent sports game with a sports enthusiast", - "Talking to a pet store employee about buying a pet", - "Asking for information about a local farmer's market", - "Discussing the details of a home renovation project", - "Talking to a coworker about office supplies", - "Making plans for a family picnic", - "Inquiring about admission requirements at a university", - "Discussing the features of a new smartphone with a salesperson", - "Talking to a mechanic about car repairs", - "Making arrangements for a child's birthday party", - "Discussing a new diet plan with a nutritionist", - "Asking for information about a music concert", - "Talking to a hairdresser about getting a haircut", - "Inquiring about a language course at a language school", - "Discussing plans for a weekend camping trip", - "Talking to a bank teller about opening a new account", - "Ordering a drink at a coffee shop", - "Discussing a new book with a book club member", - "Talking to a librarian about library services", - "Asking for advice on finding a job", - "Discussing plans for a garden makeover with a landscaper", - "Talking to a travel agent about a cruise vacation", - "Inquiring about a fitness class at a gym", - "Ordering flowers for a special occasion", - "Discussing a new exercise routine with a personal trainer", - "Talking to a teacher about a child's progress in school", - "Asking for information about a local art exhibition", - "Discussing a home improvement project with 
a contractor", - "Talking to a babysitter about childcare arrangements", - "Making arrangements for a car service appointment", - "Inquiring about a photography workshop at a studio", - "Discussing plans for a family reunion with a relative", - "Talking to a tech support representative about computer issues", - "Asking for recommendations on pet grooming services", - "Discussing weekend plans with a significant other", - "Talking to a counselor about personal issues", - "Inquiring about a music lesson with a music teacher", - "Ordering a pizza for delivery", - "Making a reservation for a taxi", - "Discussing a new recipe with a chef", - "Talking to a fitness trainer about weight loss goals", - "Inquiring about a dance class at a dance studio", - "Ordering a meal at a food truck", - "Discussing plans for a weekend getaway with a partner", - "Talking to a florist about wedding flower arrangements", - "Asking for advice on home decorating", - "Discussing plans for a charity fundraiser event", - "Talking to a pet sitter about taking care of pets", - "Making arrangements for a spa day with a friend", - "Asking for recommendations on home improvement stores", - "Discussing weekend plans with a travel enthusiast", - "Talking to a car mechanic about car maintenance", - "Inquiring about a cooking class at a culinary school", - "Ordering a sandwich at a deli", - "Discussing plans for a family holiday party", - "Talking to a personal assistant about organizing tasks", - "Asking for information about a local theater production", - "Discussing a new DIY project with a home improvement expert", - "Talking to a wine expert about wine pairing", - "Making arrangements for a pet adoption", - "Asking for advice on planning a wedding" - ] - - SOCIAL_MONOLOGUE_CONTEXTS = [ - "A guided tour of a historical museum", - "An introduction to a new city for tourists", - "An orientation session for new university students", - "A safety briefing for airline passengers", - "An explanation of the 
process of recycling", - "A lecture on the benefits of a healthy diet", - "A talk on the importance of time management", - "A monologue about wildlife conservation", - "An overview of local public transportation options", - "A presentation on the history of cinema", - "An introduction to the art of photography", - "A discussion about the effects of climate change", - "An overview of different types of cuisine", - "A lecture on the principles of financial planning", - "A monologue about sustainable energy sources", - "An explanation of the process of online shopping", - "A guided tour of a botanical garden", - "An introduction to a local wildlife sanctuary", - "A safety briefing for hikers in a national park", - "A talk on the benefits of physical exercise", - "A lecture on the principles of effective communication", - "A monologue about the impact of social media", - "An overview of the history of a famous landmark", - "An introduction to the world of fashion design", - "A discussion about the challenges of global poverty", - "An explanation of the process of organic farming", - "A presentation on the history of space exploration", - "An overview of traditional music from different cultures", - "A lecture on the principles of effective leadership", - "A monologue about the influence of technology", - "A guided tour of a famous archaeological site", - "An introduction to a local wildlife rehabilitation center", - "A safety briefing for visitors to a science museum", - "A talk on the benefits of learning a new language", - "A lecture on the principles of architectural design", - "A monologue about the impact of renewable energy", - "An explanation of the process of online banking", - "A presentation on the history of a famous art movement", - "An overview of traditional clothing from various regions", - "A lecture on the principles of sustainable agriculture", - "A discussion about the challenges of urban development", - "A monologue about the influence of social 
norms", - "A guided tour of a historical battlefield", - "An introduction to a local animal shelter", - "A safety briefing for participants in a charity run", - "A talk on the benefits of community involvement", - "A lecture on the principles of sustainable tourism", - "A monologue about the impact of alternative medicine", - "An explanation of the process of wildlife tracking", - "A presentation on the history of a famous inventor", - "An overview of traditional dance forms from different cultures", - "A lecture on the principles of ethical business practices", - "A discussion about the challenges of healthcare access", - "A monologue about the influence of cultural traditions", - "A guided tour of a famous lighthouse", - "An introduction to a local astronomy observatory", - "A safety briefing for participants in a team-building event", - "A talk on the benefits of volunteering", - "A lecture on the principles of wildlife protection", - "A monologue about the impact of space exploration", - "An explanation of the process of wildlife photography", - "A presentation on the history of a famous musician", - "An overview of traditional art forms from different cultures", - "A lecture on the principles of effective education", - "A discussion about the challenges of sustainable development", - "A monologue about the influence of cultural diversity", - "A guided tour of a famous national park", - "An introduction to a local marine conservation project", - "A safety briefing for participants in a hot air balloon ride", - "A talk on the benefits of cultural exchange programs", - "A lecture on the principles of wildlife conservation", - "A monologue about the impact of technological advancements", - "An explanation of the process of wildlife rehabilitation", - "A presentation on the history of a famous explorer", - "A lecture on the principles of effective marketing", - "A discussion about the challenges of environmental sustainability", - "A monologue about the influence 
of social entrepreneurship", - "A guided tour of a famous historical estate", - "An introduction to a local marine life research center", - "A safety briefing for participants in a zip-lining adventure", - "A talk on the benefits of cultural preservation", - "A lecture on the principles of wildlife ecology", - "A monologue about the impact of space technology", - "An explanation of the process of wildlife conservation", - "A presentation on the history of a famous scientist", - "An overview of traditional crafts and artisans from different cultures", - "A lecture on the principles of effective intercultural communication" - ] - - FOUR_PEOPLE_SCENARIOS = [ - "A university lecture on history", - "A physics class discussing Newton's laws", - "A medical school seminar on anatomy", - "A training session on computer programming", - "A business school lecture on marketing strategies", - "A chemistry lab experiment and discussion", - "A language class practicing conversational skills", - "A workshop on creative writing techniques", - "A high school math lesson on calculus", - "A training program for customer service representatives", - "A lecture on environmental science and sustainability", - "A psychology class exploring human behavior", - "A music theory class analyzing compositions", - "A nursing school simulation for patient care", - "A computer science class on algorithms", - "A workshop on graphic design principles", - "A law school lecture on constitutional law", - "A geology class studying rock formations", - "A vocational training program for electricians", - "A history seminar focusing on ancient civilizations", - "A biology class dissecting specimens", - "A financial literacy course for adults", - "A literature class discussing classic novels", - "A training session for emergency response teams", - "A sociology lecture on social inequality", - "An art class exploring different painting techniques", - "A medical school seminar on diagnosis", - "A programming 
bootcamp teaching web development", - "An economics class analyzing market trends", - "A chemistry lab experiment on chemical reactions", - "A language class practicing pronunciation", - "A workshop on public speaking skills", - "A high school physics lesson on electromagnetism", - "A training program for IT professionals", - "A lecture on climate change and its effects", - "A psychology class studying cognitive psychology", - "A music class composing original songs", - "A nursing school simulation for patient assessment", - "A computer science class on data structures", - "A workshop on 3D modeling and animation", - "A law school lecture on contract law", - "A geography class examining world maps", - "A vocational training program for plumbers", - "A history seminar discussing revolutions", - "A biology class exploring genetics", - "A financial literacy course for teens", - "A literature class analyzing poetry", - "A training session for public speaking coaches", - "A sociology lecture on cultural diversity", - "An art class creating sculptures", - "A medical school seminar on surgical techniques", - "A programming bootcamp teaching app development", - "An economics class on global trade policies", - "A chemistry lab experiment on chemical bonding", - "A language class discussing idiomatic expressions", - "A workshop on conflict resolution", - "A high school biology lesson on evolution", - "A training program for project managers", - "A lecture on renewable energy sources", - "A psychology class on abnormal psychology", - "A music class rehearsing for a performance", - "A nursing school simulation for emergency response", - "A computer science class on cybersecurity", - "A workshop on digital marketing strategies", - "A law school lecture on intellectual property", - "A geology class analyzing seismic activity", - "A vocational training program for carpenters", - "A history seminar on the Renaissance", - "A chemistry class synthesizing compounds", - "A financial 
literacy course for seniors", - "A literature class interpreting Shakespearean plays", - "A training session for negotiation skills", - "A sociology lecture on urbanization", - "An art class creating digital art", - "A medical school seminar on patient communication", - "A programming bootcamp teaching mobile app development", - "An economics class on fiscal policy", - "A physics lab experiment on electromagnetism", - "A language class on cultural immersion", - "A workshop on time management", - "A high school chemistry lesson on stoichiometry", - "A training program for HR professionals", - "A lecture on space exploration and astronomy", - "A psychology class on human development", - "A music class practicing for a recital", - "A nursing school simulation for triage", - "A computer science class on web development frameworks", - "A workshop on team-building exercises", - "A law school lecture on criminal law", - "A geography class studying world cultures", - "A vocational training program for HVAC technicians", - "A history seminar on ancient civilizations", - "A biology class examining ecosystems", - "A financial literacy course for entrepreneurs", - "A literature class analyzing modern literature", - "A training session for leadership skills", - "A sociology lecture on gender studies", - "An art class exploring multimedia art", - "A medical school seminar on patient diagnosis", - "A programming bootcamp teaching software architecture" - ] - - ACADEMIC_SUBJECTS = [ - "Astrophysics", - "Microbiology", - "Political Science", - "Environmental Science", - "Literature", - "Biochemistry", - "Sociology", - "Art History", - "Geology", - "Economics", - "Psychology", - "History of Architecture", - "Linguistics", - "Neurobiology", - "Anthropology", - "Quantum Mechanics", - "Urban Planning", - "Philosophy", - "Marine Biology", - "International Relations", - "Medieval History", - "Geophysics", - "Finance", - "Educational Psychology", - "Graphic Design", - "Paleontology", - 
"Macroeconomics", - "Cognitive Psychology", - "Renaissance Art", - "Archaeology", - "Microeconomics", - "Social Psychology", - "Contemporary Art", - "Meteorology", - "Political Philosophy", - "Space Exploration", - "Cognitive Science", - "Classical Music", - "Oceanography", - "Public Health", - "Gender Studies", - "Baroque Art", - "Volcanology", - "Business Ethics", - "Music Composition", - "Environmental Policy", - "Media Studies", - "Ancient History", - "Seismology", - "Marketing", - "Human Development", - "Modern Art", - "Astronomy", - "International Law", - "Developmental Psychology", - "Film Studies", - "American History", - "Soil Science", - "Entrepreneurship", - "Clinical Psychology", - "Contemporary Dance", - "Space Physics", - "Political Economy", - "Cognitive Neuroscience", - "20th Century Literature", - "Public Administration", - "European History", - "Atmospheric Science", - "Supply Chain Management", - "Social Work", - "Japanese Literature", - "Planetary Science", - "Labor Economics", - "Industrial-Organizational Psychology", - "French Philosophy", - "Biogeochemistry", - "Strategic Management", - "Educational Sociology", - "Postmodern Literature", - "Public Relations", - "Middle Eastern History", - "Oceanography", - "International Development", - "Human Resources Management", - "Educational Leadership", - "Russian Literature", - "Quantum Chemistry", - "Environmental Economics", - "Environmental Psychology", - "Ancient Philosophy", - "Immunology", - "Comparative Politics", - "Child Development", - "Fashion Design", - "Geological Engineering", - "Macroeconomic Policy", - "Media Psychology", - "Byzantine Art", - "Ecology", - "International Business" - ] +from enum import Enum + +######################################################################################################################## +# DISCLAIMER # +# # +# All the array and dict "constants" are mutable variables, if somewhere in the app you modify them in any way, shape # +# or form all the 
other methods that will use these "constants" will also use the modified version. If you're unsure # +# whether a method will modify it use copy's deepcopy: # +# # +# from copy import deepcopy # +# # +# new_ref = deepcopy(CONSTANT) # +# # +# Using a wrapper method that returns a "constant" won't handle nested mutables. # +######################################################################################################################## + +BLACKLISTED_WORDS = ["jesus", "sex", "gay", "lesbian", "homosexual", "god", "angel", "pornography", "beer", "wine", + "cocaine", "alcohol", "nudity", "lgbt", "casino", "gambling", "catholicism", + "discrimination", "politic", "christianity", "islam", "christian", "christians", + "jews", "jew", "discrimination", "discriminatory"] + + +class UserDefaults: + DESIRED_LEVELS = { + "reading": 9, + "listening": 9, + "writing": 9, + "speaking": 9, + } + + LEVELS = { + "reading": 0, + "listening": 0, + "writing": 0, + "speaking": 0, + } + + +class ExamVariant(Enum): + FULL = "full" + PARTIAL = "partial" + + +class CustomLevelExerciseTypes(Enum): + MULTIPLE_CHOICE_4 = "multiple_choice_4" + MULTIPLE_CHOICE_BLANK_SPACE = "multiple_choice_blank_space" + MULTIPLE_CHOICE_UNDERLINED = "multiple_choice_underlined" + BLANK_SPACE_TEXT = "blank_space_text" + READING_PASSAGE_UTAS = "reading_passage_utas" + WRITING_LETTER = "writing_letter" + WRITING_2 = "writing_2" + SPEAKING_1 = "speaking_1" + SPEAKING_2 = "speaking_2" + SPEAKING_3 = "speaking_3" + READING_1 = "reading_1" + READING_2 = "reading_2" + READING_3 = "reading_3" + LISTENING_1 = "listening_1" + LISTENING_2 = "listening_2" + LISTENING_3 = "listening_3" + LISTENING_4 = "listening_4" + + +class QuestionType(Enum): + LISTENING_SECTION_1 = "Listening Section 1" + LISTENING_SECTION_2 = "Listening Section 2" + LISTENING_SECTION_3 = "Listening Section 3" + LISTENING_SECTION_4 = "Listening Section 4" + WRITING_TASK_1 = "Writing Task 1" + WRITING_TASK_2 = "Writing Task 2" + SPEAKING_1 = 
"Speaking Task Part 1" + SPEAKING_2 = "Speaking Task Part 2" + READING_PASSAGE_1 = "Reading Passage 1" + READING_PASSAGE_2 = "Reading Passage 2" + READING_PASSAGE_3 = "Reading Passage 3" + + +class AvatarEnum(Enum): + MATTHEW_NOAH = "5912afa7c77c47d3883af3d874047aaf" + VERA_CERISE = "9e58d96a383e4568a7f1e49df549e0e4" + EDWARD_TONY = "d2cdd9c0379a4d06ae2afb6e5039bd0c" + TANYA_MOLLY = "045cb5dcd00042b3a1e4f3bc1c12176b" + KAYLA_ABBI = "1ae1e5396cc444bfad332155fdb7a934" + JEROME_RYAN = "0ee6aa7cc1084063a630ae514fccaa31" + TYLER_CHRISTOPHER = "5772cff935844516ad7eeff21f839e43" + + +class FilePaths: + AUDIO_FILES_PATH = 'download-audio/' + FIREBASE_LISTENING_AUDIO_FILES_PATH = 'listening_recordings/' + VIDEO_FILES_PATH = 'download-video/' + FIREBASE_SPEAKING_VIDEO_FILES_PATH = 'speaking_videos/' + + +class TemperatureSettings: + GRADING_TEMPERATURE = 0.1 + TIPS_TEMPERATURE = 0.2 + GEN_QUESTION_TEMPERATURE = 0.7 + + +class GPTModels: + GPT_3_5_TURBO = "gpt-3.5-turbo" + GPT_4_TURBO = "gpt-4-turbo" + GPT_4_O = "gpt-4o" + GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k" + GPT_3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct" + GPT_4_PREVIEW = "gpt-4-turbo-preview" + + +class FieldsAndExercises: + GRADING_FIELDS = ['comment', 'overall', 'task_response'] + GEN_FIELDS = ['topic'] + GEN_TEXT_FIELDS = ['title'] + LISTENING_GEN_FIELDS = ['transcript', 'exercise'] + READING_EXERCISE_TYPES = ['fillBlanks', 'writeBlanks', 'trueFalse', 'paragraphMatch'] + READING_3_EXERCISE_TYPES = ['fillBlanks', 'writeBlanks', 'trueFalse', 'paragraphMatch', 'ideaMatch'] + + LISTENING_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions', 'writeBlanksFill', 'writeBlanksForm'] + LISTENING_1_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions', 'writeBlanksFill', 'writeBlanksFill', + 'writeBlanksForm', 'writeBlanksForm', 'writeBlanksForm', 'writeBlanksForm'] + LISTENING_2_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions'] + LISTENING_3_EXERCISE_TYPES = ['multipleChoice3Options', 
'writeBlanksQuestions'] + LISTENING_4_EXERCISE_TYPES = ['multipleChoice', 'writeBlanksQuestions', 'writeBlanksFill', 'writeBlanksForm'] + + TOTAL_READING_PASSAGE_1_EXERCISES = 13 + TOTAL_READING_PASSAGE_2_EXERCISES = 13 + TOTAL_READING_PASSAGE_3_EXERCISES = 14 + + TOTAL_LISTENING_SECTION_1_EXERCISES = 10 + TOTAL_LISTENING_SECTION_2_EXERCISES = 10 + TOTAL_LISTENING_SECTION_3_EXERCISES = 10 + TOTAL_LISTENING_SECTION_4_EXERCISES = 10 + + +class MinTimers: + LISTENING_MIN_TIMER_DEFAULT = 30 + WRITING_MIN_TIMER_DEFAULT = 60 + SPEAKING_MIN_TIMER_DEFAULT = 14 + + +class Voices: + EN_US_VOICES = [ + {'Gender': 'Female', 'Id': 'Salli', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Salli', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Male', 'Id': 'Matthew', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Matthew', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Female', 'Id': 'Kimberly', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Kimberly', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Female', 'Id': 'Kendra', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Kendra', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Male', 'Id': 'Justin', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Justin', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Male', 'Id': 'Joey', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Joey', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Female', 'Id': 'Joanna', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Joanna', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Female', 'Id': 'Ivy', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Ivy', + 'SupportedEngines': ['neural', 'standard']}] + EN_GB_VOICES = [ + {'Gender': 'Female', 'Id': 'Emma', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Name': 'Emma', + 
'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Male', 'Id': 'Brian', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Name': 'Brian', + 'SupportedEngines': ['neural', 'standard']}, + {'Gender': 'Female', 'Id': 'Amy', 'LanguageCode': 'en-GB', 'LanguageName': 'British English', 'Name': 'Amy', + 'SupportedEngines': ['neural', 'standard']}] + EN_GB_WLS_VOICES = [ + {'Gender': 'Male', 'Id': 'Geraint', 'LanguageCode': 'en-GB-WLS', 'LanguageName': 'Welsh English', 'Name': 'Geraint', + 'SupportedEngines': ['standard']}] + EN_AU_VOICES = [{'Gender': 'Male', 'Id': 'Russell', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', + 'Name': 'Russell', 'SupportedEngines': ['standard']}, + {'Gender': 'Female', 'Id': 'Nicole', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', + 'Name': 'Nicole', 'SupportedEngines': ['standard']}] + + ALL_VOICES = EN_US_VOICES + EN_GB_VOICES + EN_GB_WLS_VOICES + EN_AU_VOICES + + MALE_VOICES = [item for item in ALL_VOICES if item.get('Gender') == 'Male'] + FEMALE_VOICES = [item for item in ALL_VOICES if item.get('Gender') == 'Female'] + + +class NeuralVoices: + NEURAL_EN_US_VOICES = [ + {'Gender': 'Female', 'Id': 'Danielle', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Danielle', + 'SupportedEngines': ['neural']}, + {'Gender': 'Male', 'Id': 'Gregory', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Gregory', + 'SupportedEngines': ['neural']}, + {'Gender': 'Male', 'Id': 'Kevin', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Kevin', + 'SupportedEngines': ['neural']}, + {'Gender': 'Female', 'Id': 'Ruth', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Ruth', + 'SupportedEngines': ['neural']}, + {'Gender': 'Male', 'Id': 'Stephen', 'LanguageCode': 'en-US', 'LanguageName': 'US English', 'Name': 'Stephen', + 'SupportedEngines': ['neural']}] + NEURAL_EN_GB_VOICES = [ + {'Gender': 'Male', 'Id': 'Arthur', 'LanguageCode': 'en-GB', 'LanguageName': 
'British English', 'Name': 'Arthur', + 'SupportedEngines': ['neural']}] + NEURAL_EN_AU_VOICES = [ + {'Gender': 'Female', 'Id': 'Olivia', 'LanguageCode': 'en-AU', 'LanguageName': 'Australian English', + 'Name': 'Olivia', 'SupportedEngines': ['neural']}] + NEURAL_EN_ZA_VOICES = [ + {'Gender': 'Female', 'Id': 'Ayanda', 'LanguageCode': 'en-ZA', 'LanguageName': 'South African English', + 'Name': 'Ayanda', 'SupportedEngines': ['neural']}] + NEURAL_EN_NZ_VOICES = [ + {'Gender': 'Female', 'Id': 'Aria', 'LanguageCode': 'en-NZ', 'LanguageName': 'New Zealand English', 'Name': 'Aria', + 'SupportedEngines': ['neural']}] + NEURAL_EN_IN_VOICES = [ + {'Gender': 'Female', 'Id': 'Kajal', 'LanguageCode': 'en-IN', 'LanguageName': 'Indian English', 'Name': 'Kajal', + 'SupportedEngines': ['neural']}] + NEURAL_EN_IE_VOICES = [ + {'Gender': 'Female', 'Id': 'Niamh', 'LanguageCode': 'en-IE', 'LanguageName': 'Irish English', 'Name': 'Niamh', + 'SupportedEngines': ['neural']}] + + ALL_NEURAL_VOICES = NEURAL_EN_US_VOICES + NEURAL_EN_GB_VOICES + NEURAL_EN_AU_VOICES + NEURAL_EN_ZA_VOICES + NEURAL_EN_NZ_VOICES + NEURAL_EN_IE_VOICES + + MALE_NEURAL_VOICES = [item for item in ALL_NEURAL_VOICES if item.get('Gender') == 'Male'] + FEMALE_NEURAL_VOICES = [item for item in ALL_NEURAL_VOICES if item.get('Gender') == 'Female'] + + +class EducationalContent: + DIFFICULTIES = ["easy", "medium", "hard"] + + MTI_TOPICS = [ + "Education", + "Technology", + "Environment", + "Health and Fitness", + "Engineering", + "Work and Careers", + "Travel and Tourism", + "Culture and Traditions", + "Social Issues", + "Arts and Entertainment", + "Climate Change", + "Social Media", + "Sustainable Development", + "Health Care", + "Immigration", + "Artificial Intelligence", + "Consumerism", + "Online Shopping", + "Energy", + "Oil and Gas", + "Poverty and Inequality", + "Cultural Diversity", + "Democracy and Governance", + "Mental Health", + "Ethics and Morality", + "Population Growth", + "Science and Innovation", + "Poverty 
Alleviation", + "Cybersecurity and Privacy", + "Human Rights", + "Food and Agriculture", + "Cyberbullying and Online Safety", + "Linguistic Diversity", + "Urbanization", + "Artificial Intelligence in Education", + "Youth Empowerment", + "Disaster Management", + "Mental Health Stigma", + "Internet Censorship", + "Sustainable Fashion", + "Indigenous Rights", + "Water Scarcity", + "Social Entrepreneurship", + "Privacy in the Digital Age", + "Sustainable Transportation", + "Gender Equality", + "Automation and Job Displacement", + "Digital Divide", + "Education Inequality" + ] + TOPICS = [ + "Art and Creativity", + "History of Ancient Civilizations", + "Environmental Conservation", + "Space Exploration", + "Artificial Intelligence", + "Climate Change", + "The Human Brain", + "Renewable Energy", + "Cultural Diversity", + "Modern Technology Trends", + "Sustainable Agriculture", + "Natural Disasters", + "Cybersecurity", + "Philosophy of Ethics", + "Robotics", + "Health and Wellness", + "Literature and Classics", + "World Geography", + "Social Media Impact", + "Food Sustainability", + "Economics and Markets", + "Human Evolution", + "Political Systems", + "Mental Health Awareness", + "Quantum Physics", + "Biodiversity", + "Education Reform", + "Animal Rights", + "The Industrial Revolution", + "Future of Work", + "Film and Cinema", + "Genetic Engineering", + "Climate Policy", + "Space Travel", + "Renewable Energy Sources", + "Cultural Heritage Preservation", + "Modern Art Movements", + "Sustainable Transportation", + "The History of Medicine", + "Artificial Neural Networks", + "Climate Adaptation", + "Philosophy of Existence", + "Augmented Reality", + "Yoga and Meditation", + "Literary Genres", + "World Oceans", + "Social Networking", + "Sustainable Fashion", + "Prehistoric Era", + "Democracy and Governance", + "Postcolonial Literature", + "Geopolitics", + "Psychology and Behavior", + "Nanotechnology", + "Endangered Species", + "Education Technology", + "Renaissance Art", + 
"Renewable Energy Policy", + "Modern Architecture", + "Climate Resilience", + "Artificial Life", + "Fitness and Nutrition", + "Classic Literature Adaptations", + "Ethical Dilemmas", + "Internet of Things (IoT)", + "Meditation Practices", + "Literary Symbolism", + "Marine Conservation", + "Sustainable Tourism", + "Ancient Philosophy", + "Cold War Era", + "Behavioral Economics", + "Space Colonization", + "Clean Energy Initiatives", + "Cultural Exchange", + "Modern Sculpture", + "Climate Mitigation", + "Mindfulness", + "Literary Criticism", + "Wildlife Conservation", + "Renewable Energy Innovations", + "History of Mathematics", + "Human-Computer Interaction", + "Global Health", + "Cultural Appropriation", + "Traditional cuisine and culinary arts", + "Local music and dance traditions", + "History of the region and historical landmarks", + "Traditional crafts and artisanal skills", + "Wildlife and conservation efforts", + "Local sports and athletic competitions", + "Fashion trends and clothing styles", + "Education systems and advancements", + "Healthcare services and medical innovations", + "Family values and social dynamics", + "Travel destinations and tourist attractions", + "Environmental sustainability projects", + "Technological developments and innovations", + "Entrepreneurship and business ventures", + "Youth empowerment initiatives", + "Art exhibitions and cultural events", + "Philanthropy and community development projects" + ] + + TWO_PEOPLE_SCENARIOS = [ + "Booking a table at a restaurant", + "Making a doctor's appointment", + "Asking for directions to a tourist attraction", + "Inquiring about public transportation options", + "Discussing weekend plans with a friend", + "Ordering food at a café", + "Renting a bicycle for a day", + "Arranging a meeting with a colleague", + "Talking to a real estate agent about renting an apartment", + "Discussing travel plans for an upcoming vacation", + "Checking the availability of a hotel room", + "Talking to a car rental 
service", + "Asking for recommendations at a library", + "Inquiring about opening hours at a museum", + "Discussing the weather forecast", + "Shopping for groceries", + "Renting a movie from a video store", + "Booking a flight ticket", + "Discussing a school assignment with a classmate", + "Making a reservation for a spa appointment", + "Talking to a customer service representative about a product issue", + "Discussing household chores with a family member", + "Planning a surprise party for a friend", + "Talking to a coworker about a project deadline", + "Inquiring about a gym membership", + "Discussing the menu options at a fast-food restaurant", + "Talking to a neighbor about a community event", + "Asking for help with computer problems", + "Discussing a recent sports game with a sports enthusiast", + "Talking to a pet store employee about buying a pet", + "Asking for information about a local farmer's market", + "Discussing the details of a home renovation project", + "Talking to a coworker about office supplies", + "Making plans for a family picnic", + "Inquiring about admission requirements at a university", + "Discussing the features of a new smartphone with a salesperson", + "Talking to a mechanic about car repairs", + "Making arrangements for a child's birthday party", + "Discussing a new diet plan with a nutritionist", + "Asking for information about a music concert", + "Talking to a hairdresser about getting a haircut", + "Inquiring about a language course at a language school", + "Discussing plans for a weekend camping trip", + "Talking to a bank teller about opening a new account", + "Ordering a drink at a coffee shop", + "Discussing a new book with a book club member", + "Talking to a librarian about library services", + "Asking for advice on finding a job", + "Discussing plans for a garden makeover with a landscaper", + "Talking to a travel agent about a cruise vacation", + "Inquiring about a fitness class at a gym", + "Ordering flowers for a special 
occasion", + "Discussing a new exercise routine with a personal trainer", + "Talking to a teacher about a child's progress in school", + "Asking for information about a local art exhibition", + "Discussing a home improvement project with a contractor", + "Talking to a babysitter about childcare arrangements", + "Making arrangements for a car service appointment", + "Inquiring about a photography workshop at a studio", + "Discussing plans for a family reunion with a relative", + "Talking to a tech support representative about computer issues", + "Asking for recommendations on pet grooming services", + "Discussing weekend plans with a significant other", + "Talking to a counselor about personal issues", + "Inquiring about a music lesson with a music teacher", + "Ordering a pizza for delivery", + "Making a reservation for a taxi", + "Discussing a new recipe with a chef", + "Talking to a fitness trainer about weight loss goals", + "Inquiring about a dance class at a dance studio", + "Ordering a meal at a food truck", + "Discussing plans for a weekend getaway with a partner", + "Talking to a florist about wedding flower arrangements", + "Asking for advice on home decorating", + "Discussing plans for a charity fundraiser event", + "Talking to a pet sitter about taking care of pets", + "Making arrangements for a spa day with a friend", + "Asking for recommendations on home improvement stores", + "Discussing weekend plans with a travel enthusiast", + "Talking to a car mechanic about car maintenance", + "Inquiring about a cooking class at a culinary school", + "Ordering a sandwich at a deli", + "Discussing plans for a family holiday party", + "Talking to a personal assistant about organizing tasks", + "Asking for information about a local theater production", + "Discussing a new DIY project with a home improvement expert", + "Talking to a wine expert about wine pairing", + "Making arrangements for a pet adoption", + "Asking for advice on planning a wedding" + ] + + 
SOCIAL_MONOLOGUE_CONTEXTS = [ + "A guided tour of a historical museum", + "An introduction to a new city for tourists", + "An orientation session for new university students", + "A safety briefing for airline passengers", + "An explanation of the process of recycling", + "A lecture on the benefits of a healthy diet", + "A talk on the importance of time management", + "A monologue about wildlife conservation", + "An overview of local public transportation options", + "A presentation on the history of cinema", + "An introduction to the art of photography", + "A discussion about the effects of climate change", + "An overview of different types of cuisine", + "A lecture on the principles of financial planning", + "A monologue about sustainable energy sources", + "An explanation of the process of online shopping", + "A guided tour of a botanical garden", + "An introduction to a local wildlife sanctuary", + "A safety briefing for hikers in a national park", + "A talk on the benefits of physical exercise", + "A lecture on the principles of effective communication", + "A monologue about the impact of social media", + "An overview of the history of a famous landmark", + "An introduction to the world of fashion design", + "A discussion about the challenges of global poverty", + "An explanation of the process of organic farming", + "A presentation on the history of space exploration", + "An overview of traditional music from different cultures", + "A lecture on the principles of effective leadership", + "A monologue about the influence of technology", + "A guided tour of a famous archaeological site", + "An introduction to a local wildlife rehabilitation center", + "A safety briefing for visitors to a science museum", + "A talk on the benefits of learning a new language", + "A lecture on the principles of architectural design", + "A monologue about the impact of renewable energy", + "An explanation of the process of online banking", + "A presentation on the history of a 
famous art movement", + "An overview of traditional clothing from various regions", + "A lecture on the principles of sustainable agriculture", + "A discussion about the challenges of urban development", + "A monologue about the influence of social norms", + "A guided tour of a historical battlefield", + "An introduction to a local animal shelter", + "A safety briefing for participants in a charity run", + "A talk on the benefits of community involvement", + "A lecture on the principles of sustainable tourism", + "A monologue about the impact of alternative medicine", + "An explanation of the process of wildlife tracking", + "A presentation on the history of a famous inventor", + "An overview of traditional dance forms from different cultures", + "A lecture on the principles of ethical business practices", + "A discussion about the challenges of healthcare access", + "A monologue about the influence of cultural traditions", + "A guided tour of a famous lighthouse", + "An introduction to a local astronomy observatory", + "A safety briefing for participants in a team-building event", + "A talk on the benefits of volunteering", + "A lecture on the principles of wildlife protection", + "A monologue about the impact of space exploration", + "An explanation of the process of wildlife photography", + "A presentation on the history of a famous musician", + "An overview of traditional art forms from different cultures", + "A lecture on the principles of effective education", + "A discussion about the challenges of sustainable development", + "A monologue about the influence of cultural diversity", + "A guided tour of a famous national park", + "An introduction to a local marine conservation project", + "A safety briefing for participants in a hot air balloon ride", + "A talk on the benefits of cultural exchange programs", + "A lecture on the principles of wildlife conservation", + "A monologue about the impact of technological advancements", + "An explanation of the process 
of wildlife rehabilitation", + "A presentation on the history of a famous explorer", + "A lecture on the principles of effective marketing", + "A discussion about the challenges of environmental sustainability", + "A monologue about the influence of social entrepreneurship", + "A guided tour of a famous historical estate", + "An introduction to a local marine life research center", + "A safety briefing for participants in a zip-lining adventure", + "A talk on the benefits of cultural preservation", + "A lecture on the principles of wildlife ecology", + "A monologue about the impact of space technology", + "An explanation of the process of wildlife conservation", + "A presentation on the history of a famous scientist", + "An overview of traditional crafts and artisans from different cultures", + "A lecture on the principles of effective intercultural communication" + ] + + FOUR_PEOPLE_SCENARIOS = [ + "A university lecture on history", + "A physics class discussing Newton's laws", + "A medical school seminar on anatomy", + "A training session on computer programming", + "A business school lecture on marketing strategies", + "A chemistry lab experiment and discussion", + "A language class practicing conversational skills", + "A workshop on creative writing techniques", + "A high school math lesson on calculus", + "A training program for customer service representatives", + "A lecture on environmental science and sustainability", + "A psychology class exploring human behavior", + "A music theory class analyzing compositions", + "A nursing school simulation for patient care", + "A computer science class on algorithms", + "A workshop on graphic design principles", + "A law school lecture on constitutional law", + "A geology class studying rock formations", + "A vocational training program for electricians", + "A history seminar focusing on ancient civilizations", + "A biology class dissecting specimens", + "A financial literacy course for adults", + "A literature class 
discussing classic novels", + "A training session for emergency response teams", + "A sociology lecture on social inequality", + "An art class exploring different painting techniques", + "A medical school seminar on diagnosis", + "A programming bootcamp teaching web development", + "An economics class analyzing market trends", + "A chemistry lab experiment on chemical reactions", + "A language class practicing pronunciation", + "A workshop on public speaking skills", + "A high school physics lesson on electromagnetism", + "A training program for IT professionals", + "A lecture on climate change and its effects", + "A psychology class studying cognitive psychology", + "A music class composing original songs", + "A nursing school simulation for patient assessment", + "A computer science class on data structures", + "A workshop on 3D modeling and animation", + "A law school lecture on contract law", + "A geography class examining world maps", + "A vocational training program for plumbers", + "A history seminar discussing revolutions", + "A biology class exploring genetics", + "A financial literacy course for teens", + "A literature class analyzing poetry", + "A training session for public speaking coaches", + "A sociology lecture on cultural diversity", + "An art class creating sculptures", + "A medical school seminar on surgical techniques", + "A programming bootcamp teaching app development", + "An economics class on global trade policies", + "A chemistry lab experiment on chemical bonding", + "A language class discussing idiomatic expressions", + "A workshop on conflict resolution", + "A high school biology lesson on evolution", + "A training program for project managers", + "A lecture on renewable energy sources", + "A psychology class on abnormal psychology", + "A music class rehearsing for a performance", + "A nursing school simulation for emergency response", + "A computer science class on cybersecurity", + "A workshop on digital marketing strategies", + "A law 
school lecture on intellectual property", + "A geology class analyzing seismic activity", + "A vocational training program for carpenters", + "A history seminar on the Renaissance", + "A chemistry class synthesizing compounds", + "A financial literacy course for seniors", + "A literature class interpreting Shakespearean plays", + "A training session for negotiation skills", + "A sociology lecture on urbanization", + "An art class creating digital art", + "A medical school seminar on patient communication", + "A programming bootcamp teaching mobile app development", + "An economics class on fiscal policy", + "A physics lab experiment on electromagnetism", + "A language class on cultural immersion", + "A workshop on time management", + "A high school chemistry lesson on stoichiometry", + "A training program for HR professionals", + "A lecture on space exploration and astronomy", + "A psychology class on human development", + "A music class practicing for a recital", + "A nursing school simulation for triage", + "A computer science class on web development frameworks", + "A workshop on team-building exercises", + "A law school lecture on criminal law", + "A geography class studying world cultures", + "A vocational training program for HVAC technicians", + "A history seminar on ancient civilizations", + "A biology class examining ecosystems", + "A financial literacy course for entrepreneurs", + "A literature class analyzing modern literature", + "A training session for leadership skills", + "A sociology lecture on gender studies", + "An art class exploring multimedia art", + "A medical school seminar on patient diagnosis", + "A programming bootcamp teaching software architecture" + ] + + ACADEMIC_SUBJECTS = [ + "Astrophysics", + "Microbiology", + "Political Science", + "Environmental Science", + "Literature", + "Biochemistry", + "Sociology", + "Art History", + "Geology", + "Economics", + "Psychology", + "History of Architecture", + "Linguistics", + "Neurobiology", + 
"Anthropology", + "Quantum Mechanics", + "Urban Planning", + "Philosophy", + "Marine Biology", + "International Relations", + "Medieval History", + "Geophysics", + "Finance", + "Educational Psychology", + "Graphic Design", + "Paleontology", + "Macroeconomics", + "Cognitive Psychology", + "Renaissance Art", + "Archaeology", + "Microeconomics", + "Social Psychology", + "Contemporary Art", + "Meteorology", + "Political Philosophy", + "Space Exploration", + "Cognitive Science", + "Classical Music", + "Oceanography", + "Public Health", + "Gender Studies", + "Baroque Art", + "Volcanology", + "Business Ethics", + "Music Composition", + "Environmental Policy", + "Media Studies", + "Ancient History", + "Seismology", + "Marketing", + "Human Development", + "Modern Art", + "Astronomy", + "International Law", + "Developmental Psychology", + "Film Studies", + "American History", + "Soil Science", + "Entrepreneurship", + "Clinical Psychology", + "Contemporary Dance", + "Space Physics", + "Political Economy", + "Cognitive Neuroscience", + "20th Century Literature", + "Public Administration", + "European History", + "Atmospheric Science", + "Supply Chain Management", + "Social Work", + "Japanese Literature", + "Planetary Science", + "Labor Economics", + "Industrial-Organizational Psychology", + "French Philosophy", + "Biogeochemistry", + "Strategic Management", + "Educational Sociology", + "Postmodern Literature", + "Public Relations", + "Middle Eastern History", + "Oceanography", + "International Development", + "Human Resources Management", + "Educational Leadership", + "Russian Literature", + "Quantum Chemistry", + "Environmental Economics", + "Environmental Psychology", + "Ancient Philosophy", + "Immunology", + "Comparative Politics", + "Child Development", + "Fashion Design", + "Geological Engineering", + "Macroeconomic Policy", + "Media Psychology", + "Byzantine Art", + "Ecology", + "International Business" + ] diff --git a/app/configs/dependency_injection.py 
b/app/configs/dependency_injection.py index c1d13fd..2114923 100644 --- a/app/configs/dependency_injection.py +++ b/app/configs/dependency_injection.py @@ -1,120 +1,140 @@ -import json -import os - -from dependency_injector import providers, containers -from firebase_admin import credentials -from openai import AsyncOpenAI -from httpx import AsyncClient as HTTPClient -from google.cloud.firestore_v1 import AsyncClient as FirestoreClient -from dotenv import load_dotenv -from sentence_transformers import SentenceTransformer - -from app.repositories.impl import * -from app.services.impl import * -from app.controllers.impl import * - -load_dotenv() - - -def config_di( - *, polly_client: any, http_client: HTTPClient, whisper_model: any -) -> None: - """ - Loads up all the common configs of all the environments - and then calls the specific env configs - """ - # Firebase token - cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS")) - firebase_token = cred.get_access_token().access_token - - container = containers.DynamicContainer() - - openai_client = providers.Singleton(AsyncOpenAI) - polly_client = providers.Object(polly_client) - http_client = providers.Object(http_client) - firestore_client = providers.Singleton(FirestoreClient) - whisper_model = providers.Object(whisper_model) - - llm = providers.Factory(OpenAI, client=openai_client) - stt = providers.Factory(OpenAIWhisper, model=whisper_model) - tts = providers.Factory(AWSPolly, client=polly_client) - vid_gen = providers.Factory(Heygen, client=http_client, heygen_token=os.getenv("HEY_GEN_TOKEN")) - ai_detector = providers.Factory(GPTZero, client=http_client, gpt_zero_key=os.getenv("GPT_ZERO_API_KEY")) - - firebase_instance = providers.Factory( - FirebaseStorage, client=http_client, token=firebase_token, bucket=os.getenv("FIREBASE_BUCKET") - ) - - firestore = providers.Factory(Firestore, client=firestore_client) - - # Services - - listening_service = providers.Factory( - ListeningService, 
llm=llm, tts=tts, file_storage=firebase_instance, document_store=firestore - ) - reading_service = providers.Factory(ReadingService, llm=llm) - - speaking_service = providers.Factory( - SpeakingService, llm=llm, vid_gen=vid_gen, - file_storage=firebase_instance, document_store=firestore, - stt=stt - ) - - writing_service = providers.Factory(WritingService, llm=llm, ai_detector=ai_detector) - - with open('app/services/impl/level/mc_variants.json', 'r') as file: - mc_variants = json.load(file) - - level_service = providers.Factory( - LevelService, llm=llm, document_store=firestore, mc_variants=mc_variants, reading_service=reading_service, - writing_service=writing_service, speaking_service=speaking_service, listening_service=listening_service - ) - - grade_service = providers.Factory( - GradeService, llm=llm - ) - - embeddings = SentenceTransformer('all-MiniLM-L6-v2') - - training_kb = providers.Factory( - TrainingContentKnowledgeBase, embeddings=embeddings - ) - - training_service = providers.Factory( - TrainingService, llm=llm, firestore=firestore, training_kb=training_kb - ) - - # Controllers - - container.grade_controller = providers.Factory( - GradeController, grade_service=grade_service, speaking_service=speaking_service, writing_service=writing_service - ) - - container.training_controller = providers.Factory( - TrainingController, training_service=training_service - ) - - container.level_controller = providers.Factory( - LevelController, level_service=level_service - ) - container.listening_controller = providers.Factory( - ListeningController, listening_service=listening_service - ) - - container.reading_controller = providers.Factory( - ReadingController, reading_service=reading_service - ) - - container.speaking_controller = providers.Factory( - SpeakingController, speaking_service=speaking_service - ) - - container.writing_controller = providers.Factory( - WritingController, writing_service=writing_service - ) - - container.llm = llm - - container.wire( - 
packages=["app"] - ) +import json +import os + +from dependency_injector import providers, containers +from firebase_admin import credentials +from motor.motor_asyncio import AsyncIOMotorClient +from openai import AsyncOpenAI +from httpx import AsyncClient as HTTPClient +from dotenv import load_dotenv +from sentence_transformers import SentenceTransformer + +from app.repositories.impl import * +from app.services.impl import * +from app.controllers.impl import * + +load_dotenv() + + +class DependencyInjector: + + def __init__(self, polly_client: any, http_client: HTTPClient, whisper_model: any): + self._container = containers.DynamicContainer() + self._polly_client = polly_client + self._http_client = http_client + self._whisper_model = whisper_model + + def inject(self): + self._setup_clients() + self._setup_third_parties() + self._setup_repositories() + self._setup_services() + self._setup_controllers() + self._container.wire( + packages=["app"] + ) + + def _setup_clients(self): + self._container.openai_client = providers.Singleton(AsyncOpenAI) + self._container.polly_client = providers.Object(self._polly_client) + self._container.http_client = providers.Object(self._http_client) + self._container.whisper_model = providers.Object(self._whisper_model) + + def _setup_third_parties(self): + self._container.llm = providers.Factory(OpenAI, client=self._container.openai_client) + self._container.stt = providers.Factory(OpenAIWhisper, model=self._container.whisper_model) + self._container.tts = providers.Factory(AWSPolly, client=self._container.polly_client) + self._container.vid_gen = providers.Factory( + Heygen, client=self._container.http_client, heygen_token=os.getenv("HEY_GEN_TOKEN") + ) + self._container.ai_detector = providers.Factory( + GPTZero, client=self._container.http_client, gpt_zero_key=os.getenv("GPT_ZERO_API_KEY") + ) + + def _setup_repositories(self): + cred = credentials.Certificate(os.getenv("GOOGLE_APPLICATION_CREDENTIALS")) + firebase_token = 
cred.get_access_token().access_token + + self._container.document_store = providers.Object( + AsyncIOMotorClient(os.getenv("MONGODB_URI"))[os.getenv("MONGODB_DB")] + ) + + self._container.firebase_instance = providers.Factory( + FirebaseStorage, + client=self._container.http_client, token=firebase_token, bucket=os.getenv("FIREBASE_BUCKET") + ) + + def _setup_services(self): + self._container.listening_service = providers.Factory( + ListeningService, + llm=self._container.llm, + tts=self._container.tts, + file_storage=self._container.firebase_instance, + document_store=self._container.document_store + ) + self._container.reading_service = providers.Factory(ReadingService, llm=self._container.llm) + + self._container.speaking_service = providers.Factory( + SpeakingService, llm=self._container.llm, vid_gen=self._container.vid_gen, + file_storage=self._container.firebase_instance, document_store=self._container.document_store, + stt=self._container.stt + ) + + self._container.writing_service = providers.Factory( + WritingService, llm=self._container.llm, ai_detector=self._container.ai_detector + ) + + with open('app/services/impl/exam/level/mc_variants.json', 'r') as file: + mc_variants = json.load(file) + + self._container.level_service = providers.Factory( + LevelService, llm=self._container.llm, document_store=self._container.document_store, + mc_variants=mc_variants, reading_service=self._container.reading_service, + writing_service=self._container.writing_service, speaking_service=self._container.speaking_service, + listening_service=self._container.listening_service + ) + + self._container.grade_service = providers.Factory( + GradeService, llm=self._container.llm + ) + + embeddings = SentenceTransformer('all-MiniLM-L6-v2') + + self._container.training_kb = providers.Factory( + TrainingContentKnowledgeBase, embeddings=embeddings + ) + + self._container.training_service = providers.Factory( + TrainingService, llm=self._container.llm, + 
firestore=self._container.document_store, training_kb=self._container.training_kb + ) + + def _setup_controllers(self): + self._container.grade_controller = providers.Factory( + GradeController, grade_service=self._container.grade_service, + speaking_service=self._container.speaking_service, + writing_service=self._container.writing_service + ) + + self._container.training_controller = providers.Factory( + TrainingController, training_service=self._container.training_service + ) + + self._container.level_controller = providers.Factory( + LevelController, level_service=self._container.level_service + ) + self._container.listening_controller = providers.Factory( + ListeningController, listening_service=self._container.listening_service + ) + + self._container.reading_controller = providers.Factory( + ReadingController, reading_service=self._container.reading_service + ) + + self._container.speaking_controller = providers.Factory( + SpeakingController, speaking_service=self._container.speaking_service + ) + + self._container.writing_controller = providers.Factory( + WritingController, writing_service=self._container.writing_service + ) diff --git a/app/configs/logging/__init__.py b/app/configs/logging/__init__.py index c8d0cdf..addcd44 100644 --- a/app/configs/logging/__init__.py +++ b/app/configs/logging/__init__.py @@ -1,7 +1,7 @@ -from .filters import ErrorAndAboveFilter -from .queue_handler import QueueListenerHandler - -__all__ = [ - "ErrorAndAboveFilter", - "QueueListenerHandler" -] +from .filters import ErrorAndAboveFilter +from .queue_handler import QueueListenerHandler + +__all__ = [ + "ErrorAndAboveFilter", + "QueueListenerHandler" +] diff --git a/app/configs/logging/filters.py b/app/configs/logging/filters.py index f2ccd9b..5b60503 100644 --- a/app/configs/logging/filters.py +++ b/app/configs/logging/filters.py @@ -1,6 +1,6 @@ -import logging - - -class ErrorAndAboveFilter(logging.Filter): - def filter(self, record: logging.LogRecord) -> bool | 
logging.LogRecord: - return record.levelno < logging.ERROR +import logging + + +class ErrorAndAboveFilter(logging.Filter): + def filter(self, record: logging.LogRecord) -> bool | logging.LogRecord: + return record.levelno < logging.ERROR diff --git a/app/configs/logging/formatters.py b/app/configs/logging/formatters.py index c6653fd..71fbc69 100644 --- a/app/configs/logging/formatters.py +++ b/app/configs/logging/formatters.py @@ -1,105 +1,105 @@ -import datetime as dt -import json -import logging - -LOG_RECORD_BUILTIN_ATTRS = { - "args", - "asctime", - "created", - "exc_info", - "exc_text", - "filename", - "funcName", - "levelname", - "levelno", - "lineno", - "module", - "msecs", - "message", - "msg", - "name", - "pathname", - "process", - "processName", - "relativeCreated", - "stack_info", - "thread", - "threadName", - "taskName", -} - -""" - This isn't being used since the app will be run on gcloud run but this can be used for future apps. - If you want to test it: - - formatters: - - "json": { - "()": "json_formatter.JSONFormatter", - "fmt_keys": { - "level": "levelname", - "message": "message", - "timestamp": "timestamp", - "logger": "name", - "module": "module", - "function": "funcName", - "line": "lineno", - "thread_name": "threadName" - } - } - - handlers: - - "file_json": { - "class": "logging.handlers.RotatingFileHandler", - "level": "DEBUG", - "formatter": "json", - "filename": "logs/log", - "maxBytes": 1000000, - "backupCount": 3 - } - - and add "cfg://handlers.file_json" to queue handler -""" - -# From this video https://www.youtube.com/watch?v=9L77QExPmI0 -# Src here: https://github.com/mCodingLLC/VideosSampleCode/blob/master/videos/135_modern_logging/mylogger.py -class JSONFormatter(logging.Formatter): - def __init__( - self, - *, - fmt_keys: dict[str, str] | None = None, - ): - super().__init__() - self.fmt_keys = fmt_keys if fmt_keys is not None else {} - - def format(self, record: logging.LogRecord) -> str: - message = 
self._prepare_log_dict(record) - return json.dumps(message, default=str) - - def _prepare_log_dict(self, record: logging.LogRecord): - always_fields = { - "message": record.getMessage(), - "timestamp": dt.datetime.fromtimestamp( - record.created, tz=dt.timezone.utc - ).isoformat(), - } - if record.exc_info is not None: - always_fields["exc_info"] = self.formatException(record.exc_info) - - if record.stack_info is not None: - always_fields["stack_info"] = self.formatStack(record.stack_info) - - message = { - key: msg_val - if (msg_val := always_fields.pop(val, None)) is not None - else getattr(record, val) - for key, val in self.fmt_keys.items() - } - message.update(always_fields) - - for key, val in record.__dict__.items(): - if key not in LOG_RECORD_BUILTIN_ATTRS: - message[key] = val - - return message +import datetime as dt +import json +import logging + +LOG_RECORD_BUILTIN_ATTRS = { + "args", + "asctime", + "created", + "exc_info", + "exc_text", + "filename", + "funcName", + "levelname", + "levelno", + "lineno", + "module", + "msecs", + "message", + "msg", + "name", + "pathname", + "process", + "processName", + "relativeCreated", + "stack_info", + "thread", + "threadName", + "taskName", +} + +""" + This isn't being used since the app will be run on gcloud run but this can be used for future apps. 
+ If you want to test it: + + formatters: + + "json": { + "()": "json_formatter.JSONFormatter", + "fmt_keys": { + "level": "levelname", + "message": "message", + "timestamp": "timestamp", + "logger": "name", + "module": "module", + "function": "funcName", + "line": "lineno", + "thread_name": "threadName" + } + } + + handlers: + + "file_json": { + "class": "logging.handlers.RotatingFileHandler", + "level": "DEBUG", + "formatter": "json", + "filename": "logs/log", + "maxBytes": 1000000, + "backupCount": 3 + } + + and add "cfg://handlers.file_json" to queue handler +""" + +# From this video https://www.youtube.com/watch?v=9L77QExPmI0 +# Src here: https://github.com/mCodingLLC/VideosSampleCode/blob/master/videos/135_modern_logging/mylogger.py +class JSONFormatter(logging.Formatter): + def __init__( + self, + *, + fmt_keys: dict[str, str] | None = None, + ): + super().__init__() + self.fmt_keys = fmt_keys if fmt_keys is not None else {} + + def format(self, record: logging.LogRecord) -> str: + message = self._prepare_log_dict(record) + return json.dumps(message, default=str) + + def _prepare_log_dict(self, record: logging.LogRecord): + always_fields = { + "message": record.getMessage(), + "timestamp": dt.datetime.fromtimestamp( + record.created, tz=dt.timezone.utc + ).isoformat(), + } + if record.exc_info is not None: + always_fields["exc_info"] = self.formatException(record.exc_info) + + if record.stack_info is not None: + always_fields["stack_info"] = self.formatStack(record.stack_info) + + message = { + key: msg_val + if (msg_val := always_fields.pop(val, None)) is not None + else getattr(record, val) + for key, val in self.fmt_keys.items() + } + message.update(always_fields) + + for key, val in record.__dict__.items(): + if key not in LOG_RECORD_BUILTIN_ATTRS: + message[key] = val + + return message diff --git a/app/configs/logging/logging_config.json b/app/configs/logging/logging_config.json index 63b6746..61ea4f1 100644 --- 
a/app/configs/logging/logging_config.json +++ b/app/configs/logging/logging_config.json @@ -1,53 +1,53 @@ -{ - "version": 1, - "objects": { - "queue": { - "class": "queue.Queue", - "maxsize": 1000 - } - }, - "disable_existing_loggers": false, - "formatters": { - "simple": { - "format": "[%(levelname)s] (%(module)s|L: %(lineno)d) %(asctime)s: %(message)s", - "datefmt": "%Y-%m-%dT%H:%M:%S%z" - } - }, - "filters": { - "error_and_above": { - "()": "app.configs.logging.ErrorAndAboveFilter" - } - }, - "handlers": { - "console": { - "class": "logging.StreamHandler", - "level": "INFO", - "formatter": "simple", - "stream": "ext://sys.stdout", - "filters": ["error_and_above"] - }, - "error": { - "class": "logging.StreamHandler", - "level": "ERROR", - "formatter": "simple", - "stream": "ext://sys.stderr" - }, - "queue_handler": { - "class": "app.configs.logging.QueueListenerHandler", - "handlers": [ - "cfg://handlers.console", - "cfg://handlers.error" - ], - "queue": "cfg://objects.queue", - "respect_handler_level": true - } - }, - "loggers": { - "root": { - "level": "DEBUG", - "handlers": [ - "queue_handler" - ] - } - } -} +{ + "version": 1, + "objects": { + "queue": { + "class": "queue.Queue", + "maxsize": 1000 + } + }, + "disable_existing_loggers": false, + "formatters": { + "simple": { + "format": "[%(levelname)s] (%(module)s|L: %(lineno)d) %(asctime)s: %(message)s", + "datefmt": "%Y-%m-%dT%H:%M:%S%z" + } + }, + "filters": { + "error_and_above": { + "()": "app.configs.logging.ErrorAndAboveFilter" + } + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "level": "INFO", + "formatter": "simple", + "stream": "ext://sys.stdout", + "filters": ["error_and_above"] + }, + "error": { + "class": "logging.StreamHandler", + "level": "ERROR", + "formatter": "simple", + "stream": "ext://sys.stderr" + }, + "queue_handler": { + "class": "app.configs.logging.QueueListenerHandler", + "handlers": [ + "cfg://handlers.console", + "cfg://handlers.error" + ], + "queue": 
"cfg://objects.queue", + "respect_handler_level": true + } + }, + "loggers": { + "root": { + "level": "DEBUG", + "handlers": [ + "queue_handler" + ] + } + } +} diff --git a/app/configs/logging/queue_handler.py b/app/configs/logging/queue_handler.py index 6a224b8..4a6dd98 100644 --- a/app/configs/logging/queue_handler.py +++ b/app/configs/logging/queue_handler.py @@ -1,61 +1,61 @@ -from logging.config import ConvertingList, ConvertingDict, valid_ident -from logging.handlers import QueueHandler, QueueListener -from queue import Queue -import atexit - - -class QueueHnadlerHelper: - - @staticmethod - def resolve_handlers(l): - if not isinstance(l, ConvertingList): - return l - - # Indexing the list performs the evaluation. - return [l[i] for i in range(len(l))] - - @staticmethod - def resolve_queue(q): - if not isinstance(q, ConvertingDict): - return q - if '__resolved_value__' in q: - return q['__resolved_value__'] - - cname = q.pop('class') - klass = q.configurator.resolve(cname) - props = q.pop('.', None) - kwargs = {k: q[k] for k in q if valid_ident(k)} - result = klass(**kwargs) - if props: - for name, value in props.items(): - setattr(result, name, value) - - q['__resolved_value__'] = result - return result - - -# The guy from this video https://www.youtube.com/watch?v=9L77QExPmI0 is using logging features only available in 3.12 -# This article had the class required to build the queue handler in 3.11 -# https://rob-blackbourn.medium.com/how-to-use-python-logging-queuehandler-with-dictconfig-1e8b1284e27a -class QueueListenerHandler(QueueHandler): - - def __init__(self, handlers, respect_handler_level=False, auto_run=True, queue=Queue(-1)): - queue = QueueHnadlerHelper.resolve_queue(queue) - super().__init__(queue) - handlers = QueueHnadlerHelper.resolve_handlers(handlers) - self._listener = QueueListener( - self.queue, - *handlers, - respect_handler_level=respect_handler_level) - if auto_run: - self.start() - atexit.register(self.stop) - - def start(self): - 
self._listener.start() - - def stop(self): - self._listener.stop() - - def emit(self, record): - return super().emit(record) +from logging.config import ConvertingList, ConvertingDict, valid_ident +from logging.handlers import QueueHandler, QueueListener +from queue import Queue +import atexit + + +class QueueHnadlerHelper: + + @staticmethod + def resolve_handlers(l): + if not isinstance(l, ConvertingList): + return l + + # Indexing the list performs the evaluation. + return [l[i] for i in range(len(l))] + + @staticmethod + def resolve_queue(q): + if not isinstance(q, ConvertingDict): + return q + if '__resolved_value__' in q: + return q['__resolved_value__'] + + cname = q.pop('class') + klass = q.configurator.resolve(cname) + props = q.pop('.', None) + kwargs = {k: q[k] for k in q if valid_ident(k)} + result = klass(**kwargs) + if props: + for name, value in props.items(): + setattr(result, name, value) + + q['__resolved_value__'] = result + return result + + +# The guy from this video https://www.youtube.com/watch?v=9L77QExPmI0 is using logging features only available in 3.12 +# This article had the class required to build the queue handler in 3.11 +# https://rob-blackbourn.medium.com/how-to-use-python-logging-queuehandler-with-dictconfig-1e8b1284e27a +class QueueListenerHandler(QueueHandler): + + def __init__(self, handlers, respect_handler_level=False, auto_run=True, queue=Queue(-1)): + queue = QueueHnadlerHelper.resolve_queue(queue) + super().__init__(queue) + handlers = QueueHnadlerHelper.resolve_handlers(handlers) + self._listener = QueueListener( + self.queue, + *handlers, + respect_handler_level=respect_handler_level) + if auto_run: + self.start() + atexit.register(self.stop) + + def start(self): + self._listener.start() + + def stop(self): + self._listener.stop() + + def emit(self, record): + return super().emit(record) diff --git a/app/configs/question_templates.py b/app/configs/question_templates.py index b4498a8..b752a01 100644 --- 
a/app/configs/question_templates.py +++ b/app/configs/question_templates.py @@ -1,1275 +1,1275 @@ -import uuid - -from .constants import MinTimers - - -def getListeningPartTemplate(): - return { - "audio": { - "repeatableTimes": 3, - "source": "", - }, - "exercises": [] - } - - -def getListeningTemplate(): - return { - "parts": [], - "isDiagnostic": False, - "minTimer": MinTimers.LISTENING_MIN_TIMER_DEFAULT, - "module": "listening" - } - - - -def getListeningPostSample(): - return { - "parts": [ - { - "exercises": [ - { - "questions": [ - { - "id": "1", - "options": [ - { - "id": "A", - "text": "To start working out together" - }, - { - "id": "B", - "text": "To join a book club" - }, - { - "id": "C", - "text": "To go on a trip" - }, - { - "id": "D", - "text": "To take a cooking class" - } - ], - "prompt": "What is John's suggestion to Emily?", - "solution": "A", - "variant": "text" - }, - { - "id": "2", - "options": [ - { - "id": "A", - "text": "She doesn't have time" - }, - { - "id": "B", - "text": "She doesn't have money" - }, - { - "id": "C", - "text": "She doesn't have a gym membership" - }, - { - "id": "D", - "text": "She doesn't like working out" - } - ], - "prompt": "What is Emily's current reason for not working out?", - "solution": "D", - "variant": "text" - }, - { - "id": "3", - "options": [ - { - "id": "A", - "text": "Gold's Gym" - }, - { - "id": "B", - "text": "Planet Fitness" - }, - { - "id": "C", - "text": "Fitness Plus" - }, - { - "id": "D", - "text": "Anytime Fitness" - } - ], - "prompt": "What gym does John suggest to Emily?", - "solution": "C", - "variant": "text" - }, - { - "id": "4", - "options": [ - { - "id": "A", - "text": "$10 a month" - }, - { - "id": "B", - "text": "$20 a month" - }, - { - "id": "C", - "text": "$30 a month" - }, - { - "id": "D", - "text": "$40 a month" - } - ], - "prompt": "What is the price of the basic membership at Fitness Plus?", - "solution": "C", - "variant": "text" - }, - { - "id": "5", - "options": [ - { - "id": 
"A", - "text": "3 months" - }, - { - "id": "B", - "text": "6 months" - }, - { - "id": "C", - "text": "12 months" - }, - { - "id": "D", - "text": "No commitment required" - } - ], - "prompt": "How long is the commitment for the basic membership at Fitness Plus?", - "solution": "D", - "variant": "text" - }, - { - "id": "6", - "options": [ - { - "id": "A", - "text": "Dance and cooking" - }, - { - "id": "B", - "text": "Yoga and spin" - }, - { - "id": "C", - "text": "Singing and art" - }, - { - "id": "D", - "text": "Martial arts and rock climbing" - } - ], - "prompt": "What type of classes does Fitness Plus offer?", - "solution": "B", - "variant": "text" - }, - { - "id": "7", - "options": [ - { - "id": "A", - "text": "Watch movies" - }, - { - "id": "B", - "text": "Take classes" - }, - { - "id": "C", - "text": "Play sports" - }, - { - "id": "D", - "text": "Study" - } - ], - "prompt": "What does John and Emily plan to do together at the gym?", - "solution": "B", - "variant": "text" - }, - { - "id": "8", - "options": [ - { - "id": "A", - "text": "Saturday" - }, - { - "id": "B", - "text": "Sunday" - }, - { - "id": "C", - "text": "Monday" - }, - { - "id": "D", - "text": "Tuesday" - } - ], - "prompt": "What day does John suggest to go check out the gym?", - "solution": "C", - "variant": "text" - }, - { - "id": "9", - "options": [ - { - "id": "A", - "text": "To go shopping" - }, - { - "id": "B", - "text": "To get lunch" - }, - { - "id": "C", - "text": "To schedule a tour" - }, - { - "id": "D", - "text": "To watch a movie" - } - ], - "prompt": "What is John's plan after checking out the gym?", - "solution": "C", - "variant": "text" - }, - { - "id": "10", - "options": [ - { - "id": "A", - "text": "Nervous" - }, - { - "id": "B", - "text": "Excited" - }, - { - "id": "C", - "text": "Uninterested" - }, - { - "id": "D", - "text": "Angry" - } - ], - "prompt": "How does Emily feel about starting to work out again?", - "solution": "B", - "variant": "text" - } - ] - } - ], - "text": { - 
"conversation": [ - { - "gender": "male", - "name": "John", - "text": "Hey, have you been working out lately?", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "Not really, I've been so busy with work.", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "Well, I've been thinking about getting a gym membership. Do you have one?", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "No, but I've been considering it too. Which gym are you thinking of joining?", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "I was looking at the one down the street, Fitness Plus. It seems to have good reviews.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "Oh, I've heard of that one. What's the membership like?", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "They have different packages, but I'm thinking of going for the basic one. It's $30 a month with a one-year commitment.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "That's not bad. Do they have classes too?", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "Yeah, they have a variety of classes like yoga and spin. I'm interested in trying out some of those.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "I've always wanted to try yoga. Maybe we can go together sometime.", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "That would be great! It's always more fun to have a workout buddy. Have you looked into any other gyms?", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "Not really. I've been busy with work, but I'll definitely check out Fitness Plus. 
Maybe we can both join and motivate each other.", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "Sounds like a plan. Let's do it.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "Awesome. We can go check it out this weekend.", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "Perfect. I'll give them a call to schedule a tour.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Emily", - "text": "Thanks, John. I'm excited to start working out again.", - "voice": "Ruth" - }, - { - "gender": "male", - "name": "John", - "text": "Me too. Let's do this!", - "voice": "Stephen" - } - ] - } - }, - { - "exercises": [ - { - "id": "0646ab5b-e8e2-4da5-8a0c-784fe3d0186a", - "maxWords": 3, - "prompt": "You will hear a monologue. Answer the questions below using no more than three words or a number accordingly.", - "solutions": [ - { - "id": "11", - "solution": [ - "multi-faceted", - "various dimensions" - ] - }, - { - "id": "12", - "solution": [ - "climate change", - "rising temperature" - ] - }, - { - "id": "13", - "solution": [ - "waste minimization", - "resource reuse" - ] - }, - { - "id": "14", - "solution": [ - "reduce carbon footprint", - "support sustainable businesses" - ] - }, - { - "id": "15", - "solution": [ - "ourselves", - "parents/educators" - ] - } - ], - "text": "What is the concept of sustainability?{{11}}\\nWhat is the biggest challenge we are facing?{{12}}\\nWhat is the need for a circular economy?{{13}}\\nWhat can individuals do to address these challenges?{{14}}\\nWho is responsible for educating future generations about environmental sustainability?{{15}}\\n", - "type": "writeBlanks" - }, - { - "id": "0c99267c-8c3a-4ed0-9f05-00c07a480831", - "maxWords": 1, - "prompt": "You will hear a monologue. 
Fill the form with words/numbers missing.", - "solutions": [ - { - "id": "16", - "solution": "dimensional" - }, - { - "id": "17", - "solution": "Change" - }, - { - "id": "18", - "solution": "Natural" - }, - { - "id": "19", - "solution": "Waste" - }, - { - "id": "20", - "solution": "Injustice" - } - ], - "text": "Key: Multi-{{1}} Concept of Sustainability\nValue: The concept of sustainability encompasses social, economic, and environmental aspects, and it is essential to address all dimensions in order to achieve a sustainable future.\\nKey: Climate {{2}}\nValue: Rising temperatures, extreme weather events, and loss of biodiversity are just some of the consequences of climate change caused by human activities such as burning fossil fuels and deforestation.\\nKey: Depletion of {{3}} Resources\nValue: Our current consumption patterns are not sustainable, and the overexploitation of resources is leading to deforestation, water scarcity, and depletion of fisheries. It is crucial to find ways to reduce our consumption and ensure resource replenishment.\\nKey: {{4}} Management\nValue: Our linear model of consumption and disposal is not sustainable, and we need to shift towards a circular economy where waste is minimized, and resources are reused or recycled.\\nKey: Environmental {{5}}\nValue: Marginalized communities are often the most affected by environmental degradation, and it is crucial to address this injustice and ensure that environmental policies and actions are fair and inclusive.\\n", - "type": "writeBlanks" - } - ], - "text": "\n\nHello everyone, thank you for joining me in this discussion about one of the most pressing issues of our time - environmental sustainability. 
I believe we can all agree that our planet is facing numerous challenges due to human activities and it is high time we address them.\n\nFirstly, I would like to acknowledge that the concept of sustainability itself is multi-faceted and encompasses various dimensions such as social, economic, and environmental aspects. However, today, I would like to focus on the environmental challenges we are currently facing.\n\nOne of the biggest challenges we are facing is climate change. The Earth's temperature is rising at an alarming rate due to the increase in greenhouse gas emissions, primarily from human activities such as burning fossil fuels and deforestation. This has resulted in extreme weather events, loss of biodiversity, and rising sea levels, among others. We are already seeing the consequences of climate change, and if we do not take immediate action, the situation will only worsen.\n\nAnother challenge is the depletion of natural resources. Our planet has a finite amount of resources, and yet, our current consumption patterns are not sustainable. The overexploitation of resources is leading to deforestation, water scarcity, and depletion of fisheries. We need to find ways to reduce our consumption and ensure that we are not exploiting resources faster than they can replenish.\n\nIn addition to climate change and resource depletion, waste management is also a significant challenge. Our current linear model of consumption and disposal is not sustainable in the long run. We produce a massive amount of waste, and most of it ends up in landfills or our oceans, polluting our environment and harming wildlife. We need to shift towards a circular economy, where waste is minimized, and resources are reused or recycled.\n\nFurthermore, there is also the issue of environmental injustice. The impacts of environmental degradation are not equally distributed, and marginalized communities are often the most affected. 
This injustice needs to be addressed, and measures must be taken to ensure that environmental policies and actions are fair and inclusive.\n\nSo, what can we do to address these challenges? Firstly, we need to acknowledge that each one of us has a role to play. We cannot rely on governments or organizations alone to solve these issues. We need to make changes in our daily lives, such as reducing our carbon footprint, adopting sustainable practices, and supporting businesses that prioritize the environment.\n\nWe also need to hold corporations and governments accountable for their actions. We have the power to demand change through our consumer choices and our votes. It is crucial that we urge our leaders to implement policies that promote sustainable practices and penalize those who harm the environment.\n\nMoreover, education and awareness are essential in tackling these challenges. We need to educate ourselves and others about the importance of environmental sustainability and the actions we can take to achieve it. Our children are the future, and it is our responsibility to educate them on the significance of preserving our planet.\n\nIn conclusion, the challenges of environmental sustainability are daunting, but they are not insurmountable. It is up to us to take action and make a difference. We owe it to ourselves, future generations, and the planet to ensure a sustainable future. Let us work together towards a greener, cleaner, and more sustainable world. Thank you." 
- }, - { - "exercises": [ - { - "id": "0149f828-c216-4e60-80fa-1dd28a860031", - "maxWords": 1, - "prompt": "Fill the blank space with the word missing from the audio.", - "solutions": [ - { - "id": "21", - "solution": "Smith" - }, - { - "id": "22", - "solution": "rash" - }, - { - "id": "23", - "solution": "fever" - }, - { - "id": "24", - "solution": "allergies" - }, - { - "id": "25", - "solution": "antinuclear" - }, - { - "id": "26", - "solution": "immunosuppressants" - }, - { - "id": "27", - "solution": "evaluation" - }, - { - "id": "28", - "solution": "Autoimmune" - }, - { - "id": "29", - "solution": "four" - }, - { - "id": "30", - "solution": "possibilities" - } - ], - "text": "Dr. {{21}}, Dr. Patel, Sarah, and Alex were discussing a case study of a patient with a fever and rash.\\nThe patient was a 30-year-old female with a history of allergies.\\nThe patient's rash was erythematous and she had a {{23}} of 101°F.\\nPossible initial diagnoses included a viral infection or allergic reaction.\\nHowever, the final diagnosis was an autoimmune disorder based on elevated {{25}} antibodies in the patient's blood work.\\nTreatment involved {{26}} and the patient's symptoms resolved within a week.\\nThorough {{27}} and considering all possibilities is important in the diagnostic process.\\n{{28}} disorders can develop without any underlying trigger.\\nThe seminar ended with the {{29}} individuals thanking each other and continuing their day with new knowledge.\\nThinking outside the box and considering all {{30}} is crucial in the diagnostic process.", - "type": "writeBlanks" - } - ], - "text": { - "conversation": [ - { - "gender": "male", - "name": "Dr. Smith", - "text": "Good morning everyone, thank you for joining us today for this seminar on diagnosis. Let's begin with the case study of a patient who presented with a fever and rash.", - "voice": "Kevin" - }, - { - "gender": "male", - "name": "Dr. Patel", - "text": "Yes, I remember this case. 
The patient was a 30-year-old female with a history of allergies.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Sarah", - "text": "Hi, I'm Sarah, a third-year medical student. I remember studying this case in our lectures. The patient's rash was erythematous, right?", - "voice": "Aria" - }, - { - "gender": "male", - "name": "Dr. Smith", - "text": "Yes, that's correct. And the patient had a fever of 101°F. What would be your initial diagnosis?", - "voice": "Kevin" - }, - { - "gender": "male", - "name": "Alex", - "text": "Hi, I'm Alex, a fourth-year medical student. I would say it could be a viral infection or an allergic reaction.", - "voice": "Kevin" - }, - { - "gender": "male", - "name": "Dr. Patel", - "text": "That's a good guess, Alex. But remember, always consider other possibilities. In this case, it turned out to be an autoimmune disorder.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Sarah", - "text": "Oh, I didn't think of that. How did you come to that diagnosis?", - "voice": "Aria" - }, - { - "gender": "male", - "name": "Dr. Smith", - "text": "Well, the patient's blood work showed elevated levels of antinuclear antibodies. That, along with the clinical presentation, pointed towards an autoimmune disorder.", - "voice": "Kevin" - }, - { - "gender": "male", - "name": "Alex", - "text": "That's interesting. I would have never thought of that. How did you treat the patient?", - "voice": "Kevin" - }, - { - "gender": "male", - "name": "Dr. Patel", - "text": "We started the patient on immunosuppressants and the rash and fever resolved within a week.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Sarah", - "text": "Wow, it's amazing how a simple rash and fever could lead to such a complex diagnosis.", - "voice": "Aria" - }, - { - "gender": "male", - "name": "Dr. Smith", - "text": "That's the importance of thorough evaluation and considering all possibilities. 
Any other thoughts or questions?", - "voice": "Kevin" - }, - { - "gender": "male", - "name": "Alex", - "text": "I just wanted to ask, do you think the patient's allergies could have triggered the autoimmune disorder?", - "voice": "Kevin" - }, - { - "gender": "male", - "name": "Dr. Patel", - "text": "It's possible, but we can't say for sure. Sometimes autoimmune disorders can develop without any underlying trigger.", - "voice": "Stephen" - }, - { - "gender": "female", - "name": "Sarah", - "text": "Thank you for sharing this case with us, it was very informative.", - "voice": "Aria" - }, - { - "gender": "male", - "name": "Dr. Smith", - "text": "My pleasure. I hope this discussion has given you a better understanding of the diagnostic process. Always remember to think outside the box and consider all possibilities.", - "voice": "Kevin" - } - ] - } - }, - { - "exercises": [ - { - "id": "921d3a2a-7f6e-46ae-a19d-65eb5ba21375", - "maxWords": 3, - "prompt": "You will hear a monologue. Answer the questions below using no more than three words or a number accordingly.", - "solutions": [ - { - "id": "31", - "solution": [ - "complex mixture", - "vital natural resource", - "non-renewable resource" - ] - }, - { - "id": "32", - "solution": [ - "sand, silt, clay" - ] - }, - { - "id": "33", - "solution": [ - "ability to provide nutrients", - "balance of essential nutrients", - "breaking down organic matter" - ] - }, - { - "id": "34", - "solution": [ - "break down organic matter", - "improve soil structure", - "release nutrients" - ] - }, - { - "id": "35", - "solution": [ - "soil erosion", - "land use practices", - "preservation of soil for future generations" - ] - } - ], - "text": "What is soil?{{31}}\\nWhat are the three main categories of soil?{{32}}\\nWhat is soil fertility?{{33}}\\nWhat is the role of microorganisms in soil?{{34}}\\nWhat environmental issue can be prevented through proper soil management?{{35}}\\n", - "type": "writeBlanks" - }, - { - "id": 
"e86afd9a-90d1-48db-bee1-0d20f6eea64d", - "maxWords": 1, - "prompt": "Fill the blank space with the word missing from the audio.", - "solutions": [ - { - "id": "36", - "solution": "vital" - }, - { - "id": "37", - "solution": "classified" - }, - { - "id": "38", - "solution": "fertility" - }, - { - "id": "39", - "solution": "microorganisms" - }, - { - "id": "40", - "solution": "science" - } - ], - "text": "Soil is a {{36}} natural resource that provides the foundation for plant growth\\nSoil is {{37}} into three main categories: sand, silt, and clay\\nSoil {{38}} refers to the ability of the soil to provide essential nutrients for plant growth\\nHealthy soil is teeming with {{39}}\\nSoil {{40}} plays a significant role in environmental conservation", - "type": "writeBlanks" - } - ], - "text": "\n\nGood morning everyone, today I would like to talk to you about a topic that is often overlooked but plays a crucial role in our daily lives - soil science. Soil science deals with the study of the composition, structure, and properties of soil, as well as how it interacts with the environment and living organisms.\n\nSoil is a vital natural resource that provides the foundation for plant growth, which in turn sustains all life on earth. It is the basis for our food production, as well as the source of many raw materials such as wood, cotton, and rubber. Without a healthy and productive soil, our agricultural systems would collapse, and we would struggle to feed our growing population.\n\nBut what exactly is soil? Soil is a complex mixture of minerals, organic matter, water, and air, all held together by microorganisms. It takes thousands of years for soil to form, and it is a non-renewable resource, which makes its conservation even more critical.\n\nOne of the key aspects of soil science is understanding the different types of soil and their properties. Soil is classified into three main categories: sand, silt, and clay. 
These categories are based on the size of the particles that make up the soil. Sand particles are the largest, followed by silt and then clay. The composition of these particles greatly affects the soil's properties, such as water retention, drainage, and nutrient availability.\n\nAnother crucial aspect of soil science is the study of soil fertility. Soil fertility refers to the ability of the soil to provide essential nutrients for plant growth. The nutrients in the soil come from the breakdown of organic matter, such as dead plants and animal remains. Fertile soil contains a balance of essential nutrients, such as nitrogen, phosphorus, and potassium, which are necessary for plant growth.\n\nThe health of the soil is also crucial in soil science. Healthy soil is teeming with microorganisms, which play a vital role in breaking down organic matter and releasing nutrients into the soil. These microorganisms also help to improve soil structure, making it more porous and allowing for better air and water circulation.\n\nSoil science also plays a significant role in environmental conservation. Soil erosion, the removal of topsoil by wind and water, is a significant environmental issue that can be prevented through proper soil management. By understanding the factors that contribute to soil erosion, such as improper land use practices, we can implement strategies to prevent it and preserve our soil for future generations.\n\nIn conclusion, soil science is a critical field of study that impacts our daily lives in more ways than we can imagine. It is not just about digging in the dirt; it is a complex science that requires a multidisciplinary approach. By understanding the composition, properties, and fertility of soil, we can ensure the sustainable use of this precious resource and preserve it for future generations. Thank you." 
- } - ] - } - - -def getReadingTemplate(): - return { - "parts": [], - "isDiagnostic": False, - "minTimer": 60, - "type": "academic" - } - - -def getReadingPostSample(): - return { - "parts": [ - { - "exercises": [ - { - "id": "cbd08cdd-5850-40a8-b6e2-6021c04474ad", - "prompt": "Do the following statements agree with the information given in the Reading Passage?", - "questions": [ - { - "id": "1", - "prompt": "Technology is constantly evolving and shaping our world.", - "solution": "true" - }, - { - "id": "2", - "prompt": "The use of artificial intelligence (AI) has only recently become popular.", - "solution": "false" - }, - { - "id": "3", - "prompt": "5G technology offers slower speeds and higher latency than its predecessors.", - "solution": "false" - }, - { - "id": "4", - "prompt": "Social media has had a minimal impact on our society.", - "solution": "false" - }, - { - "id": "5", - "prompt": "Cybersecurity is not a growing concern as technology becomes more integrated into our lives.", - "solution": "false" - }, - { - "id": "6", - "prompt": "Technology has not had a significant impact on the education sector.", - "solution": "false" - }, - { - "id": "7", - "prompt": "Automation and AI are not causing shifts in the job market.", - "solution": "false" - } - ], - "type": "trueFalse" - }, - { - "allowRepetition": True, - "id": "b88f3eb5-11b7-4a8e-bb1a-4e96215b34bf", - "prompt": "Complete the summary below. Click a blank to select the corresponding word(s) for it.\\nThere are more words than spaces so you will not use them all. 
You may use any of the words more than once.", - "solutions": [ - { - "id": "8", - "solution": "smartphones" - }, - { - "id": "9", - "solution": "artificial intelligence" - }, - { - "id": "10", - "solution": "5G technology" - }, - { - "id": "11", - "solution": "virtual reality" - }, - { - "id": "12", - "solution": "cybersecurity" - }, - { - "id": "13", - "solution": "telemedicine" - } - ], - "text": "\n\nTechnology has become an integral part of our daily lives, from {{8}} to smart homes. The rise of {{9}} (AI) and the Internet of Things (IoT) are two major trends that are revolutionizing the way we live and work. {{10}} is also gaining popularity, enabling advancements in areas like {{11}} and self-driving cars. Cloud computing, virtual and augmented reality (VR and AR), and blockchain technology are also on the rise, impacting industries such as finance, education, and healthcare. Social media has changed the way we communicate and raised concerns about privacy and mental health. With the increase in data breaches and cyber attacks, {{12}} has become a growing concern. {{13}} and online learning have made healthcare and education more accessible and efficient. However, there are concerns about the impact of technology on the job market, leading to discussions about the need for reskilling and upskilling. As technology continues to advance, it is crucial to understand its impact and consequences on our society.", - "type": "fillBlanks", - "words": [ - "speaking", - "5G technology", - "telemedicine", - "virtual reality", - "antechamber", - "smartphones", - "kitsch", - "devilish", - "parent", - "artificial intelligence", - "cybersecurity" - ] - } - ], - "text": { - "content": "In today's society, technology has become an integral part of our daily lives. From smartphones to smart homes, we are constantly surrounded by the latest and most advanced technological devices. 
As technology continues to evolve and improve, it is important to understand the current trends and how they are shaping our world. One of the biggest technology trends in recent years is the rise of artificial intelligence (AI). AI is the simulation of human intelligence processes by machines, particularly computer systems. This technology has been around for decades, but with the advancement of computing power and big data, AI has become more sophisticated and prevalent. From virtual personal assistants like Siri and Alexa to self-driving cars, AI is revolutionizing the way we live and work. Another trend that has gained widespread popularity is the Internet of Things (IoT). This refers to the interconnection of everyday objects via the internet, allowing them to send and receive data. Smart homes, wearable devices, and even smart cities are all examples of IoT. With IoT, our devices and appliances can communicate with each other, making our lives more convenient and efficient. The use of 5G technology is also on the rise. 5G is the fifth generation of wireless technology, offering faster speeds and lower latency than its predecessors. With 5G, we can expect to see advancements in areas like virtual reality, self-driving cars, and remote surgery. It will also enable the development of smart cities and the Internet of Things to reach its full potential. Cloud computing is another trend that has been steadily growing. Cloud computing involves the delivery of computing services over the internet, such as storage, servers, and databases. This allows for easy access to data and applications from anywhere, at any time. Many businesses and individuals are utilizing cloud computing to streamline their operations and increase efficiency. Virtual and augmented reality (VR and AR) are becoming more prevalent in various industries, from gaming and entertainment to healthcare and education. 
VR immerses the user in a simulated environment, while AR overlays digital information onto the real world. These technologies have the potential to change the way we learn, work, and entertain ourselves. Blockchain technology is also gaining traction, particularly in the financial sector. Blockchain is a decentralized digital ledger that records transactions across a network of computers. It allows for secure and transparent transactions without the need for intermediaries. This technology has the potential to disrupt traditional banking and financial systems. Social media has been a dominant force in the technology world for some time now, and it continues to evolve and shape our society. With the rise of platforms like Facebook, Twitter, and Instagram, we are more connected than ever before. Social media has changed the way we communicate, share information, and even do business. It has also raised concerns about privacy and the impact of social media on mental health. Cybersecurity is a growing concern as technology becomes more integrated into our lives. With the increase in data breaches and cyber attacks, the need for strong cybersecurity measures is greater than ever. Companies and individuals are investing in better security protocols to protect their sensitive information. The healthcare industry is also experiencing technological advancements with the introduction of telemedicine. This allows patients to receive medical care remotely, without having to visit a physical doctor's office. Telemedicine has become increasingly popular, especially during the COVID-19 pandemic, as it allows for safe and convenient access to healthcare. In the education sector, technology has brought about significant changes as well. Online learning platforms and digital tools have made education more accessible and efficient. With the rise of e-learning, students can access education from anywhere in the world and at their own pace. 
As technology continues to advance, concerns about its impact on the job market have also arisen. Automation and AI are replacing human workers in many industries, leading to job loss and shifts in the workforce. This trend has sparked discussions about the need for reskilling and upskilling to adapt to the changing job market. In conclusion, the world is constantly evolving and adapting to the latest technology trends. From AI and IoT to 5G and blockchain, these advancements are shaping the way we live, work, and interact with each other. As society continues to embrace and integrate technology into our daily lives, it is crucial to understand its impact and potential consequences. Whether it be in the fields of healthcare, education, or finance, technology is undoubtedly transforming the world as we know it.", - "title": "Modern Technology Trends" - } - }, - { - "exercises": [ - { - "id": "f2daa91a-3e92-4c07-aefd-719bcf22bac7", - "prompt": "Do the following statements agree with the information given in the Reading Passage?", - "questions": [ - { - "id": "14", - "prompt": "Yoga and meditation have been gaining popularity in recent years.", - "solution": "true" - }, - { - "id": "15", - "prompt": "Yoga and meditation originated in ancient India.", - "solution": "true" - }, - { - "id": "16", - "prompt": "Yoga is a system that combines physical postures, breathing techniques, and meditation.", - "solution": "true" - }, - { - "id": "17", - "prompt": "Meditation involves training the mind to achieve a state of inner peace and relaxation.", - "solution": "true" - }, - { - "id": "18", - "prompt": "Yoga and meditation can reduce stress and improve mental health.", - "solution": "true" - }, - { - "id": "19", - "prompt": "Yoga and meditation can improve physical health.", - "solution": "true" - }, - { - "id": "20", - "prompt": "Yoga and meditation are only suitable for people who are physically fit.", - "solution": "false" - } - ], - "type": "trueFalse" - }, - { - "id": 
"b500cb69-843d-4430-a544-924c514ea12a", - "maxWords": 3, - "prompt": "Choose no more than three words and/or a number from the passage for each answer.", - "solutions": [ - { - "id": "21", - "solution": [ - "physical, mental, emotional" - ] - }, - { - "id": "22", - "solution": [ - "ancient India" - ] - }, - { - "id": "23", - "solution": [ - "physical postures, breathing techniques" - ] - }, - { - "id": "24", - "solution": [ - "reduce stress, improve mindfulness" - ] - }, - { - "id": "25", - "solution": [ - "improve, promote relaxation" - ] - }, - { - "id": "26", - "solution": [ - "anyone, all ages and backgrounds" - ] - } - ], - "text": "What are the three main benefits of yoga and meditation?{{21}}\\nWhere did yoga originate?{{22}}\\nWhat are the two components of yoga?{{23}}\\nHow do yoga and meditation improve mental health?{{24}}\\nWhat is the impact of yoga and meditation on sleep quality?{{25}}\\nWho can practice yoga and meditation?{{26}}\\n", - "type": "writeBlanks" - } - ], - "text": { - "content": "Yoga and meditation have been gaining popularity in recent years as more and more people recognize the physical, mental, and emotional benefits of these ancient practices. Originating in ancient India, yoga is a holistic system that combines physical postures, breathing techniques, and meditation to promote overall well-being. Similarly, meditation is a mental practice that involves training the mind to achieve a state of inner peace and relaxation. One of the main benefits of yoga and meditation is their ability to reduce stress and improve mental health. In today's fast-paced world, stress has become a common problem for many people, leading to various physical and mental health issues. However, studies have shown that practicing yoga and meditation can significantly reduce stress levels and improve overall mental health. 
This is because these practices focus on deep breathing and mindfulness, which can help individuals to calm their minds and relax their bodies. As a result, many people who regularly practice yoga and meditation report feeling more peaceful, centered, and less stressed in their daily lives. Furthermore, yoga and meditation have been shown to have a positive impact on physical health. The physical postures and movements in yoga help to improve flexibility, strength, and balance. These postures also work to stretch and strengthen different muscles in the body, which can alleviate tension and prevent injuries. Additionally, the controlled breathing in yoga helps to increase oxygen flow throughout the body, which can improve cardiovascular health. As for meditation, studies have shown that it can lower blood pressure and reduce the risk of heart disease. These physical benefits make yoga and meditation an excellent form of exercise for people of all ages and fitness levels. Apart from the physical and mental benefits, yoga and meditation also have a positive impact on emotional well-being. The practice of mindfulness in these practices helps individuals to become more aware of their thoughts and emotions, allowing them to better manage and process them. This can result in improved emotional regulation and a greater sense of self-awareness. As a result, individuals who practice yoga and meditation often report feeling more positive, content, and emotionally stable. Another significant benefit of yoga and meditation is their ability to improve overall concentration and focus. In today's digital age, our minds are constantly bombarded with information and distractions, making it challenging to stay focused on tasks. However, regular practice of yoga and meditation can improve concentration and enhance cognitive function. 
This is because these practices require individuals to focus their minds on their breath, movements, or a specific mantra, helping to train the brain to stay focused for longer periods. Moreover, yoga and meditation have been shown to have a positive impact on sleep quality. Many people struggle with insomnia or other sleep-related issues, which can have a significant impact on their overall health and well-being. However, studies have shown that yoga and meditation can improve sleep quality and help individuals fall asleep faster. This is because these practices promote relaxation and reduce stress, which are common causes of sleep issues. As a result, individuals who practice yoga and meditation often report feeling more rested and rejuvenated after a good night's sleep. In addition to the physical, mental, emotional, and cognitive benefits, yoga and meditation also have a spiritual component. These practices are deeply rooted in ancient Indian spirituality and have been used for centuries to connect individuals with their inner selves and the universe. While the spiritual aspect may not be for everyone, many people find that it adds a deeper level of meaning and purpose to their practice. Furthermore, yoga and meditation are accessible to people of all ages and backgrounds. Whether you are young or old, fit or not, religious or not, yoga and meditation can be practiced by anyone. There are many different styles and forms of yoga and meditation, allowing individuals to choose the practice that best suits their needs and preferences. This inclusivity is what makes yoga and meditation such powerful and universal practices. In conclusion, the benefits of yoga and meditation are numerous and far-reaching. From reducing stress and improving mental health to promoting physical strength and emotional well-being, these ancient practices offer a holistic approach to overall health and wellness. 
Whether you are looking to improve your physical fitness, manage stress, or connect with your inner self, yoga and meditation are powerful tools that can help you achieve these goals. So why not give it a try and experience the transformative power of yoga and meditation for yourself?", - "title": "The Benefits of Yoga and Meditation" - } - }, - { - "exercises": [ - { - "allowRepetition": True, - "id": "1035c153-d38a-4f27-a14e-0ce63184ff82", - "prompt": "Complete the summary below. Click a blank to select the corresponding word(s) for it.\\nThere are more words than spaces so you will not use them all. You may use any of the words more than once.", - "solutions": [ - { - "id": "27", - "solution": "Cultural diversity" - }, - { - "id": "28", - "solution": "Variety" - }, - { - "id": "29", - "solution": "Multicultural" - }, - { - "id": "30", - "solution": "Tolerance" - }, - { - "id": "31", - "solution": "Unity" - }, - { - "id": "32", - "solution": "Challenges" - }, - { - "id": "33", - "solution": "Celebrated" - } - ], - "text": "\n\n{{27}} refers to the {{28}} of cultures, traditions, beliefs, and lifestyles that exist within a society. In today's interconnected world, the movement of people, goods, and ideas has led to a more diverse and {{29}} society. The exchange of ideas and knowledge, {{30}} and understanding, and promoting peace and {{31}} are some of the benefits of cultural diversity. However, it also poses {{32}} such as potential clashes and the marginalization of certain groups. To address these challenges, it is important for societies to promote cultural competency and sensitivity, as well as for individuals to embrace diversity and participate in cultural events. 
Overall, cultural diversity is a crucial aspect of our global society that needs to be preserved and {{33}}.", - "type": "fillBlanks", - "words": [ - "Tolerance", - "Cultural diversity", - "penny", - "Multicultural", - "shrill", - "Celebrated", - "Variety", - "query", - "Challenges", - "wont", - "Unity", - "chemical" - ] - }, - { - "questions": [ - { - "id": "34", - "options": [ - { - "id": "A", - "text": "The variety of cultures, traditions, beliefs, and lifestyles within a society" - }, - { - "id": "B", - "text": "The number of countries in the world" - }, - { - "id": "C", - "text": "The different types of technology used in different cultures" - }, - { - "id": "D", - "text": "The number of languages spoken in a society" - } - ], - "prompt": "What is the main definition of cultural diversity?", - "solution": "A", - "variant": "text" - }, - { - "id": "35", - "options": [ - { - "id": "A", - "text": "By making it easier for people to travel" - }, - { - "id": "B", - "text": "By increasing the number of countries in the world" - }, - { - "id": "C", - "text": "By creating more jobs for people from different cultures" - }, - { - "id": "D", - "text": "By promoting a single global culture" - } - ], - "prompt": "How has technology contributed to an increase in cultural diversity?", - "solution": "A", - "variant": "text" - }, - { - "id": "36", - "options": [ - { - "id": "A", - "text": "Increased economic opportunities" - }, - { - "id": "B", - "text": "Higher levels of education" - }, - { - "id": "C", - "text": "Improved transportation systems" - }, - { - "id": "D", - "text": "The exchange of ideas and knowledge" - } - ], - "prompt": "What is one of the key benefits of cultural diversity?", - "solution": "D", - "variant": "text" - }, - { - "id": "37", - "options": [ - { - "id": "A", - "text": "By forcing people to conform to a single culture" - }, - { - "id": "B", - "text": "By exposing people to different perspectives and experiences" - }, - { - "id": "C", - "text": "By 
creating a homogenous society" - }, - { - "id": "D", - "text": "By limiting the movement of people between countries" - } - ], - "prompt": "How does cultural diversity promote tolerance and understanding?", - "solution": "B", - "variant": "text" - }, - { - "id": "38", - "options": [ - { - "id": "A", - "text": "Increased discrimination" - }, - { - "id": "B", - "text": "A decline in technological advancements" - }, - { - "id": "C", - "text": "A decrease in the number of countries" - }, - { - "id": "D", - "text": "A lack of cultural exchange" - } - ], - "prompt": "What is one challenge posed by cultural diversity?", - "solution": "A", - "variant": "text" - }, - { - "id": "39", - "options": [ - { - "id": "A", - "text": "By promoting a single global culture" - }, - { - "id": "B", - "text": "By creating barriers between different groups" - }, - { - "id": "C", - "text": "By promoting cultural competency and sensitivity" - }, - { - "id": "D", - "text": "By limiting the number of countries in the world" - } - ], - "prompt": "What is one way that societies can address the challenges of cultural diversity?", - "solution": "C", - "variant": "text" - }, - { - "id": "40", - "options": [ - { - "id": "A", - "text": "To ignore cultural differences" - }, - { - "id": "B", - "text": "To actively participate in cultural events and activities" - }, - { - "id": "C", - "text": "To only embrace their own culture" - }, - { - "id": "D", - "text": "To avoid learning about other cultures" - } - ], - "prompt": "What is the responsibility of individuals in promoting and preserving cultural diversity?", - "solution": "B", - "variant": "text" - } - ] - } - ], - "text": { - "content": "Cultural diversity is a term that is often used in today's world, but what does it really mean? Simply put, cultural diversity refers to the variety of cultures, traditions, beliefs, and lifestyles that exist within a society. 
It is a reflection of the different backgrounds, experiences, and identities of individuals and groups. In this IELTS Reading Passage, we will explore the concept of cultural diversity and its significance in our global society. The world we live in today is more interconnected and interdependent than ever before. With the advancements in technology, transportation, and communication, people from different parts of the world can easily connect and interact with one another. This has led to an increase in the movement of people, goods, and ideas, resulting in a more diverse and multicultural society. In fact, it is estimated that over 190 countries exist in the world, each with its unique culture and traditions. One of the key benefits of cultural diversity is the exchange of ideas and knowledge. When people from different backgrounds come together, they bring with them their unique perspectives and experiences. This leads to a rich exchange of ideas, which can result in the development of new innovations and solutions to various problems. For example, the fusion of different cuisines has led to the creation of new and delicious dishes, and the blending of different musical styles has given birth to new genres of music. Moreover, cultural diversity also promotes tolerance and understanding among individuals and groups. When people are exposed to different cultures, they learn to appreciate and respect the differences that exist. This, in turn, leads to a more inclusive and harmonious society, where people from diverse backgrounds can coexist peacefully. In a world that is plagued by conflicts and discrimination, cultural diversity plays a crucial role in promoting peace and unity. However, despite its numerous benefits, cultural diversity also poses some challenges. One of the main challenges is the potential for cultural clashes. As individuals from different cultures interact, conflicts can arise due to differences in values, beliefs, and customs. 
This can lead to misunderstandings and even discrimination. For instance, a person from a collectivist culture may struggle to understand the individualistic values of someone from a Western culture. Furthermore, cultural diversity can also lead to the marginalization of certain groups within a society. In some cases, minority cultures may face discrimination and exclusion, which can result in social and economic disadvantages. This is often seen in the case of migrant communities, where they may struggle to fully integrate into the host society due to cultural barriers. To address these challenges, it is important for societies to promote cultural competency and sensitivity. This means educating individuals about different cultures and encouraging them to embrace diversity. It also involves creating policies and programs that promote inclusivity and equality for all groups within a society. For example, many countries have implemented diversity training programs in schools and workplaces to promote understanding and respect for different cultures. In addition, governments play a crucial role in promoting and preserving cultural diversity. They can do this by protecting the cultural heritage of different groups and promoting cultural events and festivals. This not only helps in preserving the unique identities of different cultures but also promotes cultural exchange and understanding. On an individual level, there are also steps that we can take to embrace cultural diversity. This includes being open-minded and respectful towards different cultures, being willing to learn about other cultures, and actively participating in cultural events and activities. By doing so, we can break down barriers and promote a more inclusive and harmonious society. In conclusion, cultural diversity is a key aspect of our global society. It brings numerous benefits such as the exchange of ideas and promoting tolerance, but it also poses challenges that need to be addressed. 
As individuals and societies, it is our responsibility to promote and preserve cultural diversity and create a world where everyone is embraced and valued for their unique identities and backgrounds. By doing so, we can create a more peaceful and prosperous world for all.", - "title": "Cultural Diversity: A Key Aspect of Our Global Society" - } - } - ] - } - - -def getSpeakingTemplate(): - return { - "exercises": [ - { - "id": str(uuid.uuid4()), - "prompts": [], - "text": "text", - "title": "topic", - "video_url": "sp1_video_url", - "video_path": "sp1_video_path", - "type": "speaking" - }, - { - "id": str(uuid.uuid4()), - "prompts": ["prompts"], - "text": "text", - "title": "topic", - "video_url": "sp2_video_url", - "video_path": "sp2_video_path", - "type": "speaking" - }, - { - "id": str(uuid.uuid4()), - "prompts": ["questions"], - "text": "Listen carefully and respond.", - "title": "topic", - "type": "interactiveSpeaking" - } - ], - "isDiagnostic": False, - "minTimer": MinTimers.SPEAKING_MIN_TIMER_DEFAULT, - "module": "speaking" - } - - -def getSpeakingPostTemplate(): - return { - "exercises": [ - { - "question": "What is the most impactful book you have ever read and how has it influenced your perspective on life? Please share specific examples from the book that resonated with you on a personal level.", - "topic": "Books" - }, - { - "prompts": [ - "Where did you go?", - "What did you do there?", - "Why was it a memorable experience for you?" - ], - "question": "Tell me about a memorable travel experience you have had.", - "topic": "Travel" - }, - { - "questions": [ - "In what ways has technology improved our lives?", - "What are some potential negative effects of technology on society?", - "How can we strike a balance between the use of technology and maintaining healthy relationships?" 
- ], - "topic": "Technology and Society" - } - ] - } - - -def getWritingTemplate(): - return { - "exercises": [ - { - "id": str(uuid.uuid4()), - "prefix": "You should spend about 20 minutes on this task.", - "prompt": "", - "suffix": "You should write at least 100 words.", - "type": "writing", - "wordCounter": { - "limit": 100, - "type": "min" - } - }, - { - "id": str(uuid.uuid4()), - "prefix": "You should spend about 40 minutes on this task.\nPresent a written argument or case to an educated " - "reader with no specialist knowledge of the following topic:", - "prompt": "", - "suffix": "You should write at least 250 words.\nUse your own ideas, knowledge and experience and support " - "your arguments with examples and relevant evidence.", - "type": "writing", - "wordCounter": { - "limit": 250, - "type": "min" - } - } - ], - "isDiagnostic": False, - "minTimer": MinTimers.WRITING_MIN_TIMER_DEFAULT, - "module": "writing", - "type": "general" - } - - -def getWritingPostSample(): - return { - "exercises": [ - "You recently attended a friend's wedding and were impressed by their wedding planner. Write a letter to your friend, advising them on the best wedding planner for their upcoming wedding. In your letter, include information about the planner's services, pricing, and any personal experiences you had with them. Provide your friend with recommendations and tips on how to make the most out of their wedding planning experience.", - "To what extent do you agree or disagree with the statement that technology has had a positive impact on modern society? In your response, critically examine the opposing perspectives on this issue, considering both the benefits and drawbacks of technological advancements. Support your arguments with relevant examples and evidence, and conclude with your own stance on the matter." 
- ] - } - - -def get_question_tips(question: str, answer: str, correct_answer: str, context: str = None): - messages = [ - { - "role": "user", - "content": "You are a IELTS exam program that analyzes incorrect answers to questions and gives tips to " - "help students understand why it was a wrong answer and gives helpful insight for the future. " - "The tip should refer to the context and question.", - } - ] - - if not (context is None or context == ""): - messages.append({ - "role": "user", - "content": f"This is the context for the question: {context}", - }) - - messages.extend([ - { - "role": "user", - "content": f"This is the question: {question}", - }, - { - "role": "user", - "content": f"This is the answer: {answer}", - }, - { - "role": "user", - "content": f"This is the correct answer: {correct_answer}", - } - ]) - - return messages +import uuid + +from .constants import MinTimers + + +def getListeningPartTemplate(): + return { + "audio": { + "repeatableTimes": 3, + "source": "", + }, + "exercises": [] + } + + +def getListeningTemplate(): + return { + "parts": [], + "isDiagnostic": False, + "minTimer": MinTimers.LISTENING_MIN_TIMER_DEFAULT, + "module": "listening" + } + + + +def getListeningPostSample(): + return { + "parts": [ + { + "exercises": [ + { + "questions": [ + { + "id": "1", + "options": [ + { + "id": "A", + "text": "To start working out together" + }, + { + "id": "B", + "text": "To join a book club" + }, + { + "id": "C", + "text": "To go on a trip" + }, + { + "id": "D", + "text": "To take a cooking class" + } + ], + "prompt": "What is John's suggestion to Emily?", + "solution": "A", + "variant": "text" + }, + { + "id": "2", + "options": [ + { + "id": "A", + "text": "She doesn't have time" + }, + { + "id": "B", + "text": "She doesn't have money" + }, + { + "id": "C", + "text": "She doesn't have a gym membership" + }, + { + "id": "D", + "text": "She doesn't like working out" + } + ], + "prompt": "What is Emily's current reason for not working 
out?", + "solution": "D", + "variant": "text" + }, + { + "id": "3", + "options": [ + { + "id": "A", + "text": "Gold's Gym" + }, + { + "id": "B", + "text": "Planet Fitness" + }, + { + "id": "C", + "text": "Fitness Plus" + }, + { + "id": "D", + "text": "Anytime Fitness" + } + ], + "prompt": "What gym does John suggest to Emily?", + "solution": "C", + "variant": "text" + }, + { + "id": "4", + "options": [ + { + "id": "A", + "text": "$10 a month" + }, + { + "id": "B", + "text": "$20 a month" + }, + { + "id": "C", + "text": "$30 a month" + }, + { + "id": "D", + "text": "$40 a month" + } + ], + "prompt": "What is the price of the basic membership at Fitness Plus?", + "solution": "C", + "variant": "text" + }, + { + "id": "5", + "options": [ + { + "id": "A", + "text": "3 months" + }, + { + "id": "B", + "text": "6 months" + }, + { + "id": "C", + "text": "12 months" + }, + { + "id": "D", + "text": "No commitment required" + } + ], + "prompt": "How long is the commitment for the basic membership at Fitness Plus?", + "solution": "D", + "variant": "text" + }, + { + "id": "6", + "options": [ + { + "id": "A", + "text": "Dance and cooking" + }, + { + "id": "B", + "text": "Yoga and spin" + }, + { + "id": "C", + "text": "Singing and art" + }, + { + "id": "D", + "text": "Martial arts and rock climbing" + } + ], + "prompt": "What type of classes does Fitness Plus offer?", + "solution": "B", + "variant": "text" + }, + { + "id": "7", + "options": [ + { + "id": "A", + "text": "Watch movies" + }, + { + "id": "B", + "text": "Take classes" + }, + { + "id": "C", + "text": "Play sports" + }, + { + "id": "D", + "text": "Study" + } + ], + "prompt": "What does John and Emily plan to do together at the gym?", + "solution": "B", + "variant": "text" + }, + { + "id": "8", + "options": [ + { + "id": "A", + "text": "Saturday" + }, + { + "id": "B", + "text": "Sunday" + }, + { + "id": "C", + "text": "Monday" + }, + { + "id": "D", + "text": "Tuesday" + } + ], + "prompt": "What day does John suggest to go 
check out the gym?", + "solution": "C", + "variant": "text" + }, + { + "id": "9", + "options": [ + { + "id": "A", + "text": "To go shopping" + }, + { + "id": "B", + "text": "To get lunch" + }, + { + "id": "C", + "text": "To schedule a tour" + }, + { + "id": "D", + "text": "To watch a movie" + } + ], + "prompt": "What is John's plan after checking out the gym?", + "solution": "C", + "variant": "text" + }, + { + "id": "10", + "options": [ + { + "id": "A", + "text": "Nervous" + }, + { + "id": "B", + "text": "Excited" + }, + { + "id": "C", + "text": "Uninterested" + }, + { + "id": "D", + "text": "Angry" + } + ], + "prompt": "How does Emily feel about starting to work out again?", + "solution": "B", + "variant": "text" + } + ] + } + ], + "text": { + "conversation": [ + { + "gender": "male", + "name": "John", + "text": "Hey, have you been working out lately?", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "Not really, I've been so busy with work.", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "Well, I've been thinking about getting a gym membership. Do you have one?", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "No, but I've been considering it too. Which gym are you thinking of joining?", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "I was looking at the one down the street, Fitness Plus. It seems to have good reviews.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "Oh, I've heard of that one. What's the membership like?", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "They have different packages, but I'm thinking of going for the basic one. It's $30 a month with a one-year commitment.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "That's not bad. 
Do they have classes too?", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "Yeah, they have a variety of classes like yoga and spin. I'm interested in trying out some of those.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "I've always wanted to try yoga. Maybe we can go together sometime.", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "That would be great! It's always more fun to have a workout buddy. Have you looked into any other gyms?", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "Not really. I've been busy with work, but I'll definitely check out Fitness Plus. Maybe we can both join and motivate each other.", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "Sounds like a plan. Let's do it.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "Awesome. We can go check it out this weekend.", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "Perfect. I'll give them a call to schedule a tour.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Emily", + "text": "Thanks, John. I'm excited to start working out again.", + "voice": "Ruth" + }, + { + "gender": "male", + "name": "John", + "text": "Me too. Let's do this!", + "voice": "Stephen" + } + ] + } + }, + { + "exercises": [ + { + "id": "0646ab5b-e8e2-4da5-8a0c-784fe3d0186a", + "maxWords": 3, + "prompt": "You will hear a monologue. 
Answer the questions below using no more than three words or a number accordingly.", + "solutions": [ + { + "id": "11", + "solution": [ + "multi-faceted", + "various dimensions" + ] + }, + { + "id": "12", + "solution": [ + "climate change", + "rising temperature" + ] + }, + { + "id": "13", + "solution": [ + "waste minimization", + "resource reuse" + ] + }, + { + "id": "14", + "solution": [ + "reduce carbon footprint", + "support sustainable businesses" + ] + }, + { + "id": "15", + "solution": [ + "ourselves", + "parents/educators" + ] + } + ], + "text": "What is the concept of sustainability?{{11}}\\nWhat is the biggest challenge we are facing?{{12}}\\nWhat is the need for a circular economy?{{13}}\\nWhat can individuals do to address these challenges?{{14}}\\nWho is responsible for educating future generations about environmental sustainability?{{15}}\\n", + "type": "writeBlanks" + }, + { + "id": "0c99267c-8c3a-4ed0-9f05-00c07a480831", + "maxWords": 1, + "prompt": "You will hear a monologue. Fill the form with words/numbers missing.", + "solutions": [ + { + "id": "16", + "solution": "dimensional" + }, + { + "id": "17", + "solution": "Change" + }, + { + "id": "18", + "solution": "Natural" + }, + { + "id": "19", + "solution": "Waste" + }, + { + "id": "20", + "solution": "Injustice" + } + ], + "text": "Key: Multi-{{1}} Concept of Sustainability\nValue: The concept of sustainability encompasses social, economic, and environmental aspects, and it is essential to address all dimensions in order to achieve a sustainable future.\\nKey: Climate {{2}}\nValue: Rising temperatures, extreme weather events, and loss of biodiversity are just some of the consequences of climate change caused by human activities such as burning fossil fuels and deforestation.\\nKey: Depletion of {{3}} Resources\nValue: Our current consumption patterns are not sustainable, and the overexploitation of resources is leading to deforestation, water scarcity, and depletion of fisheries. 
It is crucial to find ways to reduce our consumption and ensure resource replenishment.\\nKey: {{4}} Management\nValue: Our linear model of consumption and disposal is not sustainable, and we need to shift towards a circular economy where waste is minimized, and resources are reused or recycled.\\nKey: Environmental {{5}}\nValue: Marginalized communities are often the most affected by environmental degradation, and it is crucial to address this injustice and ensure that environmental policies and actions are fair and inclusive.\\n", + "type": "writeBlanks" + } + ], + "text": "\n\nHello everyone, thank you for joining me in this discussion about one of the most pressing issues of our time - environmental sustainability. I believe we can all agree that our planet is facing numerous challenges due to human activities and it is high time we address them.\n\nFirstly, I would like to acknowledge that the concept of sustainability itself is multi-faceted and encompasses various dimensions such as social, economic, and environmental aspects. However, today, I would like to focus on the environmental challenges we are currently facing.\n\nOne of the biggest challenges we are facing is climate change. The Earth's temperature is rising at an alarming rate due to the increase in greenhouse gas emissions, primarily from human activities such as burning fossil fuels and deforestation. This has resulted in extreme weather events, loss of biodiversity, and rising sea levels, among others. We are already seeing the consequences of climate change, and if we do not take immediate action, the situation will only worsen.\n\nAnother challenge is the depletion of natural resources. Our planet has a finite amount of resources, and yet, our current consumption patterns are not sustainable. The overexploitation of resources is leading to deforestation, water scarcity, and depletion of fisheries. 
We need to find ways to reduce our consumption and ensure that we are not exploiting resources faster than they can replenish.\n\nIn addition to climate change and resource depletion, waste management is also a significant challenge. Our current linear model of consumption and disposal is not sustainable in the long run. We produce a massive amount of waste, and most of it ends up in landfills or our oceans, polluting our environment and harming wildlife. We need to shift towards a circular economy, where waste is minimized, and resources are reused or recycled.\n\nFurthermore, there is also the issue of environmental injustice. The impacts of environmental degradation are not equally distributed, and marginalized communities are often the most affected. This injustice needs to be addressed, and measures must be taken to ensure that environmental policies and actions are fair and inclusive.\n\nSo, what can we do to address these challenges? Firstly, we need to acknowledge that each one of us has a role to play. We cannot rely on governments or organizations alone to solve these issues. We need to make changes in our daily lives, such as reducing our carbon footprint, adopting sustainable practices, and supporting businesses that prioritize the environment.\n\nWe also need to hold corporations and governments accountable for their actions. We have the power to demand change through our consumer choices and our votes. It is crucial that we urge our leaders to implement policies that promote sustainable practices and penalize those who harm the environment.\n\nMoreover, education and awareness are essential in tackling these challenges. We need to educate ourselves and others about the importance of environmental sustainability and the actions we can take to achieve it. 
Our children are the future, and it is our responsibility to educate them on the significance of preserving our planet.\n\nIn conclusion, the challenges of environmental sustainability are daunting, but they are not insurmountable. It is up to us to take action and make a difference. We owe it to ourselves, future generations, and the planet to ensure a sustainable future. Let us work together towards a greener, cleaner, and more sustainable world. Thank you." + }, + { + "exercises": [ + { + "id": "0149f828-c216-4e60-80fa-1dd28a860031", + "maxWords": 1, + "prompt": "Fill the blank space with the word missing from the audio.", + "solutions": [ + { + "id": "21", + "solution": "Smith" + }, + { + "id": "22", + "solution": "rash" + }, + { + "id": "23", + "solution": "fever" + }, + { + "id": "24", + "solution": "allergies" + }, + { + "id": "25", + "solution": "antinuclear" + }, + { + "id": "26", + "solution": "immunosuppressants" + }, + { + "id": "27", + "solution": "evaluation" + }, + { + "id": "28", + "solution": "Autoimmune" + }, + { + "id": "29", + "solution": "four" + }, + { + "id": "30", + "solution": "possibilities" + } + ], + "text": "Dr. {{21}}, Dr. 
Patel, Sarah, and Alex were discussing a case study of a patient with a fever and rash.\\nThe patient was a 30-year-old female with a history of allergies.\\nThe patient's rash was erythematous and she had a {{23}} of 101°F.\\nPossible initial diagnoses included a viral infection or allergic reaction.\\nHowever, the final diagnosis was an autoimmune disorder based on elevated {{25}} antibodies in the patient's blood work.\\nTreatment involved {{26}} and the patient's symptoms resolved within a week.\\nThorough {{27}} and considering all possibilities is important in the diagnostic process.\\n{{28}} disorders can develop without any underlying trigger.\\nThe seminar ended with the {{29}} individuals thanking each other and continuing their day with new knowledge.\\nThinking outside the box and considering all {{30}} is crucial in the diagnostic process.", + "type": "writeBlanks" + } + ], + "text": { + "conversation": [ + { + "gender": "male", + "name": "Dr. Smith", + "text": "Good morning everyone, thank you for joining us today for this seminar on diagnosis. Let's begin with the case study of a patient who presented with a fever and rash.", + "voice": "Kevin" + }, + { + "gender": "male", + "name": "Dr. Patel", + "text": "Yes, I remember this case. The patient was a 30-year-old female with a history of allergies.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Sarah", + "text": "Hi, I'm Sarah, a third-year medical student. I remember studying this case in our lectures. The patient's rash was erythematous, right?", + "voice": "Aria" + }, + { + "gender": "male", + "name": "Dr. Smith", + "text": "Yes, that's correct. And the patient had a fever of 101°F. What would be your initial diagnosis?", + "voice": "Kevin" + }, + { + "gender": "male", + "name": "Alex", + "text": "Hi, I'm Alex, a fourth-year medical student. I would say it could be a viral infection or an allergic reaction.", + "voice": "Kevin" + }, + { + "gender": "male", + "name": "Dr. 
Patel", + "text": "That's a good guess, Alex. But remember, always consider other possibilities. In this case, it turned out to be an autoimmune disorder.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Sarah", + "text": "Oh, I didn't think of that. How did you come to that diagnosis?", + "voice": "Aria" + }, + { + "gender": "male", + "name": "Dr. Smith", + "text": "Well, the patient's blood work showed elevated levels of antinuclear antibodies. That, along with the clinical presentation, pointed towards an autoimmune disorder.", + "voice": "Kevin" + }, + { + "gender": "male", + "name": "Alex", + "text": "That's interesting. I would have never thought of that. How did you treat the patient?", + "voice": "Kevin" + }, + { + "gender": "male", + "name": "Dr. Patel", + "text": "We started the patient on immunosuppressants and the rash and fever resolved within a week.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Sarah", + "text": "Wow, it's amazing how a simple rash and fever could lead to such a complex diagnosis.", + "voice": "Aria" + }, + { + "gender": "male", + "name": "Dr. Smith", + "text": "That's the importance of thorough evaluation and considering all possibilities. Any other thoughts or questions?", + "voice": "Kevin" + }, + { + "gender": "male", + "name": "Alex", + "text": "I just wanted to ask, do you think the patient's allergies could have triggered the autoimmune disorder?", + "voice": "Kevin" + }, + { + "gender": "male", + "name": "Dr. Patel", + "text": "It's possible, but we can't say for sure. Sometimes autoimmune disorders can develop without any underlying trigger.", + "voice": "Stephen" + }, + { + "gender": "female", + "name": "Sarah", + "text": "Thank you for sharing this case with us, it was very informative.", + "voice": "Aria" + }, + { + "gender": "male", + "name": "Dr. Smith", + "text": "My pleasure. I hope this discussion has given you a better understanding of the diagnostic process. 
Always remember to think outside the box and consider all possibilities.", + "voice": "Kevin" + } + ] + } + }, + { + "exercises": [ + { + "id": "921d3a2a-7f6e-46ae-a19d-65eb5ba21375", + "maxWords": 3, + "prompt": "You will hear a monologue. Answer the questions below using no more than three words or a number accordingly.", + "solutions": [ + { + "id": "31", + "solution": [ + "complex mixture", + "vital natural resource", + "non-renewable resource" + ] + }, + { + "id": "32", + "solution": [ + "sand, silt, clay" + ] + }, + { + "id": "33", + "solution": [ + "ability to provide nutrients", + "balance of essential nutrients", + "breaking down organic matter" + ] + }, + { + "id": "34", + "solution": [ + "break down organic matter", + "improve soil structure", + "release nutrients" + ] + }, + { + "id": "35", + "solution": [ + "soil erosion", + "land use practices", + "preservation of soil for future generations" + ] + } + ], + "text": "What is soil?{{31}}\\nWhat are the three main categories of soil?{{32}}\\nWhat is soil fertility?{{33}}\\nWhat is the role of microorganisms in soil?{{34}}\\nWhat environmental issue can be prevented through proper soil management?{{35}}\\n", + "type": "writeBlanks" + }, + { + "id": "e86afd9a-90d1-48db-bee1-0d20f6eea64d", + "maxWords": 1, + "prompt": "Fill the blank space with the word missing from the audio.", + "solutions": [ + { + "id": "36", + "solution": "vital" + }, + { + "id": "37", + "solution": "classified" + }, + { + "id": "38", + "solution": "fertility" + }, + { + "id": "39", + "solution": "microorganisms" + }, + { + "id": "40", + "solution": "science" + } + ], + "text": "Soil is a {{36}} natural resource that provides the foundation for plant growth\\nSoil is {{37}} into three main categories: sand, silt, and clay\\nSoil {{38}} refers to the ability of the soil to provide essential nutrients for plant growth\\nHealthy soil is teeming with {{39}}\\nSoil {{40}} plays a significant role in environmental conservation", + "type": 
"writeBlanks" + } + ], + "text": "\n\nGood morning everyone, today I would like to talk to you about a topic that is often overlooked but plays a crucial role in our daily lives - soil science. Soil science deals with the study of the composition, structure, and properties of soil, as well as how it interacts with the environment and living organisms.\n\nSoil is a vital natural resource that provides the foundation for plant growth, which in turn sustains all life on earth. It is the basis for our food production, as well as the source of many raw materials such as wood, cotton, and rubber. Without a healthy and productive soil, our agricultural systems would collapse, and we would struggle to feed our growing population.\n\nBut what exactly is soil? Soil is a complex mixture of minerals, organic matter, water, and air, all held together by microorganisms. It takes thousands of years for soil to form, and it is a non-renewable resource, which makes its conservation even more critical.\n\nOne of the key aspects of soil science is understanding the different types of soil and their properties. Soil is classified into three main categories: sand, silt, and clay. These categories are based on the size of the particles that make up the soil. Sand particles are the largest, followed by silt and then clay. The composition of these particles greatly affects the soil's properties, such as water retention, drainage, and nutrient availability.\n\nAnother crucial aspect of soil science is the study of soil fertility. Soil fertility refers to the ability of the soil to provide essential nutrients for plant growth. The nutrients in the soil come from the breakdown of organic matter, such as dead plants and animal remains. Fertile soil contains a balance of essential nutrients, such as nitrogen, phosphorus, and potassium, which are necessary for plant growth.\n\nThe health of the soil is also crucial in soil science. 
Healthy soil is teeming with microorganisms, which play a vital role in breaking down organic matter and releasing nutrients into the soil. These microorganisms also help to improve soil structure, making it more porous and allowing for better air and water circulation.\n\nSoil science also plays a significant role in environmental conservation. Soil erosion, the removal of topsoil by wind and water, is a significant environmental issue that can be prevented through proper soil management. By understanding the factors that contribute to soil erosion, such as improper land use practices, we can implement strategies to prevent it and preserve our soil for future generations.\n\nIn conclusion, soil science is a critical field of study that impacts our daily lives in more ways than we can imagine. It is not just about digging in the dirt; it is a complex science that requires a multidisciplinary approach. By understanding the composition, properties, and fertility of soil, we can ensure the sustainable use of this precious resource and preserve it for future generations. Thank you." 
+ } + ] + } + + +def getReadingTemplate(): + return { + "parts": [], + "isDiagnostic": False, + "minTimer": 60, + "type": "academic" + } + + +def getReadingPostSample(): + return { + "parts": [ + { + "exercises": [ + { + "id": "cbd08cdd-5850-40a8-b6e2-6021c04474ad", + "prompt": "Do the following statements agree with the information given in the Reading Passage?", + "questions": [ + { + "id": "1", + "prompt": "Technology is constantly evolving and shaping our world.", + "solution": "true" + }, + { + "id": "2", + "prompt": "The use of artificial intelligence (AI) has only recently become popular.", + "solution": "false" + }, + { + "id": "3", + "prompt": "5G technology offers slower speeds and higher latency than its predecessors.", + "solution": "false" + }, + { + "id": "4", + "prompt": "Social media has had a minimal impact on our society.", + "solution": "false" + }, + { + "id": "5", + "prompt": "Cybersecurity is not a growing concern as technology becomes more integrated into our lives.", + "solution": "false" + }, + { + "id": "6", + "prompt": "Technology has not had a significant impact on the education sector.", + "solution": "false" + }, + { + "id": "7", + "prompt": "Automation and AI are not causing shifts in the job market.", + "solution": "false" + } + ], + "type": "trueFalse" + }, + { + "allowRepetition": True, + "id": "b88f3eb5-11b7-4a8e-bb1a-4e96215b34bf", + "prompt": "Complete the summary below. Click a blank to select the corresponding word(s) for it.\\nThere are more words than spaces so you will not use them all. 
You may use any of the words more than once.", + "solutions": [ + { + "id": "8", + "solution": "smartphones" + }, + { + "id": "9", + "solution": "artificial intelligence" + }, + { + "id": "10", + "solution": "5G technology" + }, + { + "id": "11", + "solution": "virtual reality" + }, + { + "id": "12", + "solution": "cybersecurity" + }, + { + "id": "13", + "solution": "telemedicine" + } + ], + "text": "\n\nTechnology has become an integral part of our daily lives, from {{8}} to smart homes. The rise of {{9}} (AI) and the Internet of Things (IoT) are two major trends that are revolutionizing the way we live and work. {{10}} is also gaining popularity, enabling advancements in areas like {{11}} and self-driving cars. Cloud computing, virtual and augmented reality (VR and AR), and blockchain technology are also on the rise, impacting industries such as finance, education, and healthcare. Social media has changed the way we communicate and raised concerns about privacy and mental health. With the increase in data breaches and cyber attacks, {{12}} has become a growing concern. {{13}} and online learning have made healthcare and education more accessible and efficient. However, there are concerns about the impact of technology on the job market, leading to discussions about the need for reskilling and upskilling. As technology continues to advance, it is crucial to understand its impact and consequences on our society.", + "type": "fillBlanks", + "words": [ + "speaking", + "5G technology", + "telemedicine", + "virtual reality", + "antechamber", + "smartphones", + "kitsch", + "devilish", + "parent", + "artificial intelligence", + "cybersecurity" + ] + } + ], + "text": { + "content": "In today's society, technology has become an integral part of our daily lives. From smartphones to smart homes, we are constantly surrounded by the latest and most advanced technological devices. 
As technology continues to evolve and improve, it is important to understand the current trends and how they are shaping our world. One of the biggest technology trends in recent years is the rise of artificial intelligence (AI). AI is the simulation of human intelligence processes by machines, particularly computer systems. This technology has been around for decades, but with the advancement of computing power and big data, AI has become more sophisticated and prevalent. From virtual personal assistants like Siri and Alexa to self-driving cars, AI is revolutionizing the way we live and work. Another trend that has gained widespread popularity is the Internet of Things (IoT). This refers to the interconnection of everyday objects via the internet, allowing them to send and receive data. Smart homes, wearable devices, and even smart cities are all examples of IoT. With IoT, our devices and appliances can communicate with each other, making our lives more convenient and efficient. The use of 5G technology is also on the rise. 5G is the fifth generation of wireless technology, offering faster speeds and lower latency than its predecessors. With 5G, we can expect to see advancements in areas like virtual reality, self-driving cars, and remote surgery. It will also enable the development of smart cities and the Internet of Things to reach its full potential. Cloud computing is another trend that has been steadily growing. Cloud computing involves the delivery of computing services over the internet, such as storage, servers, and databases. This allows for easy access to data and applications from anywhere, at any time. Many businesses and individuals are utilizing cloud computing to streamline their operations and increase efficiency. Virtual and augmented reality (VR and AR) are becoming more prevalent in various industries, from gaming and entertainment to healthcare and education. 
VR immerses the user in a simulated environment, while AR overlays digital information onto the real world. These technologies have the potential to change the way we learn, work, and entertain ourselves. Blockchain technology is also gaining traction, particularly in the financial sector. Blockchain is a decentralized digital ledger that records transactions across a network of computers. It allows for secure and transparent transactions without the need for intermediaries. This technology has the potential to disrupt traditional banking and financial systems. Social media has been a dominant force in the technology world for some time now, and it continues to evolve and shape our society. With the rise of platforms like Facebook, Twitter, and Instagram, we are more connected than ever before. Social media has changed the way we communicate, share information, and even do business. It has also raised concerns about privacy and the impact of social media on mental health. Cybersecurity is a growing concern as technology becomes more integrated into our lives. With the increase in data breaches and cyber attacks, the need for strong cybersecurity measures is greater than ever. Companies and individuals are investing in better security protocols to protect their sensitive information. The healthcare industry is also experiencing technological advancements with the introduction of telemedicine. This allows patients to receive medical care remotely, without having to visit a physical doctor's office. Telemedicine has become increasingly popular, especially during the COVID-19 pandemic, as it allows for safe and convenient access to healthcare. In the education sector, technology has brought about significant changes as well. Online learning platforms and digital tools have made education more accessible and efficient. With the rise of e-learning, students can access education from anywhere in the world and at their own pace. 
As technology continues to advance, concerns about its impact on the job market have also arisen. Automation and AI are replacing human workers in many industries, leading to job loss and shifts in the workforce. This trend has sparked discussions about the need for reskilling and upskilling to adapt to the changing job market. In conclusion, the world is constantly evolving and adapting to the latest technology trends. From AI and IoT to 5G and blockchain, these advancements are shaping the way we live, work, and interact with each other. As society continues to embrace and integrate technology into our daily lives, it is crucial to understand its impact and potential consequences. Whether it be in the fields of healthcare, education, or finance, technology is undoubtedly transforming the world as we know it.", + "title": "Modern Technology Trends" + } + }, + { + "exercises": [ + { + "id": "f2daa91a-3e92-4c07-aefd-719bcf22bac7", + "prompt": "Do the following statements agree with the information given in the Reading Passage?", + "questions": [ + { + "id": "14", + "prompt": "Yoga and meditation have been gaining popularity in recent years.", + "solution": "true" + }, + { + "id": "15", + "prompt": "Yoga and meditation originated in ancient India.", + "solution": "true" + }, + { + "id": "16", + "prompt": "Yoga is a system that combines physical postures, breathing techniques, and meditation.", + "solution": "true" + }, + { + "id": "17", + "prompt": "Meditation involves training the mind to achieve a state of inner peace and relaxation.", + "solution": "true" + }, + { + "id": "18", + "prompt": "Yoga and meditation can reduce stress and improve mental health.", + "solution": "true" + }, + { + "id": "19", + "prompt": "Yoga and meditation can improve physical health.", + "solution": "true" + }, + { + "id": "20", + "prompt": "Yoga and meditation are only suitable for people who are physically fit.", + "solution": "false" + } + ], + "type": "trueFalse" + }, + { + "id": 
"b500cb69-843d-4430-a544-924c514ea12a", + "maxWords": 3, + "prompt": "Choose no more than three words and/or a number from the passage for each answer.", + "solutions": [ + { + "id": "21", + "solution": [ + "physical, mental, emotional" + ] + }, + { + "id": "22", + "solution": [ + "ancient India" + ] + }, + { + "id": "23", + "solution": [ + "physical postures, breathing techniques" + ] + }, + { + "id": "24", + "solution": [ + "reduce stress, improve mindfulness" + ] + }, + { + "id": "25", + "solution": [ + "improve, promote relaxation" + ] + }, + { + "id": "26", + "solution": [ + "anyone, all ages and backgrounds" + ] + } + ], + "text": "What are the three main benefits of yoga and meditation?{{21}}\\nWhere did yoga originate?{{22}}\\nWhat are the two components of yoga?{{23}}\\nHow do yoga and meditation improve mental health?{{24}}\\nWhat is the impact of yoga and meditation on sleep quality?{{25}}\\nWho can practice yoga and meditation?{{26}}\\n", + "type": "writeBlanks" + } + ], + "text": { + "content": "Yoga and meditation have been gaining popularity in recent years as more and more people recognize the physical, mental, and emotional benefits of these ancient practices. Originating in ancient India, yoga is a holistic system that combines physical postures, breathing techniques, and meditation to promote overall well-being. Similarly, meditation is a mental practice that involves training the mind to achieve a state of inner peace and relaxation. One of the main benefits of yoga and meditation is their ability to reduce stress and improve mental health. In today's fast-paced world, stress has become a common problem for many people, leading to various physical and mental health issues. However, studies have shown that practicing yoga and meditation can significantly reduce stress levels and improve overall mental health. 
This is because these practices focus on deep breathing and mindfulness, which can help individuals to calm their minds and relax their bodies. As a result, many people who regularly practice yoga and meditation report feeling more peaceful, centered, and less stressed in their daily lives. Furthermore, yoga and meditation have been shown to have a positive impact on physical health. The physical postures and movements in yoga help to improve flexibility, strength, and balance. These postures also work to stretch and strengthen different muscles in the body, which can alleviate tension and prevent injuries. Additionally, the controlled breathing in yoga helps to increase oxygen flow throughout the body, which can improve cardiovascular health. As for meditation, studies have shown that it can lower blood pressure and reduce the risk of heart disease. These physical benefits make yoga and meditation an excellent form of exercise for people of all ages and fitness levels. Apart from the physical and mental benefits, yoga and meditation also have a positive impact on emotional well-being. The practice of mindfulness in these practices helps individuals to become more aware of their thoughts and emotions, allowing them to better manage and process them. This can result in improved emotional regulation and a greater sense of self-awareness. As a result, individuals who practice yoga and meditation often report feeling more positive, content, and emotionally stable. Another significant benefit of yoga and meditation is their ability to improve overall concentration and focus. In today's digital age, our minds are constantly bombarded with information and distractions, making it challenging to stay focused on tasks. However, regular practice of yoga and meditation can improve concentration and enhance cognitive function. 
This is because these practices require individuals to focus their minds on their breath, movements, or a specific mantra, helping to train the brain to stay focused for longer periods. Moreover, yoga and meditation have been shown to have a positive impact on sleep quality. Many people struggle with insomnia or other sleep-related issues, which can have a significant impact on their overall health and well-being. However, studies have shown that yoga and meditation can improve sleep quality and help individuals fall asleep faster. This is because these practices promote relaxation and reduce stress, which are common causes of sleep issues. As a result, individuals who practice yoga and meditation often report feeling more rested and rejuvenated after a good night's sleep. In addition to the physical, mental, emotional, and cognitive benefits, yoga and meditation also have a spiritual component. These practices are deeply rooted in ancient Indian spirituality and have been used for centuries to connect individuals with their inner selves and the universe. While the spiritual aspect may not be for everyone, many people find that it adds a deeper level of meaning and purpose to their practice. Furthermore, yoga and meditation are accessible to people of all ages and backgrounds. Whether you are young or old, fit or not, religious or not, yoga and meditation can be practiced by anyone. There are many different styles and forms of yoga and meditation, allowing individuals to choose the practice that best suits their needs and preferences. This inclusivity is what makes yoga and meditation such powerful and universal practices. In conclusion, the benefits of yoga and meditation are numerous and far-reaching. From reducing stress and improving mental health to promoting physical strength and emotional well-being, these ancient practices offer a holistic approach to overall health and wellness. 
Whether you are looking to improve your physical fitness, manage stress, or connect with your inner self, yoga and meditation are powerful tools that can help you achieve these goals. So why not give it a try and experience the transformative power of yoga and meditation for yourself?", + "title": "The Benefits of Yoga and Meditation" + } + }, + { + "exercises": [ + { + "allowRepetition": True, + "id": "1035c153-d38a-4f27-a14e-0ce63184ff82", + "prompt": "Complete the summary below. Click a blank to select the corresponding word(s) for it.\\nThere are more words than spaces so you will not use them all. You may use any of the words more than once.", + "solutions": [ + { + "id": "27", + "solution": "Cultural diversity" + }, + { + "id": "28", + "solution": "Variety" + }, + { + "id": "29", + "solution": "Multicultural" + }, + { + "id": "30", + "solution": "Tolerance" + }, + { + "id": "31", + "solution": "Unity" + }, + { + "id": "32", + "solution": "Challenges" + }, + { + "id": "33", + "solution": "Celebrated" + } + ], + "text": "\n\n{{27}} refers to the {{28}} of cultures, traditions, beliefs, and lifestyles that exist within a society. In today's interconnected world, the movement of people, goods, and ideas has led to a more diverse and {{29}} society. The exchange of ideas and knowledge, {{30}} and understanding, and promoting peace and {{31}} are some of the benefits of cultural diversity. However, it also poses {{32}} such as potential clashes and the marginalization of certain groups. To address these challenges, it is important for societies to promote cultural competency and sensitivity, as well as for individuals to embrace diversity and participate in cultural events. 
Overall, cultural diversity is a crucial aspect of our global society that needs to be preserved and {{33}}.", + "type": "fillBlanks", + "words": [ + "Tolerance", + "Cultural diversity", + "penny", + "Multicultural", + "shrill", + "Celebrated", + "Variety", + "query", + "Challenges", + "wont", + "Unity", + "chemical" + ] + }, + { + "questions": [ + { + "id": "34", + "options": [ + { + "id": "A", + "text": "The variety of cultures, traditions, beliefs, and lifestyles within a society" + }, + { + "id": "B", + "text": "The number of countries in the world" + }, + { + "id": "C", + "text": "The different types of technology used in different cultures" + }, + { + "id": "D", + "text": "The number of languages spoken in a society" + } + ], + "prompt": "What is the main definition of cultural diversity?", + "solution": "A", + "variant": "text" + }, + { + "id": "35", + "options": [ + { + "id": "A", + "text": "By making it easier for people to travel" + }, + { + "id": "B", + "text": "By increasing the number of countries in the world" + }, + { + "id": "C", + "text": "By creating more jobs for people from different cultures" + }, + { + "id": "D", + "text": "By promoting a single global culture" + } + ], + "prompt": "How has technology contributed to an increase in cultural diversity?", + "solution": "A", + "variant": "text" + }, + { + "id": "36", + "options": [ + { + "id": "A", + "text": "Increased economic opportunities" + }, + { + "id": "B", + "text": "Higher levels of education" + }, + { + "id": "C", + "text": "Improved transportation systems" + }, + { + "id": "D", + "text": "The exchange of ideas and knowledge" + } + ], + "prompt": "What is one of the key benefits of cultural diversity?", + "solution": "D", + "variant": "text" + }, + { + "id": "37", + "options": [ + { + "id": "A", + "text": "By forcing people to conform to a single culture" + }, + { + "id": "B", + "text": "By exposing people to different perspectives and experiences" + }, + { + "id": "C", + "text": "By 
creating a homogenous society" + }, + { + "id": "D", + "text": "By limiting the movement of people between countries" + } + ], + "prompt": "How does cultural diversity promote tolerance and understanding?", + "solution": "B", + "variant": "text" + }, + { + "id": "38", + "options": [ + { + "id": "A", + "text": "Increased discrimination" + }, + { + "id": "B", + "text": "A decline in technological advancements" + }, + { + "id": "C", + "text": "A decrease in the number of countries" + }, + { + "id": "D", + "text": "A lack of cultural exchange" + } + ], + "prompt": "What is one challenge posed by cultural diversity?", + "solution": "A", + "variant": "text" + }, + { + "id": "39", + "options": [ + { + "id": "A", + "text": "By promoting a single global culture" + }, + { + "id": "B", + "text": "By creating barriers between different groups" + }, + { + "id": "C", + "text": "By promoting cultural competency and sensitivity" + }, + { + "id": "D", + "text": "By limiting the number of countries in the world" + } + ], + "prompt": "What is one way that societies can address the challenges of cultural diversity?", + "solution": "C", + "variant": "text" + }, + { + "id": "40", + "options": [ + { + "id": "A", + "text": "To ignore cultural differences" + }, + { + "id": "B", + "text": "To actively participate in cultural events and activities" + }, + { + "id": "C", + "text": "To only embrace their own culture" + }, + { + "id": "D", + "text": "To avoid learning about other cultures" + } + ], + "prompt": "What is the responsibility of individuals in promoting and preserving cultural diversity?", + "solution": "B", + "variant": "text" + } + ] + } + ], + "text": { + "content": "Cultural diversity is a term that is often used in today's world, but what does it really mean? Simply put, cultural diversity refers to the variety of cultures, traditions, beliefs, and lifestyles that exist within a society. 
It is a reflection of the different backgrounds, experiences, and identities of individuals and groups. In this IELTS Reading Passage, we will explore the concept of cultural diversity and its significance in our global society. The world we live in today is more interconnected and interdependent than ever before. With the advancements in technology, transportation, and communication, people from different parts of the world can easily connect and interact with one another. This has led to an increase in the movement of people, goods, and ideas, resulting in a more diverse and multicultural society. In fact, it is estimated that over 190 countries exist in the world, each with its unique culture and traditions. One of the key benefits of cultural diversity is the exchange of ideas and knowledge. When people from different backgrounds come together, they bring with them their unique perspectives and experiences. This leads to a rich exchange of ideas, which can result in the development of new innovations and solutions to various problems. For example, the fusion of different cuisines has led to the creation of new and delicious dishes, and the blending of different musical styles has given birth to new genres of music. Moreover, cultural diversity also promotes tolerance and understanding among individuals and groups. When people are exposed to different cultures, they learn to appreciate and respect the differences that exist. This, in turn, leads to a more inclusive and harmonious society, where people from diverse backgrounds can coexist peacefully. In a world that is plagued by conflicts and discrimination, cultural diversity plays a crucial role in promoting peace and unity. However, despite its numerous benefits, cultural diversity also poses some challenges. One of the main challenges is the potential for cultural clashes. As individuals from different cultures interact, conflicts can arise due to differences in values, beliefs, and customs. 
This can lead to misunderstandings and even discrimination. For instance, a person from a collectivist culture may struggle to understand the individualistic values of someone from a Western culture. Furthermore, cultural diversity can also lead to the marginalization of certain groups within a society. In some cases, minority cultures may face discrimination and exclusion, which can result in social and economic disadvantages. This is often seen in the case of migrant communities, where they may struggle to fully integrate into the host society due to cultural barriers. To address these challenges, it is important for societies to promote cultural competency and sensitivity. This means educating individuals about different cultures and encouraging them to embrace diversity. It also involves creating policies and programs that promote inclusivity and equality for all groups within a society. For example, many countries have implemented diversity training programs in schools and workplaces to promote understanding and respect for different cultures. In addition, governments play a crucial role in promoting and preserving cultural diversity. They can do this by protecting the cultural heritage of different groups and promoting cultural events and festivals. This not only helps in preserving the unique identities of different cultures but also promotes cultural exchange and understanding. On an individual level, there are also steps that we can take to embrace cultural diversity. This includes being open-minded and respectful towards different cultures, being willing to learn about other cultures, and actively participating in cultural events and activities. By doing so, we can break down barriers and promote a more inclusive and harmonious society. In conclusion, cultural diversity is a key aspect of our global society. It brings numerous benefits such as the exchange of ideas and promoting tolerance, but it also poses challenges that need to be addressed. 
As individuals and societies, it is our responsibility to promote and preserve cultural diversity and create a world where everyone is embraced and valued for their unique identities and backgrounds. By doing so, we can create a more peaceful and prosperous world for all.", + "title": "Cultural Diversity: A Key Aspect of Our Global Society" + } + } + ] + } + + +def getSpeakingTemplate(): + return { + "exercises": [ + { + "id": str(uuid.uuid4()), + "prompts": [], + "text": "text", + "title": "topic", + "video_url": "sp1_video_url", + "video_path": "sp1_video_path", + "type": "speaking" + }, + { + "id": str(uuid.uuid4()), + "prompts": ["prompts"], + "text": "text", + "title": "topic", + "video_url": "sp2_video_url", + "video_path": "sp2_video_path", + "type": "speaking" + }, + { + "id": str(uuid.uuid4()), + "prompts": ["questions"], + "text": "Listen carefully and respond.", + "title": "topic", + "type": "interactiveSpeaking" + } + ], + "isDiagnostic": False, + "minTimer": MinTimers.SPEAKING_MIN_TIMER_DEFAULT, + "module": "speaking" + } + + +def getSpeakingPostTemplate(): + return { + "exercises": [ + { + "question": "What is the most impactful book you have ever read and how has it influenced your perspective on life? Please share specific examples from the book that resonated with you on a personal level.", + "topic": "Books" + }, + { + "prompts": [ + "Where did you go?", + "What did you do there?", + "Why was it a memorable experience for you?" + ], + "question": "Tell me about a memorable travel experience you have had.", + "topic": "Travel" + }, + { + "questions": [ + "In what ways has technology improved our lives?", + "What are some potential negative effects of technology on society?", + "How can we strike a balance between the use of technology and maintaining healthy relationships?" 
+ ], + "topic": "Technology and Society" + } + ] + } + + +def getWritingTemplate(): + return { + "exercises": [ + { + "id": str(uuid.uuid4()), + "prefix": "You should spend about 20 minutes on this task.", + "prompt": "", + "suffix": "You should write at least 100 words.", + "type": "writing", + "wordCounter": { + "limit": 100, + "type": "min" + } + }, + { + "id": str(uuid.uuid4()), + "prefix": "You should spend about 40 minutes on this task.\nPresent a written argument or case to an educated " + "reader with no specialist knowledge of the following topic:", + "prompt": "", + "suffix": "You should write at least 250 words.\nUse your own ideas, knowledge and experience and support " + "your arguments with examples and relevant evidence.", + "type": "writing", + "wordCounter": { + "limit": 250, + "type": "min" + } + } + ], + "isDiagnostic": False, + "minTimer": MinTimers.WRITING_MIN_TIMER_DEFAULT, + "module": "writing", + "type": "general" + } + + +def getWritingPostSample(): + return { + "exercises": [ + "You recently attended a friend's wedding and were impressed by their wedding planner. Write a letter to your friend, advising them on the best wedding planner for their upcoming wedding. In your letter, include information about the planner's services, pricing, and any personal experiences you had with them. Provide your friend with recommendations and tips on how to make the most out of their wedding planning experience.", + "To what extent do you agree or disagree with the statement that technology has had a positive impact on modern society? In your response, critically examine the opposing perspectives on this issue, considering both the benefits and drawbacks of technological advancements. Support your arguments with relevant examples and evidence, and conclude with your own stance on the matter." 
+ ] + } + + +def get_question_tips(question: str, answer: str, correct_answer: str, context: str = None): + messages = [ + { + "role": "user", + "content": "You are a IELTS exam program that analyzes incorrect answers to questions and gives tips to " + "help students understand why it was a wrong answer and gives helpful insight for the future. " + "The tip should refer to the context and question.", + } + ] + + if not (context is None or context == ""): + messages.append({ + "role": "user", + "content": f"This is the context for the question: {context}", + }) + + messages.extend([ + { + "role": "user", + "content": f"This is the question: {question}", + }, + { + "role": "user", + "content": f"This is the answer: {answer}", + }, + { + "role": "user", + "content": f"This is the correct answer: {correct_answer}", + } + ]) + + return messages diff --git a/app/controllers/abc/__init__.py b/app/controllers/abc/__init__.py index 8b8186c..85fa452 100644 --- a/app/controllers/abc/__init__.py +++ b/app/controllers/abc/__init__.py @@ -1,17 +1,19 @@ -from .level import ILevelController -from .listening import IListeningController -from .reading import IReadingController -from .writing import IWritingController -from .speaking import ISpeakingController -from .grade import IGradeController -from .training import ITrainingController - -__all__ = [ - "IListeningController", - "IReadingController", - "IWritingController", - "ISpeakingController", - "ILevelController", - "IGradeController", - "ITrainingController" -] +from .level import ILevelController +from .listening import IListeningController +from .reading import IReadingController +from .writing import IWritingController +from .speaking import ISpeakingController +from .grade import IGradeController +from .training import ITrainingController +from .user import IUserController + +__all__ = [ + "IListeningController", + "IReadingController", + "IWritingController", + "ISpeakingController", + "ILevelController", + 
"IGradeController", + "ITrainingController", + "IUserController" +] diff --git a/app/controllers/abc/grade.py b/app/controllers/abc/grade.py index 162e246..a9853ce 100644 --- a/app/controllers/abc/grade.py +++ b/app/controllers/abc/grade.py @@ -1,22 +1,22 @@ -from abc import ABC, abstractmethod -from typing import Dict, List - - -class IGradeController(ABC): - - @abstractmethod - async def grade_writing_task(self, task: int, data): - pass - - @abstractmethod - async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: - pass - - @abstractmethod - async def grade_short_answers(self, data: Dict): - pass - - @abstractmethod - async def grading_summary(self, data: Dict): - pass - +from abc import ABC, abstractmethod +from typing import Dict, List + + +class IGradeController(ABC): + + @abstractmethod + async def grade_writing_task(self, task: int, data): + pass + + @abstractmethod + async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: + pass + + @abstractmethod + async def grade_short_answers(self, data: Dict): + pass + + @abstractmethod + async def grading_summary(self, data: Dict): + pass + diff --git a/app/controllers/abc/level.py b/app/controllers/abc/level.py index 43fe296..cba2151 100644 --- a/app/controllers/abc/level.py +++ b/app/controllers/abc/level.py @@ -1,23 +1,23 @@ -from abc import ABC, abstractmethod - -from fastapi import UploadFile -from typing import Dict - - -class ILevelController(ABC): - - @abstractmethod - async def get_level_exam(self): - pass - - @abstractmethod - async def get_level_utas(self): - pass - - @abstractmethod - async def upload_level(self, file: UploadFile): - pass - - @abstractmethod - async def get_custom_level(self, data: Dict): - pass +from abc import ABC, abstractmethod + +from fastapi import UploadFile +from typing import Dict + + +class ILevelController(ABC): + + @abstractmethod + async def get_level_exam(self): + pass + + @abstractmethod + async def get_level_utas(self): + pass + + 
@abstractmethod + async def upload_level(self, file: UploadFile): + pass + + @abstractmethod + async def get_custom_level(self, data: Dict): + pass diff --git a/app/controllers/abc/listening.py b/app/controllers/abc/listening.py index 11428b5..19a8a09 100644 --- a/app/controllers/abc/listening.py +++ b/app/controllers/abc/listening.py @@ -1,13 +1,13 @@ -from abc import ABC, abstractmethod -from typing import List - - -class IListeningController(ABC): - - @abstractmethod - async def get_listening_question(self, section_id: int, topic: str, exercises: List[str], difficulty: str): - pass - - @abstractmethod - async def save_listening(self, data): - pass +from abc import ABC, abstractmethod +from typing import List + + +class IListeningController(ABC): + + @abstractmethod + async def get_listening_question(self, section_id: int, topic: str, exercises: List[str], difficulty: str): + pass + + @abstractmethod + async def save_listening(self, data): + pass diff --git a/app/controllers/abc/reading.py b/app/controllers/abc/reading.py index 7a5663e..03250ab 100644 --- a/app/controllers/abc/reading.py +++ b/app/controllers/abc/reading.py @@ -1,10 +1,10 @@ -from abc import ABC, abstractmethod -from typing import List - - -class IReadingController(ABC): - - @abstractmethod - async def get_reading_passage(self, passage: int, topic: str, exercises: List[str], difficulty: str): - pass - +from abc import ABC, abstractmethod +from typing import List + + +class IReadingController(ABC): + + @abstractmethod + async def get_reading_passage(self, passage: int, topic: str, exercises: List[str], difficulty: str): + pass + diff --git a/app/controllers/abc/speaking.py b/app/controllers/abc/speaking.py index 6b96a23..4a54758 100644 --- a/app/controllers/abc/speaking.py +++ b/app/controllers/abc/speaking.py @@ -1,25 +1,25 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from fastapi import BackgroundTasks - - -class ISpeakingController(ABC): - - @abstractmethod - async 
def get_speaking_part(self, task: int, topic: str, difficulty: str, second_topic: Optional[str] = None): - pass - - @abstractmethod - async def save_speaking(self, data, background_tasks: BackgroundTasks): - pass - - @abstractmethod - async def generate_video( - self, part: int, avatar: str, topic: str, questions: list[str], - *, - second_topic: Optional[str] = None, - prompts: Optional[list[str]] = None, - suffix: Optional[str] = None, - ): - pass +from abc import ABC, abstractmethod +from typing import Optional + +from fastapi import BackgroundTasks + + +class ISpeakingController(ABC): + + @abstractmethod + async def get_speaking_part(self, task: int, topic: str, difficulty: str, second_topic: Optional[str] = None): + pass + + @abstractmethod + async def save_speaking(self, data, background_tasks: BackgroundTasks): + pass + + @abstractmethod + async def generate_video( + self, part: int, avatar: str, topic: str, questions: list[str], + *, + second_topic: Optional[str] = None, + prompts: Optional[list[str]] = None, + suffix: Optional[str] = None, + ): + pass diff --git a/app/controllers/abc/training.py b/app/controllers/abc/training.py index 1ce25c0..f044ddf 100644 --- a/app/controllers/abc/training.py +++ b/app/controllers/abc/training.py @@ -1,12 +1,12 @@ -from abc import ABC, abstractmethod - - -class ITrainingController(ABC): - - @abstractmethod - async def fetch_tips(self, data): - pass - - @abstractmethod - async def get_training_content(self, data): - pass +from abc import ABC, abstractmethod + + +class ITrainingController(ABC): + + @abstractmethod + async def fetch_tips(self, data): + pass + + @abstractmethod + async def get_training_content(self, data): + pass diff --git a/app/controllers/abc/user.py b/app/controllers/abc/user.py new file mode 100644 index 0000000..c99c3df --- /dev/null +++ b/app/controllers/abc/user.py @@ -0,0 +1,10 @@ +from abc import ABC, abstractmethod + +from app.dtos.user_batch import BatchUsersDTO + + +class IUserController(ABC): + 
+ @abstractmethod + async def batch_import(self, batch: BatchUsersDTO): + pass diff --git a/app/controllers/abc/writing.py b/app/controllers/abc/writing.py index d3925c9..ebb298e 100644 --- a/app/controllers/abc/writing.py +++ b/app/controllers/abc/writing.py @@ -1,8 +1,8 @@ -from abc import ABC, abstractmethod - - -class IWritingController(ABC): - - @abstractmethod - async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): - pass +from abc import ABC, abstractmethod + + +class IWritingController(ABC): + + @abstractmethod + async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): + pass diff --git a/app/controllers/impl/__init__.py b/app/controllers/impl/__init__.py index 0720b78..6671490 100644 --- a/app/controllers/impl/__init__.py +++ b/app/controllers/impl/__init__.py @@ -1,17 +1,19 @@ -from .level import LevelController -from .listening import ListeningController -from .reading import ReadingController -from .speaking import SpeakingController -from .writing import WritingController -from .training import TrainingController -from .grade import GradeController - -__all__ = [ - "LevelController", - "ListeningController", - "ReadingController", - "SpeakingController", - "WritingController", - "TrainingController", - "GradeController" -] +from .level import LevelController +from .listening import ListeningController +from .reading import ReadingController +from .speaking import SpeakingController +from .writing import WritingController +from .training import TrainingController +from .grade import GradeController +from .user import UserController + +__all__ = [ + "LevelController", + "ListeningController", + "ReadingController", + "SpeakingController", + "WritingController", + "TrainingController", + "GradeController", + "UserController" +] diff --git a/app/controllers/impl/grade.py b/app/controllers/impl/grade.py index 3474664..b91c180 100644 --- a/app/controllers/impl/grade.py +++ 
b/app/controllers/impl/grade.py @@ -1,54 +1,54 @@ -import logging -from typing import Dict, List - -from app.configs.constants import FilePaths -from app.controllers.abc import IGradeController -from app.dtos.writing import WritingGradeTaskDTO -from app.helpers import FileHelper -from app.services.abc import ISpeakingService, IWritingService, IGradeService -from app.utils import handle_exception - - -class GradeController(IGradeController): - - def __init__( - self, - grade_service: IGradeService, - speaking_service: ISpeakingService, - writing_service: IWritingService - ): - self._service = grade_service - self._speaking_service = speaking_service - self._writing_service = writing_service - self._logger = logging.getLogger(__name__) - - async def grade_writing_task(self, task: int, data: WritingGradeTaskDTO): - return await self._writing_service.grade_writing_task(task, data.question, data.answer) - - @handle_exception(400) - async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: - FileHelper.delete_files_older_than_one_day(FilePaths.AUDIO_FILES_PATH) - return await self._speaking_service.grade_speaking_task(task, answers) - - async def grade_short_answers(self, data: Dict): - return await self._service.grade_short_answers(data) - - async def grading_summary(self, data: Dict): - section_keys = ['reading', 'listening', 'writing', 'speaking', 'level'] - extracted_sections = self._extract_existing_sections_from_body(data, section_keys) - return await self._service.calculate_grading_summary(extracted_sections) - - @staticmethod - def _extract_existing_sections_from_body(my_dict, keys_to_extract): - if 'sections' in my_dict and isinstance(my_dict['sections'], list) and len(my_dict['sections']) > 0: - return list( - filter( - lambda item: - 'code' in item and - item['code'] in keys_to_extract and - 'grade' in item and - 'name' in item, - my_dict['sections'] - ) - ) - +import logging +from typing import Dict, List + +from app.configs.constants 
import FilePaths +from app.controllers.abc import IGradeController +from app.dtos.writing import WritingGradeTaskDTO +from app.helpers import FileHelper +from app.services.abc import ISpeakingService, IWritingService, IGradeService +from app.utils import handle_exception + + +class GradeController(IGradeController): + + def __init__( + self, + grade_service: IGradeService, + speaking_service: ISpeakingService, + writing_service: IWritingService + ): + self._service = grade_service + self._speaking_service = speaking_service + self._writing_service = writing_service + self._logger = logging.getLogger(__name__) + + async def grade_writing_task(self, task: int, data: WritingGradeTaskDTO): + return await self._writing_service.grade_writing_task(task, data.question, data.answer) + + @handle_exception(400) + async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: + FileHelper.delete_files_older_than_one_day(FilePaths.AUDIO_FILES_PATH) + return await self._speaking_service.grade_speaking_task(task, answers) + + async def grade_short_answers(self, data: Dict): + return await self._service.grade_short_answers(data) + + async def grading_summary(self, data: Dict): + section_keys = ['reading', 'listening', 'writing', 'speaking', 'level'] + extracted_sections = self._extract_existing_sections_from_body(data, section_keys) + return await self._service.calculate_grading_summary(extracted_sections) + + @staticmethod + def _extract_existing_sections_from_body(my_dict, keys_to_extract): + if 'sections' in my_dict and isinstance(my_dict['sections'], list) and len(my_dict['sections']) > 0: + return list( + filter( + lambda item: + 'code' in item and + item['code'] in keys_to_extract and + 'grade' in item and + 'name' in item, + my_dict['sections'] + ) + ) + diff --git a/app/controllers/impl/level.py b/app/controllers/impl/level.py index eacb202..7133a1e 100644 --- a/app/controllers/impl/level.py +++ b/app/controllers/impl/level.py @@ -1,23 +1,23 @@ -from fastapi 
import UploadFile -from typing import Dict - -from app.controllers.abc import ILevelController -from app.services.abc import ILevelService - - -class LevelController(ILevelController): - - def __init__(self, level_service: ILevelService): - self._service = level_service - - async def get_level_exam(self): - return await self._service.get_level_exam() - - async def get_level_utas(self): - return await self._service.get_level_utas() - - async def upload_level(self, file: UploadFile): - return await self._service.upload_level(file) - - async def get_custom_level(self, data: Dict): - return await self._service.get_custom_level(data) +from fastapi import UploadFile +from typing import Dict + +from app.controllers.abc import ILevelController +from app.services.abc import ILevelService + + +class LevelController(ILevelController): + + def __init__(self, level_service: ILevelService): + self._service = level_service + + async def get_level_exam(self): + return await self._service.get_level_exam() + + async def get_level_utas(self): + return await self._service.get_level_utas() + + async def upload_level(self, file: UploadFile): + return await self._service.upload_level(file) + + async def get_custom_level(self, data: Dict): + return await self._service.get_custom_level(data) diff --git a/app/controllers/impl/listening.py b/app/controllers/impl/listening.py index 3095388..e77e5f8 100644 --- a/app/controllers/impl/listening.py +++ b/app/controllers/impl/listening.py @@ -1,19 +1,19 @@ -from typing import List - -from app.controllers.abc import IListeningController -from app.dtos.listening import SaveListeningDTO -from app.services.abc import IListeningService - - -class ListeningController(IListeningController): - - def __init__(self, listening_service: IListeningService): - self._service = listening_service - - async def get_listening_question( - self, section_id: int, topic: str, req_exercises: List[str], difficulty: str - ): - return await 
self._service.get_listening_question(section_id, topic, req_exercises, difficulty) - - async def save_listening(self, data: SaveListeningDTO): - return await self._service.save_listening(data.parts, data.minTimer, data.difficulty, data.id) +from typing import List + +from app.controllers.abc import IListeningController +from app.dtos.listening import SaveListeningDTO +from app.services.abc import IListeningService + + +class ListeningController(IListeningController): + + def __init__(self, listening_service: IListeningService): + self._service = listening_service + + async def get_listening_question( + self, section_id: int, topic: str, req_exercises: List[str], difficulty: str + ): + return await self._service.get_listening_question(section_id, topic, req_exercises, difficulty) + + async def save_listening(self, data: SaveListeningDTO): + return await self._service.save_listening(data.parts, data.minTimer, data.difficulty, data.id) diff --git a/app/controllers/impl/reading.py b/app/controllers/impl/reading.py index d496c02..e4337c4 100644 --- a/app/controllers/impl/reading.py +++ b/app/controllers/impl/reading.py @@ -1,43 +1,43 @@ -import random -import logging -from typing import List - -from app.controllers.abc import IReadingController -from app.services.abc import IReadingService -from app.configs.constants import FieldsAndExercises -from app.helpers import ExercisesHelper - - -class ReadingController(IReadingController): - - def __init__(self, reading_service: IReadingService): - self._service = reading_service - self._logger = logging.getLogger(__name__) - self._passages = { - "passage_1": { - "start_id": 1, - "total_exercises": FieldsAndExercises.TOTAL_READING_PASSAGE_1_EXERCISES - }, - "passage_2": { - "start_id": 14, - "total_exercises": FieldsAndExercises.TOTAL_READING_PASSAGE_2_EXERCISES - }, - "passage_3": { - "start_id": 27, - "total_exercises": FieldsAndExercises.TOTAL_READING_PASSAGE_3_EXERCISES - } - } - - async def get_reading_passage(self, 
passage_id: int, topic: str, req_exercises: List[str], difficulty: str): - passage = self._passages[f'passage_{str(passage_id)}'] - - if len(req_exercises) == 0: - req_exercises = random.sample(FieldsAndExercises.READING_EXERCISE_TYPES, 2) - - number_of_exercises_q = ExercisesHelper.divide_number_into_parts( - passage["total_exercises"], len(req_exercises) - ) - - return await self._service.gen_reading_passage( - passage_id, topic, req_exercises, number_of_exercises_q, difficulty, passage["start_id"] - ) +import random +import logging +from typing import List + +from app.controllers.abc import IReadingController +from app.services.abc import IReadingService +from app.configs.constants import FieldsAndExercises +from app.helpers import ExercisesHelper + + +class ReadingController(IReadingController): + + def __init__(self, reading_service: IReadingService): + self._service = reading_service + self._logger = logging.getLogger(__name__) + self._passages = { + "passage_1": { + "start_id": 1, + "total_exercises": FieldsAndExercises.TOTAL_READING_PASSAGE_1_EXERCISES + }, + "passage_2": { + "start_id": 14, + "total_exercises": FieldsAndExercises.TOTAL_READING_PASSAGE_2_EXERCISES + }, + "passage_3": { + "start_id": 27, + "total_exercises": FieldsAndExercises.TOTAL_READING_PASSAGE_3_EXERCISES + } + } + + async def get_reading_passage(self, passage_id: int, topic: str, req_exercises: List[str], difficulty: str): + passage = self._passages[f'passage_{str(passage_id)}'] + + if len(req_exercises) == 0: + req_exercises = random.sample(FieldsAndExercises.READING_EXERCISE_TYPES, 2) + + number_of_exercises_q = ExercisesHelper.divide_number_into_parts( + passage["total_exercises"], len(req_exercises) + ) + + return await self._service.gen_reading_passage( + passage_id, topic, req_exercises, number_of_exercises_q, difficulty, passage["start_id"] + ) diff --git a/app/controllers/impl/speaking.py b/app/controllers/impl/speaking.py index 7c2a383..7190ce2 100644 --- 
a/app/controllers/impl/speaking.py +++ b/app/controllers/impl/speaking.py @@ -1,47 +1,47 @@ -import logging -import uuid -from typing import Optional - -from fastapi import BackgroundTasks - -from app.controllers.abc import ISpeakingController -from app.dtos.speaking import SaveSpeakingDTO - -from app.services.abc import ISpeakingService -from app.configs.constants import ExamVariant, MinTimers -from app.configs.question_templates import getSpeakingTemplate - - -class SpeakingController(ISpeakingController): - - def __init__(self, speaking_service: ISpeakingService): - self._service = speaking_service - self._logger = logging.getLogger(__name__) - - async def get_speaking_part(self, task: int, topic: str, difficulty: str, second_topic: Optional[str] = None): - return await self._service.get_speaking_part(task, topic, difficulty, second_topic) - - async def save_speaking(self, data: SaveSpeakingDTO, background_tasks: BackgroundTasks): - exercises = data.exercises - min_timer = data.minTimer - - template = getSpeakingTemplate() - template["minTimer"] = min_timer - - if min_timer < MinTimers.SPEAKING_MIN_TIMER_DEFAULT: - template["variant"] = ExamVariant.PARTIAL.value - else: - template["variant"] = ExamVariant.FULL.value - - req_id = str(uuid.uuid4()) - self._logger.info(f'Received request to save speaking with id: {req_id}') - - background_tasks.add_task(self._service.create_videos_and_save_to_db, exercises, template, req_id) - - self._logger.info('Started background task to save speaking.') - - # Return response without waiting for create_videos_and_save_to_db to finish - return {**template, "id": req_id} - - async def generate_video(self, *args, **kwargs): - return await self._service.generate_video(*args, **kwargs) +import logging +import uuid +from typing import Optional + +from fastapi import BackgroundTasks + +from app.controllers.abc import ISpeakingController +from app.dtos.speaking import SaveSpeakingDTO + +from app.services.abc import ISpeakingService 
+from app.configs.constants import ExamVariant, MinTimers +from app.configs.question_templates import getSpeakingTemplate + + +class SpeakingController(ISpeakingController): + + def __init__(self, speaking_service: ISpeakingService): + self._service = speaking_service + self._logger = logging.getLogger(__name__) + + async def get_speaking_part(self, task: int, topic: str, difficulty: str, second_topic: Optional[str] = None): + return await self._service.get_speaking_part(task, topic, difficulty, second_topic) + + async def save_speaking(self, data: SaveSpeakingDTO, background_tasks: BackgroundTasks): + exercises = data.exercises + min_timer = data.minTimer + + template = getSpeakingTemplate() + template["minTimer"] = min_timer + + if min_timer < MinTimers.SPEAKING_MIN_TIMER_DEFAULT: + template["variant"] = ExamVariant.PARTIAL.value + else: + template["variant"] = ExamVariant.FULL.value + + req_id = str(uuid.uuid4()) + self._logger.info(f'Received request to save speaking with id: {req_id}') + + background_tasks.add_task(self._service.create_videos_and_save_to_db, exercises, template, req_id) + + self._logger.info('Started background task to save speaking.') + + # Return response without waiting for create_videos_and_save_to_db to finish + return {**template, "id": req_id} + + async def generate_video(self, *args, **kwargs): + return await self._service.generate_video(*args, **kwargs) diff --git a/app/controllers/impl/training.py b/app/controllers/impl/training.py index dc39017..dab335d 100644 --- a/app/controllers/impl/training.py +++ b/app/controllers/impl/training.py @@ -1,17 +1,17 @@ -from typing import Dict - -from app.controllers.abc import ITrainingController -from app.dtos.training import FetchTipsDTO -from app.services.abc import ITrainingService - - -class TrainingController(ITrainingController): - - def __init__(self, training_service: ITrainingService): - self._service = training_service - - async def fetch_tips(self, data: FetchTipsDTO): - return await 
self._service.fetch_tips(data.context, data.question, data.answer, data.correct_answer) - - async def get_training_content(self, data: Dict): - return await self._service.get_training_content(data) +from typing import Dict + +from app.controllers.abc import ITrainingController +from app.dtos.training import FetchTipsDTO +from app.services.abc import ITrainingService + + +class TrainingController(ITrainingController): + + def __init__(self, training_service: ITrainingService): + self._service = training_service + + async def fetch_tips(self, data: FetchTipsDTO): + return await self._service.fetch_tips(data.context, data.question, data.answer, data.correct_answer) + + async def get_training_content(self, data: Dict): + return await self._service.get_training_content(data) diff --git a/app/controllers/impl/user.py b/app/controllers/impl/user.py new file mode 100644 index 0000000..9952fb8 --- /dev/null +++ b/app/controllers/impl/user.py @@ -0,0 +1,12 @@ +from app.controllers.abc import IUserController +from app.dtos.user_batch import BatchUsersDTO +from app.services.abc import IUserService + + +class UserController(IUserController): + + def __init__(self, user_service: IUserService): + self._service = user_service + + async def batch_import(self, batch: BatchUsersDTO): + return await self._service.fetch_tips(batch) diff --git a/app/controllers/impl/writing.py b/app/controllers/impl/writing.py index b01726d..c097d5c 100644 --- a/app/controllers/impl/writing.py +++ b/app/controllers/impl/writing.py @@ -1,11 +1,11 @@ -from app.controllers.abc import IWritingController -from app.services.abc import IWritingService - - -class WritingController(IWritingController): - - def __init__(self, writing_service: IWritingService): - self._service = writing_service - - async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): - return await self._service.get_writing_task_general_question(task, topic, difficulty) +from app.controllers.abc import 
IWritingController +from app.services.abc import IWritingService + + +class WritingController(IWritingController): + + def __init__(self, writing_service: IWritingService): + self._service = writing_service + + async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): + return await self._service.get_writing_task_general_question(task, topic, difficulty) diff --git a/app/dtos/exam.py b/app/dtos/exam.py index 779daea..92c217b 100644 --- a/app/dtos/exam.py +++ b/app/dtos/exam.py @@ -1,57 +1,57 @@ -from pydantic import BaseModel, Field -from typing import List, Dict, Union, Optional -from uuid import uuid4, UUID - - -class Option(BaseModel): - id: str - text: str - - -class MultipleChoiceQuestion(BaseModel): - id: str - prompt: str - variant: str = "text" - solution: str - options: List[Option] - - -class MultipleChoiceExercise(BaseModel): - id: UUID = Field(default_factory=uuid4) - type: str = "multipleChoice" - prompt: str = "Select the appropriate option." - questions: List[MultipleChoiceQuestion] - userSolutions: List = Field(default_factory=list) - - -class FillBlanksWord(BaseModel): - id: str - options: Dict[str, str] - - -class FillBlanksSolution(BaseModel): - id: str - solution: str - - -class FillBlanksExercise(BaseModel): - id: UUID = Field(default_factory=uuid4) - type: str = "fillBlanks" - variant: str = "mc" - prompt: str = "Click a blank to select the appropriate word for it." 
- text: str - solutions: List[FillBlanksSolution] - words: List[FillBlanksWord] - userSolutions: List = Field(default_factory=list) - - -Exercise = Union[MultipleChoiceExercise, FillBlanksExercise] - - -class Part(BaseModel): - exercises: List[Exercise] - context: Optional[str] = Field(default=None) - - -class Exam(BaseModel): - parts: List[Part] +from pydantic import BaseModel, Field +from typing import List, Dict, Union, Optional +from uuid import uuid4, UUID + + +class Option(BaseModel): + id: str + text: str + + +class MultipleChoiceQuestion(BaseModel): + id: str + prompt: str + variant: str = "text" + solution: str + options: List[Option] + + +class MultipleChoiceExercise(BaseModel): + id: UUID = Field(default_factory=uuid4) + type: str = "multipleChoice" + prompt: str = "Select the appropriate option." + questions: List[MultipleChoiceQuestion] + userSolutions: List = Field(default_factory=list) + + +class FillBlanksWord(BaseModel): + id: str + options: Dict[str, str] + + +class FillBlanksSolution(BaseModel): + id: str + solution: str + + +class FillBlanksExercise(BaseModel): + id: UUID = Field(default_factory=uuid4) + type: str = "fillBlanks" + variant: str = "mc" + prompt: str = "Click a blank to select the appropriate word for it." 
+ text: str + solutions: List[FillBlanksSolution] + words: List[FillBlanksWord] + userSolutions: List = Field(default_factory=list) + + +Exercise = Union[MultipleChoiceExercise, FillBlanksExercise] + + +class Part(BaseModel): + exercises: List[Exercise] + context: Optional[str] = Field(default=None) + + +class Exam(BaseModel): + parts: List[Part] diff --git a/app/dtos/listening.py b/app/dtos/listening.py index d7e44db..03270f8 100644 --- a/app/dtos/listening.py +++ b/app/dtos/listening.py @@ -1,14 +1,14 @@ -import random -import uuid -from typing import List, Dict - -from pydantic import BaseModel - -from app.configs.constants import MinTimers, EducationalContent - - -class SaveListeningDTO(BaseModel): - parts: List[Dict] - minTimer: int = MinTimers.LISTENING_MIN_TIMER_DEFAULT - difficulty: str = random.choice(EducationalContent.DIFFICULTIES) - id: str = str(uuid.uuid4()) +import random +import uuid +from typing import List, Dict + +from pydantic import BaseModel + +from app.configs.constants import MinTimers, EducationalContent + + +class SaveListeningDTO(BaseModel): + parts: List[Dict] + minTimer: int = MinTimers.LISTENING_MIN_TIMER_DEFAULT + difficulty: str = random.choice(EducationalContent.DIFFICULTIES) + id: str = str(uuid.uuid4()) diff --git a/app/dtos/sheet.py b/app/dtos/sheet.py index 8efac82..68c3e16 100644 --- a/app/dtos/sheet.py +++ b/app/dtos/sheet.py @@ -1,29 +1,29 @@ -from pydantic import BaseModel -from typing import List, Dict, Union, Any, Optional - - -class Option(BaseModel): - id: str - text: str - - -class MultipleChoiceQuestion(BaseModel): - type: str = "multipleChoice" - id: str - prompt: str - variant: str = "text" - options: List[Option] - - -class FillBlanksWord(BaseModel): - type: str = "fillBlanks" - id: str - options: Dict[str, str] - - -Component = Union[MultipleChoiceQuestion, FillBlanksWord, Dict[str, Any]] - - -class Sheet(BaseModel): - batch: Optional[int] = None - components: List[Component] +from pydantic import BaseModel +from 
typing import List, Dict, Union, Any, Optional + + +class Option(BaseModel): + id: str + text: str + + +class MultipleChoiceQuestion(BaseModel): + type: str = "multipleChoice" + id: str + prompt: str + variant: str = "text" + options: List[Option] + + +class FillBlanksWord(BaseModel): + type: str = "fillBlanks" + id: str + options: Dict[str, str] + + +Component = Union[MultipleChoiceQuestion, FillBlanksWord, Dict[str, Any]] + + +class Sheet(BaseModel): + batch: Optional[int] = None + components: List[Component] diff --git a/app/dtos/speaking.py b/app/dtos/speaking.py index 7c8b124..243b4d4 100644 --- a/app/dtos/speaking.py +++ b/app/dtos/speaking.py @@ -1,42 +1,42 @@ -import random -from typing import List, Dict - -from pydantic import BaseModel - -from app.configs.constants import MinTimers, AvatarEnum - - -class SaveSpeakingDTO(BaseModel): - exercises: List[Dict] - minTimer: int = MinTimers.SPEAKING_MIN_TIMER_DEFAULT - - -class GradeSpeakingDTO(BaseModel): - question: str - answer: str - - -class GradeSpeakingAnswersDTO(BaseModel): - answers: List[Dict] - - -class GenerateVideo1DTO(BaseModel): - avatar: str = (random.choice(list(AvatarEnum))).value - questions: List[str] - first_topic: str - second_topic: str - - -class GenerateVideo2DTO(BaseModel): - avatar: str = (random.choice(list(AvatarEnum))).value - prompts: List[str] = [] - suffix: str = "" - question: str - topic: str - - -class GenerateVideo3DTO(BaseModel): - avatar: str = (random.choice(list(AvatarEnum))).value - questions: List[str] - topic: str - +import random +from typing import List, Dict + +from pydantic import BaseModel + +from app.configs.constants import MinTimers, AvatarEnum + + +class SaveSpeakingDTO(BaseModel): + exercises: List[Dict] + minTimer: int = MinTimers.SPEAKING_MIN_TIMER_DEFAULT + + +class GradeSpeakingDTO(BaseModel): + question: str + answer: str + + +class GradeSpeakingAnswersDTO(BaseModel): + answers: List[Dict] + + +class GenerateVideo1DTO(BaseModel): + avatar: str = 
(random.choice(list(AvatarEnum))).value + questions: List[str] + first_topic: str + second_topic: str + + +class GenerateVideo2DTO(BaseModel): + avatar: str = (random.choice(list(AvatarEnum))).value + prompts: List[str] = [] + suffix: str = "" + question: str + topic: str + + +class GenerateVideo3DTO(BaseModel): + avatar: str = (random.choice(list(AvatarEnum))).value + questions: List[str] + topic: str + diff --git a/app/dtos/training.py b/app/dtos/training.py index d5de433..0b9c272 100644 --- a/app/dtos/training.py +++ b/app/dtos/training.py @@ -1,37 +1,37 @@ -from pydantic import BaseModel -from typing import List - - -class FetchTipsDTO(BaseModel): - context: str - question: str - answer: str - correct_answer: str - - -class QueryDTO(BaseModel): - category: str - text: str - - -class DetailsDTO(BaseModel): - exam_id: str - date: int - performance_comment: str - detailed_summary: str - - -class WeakAreaDTO(BaseModel): - area: str - comment: str - - -class TrainingContentDTO(BaseModel): - details: List[DetailsDTO] - weak_areas: List[WeakAreaDTO] - queries: List[QueryDTO] - - -class TipsDTO(BaseModel): - tip_ids: List[str] - +from pydantic import BaseModel +from typing import List + + +class FetchTipsDTO(BaseModel): + context: str + question: str + answer: str + correct_answer: str + + +class QueryDTO(BaseModel): + category: str + text: str + + +class DetailsDTO(BaseModel): + exam_id: str + date: int + performance_comment: str + detailed_summary: str + + +class WeakAreaDTO(BaseModel): + area: str + comment: str + + +class TrainingContentDTO(BaseModel): + details: List[DetailsDTO] + weak_areas: List[WeakAreaDTO] + queries: List[QueryDTO] + + +class TipsDTO(BaseModel): + tip_ids: List[str] + diff --git a/app/dtos/user_batch.py b/app/dtos/user_batch.py new file mode 100644 index 0000000..c746373 --- /dev/null +++ b/app/dtos/user_batch.py @@ -0,0 +1,30 @@ +import uuid +from typing import Optional + +from pydantic import BaseModel, Field + + +class 
DemographicInfo(BaseModel): + phone: str + passport_id: Optional[str] = None + country: Optional[str] = None + + +class UserDTO(BaseModel): + id: uuid.UUID = Field(default_factory=uuid.uuid4) + email: str + name: str + type: str + passport_id: str + passwordHash: str + passwordSalt: str + groupName: Optional[str] = None + corporate: Optional[str] = None + studentID: Optional[str | int] = None + expiryDate: Optional[str] = None + demographicInformation: Optional[DemographicInfo] = None + + +class BatchUsersDTO(BaseModel): + makerID: str + users: list[UserDTO] diff --git a/app/dtos/writing.py b/app/dtos/writing.py index 7756bfe..5d9caf7 100644 --- a/app/dtos/writing.py +++ b/app/dtos/writing.py @@ -1,6 +1,6 @@ -from pydantic import BaseModel - - -class WritingGradeTaskDTO(BaseModel): - question: str - answer: str +from pydantic import BaseModel + + +class WritingGradeTaskDTO(BaseModel): + question: str + answer: str diff --git a/app/exceptions/__init__.py b/app/exceptions/__init__.py index 59e1569..c1305bb 100644 --- a/app/exceptions/__init__.py +++ b/app/exceptions/__init__.py @@ -1,6 +1,6 @@ -from .exceptions import CustomException, UnauthorizedException - -__all__ = [ - "CustomException", - "UnauthorizedException" -] +from .exceptions import CustomException, UnauthorizedException + +__all__ = [ + "CustomException", + "UnauthorizedException" +] diff --git a/app/exceptions/exceptions.py b/app/exceptions/exceptions.py index 2656378..c5ee41c 100644 --- a/app/exceptions/exceptions.py +++ b/app/exceptions/exceptions.py @@ -1,17 +1,17 @@ -from http import HTTPStatus - - -class CustomException(Exception): - code = HTTPStatus.INTERNAL_SERVER_ERROR - error_code = HTTPStatus.INTERNAL_SERVER_ERROR - message = HTTPStatus.INTERNAL_SERVER_ERROR.description - - def __init__(self, message=None): - if message: - self.message = message - - -class UnauthorizedException(CustomException): - code = HTTPStatus.UNAUTHORIZED - error_code = HTTPStatus.UNAUTHORIZED - message = 
HTTPStatus.UNAUTHORIZED.description +from http import HTTPStatus + + +class CustomException(Exception): + code = HTTPStatus.INTERNAL_SERVER_ERROR + error_code = HTTPStatus.INTERNAL_SERVER_ERROR + message = HTTPStatus.INTERNAL_SERVER_ERROR.description + + def __init__(self, message=None): + if message: + self.message = message + + +class UnauthorizedException(CustomException): + code = HTTPStatus.UNAUTHORIZED + error_code = HTTPStatus.UNAUTHORIZED + message = HTTPStatus.UNAUTHORIZED.description diff --git a/app/helpers/__init__.py b/app/helpers/__init__.py index eddd6da..4647af8 100644 --- a/app/helpers/__init__.py +++ b/app/helpers/__init__.py @@ -1,13 +1,13 @@ -from .file import FileHelper -from .text import TextHelper -from .token_counter import count_tokens -from .exercises import ExercisesHelper -from .logger import LoggerHelper - -__all__ = [ - "FileHelper", - "TextHelper", - "count_tokens", - "ExercisesHelper", - "LoggerHelper" -] +from .file import FileHelper +from .text import TextHelper +from .token_counter import count_tokens +from .exercises import ExercisesHelper +from .logger import LoggerHelper + +__all__ = [ + "FileHelper", + "TextHelper", + "count_tokens", + "ExercisesHelper", + "LoggerHelper" +] diff --git a/app/helpers/exercises.py b/app/helpers/exercises.py index be40bc8..662d960 100644 --- a/app/helpers/exercises.py +++ b/app/helpers/exercises.py @@ -1,249 +1,249 @@ -import queue -import random -import re -import string -from wonderwords import RandomWord - -from .text import TextHelper - - -class ExercisesHelper: - - @staticmethod - def divide_number_into_parts(number, parts): - if number < parts: - return None - - part_size = number // parts - remaining = number % parts - - q = queue.Queue() - - for i in range(parts): - if i < remaining: - q.put(part_size + 1) - else: - q.put(part_size) - - return q - - @staticmethod - def fix_exercise_ids(exercise, start_id): - # Initialize the starting ID for the first exercise - current_id = start_id - - 
questions = exercise["questions"] - - # Iterate through questions and update the "id" value - for question in questions: - question["id"] = str(current_id) - current_id += 1 - - return exercise - - @staticmethod - def replace_first_occurrences_with_placeholders(text: str, words_to_replace: list, start_id): - for i, word in enumerate(words_to_replace, start=start_id): - # Create a case-insensitive regular expression pattern - pattern = re.compile(r'\b' + re.escape(word) + r'\b', re.IGNORECASE) - placeholder = '{{' + str(i) + '}}' - text = pattern.sub(placeholder, text, 1) - return text - - @staticmethod - def replace_first_occurrences_with_placeholders_notes(notes: list, words_to_replace: list, start_id): - replaced_notes = [] - for i, note in enumerate(notes, start=0): - word = words_to_replace[i] - pattern = re.compile(r'\b' + re.escape(word) + r'\b', re.IGNORECASE) - placeholder = '{{' + str(start_id + i) + '}}' - note = pattern.sub(placeholder, note, 1) - replaced_notes.append(note) - return replaced_notes - - @staticmethod - def add_random_words_and_shuffle(word_array, num_random_words): - r = RandomWord() - random_words_selected = r.random_words(num_random_words) - - combined_array = word_array + random_words_selected - - random.shuffle(combined_array) - - result = [] - for i, word in enumerate(combined_array): - letter = chr(65 + i) # chr(65) is 'A' - result.append({"letter": letter, "word": word}) - - return result - - @staticmethod - def fillblanks_build_solutions_array(words, start_id): - solutions = [] - for i, word in enumerate(words, start=start_id): - solutions.append( - { - "id": str(i), - "solution": word - } - ) - return solutions - - @staticmethod - def remove_excess_questions(questions: [], quantity): - count_true = 0 - result = [] - - for item in reversed(questions): - if item.get('solution') == 'true' and count_true < quantity: - count_true += 1 - else: - result.append(item) - - result.reverse() - return result - - @staticmethod - def 
build_write_blanks_text(questions: [], start_id): - result = "" - for i, q in enumerate(questions, start=start_id): - placeholder = '{{' + str(i) + '}}' - result = result + q["question"] + placeholder + "\\n" - return result - - @staticmethod - def build_write_blanks_text_form(form: [], start_id): - result = "" - replaced_words = [] - for i, entry in enumerate(form, start=start_id): - placeholder = '{{' + str(i) + '}}' - # Use regular expression to find the string after ':' - match = re.search(r'(?<=:)\s*(.*)', entry) - # Extract the matched string - original_string = match.group(1) - # Split the string into words - words = re.findall(r'\b\w+\b', original_string) - # Remove words with only one letter - filtered_words = [word for word in words if len(word) > 1] - # Choose a random word from the list of words - selected_word = random.choice(filtered_words) - pattern = re.compile(r'\b' + re.escape(selected_word) + r'\b', re.IGNORECASE) - - # Replace the chosen word with the placeholder - replaced_string = pattern.sub(placeholder, original_string, 1) - # Construct the final replaced string - replaced_string = entry.replace(original_string, replaced_string) - - result = result + replaced_string + "\\n" - # Save the replaced word or use it as needed - # For example, you can save it to a file or a list - replaced_words.append(selected_word) - return result, replaced_words - - @staticmethod - def build_write_blanks_solutions(questions: [], start_id): - solutions = [] - for i, q in enumerate(questions, start=start_id): - solution = [q["possible_answers"]] if isinstance(q["possible_answers"], str) else q["possible_answers"] - - solutions.append( - { - "id": str(i), - "solution": solution - } - ) - return solutions - - @staticmethod - def build_write_blanks_solutions_listening(words: [], start_id): - solutions = [] - for i, word in enumerate(words, start=start_id): - solution = [word] if isinstance(word, str) else word - - solutions.append( - { - "id": str(i), - "solution": 
solution - } - ) - return solutions - - @staticmethod - def answer_word_limit_ok(question): - # Check if any option in any solution has more than three words - return not any( - len(option.split()) > 3 - for solution in question["solutions"] - for option in solution["solution"] - ) - - @staticmethod - def assign_letters_to_paragraphs(paragraphs): - result = [] - letters = iter(string.ascii_uppercase) - for paragraph in paragraphs.split("\n\n"): - if TextHelper.has_x_words(paragraph, 10): - result.append({'paragraph': paragraph.strip(), 'letter': next(letters)}) - return result - - @staticmethod - def contains_empty_dict(arr): - return any(elem == {} for elem in arr) - - @staticmethod - def fix_writing_overall(overall: float, task_response: dict): - grades = [category["grade"] for category in task_response.values()] - - if overall > max(grades) or overall < min(grades): - total_sum = sum(grades) - average = total_sum / len(grades) - rounded_average = round(average, 0) - return rounded_average - - return overall - - @staticmethod - def build_options(ideas): - options = [] - letters = iter(string.ascii_uppercase) - for idea in ideas: - options.append({ - "id": next(letters), - "sentence": idea["from"] - }) - return options - - @staticmethod - def build_sentences(ideas, start_id): - sentences = [] - letters = iter(string.ascii_uppercase) - for idea in ideas: - sentences.append({ - "solution": next(letters), - "sentence": idea["idea"] - }) - - random.shuffle(sentences) - for i, sentence in enumerate(sentences, start=start_id): - sentence["id"] = i - return sentences - - @staticmethod - def randomize_mc_options_order(questions): - option_ids = ['A', 'B', 'C', 'D'] - - for question in questions: - # Store the original solution text - original_solution_text = next( - option['text'] for option in question['options'] if option['id'] == question['solution']) - - # Shuffle the options - random.shuffle(question['options']) - - # Update the option ids and find the new solution 
id - for idx, option in enumerate(question['options']): - option['id'] = option_ids[idx] - if option['text'] == original_solution_text: - question['solution'] = option['id'] - - return questions +import queue +import random +import re +import string +from wonderwords import RandomWord + +from .text import TextHelper + + +class ExercisesHelper: + + @staticmethod + def divide_number_into_parts(number, parts): + if number < parts: + return None + + part_size = number // parts + remaining = number % parts + + q = queue.Queue() + + for i in range(parts): + if i < remaining: + q.put(part_size + 1) + else: + q.put(part_size) + + return q + + @staticmethod + def fix_exercise_ids(exercise, start_id): + # Initialize the starting ID for the first exercise + current_id = start_id + + questions = exercise["questions"] + + # Iterate through questions and update the "id" value + for question in questions: + question["id"] = str(current_id) + current_id += 1 + + return exercise + + @staticmethod + def replace_first_occurrences_with_placeholders(text: str, words_to_replace: list, start_id): + for i, word in enumerate(words_to_replace, start=start_id): + # Create a case-insensitive regular expression pattern + pattern = re.compile(r'\b' + re.escape(word) + r'\b', re.IGNORECASE) + placeholder = '{{' + str(i) + '}}' + text = pattern.sub(placeholder, text, 1) + return text + + @staticmethod + def replace_first_occurrences_with_placeholders_notes(notes: list, words_to_replace: list, start_id): + replaced_notes = [] + for i, note in enumerate(notes, start=0): + word = words_to_replace[i] + pattern = re.compile(r'\b' + re.escape(word) + r'\b', re.IGNORECASE) + placeholder = '{{' + str(start_id + i) + '}}' + note = pattern.sub(placeholder, note, 1) + replaced_notes.append(note) + return replaced_notes + + @staticmethod + def add_random_words_and_shuffle(word_array, num_random_words): + r = RandomWord() + random_words_selected = r.random_words(num_random_words) + + combined_array = 
word_array + random_words_selected + + random.shuffle(combined_array) + + result = [] + for i, word in enumerate(combined_array): + letter = chr(65 + i) # chr(65) is 'A' + result.append({"letter": letter, "word": word}) + + return result + + @staticmethod + def fillblanks_build_solutions_array(words, start_id): + solutions = [] + for i, word in enumerate(words, start=start_id): + solutions.append( + { + "id": str(i), + "solution": word + } + ) + return solutions + + @staticmethod + def remove_excess_questions(questions: [], quantity): + count_true = 0 + result = [] + + for item in reversed(questions): + if item.get('solution') == 'true' and count_true < quantity: + count_true += 1 + else: + result.append(item) + + result.reverse() + return result + + @staticmethod + def build_write_blanks_text(questions: [], start_id): + result = "" + for i, q in enumerate(questions, start=start_id): + placeholder = '{{' + str(i) + '}}' + result = result + q["question"] + placeholder + "\\n" + return result + + @staticmethod + def build_write_blanks_text_form(form: [], start_id): + result = "" + replaced_words = [] + for i, entry in enumerate(form, start=start_id): + placeholder = '{{' + str(i) + '}}' + # Use regular expression to find the string after ':' + match = re.search(r'(?<=:)\s*(.*)', entry) + # Extract the matched string + original_string = match.group(1) + # Split the string into words + words = re.findall(r'\b\w+\b', original_string) + # Remove words with only one letter + filtered_words = [word for word in words if len(word) > 1] + # Choose a random word from the list of words + selected_word = random.choice(filtered_words) + pattern = re.compile(r'\b' + re.escape(selected_word) + r'\b', re.IGNORECASE) + + # Replace the chosen word with the placeholder + replaced_string = pattern.sub(placeholder, original_string, 1) + # Construct the final replaced string + replaced_string = entry.replace(original_string, replaced_string) + + result = result + replaced_string + "\\n" + 
# Save the replaced word or use it as needed + # For example, you can save it to a file or a list + replaced_words.append(selected_word) + return result, replaced_words + + @staticmethod + def build_write_blanks_solutions(questions: [], start_id): + solutions = [] + for i, q in enumerate(questions, start=start_id): + solution = [q["possible_answers"]] if isinstance(q["possible_answers"], str) else q["possible_answers"] + + solutions.append( + { + "id": str(i), + "solution": solution + } + ) + return solutions + + @staticmethod + def build_write_blanks_solutions_listening(words: [], start_id): + solutions = [] + for i, word in enumerate(words, start=start_id): + solution = [word] if isinstance(word, str) else word + + solutions.append( + { + "id": str(i), + "solution": solution + } + ) + return solutions + + @staticmethod + def answer_word_limit_ok(question): + # Check if any option in any solution has more than three words + return not any( + len(option.split()) > 3 + for solution in question["solutions"] + for option in solution["solution"] + ) + + @staticmethod + def assign_letters_to_paragraphs(paragraphs): + result = [] + letters = iter(string.ascii_uppercase) + for paragraph in paragraphs.split("\n\n"): + if TextHelper.has_x_words(paragraph, 10): + result.append({'paragraph': paragraph.strip(), 'letter': next(letters)}) + return result + + @staticmethod + def contains_empty_dict(arr): + return any(elem == {} for elem in arr) + + @staticmethod + def fix_writing_overall(overall: float, task_response: dict): + grades = [category["grade"] for category in task_response.values()] + + if overall > max(grades) or overall < min(grades): + total_sum = sum(grades) + average = total_sum / len(grades) + rounded_average = round(average, 0) + return rounded_average + + return overall + + @staticmethod + def build_options(ideas): + options = [] + letters = iter(string.ascii_uppercase) + for idea in ideas: + options.append({ + "id": next(letters), + "sentence": idea["from"] + 
}) + return options + + @staticmethod + def build_sentences(ideas, start_id): + sentences = [] + letters = iter(string.ascii_uppercase) + for idea in ideas: + sentences.append({ + "solution": next(letters), + "sentence": idea["idea"] + }) + + random.shuffle(sentences) + for i, sentence in enumerate(sentences, start=start_id): + sentence["id"] = i + return sentences + + @staticmethod + def randomize_mc_options_order(questions): + option_ids = ['A', 'B', 'C', 'D'] + + for question in questions: + # Store the original solution text + original_solution_text = next( + option['text'] for option in question['options'] if option['id'] == question['solution']) + + # Shuffle the options + random.shuffle(question['options']) + + # Update the option ids and find the new solution id + for idx, option in enumerate(question['options']): + option['id'] = option_ids[idx] + if option['text'] == original_solution_text: + question['solution'] = option['id'] + + return questions diff --git a/app/helpers/file.py b/app/helpers/file.py index aa3230b..6762ece 100644 --- a/app/helpers/file.py +++ b/app/helpers/file.py @@ -1,95 +1,114 @@ -import datetime -from pathlib import Path -import base64 -import io -import os -import shutil -import subprocess -from typing import Optional - -import numpy as np -import pypandoc -from PIL import Image - -import aiofiles - - -class FileHelper: - - @staticmethod - def delete_files_older_than_one_day(directory: str): - current_time = datetime.datetime.now() - - for entry in os.scandir(directory): - if entry.is_file(): - file_path = Path(entry) - file_name = file_path.name - file_modified_time = datetime.datetime.fromtimestamp(file_path.stat().st_mtime) - time_difference = current_time - file_modified_time - if time_difference.days > 1 and "placeholder" not in file_name: - file_path.unlink() - print(f"Deleted file: {file_path}") - - # Supposedly pandoc covers a wide range of file extensions only tested with docx - @staticmethod - def 
convert_file_to_pdf(input_path: str, output_path: str): - pypandoc.convert_file(input_path, 'pdf', outputfile=output_path, extra_args=[ - '-V', 'geometry:paperwidth=5.5in', - '-V', 'geometry:paperheight=8.5in', - '-V', 'geometry:margin=0.5in', - '-V', 'pagestyle=empty' - ]) - - @staticmethod - def convert_file_to_html(input_path: str, output_path: str): - pypandoc.convert_file(input_path, 'html', outputfile=output_path) - - @staticmethod - def pdf_to_png(path_id: str): - to_png = f"pdftoppm -png exercises.pdf page" - result = subprocess.run(to_png, shell=True, cwd=f'./tmp/{path_id}', capture_output=True, text=True) - if result.returncode != 0: - raise Exception( - f"Couldn't convert pdf to png. Failed to run command '{to_png}' -> ```cmd {result.stderr}```") - - @staticmethod - def is_page_blank(image_bytes: bytes, image_threshold=10) -> bool: - with Image.open(io.BytesIO(image_bytes)) as img: - img_gray = img.convert('L') - img_array = np.array(img_gray) - non_white_pixels = np.sum(img_array < 255) - - return non_white_pixels <= image_threshold - - @classmethod - async def _encode_image(cls, image_path: str, image_threshold=10) -> Optional[str]: - async with aiofiles.open(image_path, "rb") as image_file: - image_bytes = await image_file.read() - - if cls.is_page_blank(image_bytes, image_threshold): - return None - - return base64.b64encode(image_bytes).decode('utf-8') - - @classmethod - def b64_pngs(cls, path_id: str, files: list[str]): - png_messages = [] - for filename in files: - b64_string = cls._encode_image(os.path.join(f'./tmp/{path_id}', filename)) - if b64_string: - png_messages.append({ - "type": "image_url", - "image_url": { - "url": f"data:image/png;base64,{b64_string}" - } - }) - return png_messages - - @staticmethod - def remove_directory(path): - try: - if os.path.exists(path): - if os.path.isdir(path): - shutil.rmtree(path) - except Exception as e: - print(f"An error occurred while trying to remove {path}: {str(e)}") +import base64 +import io 
+import os +import shutil +import subprocess +import uuid +import datetime +from pathlib import Path +from typing import Optional, Tuple + +import aiofiles +import numpy as np +import pypandoc +from PIL import Image + + +class FileHelper: + + @staticmethod + def delete_files_older_than_one_day(directory: str): + current_time = datetime.datetime.now() + + for entry in os.scandir(directory): + if entry.is_file(): + file_path = Path(entry) + file_name = file_path.name + file_modified_time = datetime.datetime.fromtimestamp(file_path.stat().st_mtime) + time_difference = current_time - file_modified_time + if time_difference.days > 1 and "placeholder" not in file_name: + file_path.unlink() + print(f"Deleted file: {file_path}") + + # Supposedly pandoc covers a wide range of file extensions only tested with docx + @staticmethod + def convert_file_to_pdf(input_path: str, output_path: str): + pypandoc.convert_file(input_path, 'pdf', outputfile=output_path, extra_args=[ + '-V', 'geometry:paperwidth=5.5in', + '-V', 'geometry:paperheight=8.5in', + '-V', 'geometry:margin=0.5in', + '-V', 'pagestyle=empty' + ]) + + @staticmethod + def convert_file_to_html(input_path: str, output_path: str): + pypandoc.convert_file(input_path, 'html', outputfile=output_path) + + @staticmethod + def pdf_to_png(path_id: str): + to_png = f"pdftoppm -png exercises.pdf page" + result = subprocess.run(to_png, shell=True, cwd=f'./tmp/{path_id}', capture_output=True, text=True) + if result.returncode != 0: + raise Exception( + f"Couldn't convert pdf to png. 
Failed to run command '{to_png}' -> ```cmd {result.stderr}```") + + @staticmethod + def is_page_blank(image_bytes: bytes, image_threshold=10) -> bool: + with Image.open(io.BytesIO(image_bytes)) as img: + img_gray = img.convert('L') + img_array = np.array(img_gray) + non_white_pixels = np.sum(img_array < 255) + + return non_white_pixels <= image_threshold + + @classmethod + async def _encode_image(cls, image_path: str, image_threshold=10) -> Optional[str]: + async with aiofiles.open(image_path, "rb") as image_file: + image_bytes = await image_file.read() + + if cls.is_page_blank(image_bytes, image_threshold): + return None + + return base64.b64encode(image_bytes).decode('utf-8') + + @classmethod + async def b64_pngs(cls, path_id: str, files: list[str]): + png_messages = [] + for filename in files: + b64_string = await cls._encode_image(os.path.join(f'./tmp/{path_id}', filename)) + if b64_string: + png_messages.append({ + "type": "image_url", + "image_url": { + "url": f"data:image/png;base64,{b64_string}" + } + }) + return png_messages + + @staticmethod + def remove_directory(path): + try: + if os.path.exists(path): + if os.path.isdir(path): + shutil.rmtree(path) + except Exception as e: + print(f"An error occurred while trying to remove {path}: {str(e)}") + + @staticmethod + def remove_file(file_path): + try: + if os.path.exists(file_path): + if os.path.isfile(file_path): + os.remove(file_path) + except Exception as e: + print(f"An error occurred while trying to remove the file {file_path}: {str(e)}") + + @staticmethod + def save_upload(file) -> Tuple[str, str]: + ext = file.filename.split('.')[-1] + path_id = str(uuid.uuid4()) + os.makedirs(f'./tmp/{path_id}', exist_ok=True) + + tmp_filename = f'./tmp/{path_id}/uploaded.{ext}' + file.save(tmp_filename) + return ext, path_id diff --git a/app/helpers/logger.py b/app/helpers/logger.py index 762766a..b8a5005 100644 --- a/app/helpers/logger.py +++ b/app/helpers/logger.py @@ -1,23 +1,23 @@ -import logging -from functools 
import wraps - - -class LoggerHelper: - - @staticmethod - def suppress_loggers(): - def decorator(f): - @wraps(f) - def wrapped(*args, **kwargs): - root_logger = logging.getLogger() - original_level = root_logger.level - - root_logger.setLevel(logging.ERROR) - - try: - return f(*args, **kwargs) - finally: - root_logger.setLevel(original_level) - - return wrapped - return decorator +import logging +from functools import wraps + + +class LoggerHelper: + + @staticmethod + def suppress_loggers(): + def decorator(f): + @wraps(f) + def wrapped(*args, **kwargs): + root_logger = logging.getLogger() + original_level = root_logger.level + + root_logger.setLevel(logging.ERROR) + + try: + return f(*args, **kwargs) + finally: + root_logger.setLevel(original_level) + + return wrapped + return decorator diff --git a/app/helpers/text.py b/app/helpers/text.py index df0868d..d945438 100644 --- a/app/helpers/text.py +++ b/app/helpers/text.py @@ -1,28 +1,28 @@ -from nltk.corpus import words - - -class TextHelper: - - @classmethod - def has_words(cls, text: str): - if not cls._has_common_words(text): - return False - english_words = set(words.words()) - words_in_input = text.split() - return any(word.lower() in english_words for word in words_in_input) - - @classmethod - def has_x_words(cls, text: str, quantity): - if not cls._has_common_words(text): - return False - english_words = set(words.words()) - words_in_input = text.split() - english_word_count = sum(1 for word in words_in_input if word.lower() in english_words) - return english_word_count >= quantity - - @staticmethod - def _has_common_words(text: str): - english_words = {"the", "be", "to", "of", "and", "a", "in", "that", "have", "i"} - words_in_input = text.split() - english_word_count = sum(1 for word in words_in_input if word.lower() in english_words) - return english_word_count >= 10 +from nltk.corpus import words + + +class TextHelper: + + @classmethod + def has_words(cls, text: str): + if not 
cls._has_common_words(text): + return False + english_words = set(words.words()) + words_in_input = text.split() + return any(word.lower() in english_words for word in words_in_input) + + @classmethod + def has_x_words(cls, text: str, quantity): + if not cls._has_common_words(text): + return False + english_words = set(words.words()) + words_in_input = text.split() + english_word_count = sum(1 for word in words_in_input if word.lower() in english_words) + return english_word_count >= quantity + + @staticmethod + def _has_common_words(text: str): + english_words = {"the", "be", "to", "of", "and", "a", "in", "that", "have", "i"} + words_in_input = text.split() + english_word_count = sum(1 for word in words_in_input if word.lower() in english_words) + return english_word_count >= 10 diff --git a/app/helpers/token_counter.py b/app/helpers/token_counter.py index 239850f..4eba977 100644 --- a/app/helpers/token_counter.py +++ b/app/helpers/token_counter.py @@ -1,89 +1,89 @@ -# This is a work in progress. There are still bugs. Once it is production-ready this will become a full repo. - -import tiktoken -import nltk - - -def count_tokens(text, model_name="gpt-3.5-turbo", debug=False): - """ - Count the number of tokens in a given text string without using the OpenAI API. - - This function tries three methods in the following order: - 1. tiktoken (preferred): Accurate token counting similar to the OpenAI API. - 2. nltk: Token counting using the Natural Language Toolkit library. - 3. split: Simple whitespace-based token counting as a fallback. - - Usage: - ------ - text = "Your text here" - result = count_tokens(text, model_name="gpt-3.5-turbo", debug=True) - print(result) - - Required libraries: - ------------------- - - tiktoken: Install with 'pip install tiktoken' - - nltk: Install with 'pip install nltk' - - Parameters: - ----------- - text : str - The text string for which you want to count tokens. 
- model_name : str, optional - The OpenAI model for which you want to count tokens (default: "gpt-3.5-turbo"). - debug : bool, optional - Set to True to print error messages (default: False). - - Returns: - -------- - result : dict - A dictionary containing the number of tokens and the method used for counting. - """ - - # Try using tiktoken - try: - encoding = tiktoken.encoding_for_model(model_name) - num_tokens = len(encoding.encode(text)) - result = {"n_tokens": num_tokens, "method": "tiktoken"} - return result - except Exception as e: - if debug: - print(f"Error using tiktoken: {e}") - pass - - # Try using nltk - try: - # Passed nltk.download("punkt") to server.py's @asynccontextmanager - tokens = nltk.word_tokenize(text) - result = {"n_tokens": len(tokens), "method": "nltk"} - return result - except Exception as e: - if debug: - print(f"Error using nltk: {e}") - pass - - # If nltk and tiktoken fail, use a simple split-based method - tokens = text.split() - result = {"n_tokens": len(tokens), "method": "split"} - return result - - -class TokenBuffer: - def __init__(self, max_tokens=2048): - self.max_tokens = max_tokens - self.buffer = "" - self.token_lengths = [] - self.token_count = 0 - - def update(self, text, model_name="gpt-3.5-turbo", debug=False): - new_tokens = count_tokens(text, model_name=model_name, debug=debug)["n_tokens"] - self.token_count += new_tokens - self.buffer += text - self.token_lengths.append(new_tokens) - - while self.token_count > self.max_tokens: - removed_tokens = self.token_lengths.pop(0) - self.token_count -= removed_tokens - self.buffer = self.buffer.split(" ", removed_tokens)[-1] - - def get_buffer(self): - return self.buffer +# This is a work in progress. There are still bugs. Once it is production-ready this will become a full repo. + +import tiktoken +import nltk + + +def count_tokens(text, model_name="gpt-3.5-turbo", debug=False): + """ + Count the number of tokens in a given text string without using the OpenAI API. 
+ + This function tries three methods in the following order: + 1. tiktoken (preferred): Accurate token counting similar to the OpenAI API. + 2. nltk: Token counting using the Natural Language Toolkit library. + 3. split: Simple whitespace-based token counting as a fallback. + + Usage: + ------ + text = "Your text here" + result = count_tokens(text, model_name="gpt-3.5-turbo", debug=True) + print(result) + + Required libraries: + ------------------- + - tiktoken: Install with 'pip install tiktoken' + - nltk: Install with 'pip install nltk' + + Parameters: + ----------- + text : str + The text string for which you want to count tokens. + model_name : str, optional + The OpenAI model for which you want to count tokens (default: "gpt-3.5-turbo"). + debug : bool, optional + Set to True to print error messages (default: False). + + Returns: + -------- + result : dict + A dictionary containing the number of tokens and the method used for counting. + """ + + # Try using tiktoken + try: + encoding = tiktoken.encoding_for_model(model_name) + num_tokens = len(encoding.encode(text)) + result = {"n_tokens": num_tokens, "method": "tiktoken"} + return result + except Exception as e: + if debug: + print(f"Error using tiktoken: {e}") + pass + + # Try using nltk + try: + # Passed nltk.download("punkt") to server.py's @asynccontextmanager + tokens = nltk.word_tokenize(text) + result = {"n_tokens": len(tokens), "method": "nltk"} + return result + except Exception as e: + if debug: + print(f"Error using nltk: {e}") + pass + + # If nltk and tiktoken fail, use a simple split-based method + tokens = text.split() + result = {"n_tokens": len(tokens), "method": "split"} + return result + + +class TokenBuffer: + def __init__(self, max_tokens=2048): + self.max_tokens = max_tokens + self.buffer = "" + self.token_lengths = [] + self.token_count = 0 + + def update(self, text, model_name="gpt-3.5-turbo", debug=False): + new_tokens = count_tokens(text, model_name=model_name, 
debug=debug)["n_tokens"] + self.token_count += new_tokens + self.buffer += text + self.token_lengths.append(new_tokens) + + while self.token_count > self.max_tokens: + removed_tokens = self.token_lengths.pop(0) + self.token_count -= removed_tokens + self.buffer = self.buffer.split(" ", removed_tokens)[-1] + + def get_buffer(self): + return self.buffer diff --git a/app/mappers/__init__.py b/app/mappers/__init__.py index bc00787..2f71b3b 100644 --- a/app/mappers/__init__.py +++ b/app/mappers/__init__.py @@ -1,5 +1,5 @@ -from .exam import ExamMapper - -__all__ = [ - "ExamMapper" -] +from .exam import ExamMapper + +__all__ = [ + "ExamMapper" +] diff --git a/app/mappers/exam.py b/app/mappers/exam.py index df26eea..8ebf13a 100644 --- a/app/mappers/exam.py +++ b/app/mappers/exam.py @@ -1,66 +1,66 @@ -from typing import Dict, Any - -from pydantic import ValidationError - -from app.dtos.exam import ( - MultipleChoiceExercise, - FillBlanksExercise, - Part, Exam -) -from app.dtos.sheet import Sheet, Option, MultipleChoiceQuestion, FillBlanksWord - - -class ExamMapper: - - @staticmethod - def map_to_exam_model(response: Dict[str, Any]) -> Exam: - parts = [] - for part in response['parts']: - part_exercises = part['exercises'] - context = part.get('context', None) - - exercises = [] - for exercise in part_exercises: - exercise_type = exercise['type'] - if exercise_type == 'multipleChoice': - exercise_model = MultipleChoiceExercise(**exercise) - elif exercise_type == 'fillBlanks': - exercise_model = FillBlanksExercise(**exercise) - else: - raise ValidationError(f"Unknown exercise type: {exercise_type}") - - exercises.append(exercise_model) - - part_kwargs = {"exercises": exercises} - if context is not None: - part_kwargs["context"] = context - - part_model = Part(**part_kwargs) - parts.append(part_model) - - return Exam(parts=parts) - - @staticmethod - def map_to_sheet(response: Dict[str, Any]) -> Sheet: - components = [] - - for item in response["components"]: - component_type 
= item["type"] - - if component_type == "multipleChoice": - options = [Option(id=opt["id"], text=opt["text"]) for opt in item["options"]] - components.append(MultipleChoiceQuestion( - id=item["id"], - prompt=item["prompt"], - variant=item.get("variant", "text"), - options=options - )) - elif component_type == "fillBlanks": - components.append(FillBlanksWord( - id=item["id"], - options=item["options"] - )) - else: - components.append(item) - - return Sheet(components=components) +from typing import Dict, Any + +from pydantic import ValidationError + +from app.dtos.exam import ( + MultipleChoiceExercise, + FillBlanksExercise, + Part, Exam +) +from app.dtos.sheet import Sheet, Option, MultipleChoiceQuestion, FillBlanksWord + + +class ExamMapper: + + @staticmethod + def map_to_exam_model(response: Dict[str, Any]) -> Exam: + parts = [] + for part in response['parts']: + part_exercises = part['exercises'] + context = part.get('context', None) + + exercises = [] + for exercise in part_exercises: + exercise_type = exercise['type'] + if exercise_type == 'multipleChoice': + exercise_model = MultipleChoiceExercise(**exercise) + elif exercise_type == 'fillBlanks': + exercise_model = FillBlanksExercise(**exercise) + else: + raise ValidationError(f"Unknown exercise type: {exercise_type}") + + exercises.append(exercise_model) + + part_kwargs = {"exercises": exercises} + if context is not None: + part_kwargs["context"] = context + + part_model = Part(**part_kwargs) + parts.append(part_model) + + return Exam(parts=parts) + + @staticmethod + def map_to_sheet(response: Dict[str, Any]) -> Sheet: + components = [] + + for item in response["components"]: + component_type = item["type"] + + if component_type == "multipleChoice": + options = [Option(id=opt["id"], text=opt["text"]) for opt in item["options"]] + components.append(MultipleChoiceQuestion( + id=item["id"], + prompt=item["prompt"], + variant=item.get("variant", "text"), + options=options + )) + elif component_type == 
"fillBlanks": + components.append(FillBlanksWord( + id=item["id"], + options=item["options"] + )) + else: + components.append(item) + + return Sheet(components=components) diff --git a/app/middlewares/__init__.py b/app/middlewares/__init__.py index f4f6ed5..93e83df 100644 --- a/app/middlewares/__init__.py +++ b/app/middlewares/__init__.py @@ -1,9 +1,9 @@ -from .authentication import AuthBackend, AuthenticationMiddleware -from .authorization import Authorized, IsAuthenticatedViaBearerToken - -__all__ = [ - "AuthBackend", - "AuthenticationMiddleware", - "Authorized", - "IsAuthenticatedViaBearerToken" +from .authentication import AuthBackend, AuthenticationMiddleware +from .authorization import Authorized, IsAuthenticatedViaBearerToken + +__all__ = [ + "AuthBackend", + "AuthenticationMiddleware", + "Authorized", + "IsAuthenticatedViaBearerToken" ] \ No newline at end of file diff --git a/app/middlewares/authentication.py b/app/middlewares/authentication.py index fea3f99..1285ce6 100644 --- a/app/middlewares/authentication.py +++ b/app/middlewares/authentication.py @@ -1,48 +1,48 @@ -import os -from typing import Tuple - -import jwt -from jwt import InvalidTokenError -from pydantic import BaseModel, Field -from starlette.authentication import AuthenticationBackend -from starlette.middleware.authentication import ( - AuthenticationMiddleware as BaseAuthenticationMiddleware, -) -from starlette.requests import HTTPConnection - - -class Session(BaseModel): - authenticated: bool = Field(False, description="Is user authenticated?") - - -class AuthBackend(AuthenticationBackend): - async def authenticate( - self, conn: HTTPConnection - ) -> Tuple[bool, Session]: - session = Session() - authorization: str = conn.headers.get("Authorization") - if not authorization: - return False, session - - try: - scheme, token = authorization.split(" ") - if scheme.lower() != "bearer": - return False, session - except ValueError: - return False, session - - jwt_secret_key = 
os.getenv("JWT_SECRET_KEY") - if not jwt_secret_key: - return False, session - - try: - jwt.decode(token, jwt_secret_key, algorithms=["HS256"]) - except InvalidTokenError: - return False, session - - session.authenticated = True - return True, session - - -class AuthenticationMiddleware(BaseAuthenticationMiddleware): - pass +import os +from typing import Tuple + +import jwt +from jwt import InvalidTokenError +from pydantic import BaseModel, Field +from starlette.authentication import AuthenticationBackend +from starlette.middleware.authentication import ( + AuthenticationMiddleware as BaseAuthenticationMiddleware, +) +from starlette.requests import HTTPConnection + + +class Session(BaseModel): + authenticated: bool = Field(False, description="Is user authenticated?") + + +class AuthBackend(AuthenticationBackend): + async def authenticate( + self, conn: HTTPConnection + ) -> Tuple[bool, Session]: + session = Session() + authorization: str = conn.headers.get("Authorization") + if not authorization: + return False, session + + try: + scheme, token = authorization.split(" ") + if scheme.lower() != "bearer": + return False, session + except ValueError: + return False, session + + jwt_secret_key = os.getenv("JWT_SECRET_KEY") + if not jwt_secret_key: + return False, session + + try: + jwt.decode(token, jwt_secret_key, algorithms=["HS256"]) + except InvalidTokenError: + return False, session + + session.authenticated = True + return True, session + + +class AuthenticationMiddleware(BaseAuthenticationMiddleware): + pass diff --git a/app/middlewares/authorization.py b/app/middlewares/authorization.py index 22bc86d..e0c95d1 100644 --- a/app/middlewares/authorization.py +++ b/app/middlewares/authorization.py @@ -1,36 +1,36 @@ -from abc import ABC, abstractmethod -from typing import List, Type - -from fastapi import Request -from fastapi.openapi.models import APIKey, APIKeyIn -from fastapi.security.base import SecurityBase - -from app.exceptions import CustomException, 
UnauthorizedException - - -class BaseAuthorization(ABC): - exception = CustomException - - @abstractmethod - async def has_permission(self, request: Request) -> bool: - pass - - -class IsAuthenticatedViaBearerToken(BaseAuthorization): - exception = UnauthorizedException - - async def has_permission(self, request: Request) -> bool: - return request.user.authenticated - - -class Authorized(SecurityBase): - def __init__(self, permissions: List[Type[BaseAuthorization]]): - self.permissions = permissions - self.model: APIKey = APIKey(**{"in": APIKeyIn.header}, name="Authorization") - self.scheme_name = self.__class__.__name__ - - async def __call__(self, request: Request): - for permission in self.permissions: - cls = permission() - if not await cls.has_permission(request=request): - raise cls.exception +from abc import ABC, abstractmethod +from typing import List, Type + +from fastapi import Request +from fastapi.openapi.models import APIKey, APIKeyIn +from fastapi.security.base import SecurityBase + +from app.exceptions import CustomException, UnauthorizedException + + +class BaseAuthorization(ABC): + exception = CustomException + + @abstractmethod + async def has_permission(self, request: Request) -> bool: + pass + + +class IsAuthenticatedViaBearerToken(BaseAuthorization): + exception = UnauthorizedException + + async def has_permission(self, request: Request) -> bool: + return request.user.authenticated + + +class Authorized(SecurityBase): + def __init__(self, permissions: List[Type[BaseAuthorization]]): + self.permissions = permissions + self.model: APIKey = APIKey(**{"in": APIKeyIn.header}, name="Authorization") + self.scheme_name = self.__class__.__name__ + + async def __call__(self, request: Request): + for permission in self.permissions: + cls = permission() + if not await cls.has_permission(request=request): + raise cls.exception diff --git a/app/repositories/abc/__init__.py b/app/repositories/abc/__init__.py index 55b68f9..bbce6fa 100644 --- 
a/app/repositories/abc/__init__.py +++ b/app/repositories/abc/__init__.py @@ -1,7 +1,7 @@ -from .file_storage import IFileStorage -from .document_store import IDocumentStore - -__all__ = [ - "IFileStorage", - "IDocumentStore" +from .file_storage import IFileStorage +from .document_store import IDocumentStore + +__all__ = [ + "IFileStorage", + "IDocumentStore" ] \ No newline at end of file diff --git a/app/repositories/abc/document_store.py b/app/repositories/abc/document_store.py index 78b0a12..03c041f 100644 --- a/app/repositories/abc/document_store.py +++ b/app/repositories/abc/document_store.py @@ -1,16 +1,15 @@ -from abc import ABC - - -class IDocumentStore(ABC): - - async def save_to_db(self, collection: str, item): - pass - - async def save_to_db_with_id(self, collection: str, item, id: str): - pass - - async def get_all(self, collection: str): - pass - - async def get_doc_by_id(self, collection: str, doc_id: str): - pass +from abc import ABC + +from typing import Dict, Optional, List + + +class IDocumentStore(ABC): + + async def save_to_db(self, collection: str, item: Dict, doc_id: Optional[str]) -> Optional[str]: + pass + + async def get_all(self, collection: str) -> List[Dict]: + pass + + async def get_doc_by_id(self, collection: str, doc_id: str) -> Optional[Dict]: + pass diff --git a/app/repositories/abc/file_storage.py b/app/repositories/abc/file_storage.py index 2efdcfa..ea34959 100644 --- a/app/repositories/abc/file_storage.py +++ b/app/repositories/abc/file_storage.py @@ -1,16 +1,16 @@ -from abc import ABC, abstractmethod - - -class IFileStorage(ABC): - - @abstractmethod - async def download_firebase_file(self, source_blob_name, destination_file_name): - pass - - @abstractmethod - async def upload_file_firebase_get_url(self, destination_blob_name, source_file_name): - pass - - @abstractmethod - async def make_public(self, blob_name: str): - pass +from abc import ABC, abstractmethod + + +class IFileStorage(ABC): + + @abstractmethod + async def 
download_firebase_file(self, source_blob_name, destination_file_name): + pass + + @abstractmethod + async def upload_file_firebase_get_url(self, destination_blob_name, source_file_name): + pass + + @abstractmethod + async def make_public(self, blob_name: str): + pass diff --git a/app/repositories/impl/__init__.py b/app/repositories/impl/__init__.py index 4200d13..5415ab4 100644 --- a/app/repositories/impl/__init__.py +++ b/app/repositories/impl/__init__.py @@ -1,8 +1,8 @@ -from .document_stores import * -from .firebase import FirebaseStorage - -__all__ = [ - "FirebaseStorage" -] - -__all__.extend(document_stores.__all__) +from .document_stores import * +from app.repositories.impl.file_storage.firebase import FirebaseStorage + +__all__ = [ + "FirebaseStorage" +] + +__all__.extend(document_stores.__all__) diff --git a/app/repositories/impl/document_stores/__init__.py b/app/repositories/impl/document_stores/__init__.py index 585da39..ccea6ee 100644 --- a/app/repositories/impl/document_stores/__init__.py +++ b/app/repositories/impl/document_stores/__init__.py @@ -1,7 +1,7 @@ -from .firestore import Firestore -#from .mongo import MongoDB - -__all__ = [ - "Firestore", - #"MongoDB" -] +from .firestore import Firestore +#from .mongo import MongoDB + +__all__ = [ + "Firestore", + #"MongoDB" +] diff --git a/app/repositories/impl/document_stores/firestore.py b/app/repositories/impl/document_stores/firestore.py index 30f4e3e..db7c4cb 100644 --- a/app/repositories/impl/document_stores/firestore.py +++ b/app/repositories/impl/document_stores/firestore.py @@ -1,47 +1,47 @@ -import logging -from google.cloud.firestore_v1.async_client import AsyncClient -from google.cloud.firestore_v1.async_collection import AsyncCollectionReference -from google.cloud.firestore_v1.async_document import AsyncDocumentReference -from app.repositories.abc import IDocumentStore - - -class Firestore(IDocumentStore): - def __init__(self, client: AsyncClient): - self._client = client - self._logger = 
logging.getLogger(__name__) - - async def save_to_db(self, collection: str, item): - collection_ref: AsyncCollectionReference = self._client.collection(collection) - update_time, document_ref = await collection_ref.add(item) - if document_ref: - self._logger.info(f"Document added with ID: {document_ref.id}") - return document_ref.id - else: - return None - - async def save_to_db_with_id(self, collection: str, item, id: str): - collection_ref: AsyncCollectionReference = self._client.collection(collection) - document_ref: AsyncDocumentReference = collection_ref.document(id) - await document_ref.set(item) - doc_snapshot = await document_ref.get() - if doc_snapshot.exists: - self._logger.info(f"Document added with ID: {document_ref.id}") - return document_ref.id - else: - return None - - async def get_all(self, collection: str): - collection_ref: AsyncCollectionReference = self._client.collection(collection) - docs = [] - async for doc in collection_ref.stream(): - docs.append(doc.to_dict()) - return docs - - async def get_doc_by_id(self, collection: str, doc_id: str): - collection_ref: AsyncCollectionReference = self._client.collection(collection) - doc_ref: AsyncDocumentReference = collection_ref.document(doc_id) - doc = await doc_ref.get() - - if doc.exists: - return doc.to_dict() - return None +import logging +from typing import Optional, List, Dict + +from google.cloud.firestore_v1.async_client import AsyncClient +from google.cloud.firestore_v1.async_collection import AsyncCollectionReference +from google.cloud.firestore_v1.async_document import AsyncDocumentReference +from app.repositories.abc import IDocumentStore + + +class Firestore(IDocumentStore): + def __init__(self, client: AsyncClient): + self._client = client + self._logger = logging.getLogger(__name__) + + async def save_to_db(self, collection: str, item, doc_id: Optional[str] = None) -> Optional[str]: + collection_ref: AsyncCollectionReference = self._client.collection(collection) + + if doc_id: + 
document_ref: AsyncDocumentReference = collection_ref.document(doc_id) + await document_ref.set(item) + doc_snapshot = await document_ref.get() + if doc_snapshot.exists: + self._logger.info(f"Document added with ID: {document_ref.id}") + return document_ref.id + else: + update_time, document_ref = await collection_ref.add(item) + if document_ref: + self._logger.info(f"Document added with ID: {document_ref.id}") + return document_ref.id + + return None + + async def get_all(self, collection: str) -> List[Dict]: + collection_ref: AsyncCollectionReference = self._client.collection(collection) + docs = [] + async for doc in collection_ref.stream(): + docs.append(doc.to_dict()) + return docs + + async def get_doc_by_id(self, collection: str, doc_id: str) -> Optional[Dict]: + collection_ref: AsyncCollectionReference = self._client.collection(collection) + doc_ref: AsyncDocumentReference = collection_ref.document(doc_id) + doc = await doc_ref.get() + + if doc.exists: + return doc.to_dict() + return None diff --git a/app/repositories/impl/document_stores/mongo.py b/app/repositories/impl/document_stores/mongo.py index 1353dcf..c1f1097 100644 --- a/app/repositories/impl/document_stores/mongo.py +++ b/app/repositories/impl/document_stores/mongo.py @@ -1,36 +1,37 @@ -"""import logging -from pymongo import MongoClient - -from app.repositories.abc import IDocumentStore - - -class MongoDB(IDocumentStore): - - def __init__(self, client: MongoClient): - self._client = client - self._logger = logging.getLogger(__name__) - - def save_to_db(self, collection: str, item): - collection_ref = self._client[collection] - result = collection_ref.insert_one(item) - if result.inserted_id: - self._logger.info(f"Document added with ID: {result.inserted_id}") - return True, str(result.inserted_id) - else: - return False, None - - def save_to_db_with_id(self, collection: str, item, doc_id: str): - collection_ref = self._client[collection] - item['_id'] = doc_id - result = 
collection_ref.replace_one({'_id': id}, item, upsert=True) - if result.upserted_id or result.matched_count: - self._logger.info(f"Document added with ID: {doc_id}") - return True, doc_id - else: - return False, None - - def get_all(self, collection: str): - collection_ref = self._client[collection] - all_documents = list(collection_ref.find()) - return all_documents -""" \ No newline at end of file +import logging +import uuid +from typing import Optional, List, Dict + +from motor.motor_asyncio import AsyncIOMotorDatabase + +from app.repositories.abc import IDocumentStore + + +class MongoDB(IDocumentStore): + + def __init__(self, mongo_db: AsyncIOMotorDatabase): + self._mongo_db = mongo_db + self._logger = logging.getLogger(__name__) + + async def save_to_db(self, collection: str, item, doc_id: Optional[str] = None) -> Optional[str]: + collection_ref = self._mongo_db[collection] + + if doc_id is None: + doc_id = str(uuid.uuid4()) + + item['id'] = doc_id + + result = await collection_ref.insert_one(item) + if result.inserted_id: + # returning id instead of _id + self._logger.info(f"Document added with ID: {doc_id}") + return doc_id + + return None + + async def get_all(self, collection: str) -> List[Dict]: + cursor = self._mongo_db[collection].find() + return [document async for document in cursor] + + async def get_doc_by_id(self, collection: str, doc_id: str) -> Optional[Dict]: + return await self._mongo_db[collection].find_one({"id": doc_id}) diff --git a/app/repositories/impl/file_storage/__init__.py b/app/repositories/impl/file_storage/__init__.py new file mode 100644 index 0000000..f1b6b00 --- /dev/null +++ b/app/repositories/impl/file_storage/__init__.py @@ -0,0 +1,5 @@ +from .firebase import FirebaseStorage + +__all__ = [ + "FirebaseStorage" +] diff --git a/app/repositories/impl/firebase.py b/app/repositories/impl/file_storage/firebase.py similarity index 97% rename from app/repositories/impl/firebase.py rename to 
app/repositories/impl/file_storage/firebase.py index 30d9552..07c7adf 100644 --- a/app/repositories/impl/firebase.py +++ b/app/repositories/impl/file_storage/firebase.py @@ -1,83 +1,83 @@ -import logging -from typing import Optional - -import aiofiles -from httpx import AsyncClient - -from app.repositories.abc import IFileStorage - - -class FirebaseStorage(IFileStorage): - - def __init__(self, client: AsyncClient, token: str, bucket: str): - self._httpx_client = client - self._token = token - self._storage_url = f'https://firebasestorage.googleapis.com/v0/b/{bucket}' - self._logger = logging.getLogger(__name__) - - async def download_firebase_file(self, source_blob_name: str, destination_file_name: str) -> Optional[str]: - source_blob_name = source_blob_name.replace('/', '%2F') - download_url = f"{self._storage_url}/o/{source_blob_name}?alt=media" - - response = await self._httpx_client.get( - download_url, - headers={'Authorization': f'Firebase {self._token}'} - ) - - if response.status_code == 200: - async with aiofiles.open(destination_file_name, 'wb') as file: - await file.write(response.content) - self._logger.info(f"File downloaded to {destination_file_name}") - return destination_file_name - else: - self._logger.error(f"Failed to download blob {source_blob_name}. 
{response.status_code} - {response.content}") - return None - - async def upload_file_firebase_get_url(self, destination_blob_name: str, source_file_name: str) -> Optional[str]: - destination_blob_name = destination_blob_name.replace('/', '%2F') - upload_url = f"{self._storage_url}/o/{destination_blob_name}" - - async with aiofiles.open(source_file_name, 'rb') as file: - file_bytes = await file.read() - - response = await self._httpx_client.post( - upload_url, - headers={ - 'Authorization': f'Firebase {self._token}', - "X-Goog-Upload-Protocol": "multipart" - }, - files={ - 'metadata': (None, '{"metadata":{"test":"testMetadata"}}', 'application/json'), - 'file': file_bytes - } - ) - - if response.status_code == 200: - self._logger.info(f"File {source_file_name} uploaded to {self._storage_url}/o/{destination_blob_name}.") - - # TODO: Test this - #await self.make_public(destination_blob_name) - - file_url = f"{self._storage_url}/o/{destination_blob_name}" - return file_url - else: - self._logger.error(f"Failed to upload file {source_file_name}. Error: {response.status_code} - {str(response.content)}") - return None - - async def make_public(self, destination_blob_name: str): - acl_url = f"{self._storage_url}/o/{destination_blob_name}/acl" - acl = {'entity': 'allUsers', 'role': 'READER'} - - response = await self._httpx_client.post( - acl_url, - headers={ - 'Authorization': f'Bearer {self._token}', - 'Content-Type': 'application/json' - }, - json=acl - ) - - if response.status_code == 200: - self._logger.info(f"Blob {destination_blob_name} is now public.") - else: - self._logger.error(f"Failed to make blob {destination_blob_name} public. 
{response.status_code} - {response.content}") +import logging +from typing import Optional + +import aiofiles +from httpx import AsyncClient + +from app.repositories.abc import IFileStorage + + +class FirebaseStorage(IFileStorage): + + def __init__(self, client: AsyncClient, token: str, bucket: str): + self._httpx_client = client + self._token = token + self._storage_url = f'https://firebasestorage.googleapis.com/v0/b/{bucket}' + self._logger = logging.getLogger(__name__) + + async def download_firebase_file(self, source_blob_name: str, destination_file_name: str) -> Optional[str]: + source_blob_name = source_blob_name.replace('/', '%2F') + download_url = f"{self._storage_url}/o/{source_blob_name}?alt=media" + + response = await self._httpx_client.get( + download_url, + headers={'Authorization': f'Firebase {self._token}'} + ) + + if response.status_code == 200: + async with aiofiles.open(destination_file_name, 'wb') as file: + await file.write(response.content) + self._logger.info(f"File downloaded to {destination_file_name}") + return destination_file_name + else: + self._logger.error(f"Failed to download blob {source_blob_name}. 
{response.status_code} - {response.content}") + return None + + async def upload_file_firebase_get_url(self, destination_blob_name: str, source_file_name: str) -> Optional[str]: + destination_blob_name = destination_blob_name.replace('/', '%2F') + upload_url = f"{self._storage_url}/o/{destination_blob_name}" + + async with aiofiles.open(source_file_name, 'rb') as file: + file_bytes = await file.read() + + response = await self._httpx_client.post( + upload_url, + headers={ + 'Authorization': f'Firebase {self._token}', + "X-Goog-Upload-Protocol": "multipart" + }, + files={ + 'metadata': (None, '{"metadata":{"test":"testMetadata"}}', 'application/json'), + 'file': file_bytes + } + ) + + if response.status_code == 200: + self._logger.info(f"File {source_file_name} uploaded to {self._storage_url}/o/{destination_blob_name}.") + + # TODO: Test this + #await self.make_public(destination_blob_name) + + file_url = f"{self._storage_url}/o/{destination_blob_name}" + return file_url + else: + self._logger.error(f"Failed to upload file {source_file_name}. Error: {response.status_code} - {str(response.content)}") + return None + + async def make_public(self, destination_blob_name: str): + acl_url = f"{self._storage_url}/o/{destination_blob_name}/acl" + acl = {'entity': 'allUsers', 'role': 'READER'} + + response = await self._httpx_client.post( + acl_url, + headers={ + 'Authorization': f'Bearer {self._token}', + 'Content-Type': 'application/json' + }, + json=acl + ) + + if response.status_code == 200: + self._logger.info(f"Blob {destination_blob_name} is now public.") + else: + self._logger.error(f"Failed to make blob {destination_blob_name} public. 
import json
import os
import pathlib
import logging.config
import logging.handlers

import aioboto3
import contextlib
from contextlib import asynccontextmanager
from collections import defaultdict
from typing import List
from http import HTTPStatus

import httpx
import whisper
from fastapi import FastAPI, Request
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
from fastapi.middleware import Middleware
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse

import nltk
from starlette import status

from app.api import router
from app.configs import DependencyInjector
from app.exceptions import CustomException
from app.middlewares import AuthenticationMiddleware, AuthBackend


@asynccontextmanager
async def lifespan(_app: FastAPI):
    """
    Startup and Shutdown logic is in this lifespan method

    https://fastapi.tiangolo.com/advanced/events/
    """
    # Whisper model
    whisper_model = whisper.load_model("base")

    # NLTK required datasets download
    nltk.download('words')
    nltk.download("punkt")

    # AWS Polly client instantiation
    context_stack = contextlib.AsyncExitStack()
    session = aioboto3.Session()
    polly_client = await context_stack.enter_async_context(
        session.client(
            'polly',
            region_name='eu-west-1',
            # BUG FIX: the env-var names were crossed — the secret key was read
            # from AWS_ACCESS_KEY_ID and the access-key id from
            # AWS_SECRET_ACCESS_KEY, so Polly authenticated with swapped
            # credentials.  Each kwarg now reads its matching variable.
            aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
            aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY")
        )
    )

    http_client = httpx.AsyncClient()

    DependencyInjector(
        polly_client,
        http_client,
        whisper_model
    ).inject()

    # Setup logging
    config_file = pathlib.Path("./app/configs/logging/logging_config.json")
    with open(config_file) as f_in:
        config = json.load(f_in)

    logging.config.dictConfig(config)

    yield

    # Shutdown: release clients.
    # NOTE(review): polly_client was entered on context_stack, so aclose()
    # already exits it — the explicit close() before it is redundant but
    # harmless; kept to preserve current shutdown order.
    await http_client.aclose()
    await polly_client.close()
    await context_stack.aclose()


def setup_listeners(_app: FastAPI) -> None:
    """Register the app-wide exception handlers."""

    @_app.exception_handler(RequestValidationError)
    async def custom_form_validation_error(request, exc):
        """
        Don't delete request param
        """
        # Re-shape pydantic's error list into {field: [messages]} so clients
        # get one entry per offending field.
        reformatted_message = defaultdict(list)
        for pydantic_error in exc.errors():
            loc, msg = pydantic_error["loc"], pydantic_error["msg"]
            filtered_loc = loc[1:] if loc[0] in ("body", "query", "path") else loc
            field_string = ".".join(filtered_loc)
            # A missing/invalid refresh-token cookie is an auth failure, not a
            # validation error — short-circuit to 401.
            if field_string == "cookie.refresh_token":
                return JSONResponse(
                    status_code=401,
                    content={"error_code": 401, "message": HTTPStatus.UNAUTHORIZED.description},
                )
            reformatted_message[field_string].append(msg)

        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content=jsonable_encoder(
                {"details": "Invalid request!", "errors": reformatted_message}
            ),
        )

    @_app.exception_handler(CustomException)
    async def custom_exception_handler(request: Request, exc: CustomException):
        """
        Don't delete request param
        """
        return JSONResponse(
            status_code=exc.code,
            content={"error_code": exc.error_code, "message": exc.message},
        )

    @_app.exception_handler(Exception)
    async def default_exception_handler(request: Request, exc: Exception):
        """
        Don't delete request param
        """
        return JSONResponse(
            status_code=500,
            content=str(exc),
        )


def setup_middleware() -> List[Middleware]:
    """Build the middleware stack: permissive CORS, then authentication."""
    middleware = [
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        ),
        Middleware(
            AuthenticationMiddleware,
            backend=AuthBackend()
        )
    ]
    return middleware


def create_app() -> FastAPI:
    """Application factory: API docs are disabled in production."""
    env = os.getenv("ENV")
    _app = FastAPI(
        docs_url="/docs" if env != "production" else None,
        redoc_url="/redoc" if env != "production" else None,
        middleware=setup_middleware(),
        lifespan=lifespan
    )
    _app.include_router(router)
    setup_listeners(_app)
    return _app


app = create_app()
a/app/services/abc/__init__.py b/app/services/abc/__init__.py index b8130df..368c511 100644 --- a/app/services/abc/__init__.py +++ b/app/services/abc/__init__.py @@ -1,20 +1,11 @@ -from .level import ILevelService -from .listening import IListeningService -from .writing import IWritingService -from .speaking import ISpeakingService -from .reading import IReadingService -from .grade import IGradeService -from .training import ITrainingService -from .kb import IKnowledgeBase -from .third_parties import * - -__all__ = [ - "ILevelService", - "IListeningService", - "IWritingService", - "ISpeakingService", - "IReadingService", - "IGradeService", - "ITrainingService" -] -__all__.extend(third_parties.__all__) +from .third_parties import * +from .exam import * +from .training import * +from .user import IUserService + +__all__ = [ + "IUserService" +] +__all__.extend(third_parties.__all__) +__all__.extend(exam.__all__) +__all__.extend(training.__all__) diff --git a/app/services/abc/exam/__init__.py b/app/services/abc/exam/__init__.py new file mode 100644 index 0000000..1f93263 --- /dev/null +++ b/app/services/abc/exam/__init__.py @@ -0,0 +1,15 @@ +from .level import ILevelService +from .listening import IListeningService +from .writing import IWritingService +from .speaking import ISpeakingService +from .reading import IReadingService +from .grade import IGradeService + +__all__ = [ + "ILevelService", + "IListeningService", + "IWritingService", + "ISpeakingService", + "IReadingService", + "IGradeService", +] diff --git a/app/services/abc/grade.py b/app/services/abc/exam/grade.py similarity index 95% rename from app/services/abc/grade.py rename to app/services/abc/exam/grade.py index 9ee89eb..e429419 100644 --- a/app/services/abc/grade.py +++ b/app/services/abc/exam/grade.py @@ -1,13 +1,13 @@ -from abc import ABC, abstractmethod -from typing import Dict, List - - -class IGradeService(ABC): - - @abstractmethod - async def grade_short_answers(self, data: Dict): - pass - - 
@abstractmethod - async def calculate_grading_summary(self, extracted_sections: List): - pass +from abc import ABC, abstractmethod +from typing import Dict, List + + +class IGradeService(ABC): + + @abstractmethod + async def grade_short_answers(self, data: Dict): + pass + + @abstractmethod + async def calculate_grading_summary(self, extracted_sections: List): + pass diff --git a/app/services/abc/level.py b/app/services/abc/exam/level.py similarity index 96% rename from app/services/abc/level.py rename to app/services/abc/exam/level.py index 7f7d954..758f090 100644 --- a/app/services/abc/level.py +++ b/app/services/abc/exam/level.py @@ -1,47 +1,47 @@ -from abc import ABC, abstractmethod -import random - -from typing import Dict - -from fastapi import UploadFile - -from app.configs.constants import EducationalContent - - -class ILevelService(ABC): - - @abstractmethod - async def get_level_exam( - self, number_of_exercises: int = 25, min_timer: int = 25, diagnostic: bool = False - ) -> Dict: - pass - - @abstractmethod - async def get_level_utas(self): - pass - - @abstractmethod - async def get_custom_level(self, data: Dict): - pass - - @abstractmethod - async def upload_level(self, upload: UploadFile) -> Dict: - pass - - @abstractmethod - async def gen_multiple_choice( - self, mc_variant: str, quantity: int, start_id: int = 1, *, utas: bool = False, all_exams=None - ): - pass - - @abstractmethod - async def gen_blank_space_text_utas( - self, quantity: int, start_id: int, size: int, topic=random.choice(EducationalContent.MTI_TOPICS) - ): - pass - - @abstractmethod - async def gen_reading_passage_utas( - self, start_id, sa_quantity: int, mc_quantity: int, topic=random.choice(EducationalContent.MTI_TOPICS) - ): - pass +from abc import ABC, abstractmethod +import random + +from typing import Dict + +from fastapi import UploadFile + +from app.configs.constants import EducationalContent + + +class ILevelService(ABC): + + @abstractmethod + async def get_level_exam( + self, 
number_of_exercises: int = 25, min_timer: int = 25, diagnostic: bool = False + ) -> Dict: + pass + + @abstractmethod + async def get_level_utas(self): + pass + + @abstractmethod + async def get_custom_level(self, data: Dict): + pass + + @abstractmethod + async def upload_level(self, upload: UploadFile) -> Dict: + pass + + @abstractmethod + async def gen_multiple_choice( + self, mc_variant: str, quantity: int, start_id: int = 1, *, utas: bool = False, all_exams=None + ): + pass + + @abstractmethod + async def gen_blank_space_text_utas( + self, quantity: int, start_id: int, size: int, topic=random.choice(EducationalContent.MTI_TOPICS) + ): + pass + + @abstractmethod + async def gen_reading_passage_utas( + self, start_id, sa_quantity: int, mc_quantity: int, topic=random.choice(EducationalContent.MTI_TOPICS) + ): + pass diff --git a/app/services/abc/listening.py b/app/services/abc/exam/listening.py similarity index 96% rename from app/services/abc/listening.py rename to app/services/abc/exam/listening.py index 4654fde..497f5c4 100644 --- a/app/services/abc/listening.py +++ b/app/services/abc/exam/listening.py @@ -1,18 +1,18 @@ -import queue -from abc import ABC, abstractmethod -from queue import Queue -from typing import Dict, List - - -class IListeningService(ABC): - - @abstractmethod - async def get_listening_question( - self, section_id: int, topic: str, req_exercises: List[str], difficulty: str, - number_of_exercises_q=queue.Queue(), start_id=-1 - ): - pass - - @abstractmethod - async def save_listening(self, parts: list[dict], min_timer: int, difficulty: str, listening_id: str) -> Dict: - pass +import queue +from abc import ABC, abstractmethod +from queue import Queue +from typing import Dict, List + + +class IListeningService(ABC): + + @abstractmethod + async def get_listening_question( + self, section_id: int, topic: str, req_exercises: List[str], difficulty: str, + number_of_exercises_q=queue.Queue(), start_id=-1 + ): + pass + + @abstractmethod + async def 
save_listening(self, parts: list[dict], min_timer: int, difficulty: str, listening_id: str) -> Dict: + pass diff --git a/app/services/abc/reading.py b/app/services/abc/exam/reading.py similarity index 95% rename from app/services/abc/reading.py rename to app/services/abc/exam/reading.py index 70c672a..4af7588 100644 --- a/app/services/abc/reading.py +++ b/app/services/abc/exam/reading.py @@ -1,22 +1,22 @@ -from abc import ABC, abstractmethod -from queue import Queue -from typing import List - - -class IReadingService(ABC): - - @abstractmethod - async def gen_reading_passage( - self, - passage_id: int, - topic: str, - req_exercises: List[str], - number_of_exercises_q: Queue, - difficulty: str, - start_id: int - ): - pass - - @abstractmethod - async def generate_reading_passage(self, part: int, topic: str, word_count: int = 800): - pass +from abc import ABC, abstractmethod +from queue import Queue +from typing import List + + +class IReadingService(ABC): + + @abstractmethod + async def gen_reading_passage( + self, + passage_id: int, + topic: str, + req_exercises: List[str], + number_of_exercises_q: Queue, + difficulty: str, + start_id: int + ): + pass + + @abstractmethod + async def generate_reading_passage(self, part: int, topic: str, word_count: int = 800): + pass diff --git a/app/services/abc/speaking.py b/app/services/abc/exam/speaking.py similarity index 96% rename from app/services/abc/speaking.py rename to app/services/abc/exam/speaking.py index 48d0fa8..07ef32e 100644 --- a/app/services/abc/speaking.py +++ b/app/services/abc/exam/speaking.py @@ -1,29 +1,29 @@ -from abc import ABC, abstractmethod -from typing import List, Dict, Optional - - -class ISpeakingService(ABC): - - @abstractmethod - async def get_speaking_part( - self, part: int, topic: str, difficulty: str, second_topic: Optional[str] = None - ) -> Dict: - pass - - @abstractmethod - async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: - pass - - @abstractmethod - async def 
create_videos_and_save_to_db(self, exercises: List[Dict], template: Dict, req_id: str): - pass - - @abstractmethod - async def generate_video( - self, part: int, avatar: str, topic: str, questions: list[str], - *, - second_topic: Optional[str] = None, - prompts: Optional[list[str]] = None, - suffix: Optional[str] = None, - ): - pass +from abc import ABC, abstractmethod +from typing import List, Dict, Optional + + +class ISpeakingService(ABC): + + @abstractmethod + async def get_speaking_part( + self, part: int, topic: str, difficulty: str, second_topic: Optional[str] = None + ) -> Dict: + pass + + @abstractmethod + async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: + pass + + @abstractmethod + async def create_videos_and_save_to_db(self, exercises: List[Dict], template: Dict, req_id: str): + pass + + @abstractmethod + async def generate_video( + self, part: int, avatar: str, topic: str, questions: list[str], + *, + second_topic: Optional[str] = None, + prompts: Optional[list[str]] = None, + suffix: Optional[str] = None, + ): + pass diff --git a/app/services/abc/writing.py b/app/services/abc/exam/writing.py similarity index 96% rename from app/services/abc/writing.py rename to app/services/abc/exam/writing.py index a59d442..3f9c47e 100644 --- a/app/services/abc/writing.py +++ b/app/services/abc/exam/writing.py @@ -1,11 +1,11 @@ -from abc import ABC, abstractmethod - -class IWritingService(ABC): - - @abstractmethod - async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): - pass - - @abstractmethod - async def grade_writing_task(self, task: int, question: str, answer: str): - pass +from abc import ABC, abstractmethod + +class IWritingService(ABC): + + @abstractmethod + async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): + pass + + @abstractmethod + async def grade_writing_task(self, task: int, question: str, answer: str): + pass diff --git 
a/app/services/abc/third_parties/__init__.py b/app/services/abc/third_parties/__init__.py index 48a314b..f69c71d 100644 --- a/app/services/abc/third_parties/__init__.py +++ b/app/services/abc/third_parties/__init__.py @@ -1,13 +1,13 @@ -from .stt import ISpeechToTextService -from .tts import ITextToSpeechService -from .llm import ILLMService -from .vid_gen import IVideoGeneratorService -from .ai_detector import IAIDetectorService - -__all__ = [ - "ISpeechToTextService", - "ITextToSpeechService", - "ILLMService", - "IVideoGeneratorService", - "IAIDetectorService" -] +from .stt import ISpeechToTextService +from .tts import ITextToSpeechService +from .llm import ILLMService +from .vid_gen import IVideoGeneratorService +from .ai_detector import IAIDetectorService + +__all__ = [ + "ISpeechToTextService", + "ITextToSpeechService", + "ILLMService", + "IVideoGeneratorService", + "IAIDetectorService" +] diff --git a/app/services/abc/third_parties/ai_detector.py b/app/services/abc/third_parties/ai_detector.py index e095210..71939dd 100644 --- a/app/services/abc/third_parties/ai_detector.py +++ b/app/services/abc/third_parties/ai_detector.py @@ -1,13 +1,13 @@ -from abc import ABC, abstractmethod -from typing import Dict, Optional - - -class IAIDetectorService(ABC): - - @abstractmethod - async def run_detection(self, text: str): - pass - - @abstractmethod - def _parse_detection(self, response: Dict) -> Optional[Dict]: - pass +from abc import ABC, abstractmethod +from typing import Dict, Optional + + +class IAIDetectorService(ABC): + + @abstractmethod + async def run_detection(self, text: str): + pass + + @abstractmethod + def _parse_detection(self, response: Dict) -> Optional[Dict]: + pass diff --git a/app/services/abc/third_parties/llm.py b/app/services/abc/third_parties/llm.py index 38ba83c..4baf089 100644 --- a/app/services/abc/third_parties/llm.py +++ b/app/services/abc/third_parties/llm.py @@ -1,38 +1,38 @@ -from abc import ABC, abstractmethod -from typing import List, 
Optional, TypeVar, Callable - -from openai.types.chat import ChatCompletionMessageParam -from pydantic import BaseModel - -T = TypeVar('T', bound=BaseModel) - -class ILLMService(ABC): - - @abstractmethod - async def prediction( - self, - model: str, - messages: List, - fields_to_check: Optional[List[str]], - temperature: float, - check_blacklisted: bool = True, - token_count: int = -1 - ): - pass - - @abstractmethod - async def prediction_override(self, **kwargs): - pass - - @abstractmethod - async def pydantic_prediction( - self, - messages: List[ChatCompletionMessageParam], - map_to_model: Callable, - json_scheme: str, - *, - model: Optional[str] = None, - temperature: Optional[float] = None, - max_retries: int = 3 - ) -> List[T] | T | None: - pass +from abc import ABC, abstractmethod +from typing import List, Optional, TypeVar, Callable + +from openai.types.chat import ChatCompletionMessageParam +from pydantic import BaseModel + +T = TypeVar('T', bound=BaseModel) + +class ILLMService(ABC): + + @abstractmethod + async def prediction( + self, + model: str, + messages: List, + fields_to_check: Optional[List[str]], + temperature: float, + check_blacklisted: bool = True, + token_count: int = -1 + ): + pass + + @abstractmethod + async def prediction_override(self, **kwargs): + pass + + @abstractmethod + async def pydantic_prediction( + self, + messages: List[ChatCompletionMessageParam], + map_to_model: Callable, + json_scheme: str, + *, + model: Optional[str] = None, + temperature: Optional[float] = None, + max_retries: int = 3 + ) -> List[T] | T | None: + pass diff --git a/app/services/abc/third_parties/stt.py b/app/services/abc/third_parties/stt.py index 7aec50d..7fce30a 100644 --- a/app/services/abc/third_parties/stt.py +++ b/app/services/abc/third_parties/stt.py @@ -1,8 +1,8 @@ -from abc import ABC, abstractmethod - - -class ISpeechToTextService(ABC): - - @abstractmethod - async def speech_to_text(self, file_path): - pass +from abc import ABC, abstractmethod + + 
+class ISpeechToTextService(ABC): + + @abstractmethod + async def speech_to_text(self, file_path): + pass diff --git a/app/services/abc/third_parties/tts.py b/app/services/abc/third_parties/tts.py index bb375f9..c28cd42 100644 --- a/app/services/abc/third_parties/tts.py +++ b/app/services/abc/third_parties/tts.py @@ -1,22 +1,22 @@ -from abc import ABC, abstractmethod -from typing import Union - - -class ITextToSpeechService(ABC): - - @abstractmethod - async def synthesize_speech(self, text: str, voice: str, engine: str, output_format: str): - pass - - @abstractmethod - async def text_to_speech(self, text: Union[list[str], str], file_name: str): - pass - - @abstractmethod - async def _conversation_to_speech(self, conversation: list): - pass - - @abstractmethod - async def _text_to_speech(self, text: str): - pass - +from abc import ABC, abstractmethod +from typing import Union + + +class ITextToSpeechService(ABC): + + @abstractmethod + async def synthesize_speech(self, text: str, voice: str, engine: str, output_format: str): + pass + + @abstractmethod + async def text_to_speech(self, text: Union[list[str], str], file_name: str): + pass + + @abstractmethod + async def _conversation_to_speech(self, conversation: list): + pass + + @abstractmethod + async def _text_to_speech(self, text: str): + pass + diff --git a/app/services/abc/third_parties/vid_gen.py b/app/services/abc/third_parties/vid_gen.py index 01dee64..31f6831 100644 --- a/app/services/abc/third_parties/vid_gen.py +++ b/app/services/abc/third_parties/vid_gen.py @@ -1,10 +1,10 @@ -from abc import ABC, abstractmethod - -from app.configs.constants import AvatarEnum - - -class IVideoGeneratorService(ABC): - - @abstractmethod - async def create_video(self, text: str, avatar: str): - pass +from abc import ABC, abstractmethod + +from app.configs.constants import AvatarEnum + + +class IVideoGeneratorService(ABC): + + @abstractmethod + async def create_video(self, text: str, avatar: str): + pass diff --git 
a/app/services/abc/training/__init__.py b/app/services/abc/training/__init__.py new file mode 100644 index 0000000..e1f6408 --- /dev/null +++ b/app/services/abc/training/__init__.py @@ -0,0 +1,7 @@ +from .training import ITrainingService +from .kb import IKnowledgeBase + +__all__ = [ + "ITrainingService", + "IKnowledgeBase" +] diff --git a/app/services/abc/kb.py b/app/services/abc/training/kb.py similarity index 95% rename from app/services/abc/kb.py rename to app/services/abc/training/kb.py index 4568c0c..b6c25ef 100644 --- a/app/services/abc/kb.py +++ b/app/services/abc/training/kb.py @@ -1,10 +1,10 @@ -from abc import ABC, abstractmethod - -from typing import List, Dict - - -class IKnowledgeBase(ABC): - - @abstractmethod - def query_knowledge_base(self, query: str, category: str, top_k: int = 5) -> List[Dict[str, str]]: - pass +from abc import ABC, abstractmethod + +from typing import List, Dict + + +class IKnowledgeBase(ABC): + + @abstractmethod + def query_knowledge_base(self, query: str, category: str, top_k: int = 5) -> List[Dict[str, str]]: + pass diff --git a/app/services/abc/training.py b/app/services/abc/training/training.py similarity index 95% rename from app/services/abc/training.py rename to app/services/abc/training/training.py index bb62f01..fdae679 100644 --- a/app/services/abc/training.py +++ b/app/services/abc/training/training.py @@ -1,14 +1,14 @@ -from abc import ABC, abstractmethod - -from typing import Dict - - -class ITrainingService(ABC): - - @abstractmethod - async def fetch_tips(self, context: str, question: str, answer: str, correct_answer: str): - pass - - @abstractmethod - async def get_training_content(self, training_content: Dict) -> Dict: - pass +from abc import ABC, abstractmethod + +from typing import Dict + + +class ITrainingService(ABC): + + @abstractmethod + async def fetch_tips(self, context: str, question: str, answer: str, correct_answer: str): + pass + + @abstractmethod + async def get_training_content(self, 
training_content: Dict) -> Dict: + pass diff --git a/app/services/abc/user.py b/app/services/abc/user.py new file mode 100644 index 0000000..2472f5c --- /dev/null +++ b/app/services/abc/user.py @@ -0,0 +1,10 @@ +from abc import ABC, abstractmethod + +from app.dtos.user_batch import BatchUsersDTO + + +class IUserService(ABC): + + @abstractmethod + async def fetch_tips(self, batch: BatchUsersDTO): + pass diff --git a/app/services/impl/__init__.py b/app/services/impl/__init__.py index f0c65cb..0b0d4c0 100644 --- a/app/services/impl/__init__.py +++ b/app/services/impl/__init__.py @@ -1,19 +1,11 @@ -from .level import LevelService -from .listening import ListeningService -from .reading import ReadingService -from .speaking import SpeakingService -from .writing import WritingService -from .grade import GradeService -from .training import * -from .third_parties import * - -__all__ = [ - "LevelService", - "ListeningService", - "ReadingService", - "SpeakingService", - "WritingService", - "GradeService", -] -__all__.extend(third_parties.__all__) -__all__.extend(training.__all__) +from .user import UserService +from .training import * +from .third_parties import * +from .exam import * + +__all__ = [ + "UserService" +] +__all__.extend(third_parties.__all__) +__all__.extend(training.__all__) +__all__.extend(exam.__all__) diff --git a/app/services/impl/exam/__init__.py b/app/services/impl/exam/__init__.py new file mode 100644 index 0000000..fd3ebc8 --- /dev/null +++ b/app/services/impl/exam/__init__.py @@ -0,0 +1,16 @@ +from .level import LevelService +from .listening import ListeningService +from .reading import ReadingService +from .speaking import SpeakingService +from .writing import WritingService +from .grade import GradeService + + +__all__ = [ + "LevelService", + "ListeningService", + "ReadingService", + "SpeakingService", + "WritingService", + "GradeService", +] diff --git a/app/services/impl/grade.py b/app/services/impl/exam/grade.py similarity index 97% rename from 
app/services/impl/grade.py rename to app/services/impl/exam/grade.py index f3792a2..eaabd1f 100644 --- a/app/services/impl/grade.py +++ b/app/services/impl/exam/grade.py @@ -1,200 +1,200 @@ -import json -from typing import List, Dict - -from app.configs.constants import GPTModels, TemperatureSettings -from app.services.abc import ILLMService, IGradeService - - -class GradeService(IGradeService): - - def __init__(self, llm: ILLMService): - self._llm = llm - - async def grade_short_answers(self, data: Dict): - json_format = { - "exercises": [ - { - "id": 1, - "correct": True, - "correct_answer": " correct answer if wrong" - } - ] - } - - messages = [ - { - "role": "system", - "content": f'You are a helpful assistant designed to output JSON on this format: {json_format}' - }, - { - "role": "user", - "content": ( - 'Grade these answers according to the text content and write a correct answer if they are ' - f'wrong. Text, questions and answers:\n {data}' - ) - } - ] - - return await self._llm.prediction( - GPTModels.GPT_4_O, - messages, - ["exercises"], - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - async def calculate_grading_summary(self, extracted_sections: List): - ret = [] - - for section in extracted_sections: - openai_response_dict = await self._calculate_section_grade_summary(section) - ret.append( - { - 'code': section['code'], - 'name': section['name'], - 'grade': section['grade'], - 'evaluation': openai_response_dict['evaluation'], - 'suggestions': openai_response_dict['suggestions'], - 'bullet_points': self._parse_bullet_points(openai_response_dict['bullet_points'], section['grade']) - } - ) - - return {'sections': ret} - - async def _calculate_section_grade_summary(self, section): - section_name = section['name'] - section_grade = section['grade'] - messages = [ - { - "role": "user", - "content": ( - 'You are a IELTS test section grade evaluator. You will receive a IELTS test section name and the ' - 'grade obtained in the section. 
You should offer a evaluation comment on this grade and separately ' - 'suggestions on how to possibly get a better grade.' - ) - }, - { - "role": "user", - "content": f'Section: {str(section_name)} Grade: {str(section_grade)}', - }, - { - "role": "user", - "content": "Speak in third person." - }, - { - "role": "user", - "content": "Don't offer suggestions in the evaluation comment. Only in the suggestions section." - }, - { - "role": "user", - "content": ( - "Your evaluation comment on the grade should enunciate the grade, be insightful, be speculative, " - "be one paragraph long." - ) - }, - { - "role": "user", - "content": "Please save the evaluation comment and suggestions generated." - }, - { - "role": "user", - "content": f"Offer bullet points to improve the english {str(section_name)} ability." - }, - ] - - if section['code'] == "level": - messages[2:2] = [{ - "role": "user", - "content": ( - "This section is comprised of multiple choice questions that measure the user's overall english " - "level. These multiple choice questions are about knowledge on vocabulary, syntax, grammar rules, " - "and contextual usage. The grade obtained measures the ability in these areas and english language " - "overall." - ) - }] - elif section['code'] == "speaking": - messages[2:2] = [{ - "role": "user", - "content": ( - "This section is s designed to assess the English language proficiency of individuals who want to " - "study or work in English-speaking countries. The speaking section evaluates a candidate's ability " - "to communicate effectively in spoken English." 
- ) - }] - - chat_config = {'max_tokens': 1000, 'temperature': 0.2} - tools = self.get_tools() - - res = await self._llm.prediction_override( - model="gpt-3.5-turbo", - max_tokens=chat_config['max_tokens'], - temperature=chat_config['temperature'], - tools=tools, - messages=messages - ) - - return self._parse_openai_response(res) - - @staticmethod - def _parse_openai_response(response): - if 'choices' in response and len(response['choices']) > 0 and 'message' in response['choices'][ - 0] and 'tool_calls' in response['choices'][0]['message'] and isinstance( - response['choices'][0]['message']['tool_calls'], list) and len( - response['choices'][0]['message']['tool_calls']) > 0 and \ - response['choices'][0]['message']['tool_calls'][0]['function']['arguments']: - return json.loads(response['choices'][0]['message']['tool_calls'][0]['function']['arguments']) - else: - return {'evaluation': "", 'suggestions': "", 'bullet_points': []} - - @staticmethod - def _parse_bullet_points(bullet_points_str, grade): - max_grade_for_suggestions = 9 - if isinstance(bullet_points_str, str) and grade < max_grade_for_suggestions: - # Split the string by '\n' - lines = bullet_points_str.split('\n') - - # Remove '-' and trim whitespace from each line - cleaned_lines = [line.replace('-', '').strip() for line in lines] - - # Add '.' to lines that don't end with it - return [line + '.' if line and not line.endswith('.') else line for line in cleaned_lines] - else: - return [] - - @staticmethod - def get_tools(): - return [ - { - "type": "function", - "function": { - "name": "save_evaluation_and_suggestions", - "description": "Saves the evaluation and suggestions requested by input.", - "parameters": { - "type": "object", - "properties": { - "evaluation": { - "type": "string", - "description": ( - "A comment on the IELTS section grade obtained in the specific section and what " - "it could mean without suggestions." 
- ), - }, - "suggestions": { - "type": "string", - "description": ( - "A small paragraph text with suggestions on how to possibly get a better grade " - "than the one obtained." - ), - }, - "bullet_points": { - "type": "string", - "description": ( - "Text with four bullet points to improve the english speaking ability. Only " - "include text for the bullet points separated by a paragraph." - ), - }, - }, - "required": ["evaluation", "suggestions"], - }, - } - } - ] +import json +from typing import List, Dict + +from app.configs.constants import GPTModels, TemperatureSettings +from app.services.abc import ILLMService, IGradeService + + +class GradeService(IGradeService): + + def __init__(self, llm: ILLMService): + self._llm = llm + + async def grade_short_answers(self, data: Dict): + json_format = { + "exercises": [ + { + "id": 1, + "correct": True, + "correct_answer": " correct answer if wrong" + } + ] + } + + messages = [ + { + "role": "system", + "content": f'You are a helpful assistant designed to output JSON on this format: {json_format}' + }, + { + "role": "user", + "content": ( + 'Grade these answers according to the text content and write a correct answer if they are ' + f'wrong. 
Text, questions and answers:\n {data}' + ) + } + ] + + return await self._llm.prediction( + GPTModels.GPT_4_O, + messages, + ["exercises"], + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + async def calculate_grading_summary(self, extracted_sections: List): + ret = [] + + for section in extracted_sections: + openai_response_dict = await self._calculate_section_grade_summary(section) + ret.append( + { + 'code': section['code'], + 'name': section['name'], + 'grade': section['grade'], + 'evaluation': openai_response_dict['evaluation'], + 'suggestions': openai_response_dict['suggestions'], + 'bullet_points': self._parse_bullet_points(openai_response_dict['bullet_points'], section['grade']) + } + ) + + return {'sections': ret} + + async def _calculate_section_grade_summary(self, section): + section_name = section['name'] + section_grade = section['grade'] + messages = [ + { + "role": "user", + "content": ( + 'You are a IELTS test section grade evaluator. You will receive a IELTS test section name and the ' + 'grade obtained in the section. You should offer a evaluation comment on this grade and separately ' + 'suggestions on how to possibly get a better grade.' + ) + }, + { + "role": "user", + "content": f'Section: {str(section_name)} Grade: {str(section_grade)}', + }, + { + "role": "user", + "content": "Speak in third person." + }, + { + "role": "user", + "content": "Don't offer suggestions in the evaluation comment. Only in the suggestions section." + }, + { + "role": "user", + "content": ( + "Your evaluation comment on the grade should enunciate the grade, be insightful, be speculative, " + "be one paragraph long." + ) + }, + { + "role": "user", + "content": "Please save the evaluation comment and suggestions generated." + }, + { + "role": "user", + "content": f"Offer bullet points to improve the english {str(section_name)} ability." 
+ }, + ] + + if section['code'] == "level": + messages[2:2] = [{ + "role": "user", + "content": ( + "This section is comprised of multiple choice questions that measure the user's overall english " + "level. These multiple choice questions are about knowledge on vocabulary, syntax, grammar rules, " + "and contextual usage. The grade obtained measures the ability in these areas and english language " + "overall." + ) + }] + elif section['code'] == "speaking": + messages[2:2] = [{ + "role": "user", + "content": ( + "This section is s designed to assess the English language proficiency of individuals who want to " + "study or work in English-speaking countries. The speaking section evaluates a candidate's ability " + "to communicate effectively in spoken English." + ) + }] + + chat_config = {'max_tokens': 1000, 'temperature': 0.2} + tools = self.get_tools() + + res = await self._llm.prediction_override( + model="gpt-3.5-turbo", + max_tokens=chat_config['max_tokens'], + temperature=chat_config['temperature'], + tools=tools, + messages=messages + ) + + return self._parse_openai_response(res) + + @staticmethod + def _parse_openai_response(response): + if 'choices' in response and len(response['choices']) > 0 and 'message' in response['choices'][ + 0] and 'tool_calls' in response['choices'][0]['message'] and isinstance( + response['choices'][0]['message']['tool_calls'], list) and len( + response['choices'][0]['message']['tool_calls']) > 0 and \ + response['choices'][0]['message']['tool_calls'][0]['function']['arguments']: + return json.loads(response['choices'][0]['message']['tool_calls'][0]['function']['arguments']) + else: + return {'evaluation': "", 'suggestions': "", 'bullet_points': []} + + @staticmethod + def _parse_bullet_points(bullet_points_str, grade): + max_grade_for_suggestions = 9 + if isinstance(bullet_points_str, str) and grade < max_grade_for_suggestions: + # Split the string by '\n' + lines = bullet_points_str.split('\n') + + # Remove '-' and trim 
whitespace from each line + cleaned_lines = [line.replace('-', '').strip() for line in lines] + + # Add '.' to lines that don't end with it + return [line + '.' if line and not line.endswith('.') else line for line in cleaned_lines] + else: + return [] + + @staticmethod + def get_tools(): + return [ + { + "type": "function", + "function": { + "name": "save_evaluation_and_suggestions", + "description": "Saves the evaluation and suggestions requested by input.", + "parameters": { + "type": "object", + "properties": { + "evaluation": { + "type": "string", + "description": ( + "A comment on the IELTS section grade obtained in the specific section and what " + "it could mean without suggestions." + ), + }, + "suggestions": { + "type": "string", + "description": ( + "A small paragraph text with suggestions on how to possibly get a better grade " + "than the one obtained." + ), + }, + "bullet_points": { + "type": "string", + "description": ( + "Text with four bullet points to improve the english speaking ability. Only " + "include text for the bullet points separated by a paragraph." 
+ ), + }, + }, + "required": ["evaluation", "suggestions"], + }, + } + } + ] diff --git a/app/services/impl/level/__init__.py b/app/services/impl/exam/level/__init__.py similarity index 94% rename from app/services/impl/level/__init__.py rename to app/services/impl/exam/level/__init__.py index 584a03d..7a4ada7 100644 --- a/app/services/impl/level/__init__.py +++ b/app/services/impl/exam/level/__init__.py @@ -1,5 +1,5 @@ -from .level import LevelService - -__all__ = [ - "LevelService" +from .level import LevelService + +__all__ = [ + "LevelService" ] \ No newline at end of file diff --git a/app/services/impl/level/custom.py b/app/services/impl/exam/level/custom.py similarity index 98% rename from app/services/impl/level/custom.py rename to app/services/impl/exam/level/custom.py index 09ba991..dee8497 100644 --- a/app/services/impl/level/custom.py +++ b/app/services/impl/exam/level/custom.py @@ -1,335 +1,335 @@ -import queue -import random - -from typing import Dict - -from app.configs.constants import CustomLevelExerciseTypes, EducationalContent -from app.services.abc import ( - ILLMService, ILevelService, IReadingService, - IWritingService, IListeningService, ISpeakingService -) - - -class CustomLevelModule: - - def __init__( - self, - llm: ILLMService, - level: ILevelService, - reading: IReadingService, - listening: IListeningService, - writing: IWritingService, - speaking: ISpeakingService - ): - self._llm = llm - self._level = level - self._reading = reading - self._listening = listening - self._writing = writing - self._speaking = speaking - - # TODO: I've changed this to retrieve the args from the body request and not request query args - async def get_custom_level(self, data: Dict): - nr_exercises = int(data.get('nr_exercises')) - - exercise_id = 1 - response = { - "exercises": {}, - "module": "level" - } - for i in range(1, nr_exercises + 1, 1): - exercise_type = data.get(f'exercise_{i}_type') - exercise_difficulty = data.get(f'exercise_{i}_difficulty', 
random.choice(['easy', 'medium', 'hard'])) - exercise_qty = int(data.get(f'exercise_{i}_qty', -1)) - exercise_topic = data.get(f'exercise_{i}_topic', random.choice(EducationalContent.TOPICS)) - exercise_topic_2 = data.get(f'exercise_{i}_topic_2', random.choice(EducationalContent.TOPICS)) - exercise_text_size = int(data.get(f'exercise_{i}_text_size', 700)) - exercise_sa_qty = int(data.get(f'exercise_{i}_sa_qty', -1)) - exercise_mc_qty = int(data.get(f'exercise_{i}_mc_qty', -1)) - exercise_mc3_qty = int(data.get(f'exercise_{i}_mc3_qty', -1)) - exercise_fillblanks_qty = int(data.get(f'exercise_{i}_fillblanks_qty', -1)) - exercise_writeblanks_qty = int(data.get(f'exercise_{i}_writeblanks_qty', -1)) - exercise_writeblanksquestions_qty = int(data.get(f'exercise_{i}_writeblanksquestions_qty', -1)) - exercise_writeblanksfill_qty = int(data.get(f'exercise_{i}_writeblanksfill_qty', -1)) - exercise_writeblanksform_qty = int(data.get(f'exercise_{i}_writeblanksform_qty', -1)) - exercise_truefalse_qty = int(data.get(f'exercise_{i}_truefalse_qty', -1)) - exercise_paragraphmatch_qty = int(data.get(f'exercise_{i}_paragraphmatch_qty', -1)) - exercise_ideamatch_qty = int(data.get(f'exercise_{i}_ideamatch_qty', -1)) - - if exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_4.value: - response["exercises"][f"exercise_{i}"] = {} - response["exercises"][f"exercise_{i}"]["questions"] = [] - response["exercises"][f"exercise_{i}"]["type"] = "multipleChoice" - while exercise_qty > 0: - if exercise_qty - 15 > 0: - qty = 15 - else: - qty = exercise_qty - - mc_response = await self._level.gen_multiple_choice( - "normal", qty, exercise_id, utas=True, - all_exams=response["exercises"][f"exercise_{i}"]["questions"] - ) - response["exercises"][f"exercise_{i}"]["questions"].extend(mc_response["questions"]) - exercise_id = exercise_id + qty - exercise_qty = exercise_qty - qty - - elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_BLANK_SPACE.value: - 
response["exercises"][f"exercise_{i}"] = {} - response["exercises"][f"exercise_{i}"]["questions"] = [] - response["exercises"][f"exercise_{i}"]["type"] = "multipleChoice" - while exercise_qty > 0: - if exercise_qty - 15 > 0: - qty = 15 - else: - qty = exercise_qty - - mc_response = await self._level.gen_multiple_choice( - "blank_space", qty, exercise_id, utas=True, - all_exams=response["exercises"][f"exercise_{i}"]["questions"] - ) - response["exercises"][f"exercise_{i}"]["questions"].extend(mc_response["questions"]) - - exercise_id = exercise_id + qty - exercise_qty = exercise_qty - qty - - elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_UNDERLINED.value: - response["exercises"][f"exercise_{i}"] = {} - response["exercises"][f"exercise_{i}"]["questions"] = [] - response["exercises"][f"exercise_{i}"]["type"] = "multipleChoice" - while exercise_qty > 0: - if exercise_qty - 15 > 0: - qty = 15 - else: - qty = exercise_qty - - mc_response = await self._level.gen_multiple_choice( - "underline", qty, exercise_id, utas=True, - all_exams=response["exercises"][f"exercise_{i}"]["questions"] - ) - response["exercises"][f"exercise_{i}"]["questions"].extend(mc_response["questions"]) - - exercise_id = exercise_id + qty - exercise_qty = exercise_qty - qty - - elif exercise_type == CustomLevelExerciseTypes.BLANK_SPACE_TEXT.value: - response["exercises"][f"exercise_{i}"] = await self._level.gen_blank_space_text_utas( - exercise_qty, exercise_id, exercise_text_size - ) - response["exercises"][f"exercise_{i}"]["type"] = "blankSpaceText" - exercise_id = exercise_id + exercise_qty - elif exercise_type == CustomLevelExerciseTypes.READING_PASSAGE_UTAS.value: - response["exercises"][f"exercise_{i}"] = await self._level.gen_reading_passage_utas( - exercise_id, exercise_sa_qty, exercise_mc_qty, exercise_topic - ) - response["exercises"][f"exercise_{i}"]["type"] = "readingExercises" - exercise_id = exercise_id + exercise_qty - elif exercise_type == 
CustomLevelExerciseTypes.WRITING_LETTER.value: - response["exercises"][f"exercise_{i}"] = await self._writing.get_writing_task_general_question( - 1, exercise_topic, exercise_difficulty - ) - response["exercises"][f"exercise_{i}"]["type"] = "writing" - exercise_id = exercise_id + 1 - elif exercise_type == CustomLevelExerciseTypes.WRITING_2.value: - response["exercises"][f"exercise_{i}"] = await self._writing.get_writing_task_general_question( - 2, exercise_topic, exercise_difficulty - ) - response["exercises"][f"exercise_{i}"]["type"] = "writing" - exercise_id = exercise_id + 1 - elif exercise_type == CustomLevelExerciseTypes.SPEAKING_1.value: - response["exercises"][f"exercise_{i}"] = await self._speaking.get_speaking_part( - 1, exercise_topic, exercise_difficulty, exercise_topic_2 - ) - response["exercises"][f"exercise_{i}"]["type"] = "interactiveSpeaking" - exercise_id = exercise_id + 1 - elif exercise_type == CustomLevelExerciseTypes.SPEAKING_2.value: - response["exercises"][f"exercise_{i}"] = await self._speaking.get_speaking_part( - 2, exercise_topic, exercise_difficulty - ) - response["exercises"][f"exercise_{i}"]["type"] = "speaking" - exercise_id = exercise_id + 1 - elif exercise_type == CustomLevelExerciseTypes.SPEAKING_3.value: - response["exercises"][f"exercise_{i}"] = await self._speaking.get_speaking_part( - 3, exercise_topic, exercise_difficulty - ) - response["exercises"][f"exercise_{i}"]["type"] = "interactiveSpeaking" - exercise_id = exercise_id + 1 - elif exercise_type == CustomLevelExerciseTypes.READING_1.value: - exercises = [] - exercise_qty_q = queue.Queue() - total_qty = 0 - if exercise_fillblanks_qty != -1: - exercises.append('fillBlanks') - exercise_qty_q.put(exercise_fillblanks_qty) - total_qty = total_qty + exercise_fillblanks_qty - if exercise_writeblanks_qty != -1: - exercises.append('writeBlanks') - exercise_qty_q.put(exercise_writeblanks_qty) - total_qty = total_qty + exercise_writeblanks_qty - if exercise_truefalse_qty != -1: - 
exercises.append('trueFalse') - exercise_qty_q.put(exercise_truefalse_qty) - total_qty = total_qty + exercise_truefalse_qty - if exercise_paragraphmatch_qty != -1: - exercises.append('paragraphMatch') - exercise_qty_q.put(exercise_paragraphmatch_qty) - total_qty = total_qty + exercise_paragraphmatch_qty - - response["exercises"][f"exercise_{i}"] = await self._reading.gen_reading_passage( - 1, exercise_topic, exercises, exercise_qty_q, exercise_difficulty, exercise_id - ) - response["exercises"][f"exercise_{i}"]["type"] = "reading" - - exercise_id = exercise_id + total_qty - elif exercise_type == CustomLevelExerciseTypes.READING_2.value: - exercises = [] - exercise_qty_q = queue.Queue() - total_qty = 0 - if exercise_fillblanks_qty != -1: - exercises.append('fillBlanks') - exercise_qty_q.put(exercise_fillblanks_qty) - total_qty = total_qty + exercise_fillblanks_qty - if exercise_writeblanks_qty != -1: - exercises.append('writeBlanks') - exercise_qty_q.put(exercise_writeblanks_qty) - total_qty = total_qty + exercise_writeblanks_qty - if exercise_truefalse_qty != -1: - exercises.append('trueFalse') - exercise_qty_q.put(exercise_truefalse_qty) - total_qty = total_qty + exercise_truefalse_qty - if exercise_paragraphmatch_qty != -1: - exercises.append('paragraphMatch') - exercise_qty_q.put(exercise_paragraphmatch_qty) - total_qty = total_qty + exercise_paragraphmatch_qty - - response["exercises"][f"exercise_{i}"] = await self._reading.gen_reading_passage( - 2, exercise_topic, exercises, exercise_qty_q, exercise_difficulty, exercise_id - ) - response["exercises"][f"exercise_{i}"]["type"] = "reading" - - exercise_id = exercise_id + total_qty - elif exercise_type == CustomLevelExerciseTypes.READING_3.value: - exercises = [] - exercise_qty_q = queue.Queue() - total_qty = 0 - if exercise_fillblanks_qty != -1: - exercises.append('fillBlanks') - exercise_qty_q.put(exercise_fillblanks_qty) - total_qty = total_qty + exercise_fillblanks_qty - if exercise_writeblanks_qty != -1: - 
exercises.append('writeBlanks') - exercise_qty_q.put(exercise_writeblanks_qty) - total_qty = total_qty + exercise_writeblanks_qty - if exercise_truefalse_qty != -1: - exercises.append('trueFalse') - exercise_qty_q.put(exercise_truefalse_qty) - total_qty = total_qty + exercise_truefalse_qty - if exercise_paragraphmatch_qty != -1: - exercises.append('paragraphMatch') - exercise_qty_q.put(exercise_paragraphmatch_qty) - total_qty = total_qty + exercise_paragraphmatch_qty - if exercise_ideamatch_qty != -1: - exercises.append('ideaMatch') - exercise_qty_q.put(exercise_ideamatch_qty) - total_qty = total_qty + exercise_ideamatch_qty - - response["exercises"][f"exercise_{i}"] = await self._reading.gen_reading_passage( - 3, exercise_topic, exercises, exercise_qty_q, exercise_id, exercise_difficulty - ) - response["exercises"][f"exercise_{i}"]["type"] = "reading" - - exercise_id = exercise_id + total_qty - elif exercise_type == CustomLevelExerciseTypes.LISTENING_1.value: - exercises = [] - exercise_qty_q = queue.Queue() - total_qty = 0 - if exercise_mc_qty != -1: - exercises.append('multipleChoice') - exercise_qty_q.put(exercise_mc_qty) - total_qty = total_qty + exercise_mc_qty - if exercise_writeblanksquestions_qty != -1: - exercises.append('writeBlanksQuestions') - exercise_qty_q.put(exercise_writeblanksquestions_qty) - total_qty = total_qty + exercise_writeblanksquestions_qty - if exercise_writeblanksfill_qty != -1: - exercises.append('writeBlanksFill') - exercise_qty_q.put(exercise_writeblanksfill_qty) - total_qty = total_qty + exercise_writeblanksfill_qty - if exercise_writeblanksform_qty != -1: - exercises.append('writeBlanksForm') - exercise_qty_q.put(exercise_writeblanksform_qty) - total_qty = total_qty + exercise_writeblanksform_qty - - response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( - 1, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id - ) - response["exercises"][f"exercise_{i}"]["type"] = "listening" 
- - exercise_id = exercise_id + total_qty - elif exercise_type == CustomLevelExerciseTypes.LISTENING_2.value: - exercises = [] - exercise_qty_q = queue.Queue() - total_qty = 0 - if exercise_mc_qty != -1: - exercises.append('multipleChoice') - exercise_qty_q.put(exercise_mc_qty) - total_qty = total_qty + exercise_mc_qty - if exercise_writeblanksquestions_qty != -1: - exercises.append('writeBlanksQuestions') - exercise_qty_q.put(exercise_writeblanksquestions_qty) - total_qty = total_qty + exercise_writeblanksquestions_qty - - response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( - 2, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id - ) - response["exercises"][f"exercise_{i}"]["type"] = "listening" - - exercise_id = exercise_id + total_qty - elif exercise_type == CustomLevelExerciseTypes.LISTENING_3.value: - exercises = [] - exercise_qty_q = queue.Queue() - total_qty = 0 - if exercise_mc3_qty != -1: - exercises.append('multipleChoice3Options') - exercise_qty_q.put(exercise_mc3_qty) - total_qty = total_qty + exercise_mc3_qty - if exercise_writeblanksquestions_qty != -1: - exercises.append('writeBlanksQuestions') - exercise_qty_q.put(exercise_writeblanksquestions_qty) - total_qty = total_qty + exercise_writeblanksquestions_qty - - response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( - 3, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id - ) - response["exercises"][f"exercise_{i}"]["type"] = "listening" - - exercise_id = exercise_id + total_qty - elif exercise_type == CustomLevelExerciseTypes.LISTENING_4.value: - exercises = [] - exercise_qty_q = queue.Queue() - total_qty = 0 - if exercise_mc_qty != -1: - exercises.append('multipleChoice') - exercise_qty_q.put(exercise_mc_qty) - total_qty = total_qty + exercise_mc_qty - if exercise_writeblanksquestions_qty != -1: - exercises.append('writeBlanksQuestions') - 
exercise_qty_q.put(exercise_writeblanksquestions_qty) - total_qty = total_qty + exercise_writeblanksquestions_qty - if exercise_writeblanksfill_qty != -1: - exercises.append('writeBlanksFill') - exercise_qty_q.put(exercise_writeblanksfill_qty) - total_qty = total_qty + exercise_writeblanksfill_qty - if exercise_writeblanksform_qty != -1: - exercises.append('writeBlanksForm') - exercise_qty_q.put(exercise_writeblanksform_qty) - total_qty = total_qty + exercise_writeblanksform_qty - - response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( - 4, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id - ) - response["exercises"][f"exercise_{i}"]["type"] = "listening" - - exercise_id = exercise_id + total_qty - - return response +import queue +import random + +from typing import Dict + +from app.configs.constants import CustomLevelExerciseTypes, EducationalContent +from app.services.abc import ( + ILLMService, ILevelService, IReadingService, + IWritingService, IListeningService, ISpeakingService +) + + +class CustomLevelModule: + + def __init__( + self, + llm: ILLMService, + level: ILevelService, + reading: IReadingService, + listening: IListeningService, + writing: IWritingService, + speaking: ISpeakingService + ): + self._llm = llm + self._level = level + self._reading = reading + self._listening = listening + self._writing = writing + self._speaking = speaking + + # TODO: I've changed this to retrieve the args from the body request and not request query args + async def get_custom_level(self, data: Dict): + nr_exercises = int(data.get('nr_exercises')) + + exercise_id = 1 + response = { + "exercises": {}, + "module": "level" + } + for i in range(1, nr_exercises + 1, 1): + exercise_type = data.get(f'exercise_{i}_type') + exercise_difficulty = data.get(f'exercise_{i}_difficulty', random.choice(['easy', 'medium', 'hard'])) + exercise_qty = int(data.get(f'exercise_{i}_qty', -1)) + exercise_topic = 
data.get(f'exercise_{i}_topic', random.choice(EducationalContent.TOPICS)) + exercise_topic_2 = data.get(f'exercise_{i}_topic_2', random.choice(EducationalContent.TOPICS)) + exercise_text_size = int(data.get(f'exercise_{i}_text_size', 700)) + exercise_sa_qty = int(data.get(f'exercise_{i}_sa_qty', -1)) + exercise_mc_qty = int(data.get(f'exercise_{i}_mc_qty', -1)) + exercise_mc3_qty = int(data.get(f'exercise_{i}_mc3_qty', -1)) + exercise_fillblanks_qty = int(data.get(f'exercise_{i}_fillblanks_qty', -1)) + exercise_writeblanks_qty = int(data.get(f'exercise_{i}_writeblanks_qty', -1)) + exercise_writeblanksquestions_qty = int(data.get(f'exercise_{i}_writeblanksquestions_qty', -1)) + exercise_writeblanksfill_qty = int(data.get(f'exercise_{i}_writeblanksfill_qty', -1)) + exercise_writeblanksform_qty = int(data.get(f'exercise_{i}_writeblanksform_qty', -1)) + exercise_truefalse_qty = int(data.get(f'exercise_{i}_truefalse_qty', -1)) + exercise_paragraphmatch_qty = int(data.get(f'exercise_{i}_paragraphmatch_qty', -1)) + exercise_ideamatch_qty = int(data.get(f'exercise_{i}_ideamatch_qty', -1)) + + if exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_4.value: + response["exercises"][f"exercise_{i}"] = {} + response["exercises"][f"exercise_{i}"]["questions"] = [] + response["exercises"][f"exercise_{i}"]["type"] = "multipleChoice" + while exercise_qty > 0: + if exercise_qty - 15 > 0: + qty = 15 + else: + qty = exercise_qty + + mc_response = await self._level.gen_multiple_choice( + "normal", qty, exercise_id, utas=True, + all_exams=response["exercises"][f"exercise_{i}"]["questions"] + ) + response["exercises"][f"exercise_{i}"]["questions"].extend(mc_response["questions"]) + exercise_id = exercise_id + qty + exercise_qty = exercise_qty - qty + + elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_BLANK_SPACE.value: + response["exercises"][f"exercise_{i}"] = {} + response["exercises"][f"exercise_{i}"]["questions"] = [] + 
response["exercises"][f"exercise_{i}"]["type"] = "multipleChoice" + while exercise_qty > 0: + if exercise_qty - 15 > 0: + qty = 15 + else: + qty = exercise_qty + + mc_response = await self._level.gen_multiple_choice( + "blank_space", qty, exercise_id, utas=True, + all_exams=response["exercises"][f"exercise_{i}"]["questions"] + ) + response["exercises"][f"exercise_{i}"]["questions"].extend(mc_response["questions"]) + + exercise_id = exercise_id + qty + exercise_qty = exercise_qty - qty + + elif exercise_type == CustomLevelExerciseTypes.MULTIPLE_CHOICE_UNDERLINED.value: + response["exercises"][f"exercise_{i}"] = {} + response["exercises"][f"exercise_{i}"]["questions"] = [] + response["exercises"][f"exercise_{i}"]["type"] = "multipleChoice" + while exercise_qty > 0: + if exercise_qty - 15 > 0: + qty = 15 + else: + qty = exercise_qty + + mc_response = await self._level.gen_multiple_choice( + "underline", qty, exercise_id, utas=True, + all_exams=response["exercises"][f"exercise_{i}"]["questions"] + ) + response["exercises"][f"exercise_{i}"]["questions"].extend(mc_response["questions"]) + + exercise_id = exercise_id + qty + exercise_qty = exercise_qty - qty + + elif exercise_type == CustomLevelExerciseTypes.BLANK_SPACE_TEXT.value: + response["exercises"][f"exercise_{i}"] = await self._level.gen_blank_space_text_utas( + exercise_qty, exercise_id, exercise_text_size + ) + response["exercises"][f"exercise_{i}"]["type"] = "blankSpaceText" + exercise_id = exercise_id + exercise_qty + elif exercise_type == CustomLevelExerciseTypes.READING_PASSAGE_UTAS.value: + response["exercises"][f"exercise_{i}"] = await self._level.gen_reading_passage_utas( + exercise_id, exercise_sa_qty, exercise_mc_qty, exercise_topic + ) + response["exercises"][f"exercise_{i}"]["type"] = "readingExercises" + exercise_id = exercise_id + exercise_qty + elif exercise_type == CustomLevelExerciseTypes.WRITING_LETTER.value: + response["exercises"][f"exercise_{i}"] = await 
self._writing.get_writing_task_general_question( + 1, exercise_topic, exercise_difficulty + ) + response["exercises"][f"exercise_{i}"]["type"] = "writing" + exercise_id = exercise_id + 1 + elif exercise_type == CustomLevelExerciseTypes.WRITING_2.value: + response["exercises"][f"exercise_{i}"] = await self._writing.get_writing_task_general_question( + 2, exercise_topic, exercise_difficulty + ) + response["exercises"][f"exercise_{i}"]["type"] = "writing" + exercise_id = exercise_id + 1 + elif exercise_type == CustomLevelExerciseTypes.SPEAKING_1.value: + response["exercises"][f"exercise_{i}"] = await self._speaking.get_speaking_part( + 1, exercise_topic, exercise_difficulty, exercise_topic_2 + ) + response["exercises"][f"exercise_{i}"]["type"] = "interactiveSpeaking" + exercise_id = exercise_id + 1 + elif exercise_type == CustomLevelExerciseTypes.SPEAKING_2.value: + response["exercises"][f"exercise_{i}"] = await self._speaking.get_speaking_part( + 2, exercise_topic, exercise_difficulty + ) + response["exercises"][f"exercise_{i}"]["type"] = "speaking" + exercise_id = exercise_id + 1 + elif exercise_type == CustomLevelExerciseTypes.SPEAKING_3.value: + response["exercises"][f"exercise_{i}"] = await self._speaking.get_speaking_part( + 3, exercise_topic, exercise_difficulty + ) + response["exercises"][f"exercise_{i}"]["type"] = "interactiveSpeaking" + exercise_id = exercise_id + 1 + elif exercise_type == CustomLevelExerciseTypes.READING_1.value: + exercises = [] + exercise_qty_q = queue.Queue() + total_qty = 0 + if exercise_fillblanks_qty != -1: + exercises.append('fillBlanks') + exercise_qty_q.put(exercise_fillblanks_qty) + total_qty = total_qty + exercise_fillblanks_qty + if exercise_writeblanks_qty != -1: + exercises.append('writeBlanks') + exercise_qty_q.put(exercise_writeblanks_qty) + total_qty = total_qty + exercise_writeblanks_qty + if exercise_truefalse_qty != -1: + exercises.append('trueFalse') + exercise_qty_q.put(exercise_truefalse_qty) + total_qty = total_qty + 
exercise_truefalse_qty + if exercise_paragraphmatch_qty != -1: + exercises.append('paragraphMatch') + exercise_qty_q.put(exercise_paragraphmatch_qty) + total_qty = total_qty + exercise_paragraphmatch_qty + + response["exercises"][f"exercise_{i}"] = await self._reading.gen_reading_passage( + 1, exercise_topic, exercises, exercise_qty_q, exercise_difficulty, exercise_id + ) + response["exercises"][f"exercise_{i}"]["type"] = "reading" + + exercise_id = exercise_id + total_qty + elif exercise_type == CustomLevelExerciseTypes.READING_2.value: + exercises = [] + exercise_qty_q = queue.Queue() + total_qty = 0 + if exercise_fillblanks_qty != -1: + exercises.append('fillBlanks') + exercise_qty_q.put(exercise_fillblanks_qty) + total_qty = total_qty + exercise_fillblanks_qty + if exercise_writeblanks_qty != -1: + exercises.append('writeBlanks') + exercise_qty_q.put(exercise_writeblanks_qty) + total_qty = total_qty + exercise_writeblanks_qty + if exercise_truefalse_qty != -1: + exercises.append('trueFalse') + exercise_qty_q.put(exercise_truefalse_qty) + total_qty = total_qty + exercise_truefalse_qty + if exercise_paragraphmatch_qty != -1: + exercises.append('paragraphMatch') + exercise_qty_q.put(exercise_paragraphmatch_qty) + total_qty = total_qty + exercise_paragraphmatch_qty + + response["exercises"][f"exercise_{i}"] = await self._reading.gen_reading_passage( + 2, exercise_topic, exercises, exercise_qty_q, exercise_difficulty, exercise_id + ) + response["exercises"][f"exercise_{i}"]["type"] = "reading" + + exercise_id = exercise_id + total_qty + elif exercise_type == CustomLevelExerciseTypes.READING_3.value: + exercises = [] + exercise_qty_q = queue.Queue() + total_qty = 0 + if exercise_fillblanks_qty != -1: + exercises.append('fillBlanks') + exercise_qty_q.put(exercise_fillblanks_qty) + total_qty = total_qty + exercise_fillblanks_qty + if exercise_writeblanks_qty != -1: + exercises.append('writeBlanks') + exercise_qty_q.put(exercise_writeblanks_qty) + total_qty = total_qty 
+ exercise_writeblanks_qty + if exercise_truefalse_qty != -1: + exercises.append('trueFalse') + exercise_qty_q.put(exercise_truefalse_qty) + total_qty = total_qty + exercise_truefalse_qty + if exercise_paragraphmatch_qty != -1: + exercises.append('paragraphMatch') + exercise_qty_q.put(exercise_paragraphmatch_qty) + total_qty = total_qty + exercise_paragraphmatch_qty + if exercise_ideamatch_qty != -1: + exercises.append('ideaMatch') + exercise_qty_q.put(exercise_ideamatch_qty) + total_qty = total_qty + exercise_ideamatch_qty + + response["exercises"][f"exercise_{i}"] = await self._reading.gen_reading_passage( + 3, exercise_topic, exercises, exercise_qty_q, exercise_id, exercise_difficulty + ) + response["exercises"][f"exercise_{i}"]["type"] = "reading" + + exercise_id = exercise_id + total_qty + elif exercise_type == CustomLevelExerciseTypes.LISTENING_1.value: + exercises = [] + exercise_qty_q = queue.Queue() + total_qty = 0 + if exercise_mc_qty != -1: + exercises.append('multipleChoice') + exercise_qty_q.put(exercise_mc_qty) + total_qty = total_qty + exercise_mc_qty + if exercise_writeblanksquestions_qty != -1: + exercises.append('writeBlanksQuestions') + exercise_qty_q.put(exercise_writeblanksquestions_qty) + total_qty = total_qty + exercise_writeblanksquestions_qty + if exercise_writeblanksfill_qty != -1: + exercises.append('writeBlanksFill') + exercise_qty_q.put(exercise_writeblanksfill_qty) + total_qty = total_qty + exercise_writeblanksfill_qty + if exercise_writeblanksform_qty != -1: + exercises.append('writeBlanksForm') + exercise_qty_q.put(exercise_writeblanksform_qty) + total_qty = total_qty + exercise_writeblanksform_qty + + response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( + 1, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id + ) + response["exercises"][f"exercise_{i}"]["type"] = "listening" + + exercise_id = exercise_id + total_qty + elif exercise_type == 
CustomLevelExerciseTypes.LISTENING_2.value: + exercises = [] + exercise_qty_q = queue.Queue() + total_qty = 0 + if exercise_mc_qty != -1: + exercises.append('multipleChoice') + exercise_qty_q.put(exercise_mc_qty) + total_qty = total_qty + exercise_mc_qty + if exercise_writeblanksquestions_qty != -1: + exercises.append('writeBlanksQuestions') + exercise_qty_q.put(exercise_writeblanksquestions_qty) + total_qty = total_qty + exercise_writeblanksquestions_qty + + response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( + 2, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id + ) + response["exercises"][f"exercise_{i}"]["type"] = "listening" + + exercise_id = exercise_id + total_qty + elif exercise_type == CustomLevelExerciseTypes.LISTENING_3.value: + exercises = [] + exercise_qty_q = queue.Queue() + total_qty = 0 + if exercise_mc3_qty != -1: + exercises.append('multipleChoice3Options') + exercise_qty_q.put(exercise_mc3_qty) + total_qty = total_qty + exercise_mc3_qty + if exercise_writeblanksquestions_qty != -1: + exercises.append('writeBlanksQuestions') + exercise_qty_q.put(exercise_writeblanksquestions_qty) + total_qty = total_qty + exercise_writeblanksquestions_qty + + response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( + 3, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id + ) + response["exercises"][f"exercise_{i}"]["type"] = "listening" + + exercise_id = exercise_id + total_qty + elif exercise_type == CustomLevelExerciseTypes.LISTENING_4.value: + exercises = [] + exercise_qty_q = queue.Queue() + total_qty = 0 + if exercise_mc_qty != -1: + exercises.append('multipleChoice') + exercise_qty_q.put(exercise_mc_qty) + total_qty = total_qty + exercise_mc_qty + if exercise_writeblanksquestions_qty != -1: + exercises.append('writeBlanksQuestions') + exercise_qty_q.put(exercise_writeblanksquestions_qty) + total_qty = total_qty + 
exercise_writeblanksquestions_qty + if exercise_writeblanksfill_qty != -1: + exercises.append('writeBlanksFill') + exercise_qty_q.put(exercise_writeblanksfill_qty) + total_qty = total_qty + exercise_writeblanksfill_qty + if exercise_writeblanksform_qty != -1: + exercises.append('writeBlanksForm') + exercise_qty_q.put(exercise_writeblanksform_qty) + total_qty = total_qty + exercise_writeblanksform_qty + + response["exercises"][f"exercise_{i}"] = await self._listening.get_listening_question( + 4, exercise_topic, exercises, exercise_difficulty, exercise_qty_q, exercise_id + ) + response["exercises"][f"exercise_{i}"]["type"] = "listening" + + exercise_id = exercise_id + total_qty + + return response diff --git a/app/services/impl/level/level.py b/app/services/impl/exam/level/level.py similarity index 97% rename from app/services/impl/level/level.py rename to app/services/impl/exam/level/level.py index 285a11e..fad8ce6 100644 --- a/app/services/impl/level/level.py +++ b/app/services/impl/exam/level/level.py @@ -1,417 +1,417 @@ -import json -import random -import uuid - -from typing import Dict - -from fastapi import UploadFile - -from app.configs.constants import GPTModels, TemperatureSettings, EducationalContent -from app.helpers import ExercisesHelper -from app.repositories.abc import IDocumentStore -from app.services.abc import ILevelService, ILLMService, IReadingService, IWritingService, ISpeakingService, \ - IListeningService -from .custom import CustomLevelModule -from .upload import UploadLevelModule - - -class LevelService(ILevelService): - - def __init__( - self, - llm: ILLMService, - document_store: IDocumentStore, - mc_variants: Dict, - reading_service: IReadingService, - writing_service: IWritingService, - speaking_service: ISpeakingService, - listening_service: IListeningService - ): - self._llm = llm - self._document_store = document_store - self._reading_service = reading_service - self._custom_module = CustomLevelModule( - llm, self, reading_service, 
listening_service, writing_service, speaking_service - ) - self._upload_module = UploadLevelModule(llm) - - # TODO: normal and blank spaces only differ on "multiple choice blank space questions" in the prompt - # mc_variants are stored in ./mc_variants.json - self._mc_variants = mc_variants - - async def upload_level(self, upload: UploadFile) -> Dict: - return await self._upload_module.generate_level_from_file(upload) - - async def get_custom_level(self, data: Dict): - return await self._custom_module.get_custom_level(data) - - async def get_level_exam( - self, number_of_exercises: int = 25, min_timer: int = 25, diagnostic: bool = False - ) -> Dict: - exercises = await self.gen_multiple_choice("normal", number_of_exercises, utas=False) - return { - "exercises": [exercises], - "isDiagnostic": diagnostic, - "minTimer": min_timer, - "module": "level" - } - - async def get_level_utas(self, diagnostic: bool = False, min_timer: int = 25): - # Formats - mc = { - "id": str(uuid.uuid4()), - "prompt": "Choose the correct word or group of words that completes the sentences.", - "questions": None, - "type": "multipleChoice", - "part": 1 - } - - umc = { - "id": str(uuid.uuid4()), - "prompt": "Choose the underlined word or group of words that is not correct.", - "questions": None, - "type": "multipleChoice", - "part": 2 - } - - bs_1 = { - "id": str(uuid.uuid4()), - "prompt": "Read the text and write the correct word for each space.", - "questions": None, - "type": "blankSpaceText", - "part": 3 - } - - bs_2 = { - "id": str(uuid.uuid4()), - "prompt": "Read the text and write the correct word for each space.", - "questions": None, - "type": "blankSpaceText", - "part": 4 - } - - reading = { - "id": str(uuid.uuid4()), - "prompt": "Read the text and answer the questions below.", - "questions": None, - "type": "readingExercises", - "part": 5 - } - - all_mc_questions = [] - - # PART 1 - # await self._gen_multiple_choice("normal", number_of_exercises, utas=False) - mc_exercises1 = await 
self.gen_multiple_choice( - "blank_space", 15, 1, utas=True, all_exams=all_mc_questions - ) - print(json.dumps(mc_exercises1, indent=4)) - all_mc_questions.append(mc_exercises1) - - # PART 2 - mc_exercises2 = await self.gen_multiple_choice( - "blank_space", 15, 16, utas=True, all_exams=all_mc_questions - ) - print(json.dumps(mc_exercises2, indent=4)) - all_mc_questions.append(mc_exercises2) - - # PART 3 - mc_exercises3 = await self.gen_multiple_choice( - "blank_space", 15, 31, utas=True, all_exams=all_mc_questions - ) - print(json.dumps(mc_exercises3, indent=4)) - all_mc_questions.append(mc_exercises3) - - mc_exercises = mc_exercises1['questions'] + mc_exercises2['questions'] + mc_exercises3['questions'] - print(json.dumps(mc_exercises, indent=4)) - mc["questions"] = mc_exercises - - # Underlined mc - underlined_mc = await self.gen_multiple_choice( - "underline", 15, 46, utas=True, all_exams=all_mc_questions - ) - print(json.dumps(underlined_mc, indent=4)) - umc["questions"] = underlined_mc - - # Blank Space text 1 - blank_space_text_1 = await self.gen_blank_space_text_utas(12, 61, 250) - print(json.dumps(blank_space_text_1, indent=4)) - bs_1["questions"] = blank_space_text_1 - - # Blank Space text 2 - blank_space_text_2 = await self.gen_blank_space_text_utas(14, 73, 350) - print(json.dumps(blank_space_text_2, indent=4)) - bs_2["questions"] = blank_space_text_2 - - # Reading text - reading_text = await self.gen_reading_passage_utas(87, 10, 4) - print(json.dumps(reading_text, indent=4)) - reading["questions"] = reading_text - - return { - "exercises": { - "blankSpaceMultipleChoice": mc, - "underlinedMultipleChoice": umc, - "blankSpaceText1": bs_1, - "blankSpaceText2": bs_2, - "readingExercises": reading, - }, - "isDiagnostic": diagnostic, - "minTimer": min_timer, - "module": "level" - } - - async def gen_multiple_choice( - self, mc_variant: str, quantity: int, start_id: int = 1, *, utas: bool = False, all_exams=None - ): - mc_template = self._mc_variants[mc_variant] 
- blank_mod = " blank space " if mc_variant == "blank_space" else " " - - gen_multiple_choice_for_text: str = ( - 'Generate {quantity} multiple choice{blank}questions of 4 options for an english level exam, some easy ' - 'questions, some intermediate questions and some advanced questions. Ensure that the questions cover ' - 'a range of topics such as verb tense, subject-verb agreement, pronoun usage, sentence structure, and ' - 'punctuation. Make sure every question only has 1 correct answer.' - ) - - messages = [ - { - "role": "system", - "content": ( - f'You are a helpful assistant designed to output JSON on this format: {mc_template}' - ) - }, - { - "role": "user", - "content": gen_multiple_choice_for_text.format(quantity=str(quantity), blank=blank_mod) - } - ] - - if mc_variant == "underline": - messages.append({ - "role": "user", - "content": ( - 'The type of multiple choice in the prompt has wrong words or group of words and the options ' - 'are to find the wrong word or group of words that are underlined in the prompt. 
\nExample:\n' - 'Prompt: "I complain about my boss all the time, but my colleagues thinks ' - 'the boss is nice."\n' - 'Options:\na: "complain"\nb: "all the time"\nc: "thinks"\nd: "is"' - ) - }) - - question = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - if len(question["questions"]) != quantity: - return await self.gen_multiple_choice(mc_variant, quantity, start_id, utas=utas, all_exams=all_exams) - else: - if not utas: - all_exams = await self._document_store.get_all("level") - seen_keys = set() - for i in range(len(question["questions"])): - question["questions"][i], seen_keys = await self._replace_exercise_if_exists( - all_exams, question["questions"][i], question, seen_keys, mc_variant, utas - ) - return { - "id": str(uuid.uuid4()), - "prompt": "Select the appropriate option.", - "questions": ExercisesHelper.fix_exercise_ids(question, start_id)["questions"], - "type": "multipleChoice", - } - else: - if all_exams is not None: - seen_keys = set() - for i in range(len(question["questions"])): - question["questions"][i], seen_keys = await self._replace_exercise_if_exists( - all_exams, question["questions"][i], question, seen_keys, mc_variant, utas - ) - response = ExercisesHelper.fix_exercise_ids(question, start_id) - response["questions"] = ExercisesHelper.randomize_mc_options_order(response["questions"]) - return response - - async def _generate_single_multiple_choice(self, mc_variant: str = "normal"): - mc_template = self._mc_variants[mc_variant]["questions"][0] - blank_mod = " blank space " if mc_variant == "blank_space" else " " - - messages = [ - { - "role": "system", - "content": ( - f'You are a helpful assistant designed to output JSON on this format: {mc_template}' - ) - }, - { - "role": "user", - "content": ( - f'Generate 1 multiple choice {blank_mod} question of 4 options for an english level exam, ' - f'it can be easy, intermediate or advanced.' 
- ) - - } - ] - - if mc_variant == "underline": - messages.append({ - "role": "user", - "content": ( - 'The type of multiple choice in the prompt has wrong words or group of words and the options ' - 'are to find the wrong word or group of words that are underlined in the prompt. \nExample:\n' - 'Prompt: "I complain about my boss all the time, but my colleagues thinks ' - 'the boss is nice."\n' - 'Options:\na: "complain"\nb: "all the time"\nc: "thinks"\nd: "is"' - ) - }) - - question = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["options"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - return question - - async def _replace_exercise_if_exists( - self, all_exams, current_exercise, current_exam, seen_keys, mc_variant: str, utas: bool = False - ): - # Extracting relevant fields for comparison - key = (current_exercise['prompt'], tuple(sorted(option['text'] for option in current_exercise['options']))) - # Check if the key is in the set - if key in seen_keys: - return await self._replace_exercise_if_exists( - all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam, seen_keys, - mc_variant, utas - ) - else: - seen_keys.add(key) - - if not utas: - for exam in all_exams: - exam_dict = exam.to_dict() - if len(exam_dict.get("parts", [])) > 0: - exercise_dict = exam_dict.get("parts", [])[0] - if len(exercise_dict.get("exercises", [])) > 0: - if any( - exercise["prompt"] == current_exercise["prompt"] and - any(exercise["options"][0]["text"] == current_option["text"] for current_option in - current_exercise["options"]) - for exercise in exercise_dict.get("exercises", [])[0]["questions"] - ): - return await self._replace_exercise_if_exists( - all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam, - seen_keys, mc_variant, utas - ) - else: - for exam in all_exams: - if any( - exercise["prompt"] == current_exercise["prompt"] and - any(exercise["options"][0]["text"] == current_option["text"] for 
current_option in - current_exercise["options"]) - for exercise in exam.get("questions", []) - ): - return await self._replace_exercise_if_exists( - all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam, - seen_keys, mc_variant, utas - ) - return current_exercise, seen_keys - - async def gen_blank_space_text_utas( - self, quantity: int, start_id: int, size: int, topic=random.choice(EducationalContent.MTI_TOPICS) - ): - json_template = self._mc_variants["blank_space_text"] - messages = [ - { - "role": "system", - "content": f'You are a helpful assistant designed to output JSON on this format: {json_template}' - }, - { - "role": "user", - "content": f'Generate a text of at least {size} words about the topic {topic}.' - }, - { - "role": "user", - "content": ( - f'From the generated text choose {quantity} words (cannot be sequential words) to replace ' - 'once with {{id}} where id starts on ' + str(start_id) + ' and is incremented for each word. ' - 'The ids must be ordered throughout the text and the words must be replaced only once. ' - 'Put the removed words and respective ids on the words array of the json in the correct order.' 
- ) - } - ] - - question = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["question"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - return question["question"] - - async def gen_reading_passage_utas( - self, start_id, sa_quantity: int, mc_quantity: int, topic=random.choice(EducationalContent.MTI_TOPICS) - ): - passage = await self._reading_service.generate_reading_passage(1, topic) - short_answer = await self._gen_short_answer_utas(passage["text"], start_id, sa_quantity) - mc_exercises = await self._gen_text_multiple_choice_utas(passage["text"], start_id + sa_quantity, mc_quantity) - return { - "exercises": { - "shortAnswer": short_answer, - "multipleChoice": mc_exercises, - }, - "text": { - "content": passage["text"], - "title": passage["title"] - } - } - - async def _gen_short_answer_utas(self, text: str, start_id: int, sa_quantity: int): - json_format = {"questions": [{"id": 1, "question": "question", "possible_answers": ["answer_1", "answer_2"]}]} - - messages = [ - { - "role": "system", - "content": f'You are a helpful assistant designed to output JSON on this format: {json_format}' - }, - { - "role": "user", - "content": ( - f'Generate {sa_quantity} short answer questions, and the possible answers, must have ' - f'maximum 3 words per answer, about this text:\n"{text}"' - ) - }, - { - "role": "user", - "content": f'The id starts at {start_id}.' 
- } - ] - - question = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - return question["questions"] - - async def _gen_text_multiple_choice_utas(self, text: str, start_id: int, mc_quantity: int): - json_template = self._mc_variants["text_mc_utas"] - - messages = [ - { - "role": "system", - "content": f'You are a helpful assistant designed to output JSON on this format: {json_template}' - }, - { - "role": "user", - "content": f'Generate {mc_quantity} multiple choice questions of 4 options for this text:\n{text}' - }, - { - "role": "user", - "content": 'Make sure every question only has 1 correct answer.' - } - ] - - question = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - if len(question["questions"]) != mc_quantity: - return await self._gen_text_multiple_choice_utas(text, mc_quantity, start_id) - else: - response = ExercisesHelper.fix_exercise_ids(question, start_id) - response["questions"] = ExercisesHelper.randomize_mc_options_order(response["questions"]) - return response +import json +import random +import uuid + +from typing import Dict + +from fastapi import UploadFile + +from app.configs.constants import GPTModels, TemperatureSettings, EducationalContent +from app.helpers import ExercisesHelper +from app.repositories.abc import IDocumentStore +from app.services.abc import ILevelService, ILLMService, IReadingService, IWritingService, ISpeakingService, \ + IListeningService +from .custom import CustomLevelModule +from .upload import UploadLevelModule + + +class LevelService(ILevelService): + + def __init__( + self, + llm: ILLMService, + document_store: IDocumentStore, + mc_variants: Dict, + reading_service: IReadingService, + writing_service: IWritingService, + speaking_service: ISpeakingService, + listening_service: IListeningService + ): + self._llm = llm + self._document_store = document_store + 
self._reading_service = reading_service + self._custom_module = CustomLevelModule( + llm, self, reading_service, listening_service, writing_service, speaking_service + ) + self._upload_module = UploadLevelModule(llm) + + # TODO: normal and blank spaces only differ on "multiple choice blank space questions" in the prompt + # mc_variants are stored in ./mc_variants.json + self._mc_variants = mc_variants + + async def upload_level(self, upload: UploadFile) -> Dict: + return await self._upload_module.generate_level_from_file(upload) + + async def get_custom_level(self, data: Dict): + return await self._custom_module.get_custom_level(data) + + async def get_level_exam( + self, number_of_exercises: int = 25, min_timer: int = 25, diagnostic: bool = False + ) -> Dict: + exercises = await self.gen_multiple_choice("normal", number_of_exercises, utas=False) + return { + "exercises": [exercises], + "isDiagnostic": diagnostic, + "minTimer": min_timer, + "module": "level" + } + + async def get_level_utas(self, diagnostic: bool = False, min_timer: int = 25): + # Formats + mc = { + "id": str(uuid.uuid4()), + "prompt": "Choose the correct word or group of words that completes the sentences.", + "questions": None, + "type": "multipleChoice", + "part": 1 + } + + umc = { + "id": str(uuid.uuid4()), + "prompt": "Choose the underlined word or group of words that is not correct.", + "questions": None, + "type": "multipleChoice", + "part": 2 + } + + bs_1 = { + "id": str(uuid.uuid4()), + "prompt": "Read the text and write the correct word for each space.", + "questions": None, + "type": "blankSpaceText", + "part": 3 + } + + bs_2 = { + "id": str(uuid.uuid4()), + "prompt": "Read the text and write the correct word for each space.", + "questions": None, + "type": "blankSpaceText", + "part": 4 + } + + reading = { + "id": str(uuid.uuid4()), + "prompt": "Read the text and answer the questions below.", + "questions": None, + "type": "readingExercises", + "part": 5 + } + + all_mc_questions = [] + + 
# PART 1 + # await self._gen_multiple_choice("normal", number_of_exercises, utas=False) + mc_exercises1 = await self.gen_multiple_choice( + "blank_space", 15, 1, utas=True, all_exams=all_mc_questions + ) + print(json.dumps(mc_exercises1, indent=4)) + all_mc_questions.append(mc_exercises1) + + # PART 2 + mc_exercises2 = await self.gen_multiple_choice( + "blank_space", 15, 16, utas=True, all_exams=all_mc_questions + ) + print(json.dumps(mc_exercises2, indent=4)) + all_mc_questions.append(mc_exercises2) + + # PART 3 + mc_exercises3 = await self.gen_multiple_choice( + "blank_space", 15, 31, utas=True, all_exams=all_mc_questions + ) + print(json.dumps(mc_exercises3, indent=4)) + all_mc_questions.append(mc_exercises3) + + mc_exercises = mc_exercises1['questions'] + mc_exercises2['questions'] + mc_exercises3['questions'] + print(json.dumps(mc_exercises, indent=4)) + mc["questions"] = mc_exercises + + # Underlined mc + underlined_mc = await self.gen_multiple_choice( + "underline", 15, 46, utas=True, all_exams=all_mc_questions + ) + print(json.dumps(underlined_mc, indent=4)) + umc["questions"] = underlined_mc + + # Blank Space text 1 + blank_space_text_1 = await self.gen_blank_space_text_utas(12, 61, 250) + print(json.dumps(blank_space_text_1, indent=4)) + bs_1["questions"] = blank_space_text_1 + + # Blank Space text 2 + blank_space_text_2 = await self.gen_blank_space_text_utas(14, 73, 350) + print(json.dumps(blank_space_text_2, indent=4)) + bs_2["questions"] = blank_space_text_2 + + # Reading text + reading_text = await self.gen_reading_passage_utas(87, 10, 4) + print(json.dumps(reading_text, indent=4)) + reading["questions"] = reading_text + + return { + "exercises": { + "blankSpaceMultipleChoice": mc, + "underlinedMultipleChoice": umc, + "blankSpaceText1": bs_1, + "blankSpaceText2": bs_2, + "readingExercises": reading, + }, + "isDiagnostic": diagnostic, + "minTimer": min_timer, + "module": "level" + } + + async def gen_multiple_choice( + self, mc_variant: str, quantity: 
int, start_id: int = 1, *, utas: bool = False, all_exams=None + ): + mc_template = self._mc_variants[mc_variant] + blank_mod = " blank space " if mc_variant == "blank_space" else " " + + gen_multiple_choice_for_text: str = ( + 'Generate {quantity} multiple choice{blank}questions of 4 options for an english level exam, some easy ' + 'questions, some intermediate questions and some advanced questions. Ensure that the questions cover ' + 'a range of topics such as verb tense, subject-verb agreement, pronoun usage, sentence structure, and ' + 'punctuation. Make sure every question only has 1 correct answer.' + ) + + messages = [ + { + "role": "system", + "content": ( + f'You are a helpful assistant designed to output JSON on this format: {mc_template}' + ) + }, + { + "role": "user", + "content": gen_multiple_choice_for_text.format(quantity=str(quantity), blank=blank_mod) + } + ] + + if mc_variant == "underline": + messages.append({ + "role": "user", + "content": ( + 'The type of multiple choice in the prompt has wrong words or group of words and the options ' + 'are to find the wrong word or group of words that are underlined in the prompt. 
\nExample:\n' + 'Prompt: "I complain about my boss all the time, but my colleagues thinks ' + 'the boss is nice."\n' + 'Options:\na: "complain"\nb: "all the time"\nc: "thinks"\nd: "is"' + ) + }) + + question = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + if len(question["questions"]) != quantity: + return await self.gen_multiple_choice(mc_variant, quantity, start_id, utas=utas, all_exams=all_exams) + else: + if not utas: + all_exams = await self._document_store.get_all("level") + seen_keys = set() + for i in range(len(question["questions"])): + question["questions"][i], seen_keys = await self._replace_exercise_if_exists( + all_exams, question["questions"][i], question, seen_keys, mc_variant, utas + ) + return { + "id": str(uuid.uuid4()), + "prompt": "Select the appropriate option.", + "questions": ExercisesHelper.fix_exercise_ids(question, start_id)["questions"], + "type": "multipleChoice", + } + else: + if all_exams is not None: + seen_keys = set() + for i in range(len(question["questions"])): + question["questions"][i], seen_keys = await self._replace_exercise_if_exists( + all_exams, question["questions"][i], question, seen_keys, mc_variant, utas + ) + response = ExercisesHelper.fix_exercise_ids(question, start_id) + response["questions"] = ExercisesHelper.randomize_mc_options_order(response["questions"]) + return response + + async def _generate_single_multiple_choice(self, mc_variant: str = "normal"): + mc_template = self._mc_variants[mc_variant]["questions"][0] + blank_mod = " blank space " if mc_variant == "blank_space" else " " + + messages = [ + { + "role": "system", + "content": ( + f'You are a helpful assistant designed to output JSON on this format: {mc_template}' + ) + }, + { + "role": "user", + "content": ( + f'Generate 1 multiple choice {blank_mod} question of 4 options for an english level exam, ' + f'it can be easy, intermediate or advanced.' 
+ ) + + } + ] + + if mc_variant == "underline": + messages.append({ + "role": "user", + "content": ( + 'The type of multiple choice in the prompt has wrong words or group of words and the options ' + 'are to find the wrong word or group of words that are underlined in the prompt. \nExample:\n' + 'Prompt: "I complain about my boss all the time, but my colleagues thinks ' + 'the boss is nice."\n' + 'Options:\na: "complain"\nb: "all the time"\nc: "thinks"\nd: "is"' + ) + }) + + question = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["options"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + return question + + async def _replace_exercise_if_exists( + self, all_exams, current_exercise, current_exam, seen_keys, mc_variant: str, utas: bool = False + ): + # Extracting relevant fields for comparison + key = (current_exercise['prompt'], tuple(sorted(option['text'] for option in current_exercise['options']))) + # Check if the key is in the set + if key in seen_keys: + return await self._replace_exercise_if_exists( + all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam, seen_keys, + mc_variant, utas + ) + else: + seen_keys.add(key) + + if not utas: + for exam in all_exams: + exam_dict = exam.to_dict() + if len(exam_dict.get("parts", [])) > 0: + exercise_dict = exam_dict.get("parts", [])[0] + if len(exercise_dict.get("exercises", [])) > 0: + if any( + exercise["prompt"] == current_exercise["prompt"] and + any(exercise["options"][0]["text"] == current_option["text"] for current_option in + current_exercise["options"]) + for exercise in exercise_dict.get("exercises", [])[0]["questions"] + ): + return await self._replace_exercise_if_exists( + all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam, + seen_keys, mc_variant, utas + ) + else: + for exam in all_exams: + if any( + exercise["prompt"] == current_exercise["prompt"] and + any(exercise["options"][0]["text"] == current_option["text"] for 
current_option in + current_exercise["options"]) + for exercise in exam.get("questions", []) + ): + return await self._replace_exercise_if_exists( + all_exams, await self._generate_single_multiple_choice(mc_variant), current_exam, + seen_keys, mc_variant, utas + ) + return current_exercise, seen_keys + + async def gen_blank_space_text_utas( + self, quantity: int, start_id: int, size: int, topic=random.choice(EducationalContent.MTI_TOPICS) + ): + json_template = self._mc_variants["blank_space_text"] + messages = [ + { + "role": "system", + "content": f'You are a helpful assistant designed to output JSON on this format: {json_template}' + }, + { + "role": "user", + "content": f'Generate a text of at least {size} words about the topic {topic}.' + }, + { + "role": "user", + "content": ( + f'From the generated text choose {quantity} words (cannot be sequential words) to replace ' + 'once with {{id}} where id starts on ' + str(start_id) + ' and is incremented for each word. ' + 'The ids must be ordered throughout the text and the words must be replaced only once. ' + 'Put the removed words and respective ids on the words array of the json in the correct order.' 
+ ) + } + ] + + question = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["question"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + return question["question"] + + async def gen_reading_passage_utas( + self, start_id, sa_quantity: int, mc_quantity: int, topic=random.choice(EducationalContent.MTI_TOPICS) + ): + passage = await self._reading_service.generate_reading_passage(1, topic) + short_answer = await self._gen_short_answer_utas(passage["text"], start_id, sa_quantity) + mc_exercises = await self._gen_text_multiple_choice_utas(passage["text"], start_id + sa_quantity, mc_quantity) + return { + "exercises": { + "shortAnswer": short_answer, + "multipleChoice": mc_exercises, + }, + "text": { + "content": passage["text"], + "title": passage["title"] + } + } + + async def _gen_short_answer_utas(self, text: str, start_id: int, sa_quantity: int): + json_format = {"questions": [{"id": 1, "question": "question", "possible_answers": ["answer_1", "answer_2"]}]} + + messages = [ + { + "role": "system", + "content": f'You are a helpful assistant designed to output JSON on this format: {json_format}' + }, + { + "role": "user", + "content": ( + f'Generate {sa_quantity} short answer questions, and the possible answers, must have ' + f'maximum 3 words per answer, about this text:\n"{text}"' + ) + }, + { + "role": "user", + "content": f'The id starts at {start_id}.' 
+ } + ] + + question = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + return question["questions"] + + async def _gen_text_multiple_choice_utas(self, text: str, start_id: int, mc_quantity: int): + json_template = self._mc_variants["text_mc_utas"] + + messages = [ + { + "role": "system", + "content": f'You are a helpful assistant designed to output JSON on this format: {json_template}' + }, + { + "role": "user", + "content": f'Generate {mc_quantity} multiple choice questions of 4 options for this text:\n{text}' + }, + { + "role": "user", + "content": 'Make sure every question only has 1 correct answer.' + } + ] + + question = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + if len(question["questions"]) != mc_quantity: + return await self._gen_text_multiple_choice_utas(text, mc_quantity, start_id) + else: + response = ExercisesHelper.fix_exercise_ids(question, start_id) + response["questions"] = ExercisesHelper.randomize_mc_options_order(response["questions"]) + return response diff --git a/app/services/impl/level/mc_variants.json b/app/services/impl/exam/level/mc_variants.json similarity index 96% rename from app/services/impl/level/mc_variants.json rename to app/services/impl/exam/level/mc_variants.json index 3b9c55b..5621bd7 100644 --- a/app/services/impl/level/mc_variants.json +++ b/app/services/impl/exam/level/mc_variants.json @@ -1,137 +1,137 @@ -{ - "normal": { - "questions": [ - { - "id": "9", - "options": [ - { - "id": "A", - "text": "And" - }, - { - "id": "B", - "text": "Cat" - }, - { - "id": "C", - "text": "Happy" - }, - { - "id": "D", - "text": "Jump" - } - ], - "prompt": "Which of the following is a conjunction?", - "solution": "A", - "variant": "text" - } - ] - }, - "blank_space": { - "questions": [ - { - "id": "9", - "options": [ - { - "id": "A", - "text": "And" - }, - { - "id": "B", - "text": 
"Cat" - }, - { - "id": "C", - "text": "Happy" - }, - { - "id": "D", - "text": "Jump" - } - ], - "prompt": "Which of the following is a conjunction?", - "solution": "A", - "variant": "text" - } - ] - }, - "underline": { - "questions": [ - { - "id": "9", - "options": [ - { - "id": "A", - "text": "a" - }, - { - "id": "B", - "text": "b" - }, - { - "id": "C", - "text": "c" - }, - { - "id": "D", - "text": "d" - } - ], - "prompt": "prompt", - "solution": "A", - "variant": "text" - } - ] - }, - "blank_space_text": { - "question": { - "words": [ - { - "id": "1", - "text": "a" - }, - { - "id": "2", - "text": "b" - }, - { - "id": "3", - "text": "c" - }, - { - "id": "4", - "text": "d" - } - ], - "text": "text" - } - }, - "text_mc_utas": { - "questions": [ - { - "id": "9", - "options": [ - { - "id": "A", - "text": "a" - }, - { - "id": "B", - "text": "b" - }, - { - "id": "C", - "text": "c" - }, - { - "id": "D", - "text": "d" - } - ], - "prompt": "prompt", - "solution": "A", - "variant": "text" - } - ] - } +{ + "normal": { + "questions": [ + { + "id": "9", + "options": [ + { + "id": "A", + "text": "And" + }, + { + "id": "B", + "text": "Cat" + }, + { + "id": "C", + "text": "Happy" + }, + { + "id": "D", + "text": "Jump" + } + ], + "prompt": "Which of the following is a conjunction?", + "solution": "A", + "variant": "text" + } + ] + }, + "blank_space": { + "questions": [ + { + "id": "9", + "options": [ + { + "id": "A", + "text": "And" + }, + { + "id": "B", + "text": "Cat" + }, + { + "id": "C", + "text": "Happy" + }, + { + "id": "D", + "text": "Jump" + } + ], + "prompt": "Which of the following is a conjunction?", + "solution": "A", + "variant": "text" + } + ] + }, + "underline": { + "questions": [ + { + "id": "9", + "options": [ + { + "id": "A", + "text": "a" + }, + { + "id": "B", + "text": "b" + }, + { + "id": "C", + "text": "c" + }, + { + "id": "D", + "text": "d" + } + ], + "prompt": "prompt", + "solution": "A", + "variant": "text" + } + ] + }, + "blank_space_text": { + 
"question": { + "words": [ + { + "id": "1", + "text": "a" + }, + { + "id": "2", + "text": "b" + }, + { + "id": "3", + "text": "c" + }, + { + "id": "4", + "text": "d" + } + ], + "text": "text" + } + }, + "text_mc_utas": { + "questions": [ + { + "id": "9", + "options": [ + { + "id": "A", + "text": "a" + }, + { + "id": "B", + "text": "b" + }, + { + "id": "C", + "text": "c" + }, + { + "id": "D", + "text": "d" + } + ], + "prompt": "prompt", + "solution": "A", + "variant": "text" + } + ] + } } \ No newline at end of file diff --git a/app/services/impl/level/upload.py b/app/services/impl/exam/level/upload.py similarity index 97% rename from app/services/impl/level/upload.py rename to app/services/impl/exam/level/upload.py index ee2d326..fd720ce 100644 --- a/app/services/impl/level/upload.py +++ b/app/services/impl/exam/level/upload.py @@ -1,404 +1,404 @@ -import aiofiles -import os -import uuid -from logging import getLogger - -from typing import Dict, Any, Tuple, Coroutine - -import pdfplumber -from fastapi import UploadFile - -from app.services.abc import ILLMService -from app.helpers import LoggerHelper, FileHelper -from app.mappers import ExamMapper - -from app.dtos.exam import Exam -from app.dtos.sheet import Sheet - - -class UploadLevelModule: - def __init__(self, openai: ILLMService): - self._logger = getLogger(__name__) - self._llm = openai - - # TODO: create a doc in firestore with a status and get its id, run this in a thread and modify the doc in - # firestore, return the id right away, in generation view poll for the id - async def generate_level_from_file(self, file: UploadFile) -> Dict[str, Any] | None: - ext, path_id = await self._save_upload(file) - FileHelper.convert_file_to_pdf( - f'./tmp/{path_id}/uploaded.{ext}', f'./tmp/{path_id}/exercises.pdf' - ) - file_has_images = self._check_pdf_for_images(f'./tmp/{path_id}/exercises.pdf') - - if not file_has_images: - FileHelper.convert_file_to_html(f'./tmp/{path_id}/uploaded.{ext}', 
f'./tmp/{path_id}/exercises.html') - - completion: Coroutine[Any, Any, Exam] = ( - self._png_completion(path_id) if file_has_images else self._html_completion(path_id) - ) - response = await completion - - FileHelper.remove_directory(f'./tmp/{path_id}') - - if response: - return self.fix_ids(response.dict(exclude_none=True)) - return None - - @staticmethod - @LoggerHelper.suppress_loggers() - def _check_pdf_for_images(pdf_path: str) -> bool: - with pdfplumber.open(pdf_path) as pdf: - for page in pdf.pages: - if page.images: - return True - return False - - @staticmethod - async def _save_upload(file: UploadFile) -> Tuple[str, str]: - ext = file.filename.split('.')[-1] - path_id = str(uuid.uuid4()) - os.makedirs(f'./tmp/{path_id}', exist_ok=True) - - tmp_filename = f'./tmp/{path_id}/uploaded.{ext}' - file_bytes: bytes = await file.read() - - async with aiofiles.open(tmp_filename, 'wb') as file: - await file.write(file_bytes) - - return ext, path_id - - def _level_json_schema(self): - return { - "parts": [ - { - "context": "", - "exercises": [ - self._multiple_choice_html(), - self._passage_blank_space_html() - ] - } - ] - } - - async def _html_completion(self, path_id: str) -> Exam: - async with aiofiles.open(f'./tmp/{path_id}/exercises.html', 'r', encoding='utf-8') as f: - html = await f.read() - - return await self._llm.pydantic_prediction( - [self._gpt_instructions_html(), - { - "role": "user", - "content": html - } - ], - ExamMapper.map_to_exam_model, - str(self._level_json_schema()) - ) - - def _gpt_instructions_html(self): - return { - "role": "system", - "content": ( - 'You are GPT Scraper and your job is to clean dirty html into clean usable JSON formatted data.' 
- 'Your current task is to scrape html english questions sheets.\n\n' - - 'In the question sheet you will only see 4 types of question:\n' - '- blank space multiple choice\n' - '- underline multiple choice\n' - '- reading passage blank space multiple choice\n' - '- reading passage multiple choice\n\n' - - 'For the first two types of questions the template is the same but the question prompts differ, ' - 'whilst in the blank space multiple choice you must include in the prompt the blank spaces with ' - 'multiple "_", in the underline you must include in the prompt the to ' - 'indicate the underline and the options a, b, c, d must be the ordered underlines in the prompt.\n\n' - - 'For the reading passage exercise you must handle the formatting of the passages. If it is a ' - 'reading passage with blank spaces you will see blanks represented with (question id) followed by a ' - 'line and your job is to replace the brackets with the question id and line with "{{question id}}" ' - 'with 2 newlines between paragraphs. For the reading passages without blanks you must remove ' - 'any numbers that may be there to specify paragraph numbers or line numbers, and place 2 newlines ' - 'between paragraphs.\n\n' - - 'IMPORTANT: Note that for the reading passages, the html might not reflect the actual paragraph ' - 'structure, don\'t format the reading passages paragraphs only by the

tags, try to figure ' - 'out the best paragraph separation possible.' - - 'You will place all the information in a single JSON: ' - '{"parts": [{"exercises": [{...}], "context": ""}]}\n ' - 'Where {...} are the exercises templates for each part of a question sheet and the optional field ' - 'context.' - - 'IMPORTANT: The question sheet may be divided by sections but you need to only consider the parts, ' - 'so that you can group the exercises by the parts that are in the html, this is crucial since only ' - 'reading passage multiple choice require context and if the context is included in parts where it ' - 'is not required the UI will be messed up. Some make sure to correctly group the exercises by parts.\n' - - 'The templates for the exercises are the following:\n' - '- blank space multiple choice, underline multiple choice and reading passage multiple choice: ' - f'{self._multiple_choice_html()}\n' - f'- reading passage blank space multiple choice: {self._passage_blank_space_html()}\n' - - 'IMPORTANT: For the reading passage multiple choice the context field must be set with the reading ' - 'passages without paragraphs or line numbers, with 2 newlines between paragraphs, for the other ' - 'exercises exclude the context field.' 
- ) - } - - @staticmethod - def _multiple_choice_html(): - return { - "type": "multipleChoice", - "prompt": "Select the appropriate option.", - "questions": [ - { - "id": "", - "prompt": "", - "solution": "", - "options": [ - { - "id": "A", - "text": "" - }, - { - "id": "B", - "text": "" - }, - { - "id": "C", - "text": "" - }, - { - "id": "D", - "text": "" - } - ] - } - ] - } - - @staticmethod - def _passage_blank_space_html(): - return { - "type": "fillBlanks", - "variant": "mc", - "prompt": "Click a blank to select the appropriate word for it.", - "text": ( - "}} with 2 newlines between paragraphs>" - ), - "solutions": [ - { - "id": "", - "solution": "" - } - ], - "words": [ - { - "id": "", - "options": { - "A": "", - "B": "", - "C": "", - "D": "" - } - } - ] - } - - async def _png_completion(self, path_id: str) -> Exam: - FileHelper.pdf_to_png(path_id) - - tmp_files = os.listdir(f'./tmp/{path_id}') - pages = [f for f in tmp_files if f.startswith('page-') and f.endswith('.png')] - pages.sort(key=lambda f: int(f.split('-')[1].split('.')[0])) - - json_schema = { - "components": [ - {"type": "part", "part": ""}, - self._multiple_choice_png(), - {"type": "blanksPassage", "text": ( - "}} with 2 newlines between paragraphs>" - )}, - {"type": "passage", "context": ( - "" - )}, - self._passage_blank_space_png() - ] - } - - components = [] - - for i in range(len(pages)): - current_page = pages[i] - next_page = pages[i + 1] if i + 1 < len(pages) else None - batch = [current_page, next_page] if next_page else [current_page] - - sheet = await self._png_batch(path_id, batch, json_schema) - sheet.batch = i + 1 - components.append(sheet.dict()) - - batches = {"batches": components} - - return await self._batches_to_exam_completion(batches) - - async def _png_batch(self, path_id: str, files: list[str], json_schema) -> Sheet: - return await self._llm.pydantic_prediction( - [self._gpt_instructions_png(), - { - "role": "user", - "content": [ - *FileHelper.b64_pngs(path_id, files) - 
] - } - ], - ExamMapper.map_to_sheet, - str(json_schema) - ) - - def _gpt_instructions_png(self): - return { - "role": "system", - "content": ( - 'You are GPT OCR and your job is to scan image text data and format it to JSON format.' - 'Your current task is to scan english questions sheets.\n\n' - - 'You will place all the information in a single JSON: {"components": [{...}]} where {...} is a set of ' - 'sheet components you will retrieve from the images, the components and their corresponding JSON ' - 'templates are as follows:\n' - - '- Part, a standalone part or part of a section of the question sheet: ' - '{"type": "part", "part": ""}\n' - - '- Multiple Choice Question, there are three types of multiple choice questions that differ on ' - 'the prompt field of the template: blanks, underlines and normal. ' - - 'In the blanks prompt you must leave 5 underscores to represent the blank space. ' - 'In the underlines questions the objective is to pick the words that are incorrect in the given ' - 'sentence, for these questions you must wrap the answer to the question with the html tag , ' - 'choose 3 other words to wrap in , place them in the prompt field and use the underlined words ' - 'in the order they appear in the question for the options A to D, disreguard options that might be ' - 'included underneath the underlines question and use the ones you wrapped in .' - 'In normal you just leave the question as is. ' - - f'The template for multiple choice questions is the following: {self._multiple_choice_png()}.\n' - - '- Reading Passages, there are two types of reading passages. Reading passages where you will see ' - 'blanks represented by a (question id) followed by a line, you must format these types of reading ' - 'passages to be only the text with the brackets that have the question id and line replaced with ' - '"{{question id}}", also place 2 newlines between paragraphs. 
For the reading passages without blanks ' - 'you must remove any numbers that may be there to specify paragraph numbers or line numbers, ' - 'and place 2 newlines between paragraphs. ' - - 'For the reading passages with blanks the template is: {"type": "blanksPassage", ' - '"text": "}} also place 2 newlines between paragraphs>"}. ' - - 'For the reading passage without blanks is: {"type": "passage", "context": ""}\n' - - '- Blanks Options, options for a blanks reading passage exercise, this type of component is a group of ' - 'options with the question id and the options from a to d. The template is: ' - f'{self._passage_blank_space_png()}\n' - - 'IMPORTANT: You must place the components in the order that they were given to you. If an exercise or ' - 'reading passages are cut off don\'t include them in the JSON.' - ) - } - - def _multiple_choice_png(self): - multiple_choice = self._multiple_choice_html()["questions"][0] - multiple_choice["type"] = "multipleChoice" - multiple_choice.pop("solution") - return multiple_choice - - def _passage_blank_space_png(self): - passage_blank_space = self._passage_blank_space_html()["words"][0] - passage_blank_space["type"] = "fillBlanks" - return passage_blank_space - - async def _batches_to_exam_completion(self, batches: Dict[str, Any]) -> Exam: - return await self._llm.pydantic_prediction( - [self._gpt_instructions_html(), - { - "role": "user", - "content": str(batches) - } - ], - ExamMapper.map_to_exam_model, - str(self._level_json_schema()) - ) - - def _gpt_instructions_batches(self): - return { - "role": "system", - "content": ( - 'You are helpfull assistant. Your task is to merge multiple batches of english question sheet ' - 'components and solve the questions. Each batch may contain overlapping content with the previous ' - 'batch, or close enough content which needs to be excluded. 
The components are as follows:' - - '- Part, a standalone part or part of a section of the question sheet: ' - '{"type": "part", "part": ""}\n' - - '- Multiple Choice Question, there are three types of multiple choice questions that differ on ' - 'the prompt field of the template: blanks, underlines and normal. ' - - 'In a blanks question, the prompt has underscores to represent the blank space, you must select the ' - 'appropriate option to solve it.' - - 'In a underlines question, the prompt has 4 underlines represented by the html tags , you must ' - 'select the option that makes the prompt incorrect to solve it. If the options order doesn\'t reflect ' - 'the order in which the underlines appear in the prompt you will need to fix it.' - - 'In a normal question there isn\'t either blanks or underlines in the prompt, you should just ' - 'select the appropriate solution.' - - f'The template for these questions is the same: {self._multiple_choice_png()}\n' - - '- Reading Passages, there are two types of reading passages with different templates. The one with ' - 'type "blanksPassage" where the text field holds the passage and a blank is represented by ' - '{{}} and the other one with type "passage" that has the context field with just ' - 'reading passages. For both of these components you will have to remove any additional data that might ' - 'be related to a question description and also remove some "()" and "_" from blanksPassage' - ' if there are any. These components are used in conjunction with other ones.' - - '- Blanks Options, options for a blanks reading passage exercise, this type of component is a group of ' - 'options with the question id and the options from a to d. The template is: ' - f'{self._passage_blank_space_png()}\n\n' - - 'Now that you know the possible components here\'s what I want you to do:\n' - '1. Remove duplicates. 
A batch will have duplicates of other batches and the components of ' - 'the next batch should always take precedence over the previous one batch, what I mean by this is that ' - 'if batch 1 has, for example, multiple choice question with id 10 and the next one also has id 10, ' - 'you pick the next one.\n' - '2. Solve the exercises. There are 4 types of exercises, the 3 multipleChoice variants + a fill blanks ' - 'exercise. For the multiple choice question follow the previous instruction to solve them and place ' - f'them in this format: {self._multiple_choice_html()}. For the fill blanks exercises you need to match ' - 'the correct blanksPassage to the correct fillBlanks options and then pick the correct option. Here is ' - f'the template for this exercise: {self._passage_blank_space_html()}.\n' - f'3. Restructure the JSON to match this template: {self._level_json_schema()}. ' - f'You must group the exercises by the parts in the order they appear in the batches components. ' - f'The context field of a part is the context of a passage component that has text relevant to normal ' - f'multiple choice questions.\n' - - 'Do your utmost to fullfill the requisites, make sure you include all non-duplicate questions' - 'in your response and correctly structure the JSON.' 
- ) - } - - @staticmethod - def fix_ids(response): - counter = 1 - for part in response["parts"]: - for exercise in part["exercises"]: - if exercise["type"] == "multipleChoice": - for question in exercise["questions"]: - question["id"] = counter - counter += 1 - if exercise["type"] == "fillBlanks": - for i in range(len(exercise["words"])): - exercise["words"][i]["id"] = counter - exercise["solutions"][i]["id"] = counter - counter += 1 - return response +import aiofiles +import os +import uuid +from logging import getLogger + +from typing import Dict, Any, Tuple, Coroutine + +import pdfplumber +from fastapi import UploadFile + +from app.services.abc import ILLMService +from app.helpers import LoggerHelper, FileHelper +from app.mappers import ExamMapper + +from app.dtos.exam import Exam +from app.dtos.sheet import Sheet + + +class UploadLevelModule: + def __init__(self, openai: ILLMService): + self._logger = getLogger(__name__) + self._llm = openai + + # TODO: create a doc in firestore with a status and get its id, run this in a thread and modify the doc in + # firestore, return the id right away, in generation view poll for the id + async def generate_level_from_file(self, file: UploadFile) -> Dict[str, Any] | None: + ext, path_id = await self._save_upload(file) + FileHelper.convert_file_to_pdf( + f'./tmp/{path_id}/uploaded.{ext}', f'./tmp/{path_id}/exercises.pdf' + ) + file_has_images = self._check_pdf_for_images(f'./tmp/{path_id}/exercises.pdf') + + if not file_has_images: + FileHelper.convert_file_to_html(f'./tmp/{path_id}/uploaded.{ext}', f'./tmp/{path_id}/exercises.html') + + completion: Coroutine[Any, Any, Exam] = ( + self._png_completion(path_id) if file_has_images else self._html_completion(path_id) + ) + response = await completion + + FileHelper.remove_directory(f'./tmp/{path_id}') + + if response: + return self.fix_ids(response.dict(exclude_none=True)) + return None + + @staticmethod + @LoggerHelper.suppress_loggers() + def _check_pdf_for_images(pdf_path: 
str) -> bool: + with pdfplumber.open(pdf_path) as pdf: + for page in pdf.pages: + if page.images: + return True + return False + + @staticmethod + async def _save_upload(file: UploadFile) -> Tuple[str, str]: + ext = file.filename.split('.')[-1] + path_id = str(uuid.uuid4()) + os.makedirs(f'./tmp/{path_id}', exist_ok=True) + + tmp_filename = f'./tmp/{path_id}/uploaded.{ext}' + file_bytes: bytes = await file.read() + + async with aiofiles.open(tmp_filename, 'wb') as file: + await file.write(file_bytes) + + return ext, path_id + + def _level_json_schema(self): + return { + "parts": [ + { + "context": "", + "exercises": [ + self._multiple_choice_html(), + self._passage_blank_space_html() + ] + } + ] + } + + async def _html_completion(self, path_id: str) -> Exam: + async with aiofiles.open(f'./tmp/{path_id}/exercises.html', 'r', encoding='utf-8') as f: + html = await f.read() + + return await self._llm.pydantic_prediction( + [self._gpt_instructions_html(), + { + "role": "user", + "content": html + } + ], + ExamMapper.map_to_exam_model, + str(self._level_json_schema()) + ) + + def _gpt_instructions_html(self): + return { + "role": "system", + "content": ( + 'You are GPT Scraper and your job is to clean dirty html into clean usable JSON formatted data.' 
+ 'Your current task is to scrape html english questions sheets.\n\n' + + 'In the question sheet you will only see 4 types of question:\n' + '- blank space multiple choice\n' + '- underline multiple choice\n' + '- reading passage blank space multiple choice\n' + '- reading passage multiple choice\n\n' + + 'For the first two types of questions the template is the same but the question prompts differ, ' + 'whilst in the blank space multiple choice you must include in the prompt the blank spaces with ' + 'multiple "_", in the underline you must include in the prompt the to ' + 'indicate the underline and the options a, b, c, d must be the ordered underlines in the prompt.\n\n' + + 'For the reading passage exercise you must handle the formatting of the passages. If it is a ' + 'reading passage with blank spaces you will see blanks represented with (question id) followed by a ' + 'line and your job is to replace the brackets with the question id and line with "{{question id}}" ' + 'with 2 newlines between paragraphs. For the reading passages without blanks you must remove ' + 'any numbers that may be there to specify paragraph numbers or line numbers, and place 2 newlines ' + 'between paragraphs.\n\n' + + 'IMPORTANT: Note that for the reading passages, the html might not reflect the actual paragraph ' + 'structure, don\'t format the reading passages paragraphs only by the

tags, try to figure ' + 'out the best paragraph separation possible.' + + 'You will place all the information in a single JSON: ' + '{"parts": [{"exercises": [{...}], "context": ""}]}\n ' + 'Where {...} are the exercises templates for each part of a question sheet and the optional field ' + 'context.' + + 'IMPORTANT: The question sheet may be divided by sections but you need to only consider the parts, ' + 'so that you can group the exercises by the parts that are in the html, this is crucial since only ' + 'reading passage multiple choice require context and if the context is included in parts where it ' + 'is not required the UI will be messed up. Some make sure to correctly group the exercises by parts.\n' + + 'The templates for the exercises are the following:\n' + '- blank space multiple choice, underline multiple choice and reading passage multiple choice: ' + f'{self._multiple_choice_html()}\n' + f'- reading passage blank space multiple choice: {self._passage_blank_space_html()}\n' + + 'IMPORTANT: For the reading passage multiple choice the context field must be set with the reading ' + 'passages without paragraphs or line numbers, with 2 newlines between paragraphs, for the other ' + 'exercises exclude the context field.' + ) + } + + @staticmethod + def _multiple_choice_html(): + return { + "type": "multipleChoice", + "prompt": "Select the appropriate option.", + "questions": [ + { + "id": "", + "prompt": "", + "solution": "", + "options": [ + { + "id": "A", + "text": "" + }, + { + "id": "B", + "text": "" + }, + { + "id": "C", + "text": "" + }, + { + "id": "D", + "text": "" + } + ] + } + ] + } + + @staticmethod + def _passage_blank_space_html(): + return { + "type": "fillBlanks", + "variant": "mc", + "prompt": "Click a blank to select the appropriate word for it.", + "text": ( + "}} with 2 newlines between paragraphs>" + ), + "solutions": [ + { + "id": "", + "solution": "" + } + ], + "words": [ + { + "id": "", + "options": { + "A": "
", + "B": "", + "C": "", + "D": "" + } + } + ] + } + + async def _png_completion(self, path_id: str) -> Exam: + FileHelper.pdf_to_png(path_id) + + tmp_files = os.listdir(f'./tmp/{path_id}') + pages = [f for f in tmp_files if f.startswith('page-') and f.endswith('.png')] + pages.sort(key=lambda f: int(f.split('-')[1].split('.')[0])) + + json_schema = { + "components": [ + {"type": "part", "part": ""}, + self._multiple_choice_png(), + {"type": "blanksPassage", "text": ( + "}} with 2 newlines between paragraphs>" + )}, + {"type": "passage", "context": ( + "" + )}, + self._passage_blank_space_png() + ] + } + + components = [] + + for i in range(len(pages)): + current_page = pages[i] + next_page = pages[i + 1] if i + 1 < len(pages) else None + batch = [current_page, next_page] if next_page else [current_page] + + sheet = await self._png_batch(path_id, batch, json_schema) + sheet.batch = i + 1 + components.append(sheet.dict()) + + batches = {"batches": components} + + return await self._batches_to_exam_completion(batches) + + async def _png_batch(self, path_id: str, files: list[str], json_schema) -> Sheet: + return await self._llm.pydantic_prediction( + [self._gpt_instructions_png(), + { + "role": "user", + "content": [ + *FileHelper.b64_pngs(path_id, files) + ] + } + ], + ExamMapper.map_to_sheet, + str(json_schema) + ) + + def _gpt_instructions_png(self): + return { + "role": "system", + "content": ( + 'You are GPT OCR and your job is to scan image text data and format it to JSON format.' 
+ 'Your current task is to scan english questions sheets.\n\n' + + 'You will place all the information in a single JSON: {"components": [{...}]} where {...} is a set of ' + 'sheet components you will retrieve from the images, the components and their corresponding JSON ' + 'templates are as follows:\n' + + '- Part, a standalone part or part of a section of the question sheet: ' + '{"type": "part", "part": ""}\n' + + '- Multiple Choice Question, there are three types of multiple choice questions that differ on ' + 'the prompt field of the template: blanks, underlines and normal. ' + + 'In the blanks prompt you must leave 5 underscores to represent the blank space. ' + 'In the underlines questions the objective is to pick the words that are incorrect in the given ' + 'sentence, for these questions you must wrap the answer to the question with the html tag , ' + 'choose 3 other words to wrap in , place them in the prompt field and use the underlined words ' + 'in the order they appear in the question for the options A to D, disreguard options that might be ' + 'included underneath the underlines question and use the ones you wrapped in .' + 'In normal you just leave the question as is. ' + + f'The template for multiple choice questions is the following: {self._multiple_choice_png()}.\n' + + '- Reading Passages, there are two types of reading passages. Reading passages where you will see ' + 'blanks represented by a (question id) followed by a line, you must format these types of reading ' + 'passages to be only the text with the brackets that have the question id and line replaced with ' + '"{{question id}}", also place 2 newlines between paragraphs. For the reading passages without blanks ' + 'you must remove any numbers that may be there to specify paragraph numbers or line numbers, ' + 'and place 2 newlines between paragraphs. 
' + + 'For the reading passages with blanks the template is: {"type": "blanksPassage", ' + '"text": "}} also place 2 newlines between paragraphs>"}. ' + + 'For the reading passage without blanks is: {"type": "passage", "context": ""}\n' + + '- Blanks Options, options for a blanks reading passage exercise, this type of component is a group of ' + 'options with the question id and the options from a to d. The template is: ' + f'{self._passage_blank_space_png()}\n' + + 'IMPORTANT: You must place the components in the order that they were given to you. If an exercise or ' + 'reading passages are cut off don\'t include them in the JSON.' + ) + } + + def _multiple_choice_png(self): + multiple_choice = self._multiple_choice_html()["questions"][0] + multiple_choice["type"] = "multipleChoice" + multiple_choice.pop("solution") + return multiple_choice + + def _passage_blank_space_png(self): + passage_blank_space = self._passage_blank_space_html()["words"][0] + passage_blank_space["type"] = "fillBlanks" + return passage_blank_space + + async def _batches_to_exam_completion(self, batches: Dict[str, Any]) -> Exam: + return await self._llm.pydantic_prediction( + [self._gpt_instructions_html(), + { + "role": "user", + "content": str(batches) + } + ], + ExamMapper.map_to_exam_model, + str(self._level_json_schema()) + ) + + def _gpt_instructions_batches(self): + return { + "role": "system", + "content": ( + 'You are helpfull assistant. Your task is to merge multiple batches of english question sheet ' + 'components and solve the questions. Each batch may contain overlapping content with the previous ' + 'batch, or close enough content which needs to be excluded. The components are as follows:' + + '- Part, a standalone part or part of a section of the question sheet: ' + '{"type": "part", "part": ""}\n' + + '- Multiple Choice Question, there are three types of multiple choice questions that differ on ' + 'the prompt field of the template: blanks, underlines and normal. 
' + + 'In a blanks question, the prompt has underscores to represent the blank space, you must select the ' + 'appropriate option to solve it.' + + 'In a underlines question, the prompt has 4 underlines represented by the html tags , you must ' + 'select the option that makes the prompt incorrect to solve it. If the options order doesn\'t reflect ' + 'the order in which the underlines appear in the prompt you will need to fix it.' + + 'In a normal question there isn\'t either blanks or underlines in the prompt, you should just ' + 'select the appropriate solution.' + + f'The template for these questions is the same: {self._multiple_choice_png()}\n' + + '- Reading Passages, there are two types of reading passages with different templates. The one with ' + 'type "blanksPassage" where the text field holds the passage and a blank is represented by ' + '{{}} and the other one with type "passage" that has the context field with just ' + 'reading passages. For both of these components you will have to remove any additional data that might ' + 'be related to a question description and also remove some "()" and "_" from blanksPassage' + ' if there are any. These components are used in conjunction with other ones.' + + '- Blanks Options, options for a blanks reading passage exercise, this type of component is a group of ' + 'options with the question id and the options from a to d. The template is: ' + f'{self._passage_blank_space_png()}\n\n' + + 'Now that you know the possible components here\'s what I want you to do:\n' + '1. Remove duplicates. A batch will have duplicates of other batches and the components of ' + 'the next batch should always take precedence over the previous one batch, what I mean by this is that ' + 'if batch 1 has, for example, multiple choice question with id 10 and the next one also has id 10, ' + 'you pick the next one.\n' + '2. Solve the exercises. There are 4 types of exercises, the 3 multipleChoice variants + a fill blanks ' + 'exercise. 
For the multiple choice question follow the previous instruction to solve them and place ' + f'them in this format: {self._multiple_choice_html()}. For the fill blanks exercises you need to match ' + 'the correct blanksPassage to the correct fillBlanks options and then pick the correct option. Here is ' + f'the template for this exercise: {self._passage_blank_space_html()}.\n' + f'3. Restructure the JSON to match this template: {self._level_json_schema()}. ' + f'You must group the exercises by the parts in the order they appear in the batches components. ' + f'The context field of a part is the context of a passage component that has text relevant to normal ' + f'multiple choice questions.\n' + + 'Do your utmost to fullfill the requisites, make sure you include all non-duplicate questions' + 'in your response and correctly structure the JSON.' + ) + } + + @staticmethod + def fix_ids(response): + counter = 1 + for part in response["parts"]: + for exercise in part["exercises"]: + if exercise["type"] == "multipleChoice": + for question in exercise["questions"]: + question["id"] = counter + counter += 1 + if exercise["type"] == "fillBlanks": + for i in range(len(exercise["words"])): + exercise["words"][i]["id"] = counter + exercise["solutions"][i]["id"] = counter + counter += 1 + return response diff --git a/app/services/impl/listening.py b/app/services/impl/exam/listening.py similarity index 97% rename from app/services/impl/listening.py rename to app/services/impl/exam/listening.py index 14c7a07..9fdbfd2 100644 --- a/app/services/impl/listening.py +++ b/app/services/impl/exam/listening.py @@ -1,492 +1,492 @@ -import queue -import uuid -from logging import getLogger -from queue import Queue -import random -from typing import Dict, List - -from app.repositories.abc import IFileStorage, IDocumentStore -from app.services.abc import IListeningService, ILLMService, ITextToSpeechService -from app.configs.question_templates import getListeningTemplate, 
getListeningPartTemplate -from app.configs.constants import ( - NeuralVoices, GPTModels, TemperatureSettings, FilePaths, MinTimers, ExamVariant, EducationalContent, - FieldsAndExercises -) -from app.helpers import ExercisesHelper, FileHelper - - -class ListeningService(IListeningService): - - CONVERSATION_TAIL = ( - "Please include random names and genders for the characters in your dialogue. " - "Make sure that the generated conversation does not contain forbidden subjects in muslim countries." - ) - - MONOLOGUE_TAIL = ( - "Make sure that the generated monologue does not contain forbidden subjects in muslim countries." - ) - - def __init__( - self, llm: ILLMService, - tts: ITextToSpeechService, - file_storage: IFileStorage, - document_store: IDocumentStore - ): - self._llm = llm - self._tts = tts - self._file_storage = file_storage - self._document_store = document_store - self._logger = getLogger(__name__) - self._sections = { - "section_1": { - "topic": EducationalContent.TWO_PEOPLE_SCENARIOS, - "exercise_types": FieldsAndExercises.LISTENING_1_EXERCISE_TYPES, - "exercise_sample_size": 1, - "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_1_EXERCISES, - "start_id": 1, - "generate_dialogue": self._generate_listening_conversation, - "type": "conversation", - }, - "section_2": { - "topic": EducationalContent.SOCIAL_MONOLOGUE_CONTEXTS, - "exercise_types": FieldsAndExercises.LISTENING_2_EXERCISE_TYPES, - "exercise_sample_size": 2, - "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_2_EXERCISES, - "start_id": 11, - "generate_dialogue": self._generate_listening_monologue, - "type": "monologue", - }, - "section_3": { - "topic": EducationalContent.FOUR_PEOPLE_SCENARIOS, - "exercise_types": FieldsAndExercises.LISTENING_3_EXERCISE_TYPES, - "exercise_sample_size": 1, - "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_3_EXERCISES, - "start_id": 21, - "generate_dialogue": self._generate_listening_conversation, - "type": "conversation", - 
}, - "section_4": { - "topic": EducationalContent.ACADEMIC_SUBJECTS, - "exercise_types": FieldsAndExercises.LISTENING_EXERCISE_TYPES, - "exercise_sample_size": 2, - "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_4_EXERCISES, - "start_id": 31, - "generate_dialogue": self._generate_listening_monologue, - "type": "monologue" - } - } - - async def get_listening_question( - self, section_id: int, topic: str, req_exercises: List[str], difficulty: str, - number_of_exercises_q=queue.Queue(), start_id=-1 - ): - FileHelper.delete_files_older_than_one_day(FilePaths.AUDIO_FILES_PATH) - section = self._sections[f"section_{section_id}"] - if not topic: - topic = random.choice(section["topic"]) - - if len(req_exercises) == 0: - req_exercises = random.sample(section["exercise_types"], section["exercise_sample_size"]) - - if number_of_exercises_q.empty(): - number_of_exercises_q = ExercisesHelper.divide_number_into_parts( - section["total_exercises"], len(req_exercises) - ) - - if start_id == -1: - start_id = section["start_id"] - - dialog = await self.generate_listening_question(section_id, topic) - - if section_id in {1, 3}: - dialog = self.parse_conversation(dialog) - - self._logger.info(f'Generated {section["type"]}: {dialog}') - - exercises = await self.generate_listening_exercises( - section_id, str(dialog), req_exercises, number_of_exercises_q, start_id, difficulty - ) - - return { - "exercises": exercises, - "text": dialog, - "difficulty": difficulty - } - - async def generate_listening_question(self, section: int, topic: str): - return await self._sections[f'section_{section}']["generate_dialogue"](section, topic) - - async def generate_listening_exercises( - self, section: int, dialog: str, - req_exercises: list[str], number_of_exercises_q: Queue, - start_id: int, difficulty: str - ): - dialog_type = self._sections[f'section_{section}']["type"] - - exercises = [] - - for req_exercise in req_exercises: - number_of_exercises = number_of_exercises_q.get() - - 
if req_exercise == "multipleChoice" or req_exercise == "multipleChoice3Options": - n_options = 4 if "multipleChoice" else 3 - question = await self._gen_multiple_choice_exercise_listening( - dialog_type, dialog, number_of_exercises, start_id, difficulty, n_options - ) - - exercises.append(question) - print("Added multiple choice: " + str(question)) - elif req_exercise == "writeBlanksQuestions": - question = await self._gen_write_blanks_questions_exercise_listening( - dialog_type, dialog, number_of_exercises, start_id, difficulty - ) - - exercises.append(question) - print("Added write blanks questions: " + str(question)) - elif req_exercise == "writeBlanksFill": - question = await self._gen_write_blanks_notes_exercise_listening( - dialog_type, dialog, number_of_exercises, start_id, difficulty - ) - - exercises.append(question) - print("Added write blanks notes: " + str(question)) - elif req_exercise == "writeBlanksForm": - question = await self._gen_write_blanks_form_exercise_listening( - dialog_type, dialog, number_of_exercises, start_id, difficulty - ) - - exercises.append(question) - print("Added write blanks form: " + str(question)) - - start_id = start_id + number_of_exercises - - return exercises - - async def save_listening(self, parts: list[dict], min_timer: int, difficulty: str, listening_id: str): - template = getListeningTemplate() - template['difficulty'] = difficulty - for i, part in enumerate(parts, start=0): - part_template = getListeningPartTemplate() - - file_name = str(uuid.uuid4()) + ".mp3" - sound_file_path = FilePaths.AUDIO_FILES_PATH + file_name - firebase_file_path = FilePaths.FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name - if "conversation" in part["text"]: - await self._tts.text_to_speech(part["text"]["conversation"], sound_file_path) - else: - await self._tts.text_to_speech(part["text"], sound_file_path) - file_url = await self._file_storage.upload_file_firebase_get_url(firebase_file_path, sound_file_path) - - 
part_template["audio"]["source"] = file_url - part_template["exercises"] = part["exercises"] - - template['parts'].append(part_template) - - if min_timer != MinTimers.LISTENING_MIN_TIMER_DEFAULT: - template["minTimer"] = min_timer - template["variant"] = ExamVariant.PARTIAL.value - else: - template["variant"] = ExamVariant.FULL.value - - listening_id = await self._document_store.save_to_db_with_id("listening", template, listening_id) - if listening_id: - return {**template, "id": listening_id} - else: - raise Exception("Failed to save question: " + str(parts)) - - # ================================================================================================================== - # generate_listening_question helpers - # ================================================================================================================== - - async def _generate_listening_conversation(self, section: int, topic: str) -> Dict: - head = ( - 'Compose an authentic conversation between two individuals in the everyday social context of "' - if section == 1 else - 'Compose an authentic and elaborate conversation between up to four individuals in the everyday ' - 'social context of "' - ) - - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"conversation": [{"name": "name", "gender": "gender", "text": "text"}]}') - }, - { - "role": "user", - "content": ( - f'{head}{topic}". {self.CONVERSATION_TAIL}' - ) - } - ] - - if section == 1: - messages.extend([ - { - "role": "user", - "content": 'Try to have misleading discourse (refer multiple dates, multiple colors and etc).' 
- - }, - { - "role": "user", - "content": 'Try to have spelling of names (cities, people, etc)' - - } - ]) - - response = await self._llm.prediction( - GPTModels.GPT_4_O, - messages, - ["conversation"], - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - return self._get_conversation_voices(response, True) - - async def _generate_listening_monologue(self, section: int, topic: str) -> Dict: - head = ( - 'Generate a comprehensive monologue set in the social context of' - if section == 2 else - 'Generate a comprehensive and complex monologue on the academic subject of' - ) - - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"monologue": "monologue"}') - }, - { - "role": "user", - "content": ( - f'{head}: "{topic}". {self.MONOLOGUE_TAIL}' - ) - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_4_O, - messages, - ["monologue"], - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - return response["monologue"] - - def _get_conversation_voices(self, response: Dict, unique_voices_across_segments: bool): - chosen_voices = [] - name_to_voice = {} - for segment in response['conversation']: - if 'voice' not in segment: - name = segment['name'] - if name in name_to_voice: - voice = name_to_voice[name] - else: - voice = None - # section 1 - if unique_voices_across_segments: - while voice is None: - chosen_voice = self._get_random_voice(segment['gender']) - if chosen_voice not in chosen_voices: - voice = chosen_voice - chosen_voices.append(voice) - # section 3 - else: - voice = self._get_random_voice(segment['gender']) - name_to_voice[name] = voice - segment['voice'] = voice - return response - - @staticmethod - def _get_random_voice(gender: str): - if gender.lower() == 'male': - available_voices = NeuralVoices.MALE_NEURAL_VOICES - else: - available_voices = NeuralVoices.FEMALE_NEURAL_VOICES - - return random.choice(available_voices)['Id'] - - # 
================================================================================================================== - # generate_listening_exercises helpers - # ================================================================================================================== - - async def _gen_multiple_choice_exercise_listening( - self, dialog_type: str, text: str, quantity: int, start_id: int, difficulty: str, n_options: int = 4 - ): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"questions": [{"id": "9", "options": [{"id": "A", "text": "Economic benefits"}, {"id": "B", "text": ' - '"Government regulations"}, {"id": "C", "text": "Concerns about climate change"}, {"id": "D", "text": ' - '"Technological advancement"}], "prompt": "What is the main reason for the shift towards renewable ' - 'energy sources?", "solution": "C", "variant": "text"}]}') - }, - { - "role": "user", - "content": ( - f'Generate {quantity} {difficulty} difficulty multiple choice questions of {n_options} ' - f'options for this {dialog_type}:\n"' + text + '"') - - } - ] - - questions = await self._llm.prediction( - GPTModels.GPT_4_O, - messages, - ["questions"], - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - return { - "id": str(uuid.uuid4()), - "prompt": "Select the appropriate option.", - "questions": ExercisesHelper.fix_exercise_ids(questions, start_id)["questions"], - "type": "multipleChoice", - } - - async def _gen_write_blanks_questions_exercise_listening( - self, dialog_type: str, text: str, quantity: int, start_id: int, difficulty: str - ): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"questions": [{"question": question, "possible_answers": ["answer_1", "answer_2"]}]}') - }, - { - "role": "user", - "content": ( - f'Generate {quantity} {difficulty} difficulty short answer questions, and the ' - f'possible answers (max 
3 words per answer), about this {dialog_type}:\n"{text}"') - } - ] - - questions = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - questions = questions["questions"][:quantity] - - return { - "id": str(uuid.uuid4()), - "maxWords": 3, - "prompt": f"You will hear a {dialog_type}. Answer the questions below using no more than three words or a number accordingly.", - "solutions": ExercisesHelper.build_write_blanks_solutions(questions, start_id), - "text": ExercisesHelper.build_write_blanks_text(questions, start_id), - "type": "writeBlanks" - } - - async def _gen_write_blanks_notes_exercise_listening( - self, dialog_type: str, text: str, quantity: int, start_id: int, difficulty: str - ): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"notes": ["note_1", "note_2"]}') - }, - { - "role": "user", - "content": ( - f'Generate {quantity} {difficulty} difficulty notes taken from this ' - f'{dialog_type}:\n"{text}"' - ) - - } - ] - - questions = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["notes"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - questions = questions["notes"][:quantity] - - formatted_phrases = "\n".join([f"{i + 1}. 
{phrase}" for i, phrase in enumerate(questions)]) - - word_messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this ' - 'format: {"words": ["word_1", "word_2"] }' - ) - }, - { - "role": "user", - "content": ('Select 1 word from each phrase in this list:\n"' + formatted_phrases + '"') - - } - ] - words = await self._llm.prediction( - GPTModels.GPT_4_O, word_messages, ["words"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - words = words["words"][:quantity] - - replaced_notes = ExercisesHelper.replace_first_occurrences_with_placeholders_notes(questions, words, start_id) - return { - "id": str(uuid.uuid4()), - "maxWords": 3, - "prompt": "Fill the blank space with the word missing from the audio.", - "solutions": ExercisesHelper.build_write_blanks_solutions_listening(words, start_id), - "text": "\\n".join(replaced_notes), - "type": "writeBlanks" - } - - async def _gen_write_blanks_form_exercise_listening( - self, dialog_type: str, text: str, quantity: int, start_id: int, difficulty: str - ): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"form": ["key: value", "key2: value"]}') - }, - { - "role": "user", - "content": ( - f'Generate a form with {quantity} {difficulty} difficulty key-value pairs ' - f'about this {dialog_type}:\n"{text}"' - ) - } - ] - - if dialog_type == "conversation": - messages.append({ - "role": "user", - "content": ( - 'It must be a form and not questions. 
' - 'Example: {"form": ["Color of car": "blue", "Brand of car": "toyota"]}' - ) - }) - - parsed_form = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["form"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - parsed_form = parsed_form["form"][:quantity] - - replaced_form, words = ExercisesHelper.build_write_blanks_text_form(parsed_form, start_id) - return { - "id": str(uuid.uuid4()), - "maxWords": 3, - "prompt": f"You will hear a {dialog_type}. Fill the form with words/numbers missing.", - "solutions": ExercisesHelper.build_write_blanks_solutions_listening(words, start_id), - "text": replaced_form, - "type": "writeBlanks" - } - - @staticmethod - def parse_conversation(conversation_data): - conversation_list = conversation_data.get('conversation', []) - readable_text = [] - - for message in conversation_list: - name = message.get('name', 'Unknown') - text = message.get('text', '') - readable_text.append(f"{name}: {text}") - +import queue +import uuid +from logging import getLogger +from queue import Queue +import random +from typing import Dict, List + +from app.repositories.abc import IFileStorage, IDocumentStore +from app.services.abc import IListeningService, ILLMService, ITextToSpeechService +from app.configs.question_templates import getListeningTemplate, getListeningPartTemplate +from app.configs.constants import ( + NeuralVoices, GPTModels, TemperatureSettings, FilePaths, MinTimers, ExamVariant, EducationalContent, + FieldsAndExercises +) +from app.helpers import ExercisesHelper, FileHelper + + +class ListeningService(IListeningService): + + CONVERSATION_TAIL = ( + "Please include random names and genders for the characters in your dialogue. " + "Make sure that the generated conversation does not contain forbidden subjects in muslim countries." + ) + + MONOLOGUE_TAIL = ( + "Make sure that the generated monologue does not contain forbidden subjects in muslim countries." 
+ ) + + def __init__( + self, llm: ILLMService, + tts: ITextToSpeechService, + file_storage: IFileStorage, + document_store: IDocumentStore + ): + self._llm = llm + self._tts = tts + self._file_storage = file_storage + self._document_store = document_store + self._logger = getLogger(__name__) + self._sections = { + "section_1": { + "topic": EducationalContent.TWO_PEOPLE_SCENARIOS, + "exercise_types": FieldsAndExercises.LISTENING_1_EXERCISE_TYPES, + "exercise_sample_size": 1, + "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_1_EXERCISES, + "start_id": 1, + "generate_dialogue": self._generate_listening_conversation, + "type": "conversation", + }, + "section_2": { + "topic": EducationalContent.SOCIAL_MONOLOGUE_CONTEXTS, + "exercise_types": FieldsAndExercises.LISTENING_2_EXERCISE_TYPES, + "exercise_sample_size": 2, + "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_2_EXERCISES, + "start_id": 11, + "generate_dialogue": self._generate_listening_monologue, + "type": "monologue", + }, + "section_3": { + "topic": EducationalContent.FOUR_PEOPLE_SCENARIOS, + "exercise_types": FieldsAndExercises.LISTENING_3_EXERCISE_TYPES, + "exercise_sample_size": 1, + "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_3_EXERCISES, + "start_id": 21, + "generate_dialogue": self._generate_listening_conversation, + "type": "conversation", + }, + "section_4": { + "topic": EducationalContent.ACADEMIC_SUBJECTS, + "exercise_types": FieldsAndExercises.LISTENING_EXERCISE_TYPES, + "exercise_sample_size": 2, + "total_exercises": FieldsAndExercises.TOTAL_LISTENING_SECTION_4_EXERCISES, + "start_id": 31, + "generate_dialogue": self._generate_listening_monologue, + "type": "monologue" + } + } + + async def get_listening_question( + self, section_id: int, topic: str, req_exercises: List[str], difficulty: str, + number_of_exercises_q=queue.Queue(), start_id=-1 + ): + FileHelper.delete_files_older_than_one_day(FilePaths.AUDIO_FILES_PATH) + section = 
self._sections[f"section_{section_id}"] + if not topic: + topic = random.choice(section["topic"]) + + if len(req_exercises) == 0: + req_exercises = random.sample(section["exercise_types"], section["exercise_sample_size"]) + + if number_of_exercises_q.empty(): + number_of_exercises_q = ExercisesHelper.divide_number_into_parts( + section["total_exercises"], len(req_exercises) + ) + + if start_id == -1: + start_id = section["start_id"] + + dialog = await self.generate_listening_question(section_id, topic) + + if section_id in {1, 3}: + dialog = self.parse_conversation(dialog) + + self._logger.info(f'Generated {section["type"]}: {dialog}') + + exercises = await self.generate_listening_exercises( + section_id, str(dialog), req_exercises, number_of_exercises_q, start_id, difficulty + ) + + return { + "exercises": exercises, + "text": dialog, + "difficulty": difficulty + } + + async def generate_listening_question(self, section: int, topic: str): + return await self._sections[f'section_{section}']["generate_dialogue"](section, topic) + + async def generate_listening_exercises( + self, section: int, dialog: str, + req_exercises: list[str], number_of_exercises_q: Queue, + start_id: int, difficulty: str + ): + dialog_type = self._sections[f'section_{section}']["type"] + + exercises = [] + + for req_exercise in req_exercises: + number_of_exercises = number_of_exercises_q.get() + + if req_exercise == "multipleChoice" or req_exercise == "multipleChoice3Options": + n_options = 4 if "multipleChoice" else 3 + question = await self._gen_multiple_choice_exercise_listening( + dialog_type, dialog, number_of_exercises, start_id, difficulty, n_options + ) + + exercises.append(question) + print("Added multiple choice: " + str(question)) + elif req_exercise == "writeBlanksQuestions": + question = await self._gen_write_blanks_questions_exercise_listening( + dialog_type, dialog, number_of_exercises, start_id, difficulty + ) + + exercises.append(question) + print("Added write blanks 
questions: " + str(question)) + elif req_exercise == "writeBlanksFill": + question = await self._gen_write_blanks_notes_exercise_listening( + dialog_type, dialog, number_of_exercises, start_id, difficulty + ) + + exercises.append(question) + print("Added write blanks notes: " + str(question)) + elif req_exercise == "writeBlanksForm": + question = await self._gen_write_blanks_form_exercise_listening( + dialog_type, dialog, number_of_exercises, start_id, difficulty + ) + + exercises.append(question) + print("Added write blanks form: " + str(question)) + + start_id = start_id + number_of_exercises + + return exercises + + async def save_listening(self, parts: list[dict], min_timer: int, difficulty: str, listening_id: str): + template = getListeningTemplate() + template['difficulty'] = difficulty + for i, part in enumerate(parts, start=0): + part_template = getListeningPartTemplate() + + file_name = str(uuid.uuid4()) + ".mp3" + sound_file_path = FilePaths.AUDIO_FILES_PATH + file_name + firebase_file_path = FilePaths.FIREBASE_LISTENING_AUDIO_FILES_PATH + file_name + if "conversation" in part["text"]: + await self._tts.text_to_speech(part["text"]["conversation"], sound_file_path) + else: + await self._tts.text_to_speech(part["text"], sound_file_path) + file_url = await self._file_storage.upload_file_firebase_get_url(firebase_file_path, sound_file_path) + + part_template["audio"]["source"] = file_url + part_template["exercises"] = part["exercises"] + + template['parts'].append(part_template) + + if min_timer != MinTimers.LISTENING_MIN_TIMER_DEFAULT: + template["minTimer"] = min_timer + template["variant"] = ExamVariant.PARTIAL.value + else: + template["variant"] = ExamVariant.FULL.value + + listening_id = await self._document_store.save_to_db_with_id("listening", template, listening_id) + if listening_id: + return {**template, "id": listening_id} + else: + raise Exception("Failed to save question: " + str(parts)) + + # 
================================================================================================================== + # generate_listening_question helpers + # ================================================================================================================== + + async def _generate_listening_conversation(self, section: int, topic: str) -> Dict: + head = ( + 'Compose an authentic conversation between two individuals in the everyday social context of "' + if section == 1 else + 'Compose an authentic and elaborate conversation between up to four individuals in the everyday ' + 'social context of "' + ) + + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"conversation": [{"name": "name", "gender": "gender", "text": "text"}]}') + }, + { + "role": "user", + "content": ( + f'{head}{topic}". {self.CONVERSATION_TAIL}' + ) + } + ] + + if section == 1: + messages.extend([ + { + "role": "user", + "content": 'Try to have misleading discourse (refer multiple dates, multiple colors and etc).' + + }, + { + "role": "user", + "content": 'Try to have spelling of names (cities, people, etc)' + + } + ]) + + response = await self._llm.prediction( + GPTModels.GPT_4_O, + messages, + ["conversation"], + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + return self._get_conversation_voices(response, True) + + async def _generate_listening_monologue(self, section: int, topic: str) -> Dict: + head = ( + 'Generate a comprehensive monologue set in the social context of' + if section == 2 else + 'Generate a comprehensive and complex monologue on the academic subject of' + ) + + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"monologue": "monologue"}') + }, + { + "role": "user", + "content": ( + f'{head}: "{topic}". 
{self.MONOLOGUE_TAIL}' + ) + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_4_O, + messages, + ["monologue"], + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + return response["monologue"] + + def _get_conversation_voices(self, response: Dict, unique_voices_across_segments: bool): + chosen_voices = [] + name_to_voice = {} + for segment in response['conversation']: + if 'voice' not in segment: + name = segment['name'] + if name in name_to_voice: + voice = name_to_voice[name] + else: + voice = None + # section 1 + if unique_voices_across_segments: + while voice is None: + chosen_voice = self._get_random_voice(segment['gender']) + if chosen_voice not in chosen_voices: + voice = chosen_voice + chosen_voices.append(voice) + # section 3 + else: + voice = self._get_random_voice(segment['gender']) + name_to_voice[name] = voice + segment['voice'] = voice + return response + + @staticmethod + def _get_random_voice(gender: str): + if gender.lower() == 'male': + available_voices = NeuralVoices.MALE_NEURAL_VOICES + else: + available_voices = NeuralVoices.FEMALE_NEURAL_VOICES + + return random.choice(available_voices)['Id'] + + # ================================================================================================================== + # generate_listening_exercises helpers + # ================================================================================================================== + + async def _gen_multiple_choice_exercise_listening( + self, dialog_type: str, text: str, quantity: int, start_id: int, difficulty: str, n_options: int = 4 + ): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"questions": [{"id": "9", "options": [{"id": "A", "text": "Economic benefits"}, {"id": "B", "text": ' + '"Government regulations"}, {"id": "C", "text": "Concerns about climate change"}, {"id": "D", "text": ' + '"Technological advancement"}], "prompt": "What is the main 
reason for the shift towards renewable ' + 'energy sources?", "solution": "C", "variant": "text"}]}') + }, + { + "role": "user", + "content": ( + f'Generate {quantity} {difficulty} difficulty multiple choice questions of {n_options} ' + f'options for this {dialog_type}:\n"' + text + '"') + + } + ] + + questions = await self._llm.prediction( + GPTModels.GPT_4_O, + messages, + ["questions"], + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + return { + "id": str(uuid.uuid4()), + "prompt": "Select the appropriate option.", + "questions": ExercisesHelper.fix_exercise_ids(questions, start_id)["questions"], + "type": "multipleChoice", + } + + async def _gen_write_blanks_questions_exercise_listening( + self, dialog_type: str, text: str, quantity: int, start_id: int, difficulty: str + ): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"questions": [{"question": question, "possible_answers": ["answer_1", "answer_2"]}]}') + }, + { + "role": "user", + "content": ( + f'Generate {quantity} {difficulty} difficulty short answer questions, and the ' + f'possible answers (max 3 words per answer), about this {dialog_type}:\n"{text}"') + } + ] + + questions = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + questions = questions["questions"][:quantity] + + return { + "id": str(uuid.uuid4()), + "maxWords": 3, + "prompt": f"You will hear a {dialog_type}. 
Answer the questions below using no more than three words or a number accordingly.", + "solutions": ExercisesHelper.build_write_blanks_solutions(questions, start_id), + "text": ExercisesHelper.build_write_blanks_text(questions, start_id), + "type": "writeBlanks" + } + + async def _gen_write_blanks_notes_exercise_listening( + self, dialog_type: str, text: str, quantity: int, start_id: int, difficulty: str + ): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"notes": ["note_1", "note_2"]}') + }, + { + "role": "user", + "content": ( + f'Generate {quantity} {difficulty} difficulty notes taken from this ' + f'{dialog_type}:\n"{text}"' + ) + + } + ] + + questions = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["notes"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + questions = questions["notes"][:quantity] + + formatted_phrases = "\n".join([f"{i + 1}. {phrase}" for i, phrase in enumerate(questions)]) + + word_messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this ' + 'format: {"words": ["word_1", "word_2"] }' + ) + }, + { + "role": "user", + "content": ('Select 1 word from each phrase in this list:\n"' + formatted_phrases + '"') + + } + ] + words = await self._llm.prediction( + GPTModels.GPT_4_O, word_messages, ["words"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + words = words["words"][:quantity] + + replaced_notes = ExercisesHelper.replace_first_occurrences_with_placeholders_notes(questions, words, start_id) + return { + "id": str(uuid.uuid4()), + "maxWords": 3, + "prompt": "Fill the blank space with the word missing from the audio.", + "solutions": ExercisesHelper.build_write_blanks_solutions_listening(words, start_id), + "text": "\\n".join(replaced_notes), + "type": "writeBlanks" + } + + async def _gen_write_blanks_form_exercise_listening( + self, dialog_type: str, text: str, quantity: int, 
start_id: int, difficulty: str + ): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"form": ["key: value", "key2: value"]}') + }, + { + "role": "user", + "content": ( + f'Generate a form with {quantity} {difficulty} difficulty key-value pairs ' + f'about this {dialog_type}:\n"{text}"' + ) + } + ] + + if dialog_type == "conversation": + messages.append({ + "role": "user", + "content": ( + 'It must be a form and not questions. ' + 'Example: {"form": ["Color of car": "blue", "Brand of car": "toyota"]}' + ) + }) + + parsed_form = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["form"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + parsed_form = parsed_form["form"][:quantity] + + replaced_form, words = ExercisesHelper.build_write_blanks_text_form(parsed_form, start_id) + return { + "id": str(uuid.uuid4()), + "maxWords": 3, + "prompt": f"You will hear a {dialog_type}. Fill the form with words/numbers missing.", + "solutions": ExercisesHelper.build_write_blanks_solutions_listening(words, start_id), + "text": replaced_form, + "type": "writeBlanks" + } + + @staticmethod + def parse_conversation(conversation_data): + conversation_list = conversation_data.get('conversation', []) + readable_text = [] + + for message in conversation_list: + name = message.get('name', 'Unknown') + text = message.get('text', '') + readable_text.append(f"{name}: {text}") + return "\n".join(readable_text) \ No newline at end of file diff --git a/app/services/impl/reading.py b/app/services/impl/exam/reading.py similarity index 97% rename from app/services/impl/reading.py rename to app/services/impl/exam/reading.py index 50b136d..62d38a9 100644 --- a/app/services/impl/reading.py +++ b/app/services/impl/exam/reading.py @@ -1,349 +1,349 @@ -import random -import uuid -from queue import Queue -from typing import List - -from app.services.abc import IReadingService, ILLMService -from 
app.configs.constants import QuestionType, TemperatureSettings, FieldsAndExercises, GPTModels -from app.helpers import ExercisesHelper - - -class ReadingService(IReadingService): - - def __init__(self, llm: ILLMService): - self._llm = llm - - async def gen_reading_passage( - self, - part: int, - topic: str, - req_exercises: List[str], - number_of_exercises_q: Queue, - difficulty: str, - start_id: int - ): - passage = await self.generate_reading_passage(part, topic) - exercises = await self._generate_reading_exercises( - passage["text"], req_exercises, number_of_exercises_q, start_id, difficulty - ) - - if ExercisesHelper.contains_empty_dict(exercises): - return await self.gen_reading_passage( - part, topic, req_exercises, number_of_exercises_q, difficulty, start_id - ) - - return { - "exercises": exercises, - "text": { - "content": passage["text"], - "title": passage["title"] - }, - "difficulty": difficulty - } - - async def generate_reading_passage(self, part: int, topic: str, word_count: int = 800): - part_system_message = { - "1": 'The generated text should be fairly easy to understand and have multiple paragraphs.', - "2": 'The generated text should be fairly hard to understand and have multiple paragraphs.', - "3": ( - 'The generated text should be very hard to understand and include different points, theories, ' - 'subtle differences of opinions from people, correctly sourced to the person who said it, ' - 'over the specified topic and have multiple paragraphs.' - ) - } - - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"title": "title of the text", "text": "generated text"}') - }, - { - "role": "user", - "content": ( - f'Generate an extensive text for IELTS Reading Passage {part}, of at least {word_count} words, ' - f'on the topic of "{topic}". The passage should offer a substantial amount of ' - 'information, analysis, or narrative relevant to the chosen subject matter. 
This text ' - 'passage aims to serve as the primary reading section of an IELTS test, providing an ' - 'in-depth and comprehensive exploration of the topic. Make sure that the generated text ' - 'does not contain forbidden subjects in muslim countries.' - ) - }, - { - "role": "system", - "content": part_system_message[str(part)] - } - ] - - if part == 3: - messages.append({ - "role": "user", - "content": "Use real text excerpts on you generated passage and cite the sources." - }) - - return await self._llm.prediction( - GPTModels.GPT_4_O, - messages, - FieldsAndExercises.GEN_TEXT_FIELDS, - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - async def _generate_reading_exercises( - self, passage: str, req_exercises: list, number_of_exercises_q, start_id, difficulty - ): - exercises = [] - for req_exercise in req_exercises: - number_of_exercises = number_of_exercises_q.get() - - if req_exercise == "fillBlanks": - question = await self._gen_summary_fill_blanks_exercise( - passage, number_of_exercises, start_id, difficulty - ) - exercises.append(question) - print("Added fill blanks: " + str(question)) - elif req_exercise == "trueFalse": - question = await self._gen_true_false_not_given_exercise( - passage, number_of_exercises, start_id, difficulty - ) - exercises.append(question) - print("Added trueFalse: " + str(question)) - elif req_exercise == "writeBlanks": - question = await self._gen_write_blanks_exercise(passage, number_of_exercises, start_id, difficulty) - if ExercisesHelper.answer_word_limit_ok(question): - exercises.append(question) - print("Added write blanks: " + str(question)) - else: - exercises.append({}) - print("Did not add write blanks because it did not respect word limit") - elif req_exercise == "paragraphMatch": - question = await self._gen_paragraph_match_exercise(passage, number_of_exercises, start_id) - exercises.append(question) - print("Added paragraph match: " + str(question)) - elif req_exercise == "ideaMatch": - question = await 
self._gen_idea_match_exercise(passage, number_of_exercises, start_id) - exercises.append(question) - print("Added idea match: " + str(question)) - - start_id = start_id + number_of_exercises - - return exercises - - async def _gen_summary_fill_blanks_exercise( - self, text: str, quantity: int, start_id, difficulty, num_random_words: int = 1 - ): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: { "summary": "summary" }' - ) - }, - { - "role": "user", - "content": f'Summarize this text: "{text}"' - - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["summary"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"words": ["word_1", "word_2"] }' - ) - }, - { - "role": "user", - "content": ( - f'Select {quantity} {difficulty} difficulty words, it must be words and not expressions, ' - f'from this:\n{response["summary"]}' - ) - } - ] - - words_response = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["words"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - response["words"] = words_response["words"] - replaced_summary = ExercisesHelper.replace_first_occurrences_with_placeholders( - response["summary"], response["words"], start_id - ) - options_words = ExercisesHelper.add_random_words_and_shuffle(response["words"], num_random_words) - solutions = ExercisesHelper.fillblanks_build_solutions_array(response["words"], start_id) - - return { - "allowRepetition": True, - "id": str(uuid.uuid4()), - "prompt": ( - "Complete the summary below. Write the letter of the corresponding word(s) for it.\\nThere are " - "more words than spaces so you will not use them all. You may use any of the words more than once." 
- ), - "solutions": solutions, - "text": replaced_summary, - "type": "fillBlanks", - "words": options_words - } - - async def _gen_true_false_not_given_exercise(self, text: str, quantity: int, start_id, difficulty): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"prompts":[{"prompt": "statement_1", "solution": "true/false/not_given"}, ' - '{"prompt": "statement_2", "solution": "true/false/not_given"}]}') - }, - { - "role": "user", - "content": ( - f'Generate {str(quantity)} {difficulty} difficulty statements based on the provided text. ' - 'Ensure that your statements accurately represent information or inferences from the text, and ' - 'provide a variety of responses, including, at least one of each True, False, and Not Given, ' - f'as appropriate.\n\nReference text:\n\n {text}' - ) - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["prompts"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - questions = response["prompts"] - - if len(questions) > quantity: - questions = ExercisesHelper.remove_excess_questions(questions, len(questions) - quantity) - - for i, question in enumerate(questions, start=start_id): - question["id"] = str(i) - - return { - "id": str(uuid.uuid4()), - "prompt": "Do the following statements agree with the information given in the Reading Passage?", - "questions": questions, - "type": "trueFalse" - } - - async def _gen_write_blanks_exercise(self, text: str, quantity: int, start_id, difficulty): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"questions": [{"question": question, "possible_answers": ["answer_1", "answer_2"]}]}' - ) - }, - { - "role": "user", - "content": ( - f'Generate {str(quantity)} {difficulty} difficulty short answer questions, and the ' - f'possible answers, must have maximum 3 words per answer, about this text:\n"{text}"' - ) 
- - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - questions = response["questions"][:quantity] - - return { - "id": str(uuid.uuid4()), - "maxWords": 3, - "prompt": "Choose no more than three words and/or a number from the passage for each answer.", - "solutions": ExercisesHelper.build_write_blanks_solutions(questions, start_id), - "text": ExercisesHelper.build_write_blanks_text(questions, start_id), - "type": "writeBlanks" - } - - async def _gen_paragraph_match_exercise(self, text: str, quantity: int, start_id): - paragraphs = ExercisesHelper.assign_letters_to_paragraphs(text) - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"headings": [ {"heading": "first paragraph heading"}, {"heading": "second paragraph heading"}]}' - ) - }, - { - "role": "user", - "content": ( - 'For every paragraph of the list generate a minimum 5 word heading for it. 
' - f'The paragraphs are these: {str(paragraphs)}' - ) - - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["headings"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - headings = response["headings"] - - options = [] - for i, paragraph in enumerate(paragraphs, start=0): - paragraph["heading"] = headings[i]["heading"] - options.append({ - "id": paragraph["letter"], - "sentence": paragraph["paragraph"] - }) - - random.shuffle(paragraphs) - sentences = [] - for i, paragraph in enumerate(paragraphs, start=start_id): - sentences.append({ - "id": i, - "sentence": paragraph["heading"], - "solution": paragraph["letter"] - }) - - return { - "id": str(uuid.uuid4()), - "allowRepetition": False, - "options": options, - "prompt": "Choose the correct heading for paragraphs from the list of headings below.", - "sentences": sentences[:quantity], - "type": "matchSentences" - } - - async def _gen_idea_match_exercise(self, text: str, quantity: int, start_id): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"ideas": [ ' - '{"idea": "some idea or opinion", "from": "person, institution whose idea or opinion this is"}, ' - '{"idea": "some other idea or opinion", "from": "person, institution whose idea or opinion this is"}' - ']}' - ) - }, - { - "role": "user", - "content": ( - f'From the text extract {quantity} ideas, theories, opinions and who they are from. 
' - f'The text: {text}' - ) - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["ideas"], TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - ideas = response["ideas"] - - return { - "id": str(uuid.uuid4()), - "allowRepetition": False, - "options": ExercisesHelper.build_options(ideas), - "prompt": "Choose the correct author for the ideas/opinions from the list of authors below.", - "sentences": ExercisesHelper.build_sentences(ideas, start_id), - "type": "matchSentences" - } +import random +import uuid +from queue import Queue +from typing import List + +from app.services.abc import IReadingService, ILLMService +from app.configs.constants import QuestionType, TemperatureSettings, FieldsAndExercises, GPTModels +from app.helpers import ExercisesHelper + + +class ReadingService(IReadingService): + + def __init__(self, llm: ILLMService): + self._llm = llm + + async def gen_reading_passage( + self, + part: int, + topic: str, + req_exercises: List[str], + number_of_exercises_q: Queue, + difficulty: str, + start_id: int + ): + passage = await self.generate_reading_passage(part, topic) + exercises = await self._generate_reading_exercises( + passage["text"], req_exercises, number_of_exercises_q, start_id, difficulty + ) + + if ExercisesHelper.contains_empty_dict(exercises): + return await self.gen_reading_passage( + part, topic, req_exercises, number_of_exercises_q, difficulty, start_id + ) + + return { + "exercises": exercises, + "text": { + "content": passage["text"], + "title": passage["title"] + }, + "difficulty": difficulty + } + + async def generate_reading_passage(self, part: int, topic: str, word_count: int = 800): + part_system_message = { + "1": 'The generated text should be fairly easy to understand and have multiple paragraphs.', + "2": 'The generated text should be fairly hard to understand and have multiple paragraphs.', + "3": ( + 'The generated text should be very hard to understand and include different points, theories, ' + 
'subtle differences of opinions from people, correctly sourced to the person who said it, ' + 'over the specified topic and have multiple paragraphs.' + ) + } + + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"title": "title of the text", "text": "generated text"}') + }, + { + "role": "user", + "content": ( + f'Generate an extensive text for IELTS Reading Passage {part}, of at least {word_count} words, ' + f'on the topic of "{topic}". The passage should offer a substantial amount of ' + 'information, analysis, or narrative relevant to the chosen subject matter. This text ' + 'passage aims to serve as the primary reading section of an IELTS test, providing an ' + 'in-depth and comprehensive exploration of the topic. Make sure that the generated text ' + 'does not contain forbidden subjects in muslim countries.' + ) + }, + { + "role": "system", + "content": part_system_message[str(part)] + } + ] + + if part == 3: + messages.append({ + "role": "user", + "content": "Use real text excerpts on you generated passage and cite the sources." 
+ }) + + return await self._llm.prediction( + GPTModels.GPT_4_O, + messages, + FieldsAndExercises.GEN_TEXT_FIELDS, + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + async def _generate_reading_exercises( + self, passage: str, req_exercises: list, number_of_exercises_q, start_id, difficulty + ): + exercises = [] + for req_exercise in req_exercises: + number_of_exercises = number_of_exercises_q.get() + + if req_exercise == "fillBlanks": + question = await self._gen_summary_fill_blanks_exercise( + passage, number_of_exercises, start_id, difficulty + ) + exercises.append(question) + print("Added fill blanks: " + str(question)) + elif req_exercise == "trueFalse": + question = await self._gen_true_false_not_given_exercise( + passage, number_of_exercises, start_id, difficulty + ) + exercises.append(question) + print("Added trueFalse: " + str(question)) + elif req_exercise == "writeBlanks": + question = await self._gen_write_blanks_exercise(passage, number_of_exercises, start_id, difficulty) + if ExercisesHelper.answer_word_limit_ok(question): + exercises.append(question) + print("Added write blanks: " + str(question)) + else: + exercises.append({}) + print("Did not add write blanks because it did not respect word limit") + elif req_exercise == "paragraphMatch": + question = await self._gen_paragraph_match_exercise(passage, number_of_exercises, start_id) + exercises.append(question) + print("Added paragraph match: " + str(question)) + elif req_exercise == "ideaMatch": + question = await self._gen_idea_match_exercise(passage, number_of_exercises, start_id) + exercises.append(question) + print("Added idea match: " + str(question)) + + start_id = start_id + number_of_exercises + + return exercises + + async def _gen_summary_fill_blanks_exercise( + self, text: str, quantity: int, start_id, difficulty, num_random_words: int = 1 + ): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: { "summary": 
"summary" }' + ) + }, + { + "role": "user", + "content": f'Summarize this text: "{text}"' + + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["summary"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"words": ["word_1", "word_2"] }' + ) + }, + { + "role": "user", + "content": ( + f'Select {quantity} {difficulty} difficulty words, it must be words and not expressions, ' + f'from this:\n{response["summary"]}' + ) + } + ] + + words_response = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["words"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + response["words"] = words_response["words"] + replaced_summary = ExercisesHelper.replace_first_occurrences_with_placeholders( + response["summary"], response["words"], start_id + ) + options_words = ExercisesHelper.add_random_words_and_shuffle(response["words"], num_random_words) + solutions = ExercisesHelper.fillblanks_build_solutions_array(response["words"], start_id) + + return { + "allowRepetition": True, + "id": str(uuid.uuid4()), + "prompt": ( + "Complete the summary below. Write the letter of the corresponding word(s) for it.\\nThere are " + "more words than spaces so you will not use them all. You may use any of the words more than once." 
+ ), + "solutions": solutions, + "text": replaced_summary, + "type": "fillBlanks", + "words": options_words + } + + async def _gen_true_false_not_given_exercise(self, text: str, quantity: int, start_id, difficulty): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"prompts":[{"prompt": "statement_1", "solution": "true/false/not_given"}, ' + '{"prompt": "statement_2", "solution": "true/false/not_given"}]}') + }, + { + "role": "user", + "content": ( + f'Generate {str(quantity)} {difficulty} difficulty statements based on the provided text. ' + 'Ensure that your statements accurately represent information or inferences from the text, and ' + 'provide a variety of responses, including, at least one of each True, False, and Not Given, ' + f'as appropriate.\n\nReference text:\n\n {text}' + ) + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["prompts"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + questions = response["prompts"] + + if len(questions) > quantity: + questions = ExercisesHelper.remove_excess_questions(questions, len(questions) - quantity) + + for i, question in enumerate(questions, start=start_id): + question["id"] = str(i) + + return { + "id": str(uuid.uuid4()), + "prompt": "Do the following statements agree with the information given in the Reading Passage?", + "questions": questions, + "type": "trueFalse" + } + + async def _gen_write_blanks_exercise(self, text: str, quantity: int, start_id, difficulty): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"questions": [{"question": question, "possible_answers": ["answer_1", "answer_2"]}]}' + ) + }, + { + "role": "user", + "content": ( + f'Generate {str(quantity)} {difficulty} difficulty short answer questions, and the ' + f'possible answers, must have maximum 3 words per answer, about this text:\n"{text}"' + ) 
+ + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["questions"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + questions = response["questions"][:quantity] + + return { + "id": str(uuid.uuid4()), + "maxWords": 3, + "prompt": "Choose no more than three words and/or a number from the passage for each answer.", + "solutions": ExercisesHelper.build_write_blanks_solutions(questions, start_id), + "text": ExercisesHelper.build_write_blanks_text(questions, start_id), + "type": "writeBlanks" + } + + async def _gen_paragraph_match_exercise(self, text: str, quantity: int, start_id): + paragraphs = ExercisesHelper.assign_letters_to_paragraphs(text) + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"headings": [ {"heading": "first paragraph heading"}, {"heading": "second paragraph heading"}]}' + ) + }, + { + "role": "user", + "content": ( + 'For every paragraph of the list generate a minimum 5 word heading for it. 
' + f'The paragraphs are these: {str(paragraphs)}' + ) + + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["headings"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + headings = response["headings"] + + options = [] + for i, paragraph in enumerate(paragraphs, start=0): + paragraph["heading"] = headings[i]["heading"] + options.append({ + "id": paragraph["letter"], + "sentence": paragraph["paragraph"] + }) + + random.shuffle(paragraphs) + sentences = [] + for i, paragraph in enumerate(paragraphs, start=start_id): + sentences.append({ + "id": i, + "sentence": paragraph["heading"], + "solution": paragraph["letter"] + }) + + return { + "id": str(uuid.uuid4()), + "allowRepetition": False, + "options": options, + "prompt": "Choose the correct heading for paragraphs from the list of headings below.", + "sentences": sentences[:quantity], + "type": "matchSentences" + } + + async def _gen_idea_match_exercise(self, text: str, quantity: int, start_id): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"ideas": [ ' + '{"idea": "some idea or opinion", "from": "person, institution whose idea or opinion this is"}, ' + '{"idea": "some other idea or opinion", "from": "person, institution whose idea or opinion this is"}' + ']}' + ) + }, + { + "role": "user", + "content": ( + f'From the text extract {quantity} ideas, theories, opinions and who they are from. 
' + f'The text: {text}' + ) + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["ideas"], TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + ideas = response["ideas"] + + return { + "id": str(uuid.uuid4()), + "allowRepetition": False, + "options": ExercisesHelper.build_options(ideas), + "prompt": "Choose the correct author for the ideas/opinions from the list of authors below.", + "sentences": ExercisesHelper.build_sentences(ideas, start_id), + "type": "matchSentences" + } diff --git a/app/services/impl/speaking.py b/app/services/impl/exam/speaking.py similarity index 97% rename from app/services/impl/speaking.py rename to app/services/impl/exam/speaking.py index 86cfe1c..b0364b0 100644 --- a/app/services/impl/speaking.py +++ b/app/services/impl/exam/speaking.py @@ -1,633 +1,635 @@ -import logging -import os -import re -import uuid -import random -from typing import Dict, List, Optional - -from app.repositories.abc import IFileStorage, IDocumentStore -from app.services.abc import ISpeakingService, ILLMService, IVideoGeneratorService, ISpeechToTextService -from app.configs.constants import ( - FieldsAndExercises, GPTModels, TemperatureSettings, - AvatarEnum, FilePaths -) -from app.helpers import TextHelper - - -class SpeakingService(ISpeakingService): - - def __init__( - self, llm: ILLMService, vid_gen: IVideoGeneratorService, - file_storage: IFileStorage, document_store: IDocumentStore, - stt: ISpeechToTextService - ): - self._llm = llm - self._vid_gen = vid_gen - self._file_storage = file_storage - self._document_store = document_store - self._stt = stt - self._logger = logging.getLogger(__name__) - - # TODO: Is the difficulty in the prompts supposed to be hardcoded? 
The response is set with - # either the difficulty in the request or a random one yet the prompt doesn't change - self._tasks = { - "task_1": { - "get": { - "json_template": { - "first_topic": "topic 1", - "second_topic": "topic 2", - "questions": [ - ( - "Introductory question about the first topic, starting the topic with " - "'Let's talk about x' and then the question." - ), - "Follow up question about the first topic", - "Follow up question about the first topic", - "Question about second topic", - "Follow up question about the second topic", - ] - }, - "prompt": ( - 'Craft 5 simple and single questions of easy difficulty for IELTS Speaking Part 1 ' - 'that encourages candidates to delve deeply into personal experiences, preferences, or ' - 'insights on the topic of "{first_topic}" and the topic of "{second_topic}". ' - 'Make sure that the generated question does not contain forbidden subjects in ' - 'muslim countries.' - ) - } - }, - "task_2": { - "get": { - "json_template": { - "topic": "topic", - "question": "question", - "prompts": [ - "prompt_1", - "prompt_2", - "prompt_3" - ], - "suffix": "And explain why..." - }, - "prompt": ( - 'Create a question of medium difficulty for IELTS Speaking Part 2 ' - 'that encourages candidates to narrate a personal experience or story related to the topic ' - 'of "{topic}". Include 3 prompts that guide the candidate to describe ' - 'specific aspects of the experience, such as details about the situation, ' - 'their actions, and the reasons it left a lasting impression. Make sure that the ' - 'generated question does not contain forbidden subjects in muslim countries.' 
- ) - } - }, - "task_3": { - "get": { - "json_template": { - "topic": "topic", - "questions": [ - "Introductory question about the topic.", - "Follow up question about the topic", - "Follow up question about the topic", - "Follow up question about the topic", - "Follow up question about the topic" - ] - }, - "prompt": ( - 'Formulate a set of 5 single questions of hard difficulty for IELTS Speaking Part 3' - 'that encourage candidates to engage in a meaningful discussion on the topic of "{topic}". ' - 'Provide inquiries, ensuring they explore various aspects, perspectives, and implications ' - 'related to the topic. Make sure that the generated question does not contain forbidden ' - 'subjects in muslim countries.' - ) - } - }, - } - - async def get_speaking_part( - self, part: int, topic: str, difficulty: str, second_topic: Optional[str] = None - ) -> Dict: - task_values = self._tasks[f'task_{part}']['get'] - - if part == 1: - task_prompt = task_values["prompt"].format(first_topic=topic, second_topic=second_topic) - else: - task_prompt = task_values["prompt"].format(topic=topic) - - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - f'{task_values["json_template"]}' - ) - }, - { - "role": "user", - "content": task_prompt - } - ] - - part_specific = { - "1": 'The questions should lead to the usage of 4 verb tenses (present perfect, present, past and future).', - "2": ( - 'The prompts must not be questions. Also include a suffix like the ones in the IELTS exams ' - 'that start with "And explain why".' - ) - } - - if part in {1, 2}: - messages.append({ - "role": "user", - "content": part_specific[str(part)] - }) - - if part in {1, 3}: - messages.append({ - "role": "user", - "content": 'They must be 1 single question each and not be double-barreled questions.' 
- }) - - fields_to_check = ["first_topic"] if part == 1 else FieldsAndExercises.GEN_FIELDS - - response = await self._llm.prediction( - GPTModels.GPT_4_O, messages, fields_to_check, TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - if part == 3: - # Remove the numbers from the questions only if the string starts with a number - response["questions"] = [ - re.sub(r"^\d+\.\s*", "", question) - if re.match(r"^\d+\.", question) else question - for question in response["questions"] - ] - - response["type"] = part - response["difficulty"] = difficulty - - if part in {2, 3}: - response["topic"] = topic - - return response - - async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: - request_id = uuid.uuid4() - self._logger.info( - f'POST - speaking_task_{task} - Received request to grade speaking task {task}. ' - f'Use this id to track the logs: {str(request_id)} - Request data: {str(answers)}' - ) - - text_answers = [] - perfect_answers = [] - - if task != 2: - self._logger.info( - f'POST - speaking_task_{task} - {str(request_id)} - Received {str(len(answers))} total answers.' 
- ) - - for item in answers: - sound_file_name = FilePaths.AUDIO_FILES_PATH + str(uuid.uuid4()) - - self._logger.info(f'POST - speaking_task_{task} - {request_id} - Downloading file {item["answer"]}') - - await self._file_storage.download_firebase_file(item["answer"], sound_file_name) - - self._logger.info( - f'POST - speaking_task_{task} - {request_id} - ' - f'Downloaded file {item["answer"]} to {sound_file_name}' - ) - - answer_text = await self._stt.speech_to_text(sound_file_name) - self._logger.info(f'POST - speaking_task_{task} - {request_id} - Transcripted answer: {answer_text}') - - text_answers.append(answer_text) - item["answer"] = answer_text - os.remove(sound_file_name) - - # TODO: This will end the grading of all answers if a single one does not have enough words - # don't know if this is intended - if not TextHelper.has_x_words(answer_text, 20): - self._logger.info( - f'POST - speaking_task_{task} - {request_id} - ' - f'The answer had less words than threshold 20 to be graded. Answer: {answer_text}' - ) - return self._zero_rating("The audio recorded does not contain enough english words to be graded.") - - self._logger.info( - f'POST - speaking_task_{task} - {request_id} - ' - f'Requesting perfect answer for question: {item["question"]}' - ) - perfect_answers.append(await self._get_perfect_answer(task, item["question"])) - - if task in {1, 3}: - self._logger.info( - f'POST - speaking_task_{task} - {request_id} - Formatting answers and questions for prompt.' 
- ) - - formatted_text = "" - for i, entry in enumerate(answers, start=1): - formatted_text += f"**Question {i}:**\n{entry['question']}\n\n" - formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n" - - self._logger.info( - f'POST - speaking_task_{task} - {request_id} - ' - f'Formatted answers and questions for prompt: {formatted_text}' - ) - questions_and_answers = f'\n\n The questions and answers are: \n\n{formatted_text}' - else: - questions_and_answers = f'\n Question: "{answers[0]["question"]}" \n Answer: "{answers[0]["answer"]}"' - - self._logger.info(f'POST - speaking_task_{task} - {request_id} - Requesting grading of the answer(s).') - response = await self._grade_task(task, questions_and_answers) - - self._logger.info(f'POST - speaking_task_{task} - {request_id} - Answer(s) graded: {response}') - - if task in {1, 3}: - self._logger.info( - f'POST - speaking_task_{task} - {request_id} - Adding perfect answer(s) to response.') - - # TODO: check if it is answer["answer"] instead - for i, answer in enumerate(perfect_answers, start=1): - response['perfect_answer_' + str(i)] = answer - - self._logger.info( - f'POST - speaking_task_{task} - {request_id} - Adding transcript and fixed texts to response.' 
- ) - - for i, answer in enumerate(text_answers, start=1): - response['transcript_' + str(i)] = answer - response['fixed_text_' + str(i)] = await self._get_speaking_corrections(answer) - else: - response['transcript'] = answers[0]["answer"] - - self._logger.info(f'POST - speaking_task_{task} - {request_id} - Requesting fixed text.') - response['fixed_text'] = await self._get_speaking_corrections(answers[0]["answer"]) - self._logger.info(f'POST - speaking_task_{task} - {request_id} - Fixed text: {response["fixed_text"]}') - - response['perfect_answer'] = perfect_answers[0]["answer"] - - response["overall"] = self._fix_speaking_overall(response["overall"], response["task_response"]) - self._logger.info(f'POST - speaking_task_{task} - {request_id} - Final response: {response}') - return response - - # ================================================================================================================== - # grade_speaking_task helpers - # ================================================================================================================== - - async def _get_perfect_answer(self, task: int, question: str): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: {"answer": "perfect answer"}' - ) - }, - { - "role": "user", - "content": ( - 'Provide a perfect answer according to ielts grading system to the following ' - f'Speaking Part {task} question: "{question}"' - ) - } - ] - - if task == 1: - messages.append({ - "role": "user", - "content": 'The answer must be 2 or 3 sentences long.' 
- }) - - gpt_model = GPTModels.GPT_4_O if task == 1 else GPTModels.GPT_3_5_TURBO - - return await self._llm.prediction( - gpt_model, messages, ["answer"], TemperatureSettings.GRADING_TEMPERATURE - ) - - async def _grade_task(self, task: int, questions_and_answers: str) -> Dict: - messages = [ - { - "role": "system", - "content": ( - f'You are a helpful assistant designed to output JSON on this format: {self._grade_template()}' - ) - }, - { - "role": "user", - "content": ( - f'Evaluate the given Speaking Part {task} response based on the IELTS grading system, ensuring a ' - 'strict assessment that penalizes errors. Deduct points for deviations from the task, and ' - 'assign a score of 0 if the response fails to address the question. Additionally, provide ' - 'detailed commentary highlighting both strengths and weaknesses in the response.' - ) + questions_and_answers - } - ] - - task_specific = { - "1": ( - 'Address the student as "you". If the answers are not 2 or 3 sentences long, warn the ' - 'student that they should be.' - ), - "2": 'Address the student as "you"', - "3": 'Address the student as "you" and pay special attention to coherence between the answers.' - } - - messages.append({ - "role": "user", - "content": task_specific[str(task)] - }) - - if task in {1, 3}: - messages.extend([ - { - "role": "user", - "content": ( - 'For pronunciations act as if you heard the answers and they were transcripted ' - 'as you heard them.' - ) - }, - { - "role": "user", - "content": 'The comments must be long, detailed, justify the grading and suggest improvements.' 
- } - ]) - - return await self._llm.prediction( - GPTModels.GPT_4_O, messages, ["comment"], TemperatureSettings.GRADING_TEMPERATURE - ) - - @staticmethod - def _fix_speaking_overall(overall: float, task_response: dict): - grades = [category["grade"] for category in task_response.values()] - - if overall > max(grades) or overall < min(grades): - total_sum = sum(grades) - average = total_sum / len(grades) - rounded_average = round(average, 0) - return rounded_average - - return overall - - @staticmethod - def _zero_rating(comment: str): - return { - "comment": comment, - "overall": 0, - "task_response": { - "Fluency and Coherence": { - "grade": 0.0, - "comment": "" - }, - "Lexical Resource": { - "grade": 0.0, - "comment": "" - }, - "Grammatical Range and Accuracy": { - "grade": 0.0, - "comment": "" - }, - "Pronunciation": { - "grade": 0.0, - "comment": "" - } - } - } - - async def _get_speaking_corrections(self, text): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"fixed_text": "fixed transcription with no misspelling errors"}' - ) - }, - { - "role": "user", - "content": ( - 'Fix the errors in the provided transcription and put it in a JSON. ' - f'Do not complete the answer, only replace what is wrong. 
\n The text: "{text}"' - ) - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_3_5_TURBO, - messages, - ["fixed_text"], - 0.2, - False - ) - return response["fixed_text"] - - async def create_videos_and_save_to_db(self, exercises, template, req_id): - template = await self._create_video_per_part(exercises, template, 1) - template = await self._create_video_per_part(exercises, template, 2) - template = await self._create_video_per_part(exercises, template, 3) - - await self._document_store.save_to_db_with_id("speaking", template, req_id) - self._logger.info(f'Saved speaking to DB with id {req_id} : {str(template)}') - - async def _create_video_per_part(self, exercises: List[Dict], template: Dict, part: int): - avatar = (random.choice(list(AvatarEnum))).value - template_index = part - 1 - - # Using list comprehension to find the element with the desired value in the 'type' field - found_exercises = [element for element in exercises if element.get('type') == part] - - # Check if any elements were found - if found_exercises: - exercise = found_exercises[0] - self._logger.info(f'Creating video for speaking part {part}') - if part in {1, 3}: - questions = [] - for question in exercise["questions"]: - result = await self._create_video( - question, - avatar, - f'Failed to create video for part {part} question: {str(exercise["question"])}' - ) - if result is not None: - video = { - "text": question, - "video_path": result["video_path"], - "video_url": result["video_url"] - } - questions.append(video) - - template["exercises"][template_index]["prompts"] = questions - if part == 1: - template["exercises"][template_index]["first_title"] = exercise["first_topic"] - template["exercises"][template_index]["second_title"] = exercise["second_topic"] - else: - template["exercises"][template_index]["title"] = exercise["topic"] - else: - result = await self._create_video( - exercise["question"], - avatar, - f'Failed to create video for part {part} question: 
{str(exercise["question"])}' - ) - if result is not None: - template["exercises"][template_index]["prompts"] = exercise["prompts"] - template["exercises"][template_index]["text"] = exercise["question"] - template["exercises"][template_index]["title"] = exercise["topic"] - template["exercises"][template_index]["video_url"] = result["video_url"] - template["exercises"][template_index]["video_path"] = result["video_path"] - - if not found_exercises: - template["exercises"].pop(template_index) - - return template - - async def generate_video( - self, part: int, avatar: str, topic: str, questions: list[str], - *, - second_topic: Optional[str] = None, - prompts: Optional[list[str]] = None, - suffix: Optional[str] = None, - ): - request_id = str(uuid.uuid4()) - # TODO: request data - self._logger.info( - f'POST - generate_video_{part} - Received request to generate video {part}. ' - f'Use this id to track the logs: {request_id} - Request data: " + str(request.get_json())' - ) - - part_questions = self._get_part_questions(part, questions, avatar) - videos = [] - - self._logger.info(f'POST - generate_video_{part} - {request_id} - Creating videos for speaking part {part}.') - for question in part_questions: - self._logger.info(f'POST - generate_video_{part} - {request_id} - Creating video for question: {question}') - result = await self._create_video( - question, - avatar, - 'POST - generate_video_{p} - {r} - Failed to create video for part {p} question: {q}'.format( - p=part, r=request_id, q=question - ) - ) - if result is not None: - self._logger.info(f'POST - generate_video_{part} - {request_id} - Video created') - self._logger.info( - f'POST - generate_video_{part} - {request_id} - Uploaded video to firebase: {result["video_url"]}' - ) - video = { - "text": question, - "video_path": result["video_path"], - "video_url": result["video_url"] - } - videos.append(video) - - if part == 2 and len(videos) == 0: - raise Exception(f'Failed to create video for part 2 question: 
{questions[0]}') - - return self._get_part_response(part, topic, videos, second_topic, prompts, suffix) - - @staticmethod - def _get_part_questions(part: int, questions: list[str], avatar: str): - part_questions: list[str] = [] - - if part == 1: - id_to_name = { - "5912afa7c77c47d3883af3d874047aaf": "MATTHEW", - "9e58d96a383e4568a7f1e49df549e0e4": "VERA", - "d2cdd9c0379a4d06ae2afb6e5039bd0c": "EDWARD", - "045cb5dcd00042b3a1e4f3bc1c12176b": "TANYA", - "1ae1e5396cc444bfad332155fdb7a934": "KAYLA", - "0ee6aa7cc1084063a630ae514fccaa31": "JEROME", - "5772cff935844516ad7eeff21f839e43": "TYLER", - - } - part_questions.extend( - [ - "Hello my name is " + id_to_name.get(avatar) + ", what is yours?", - "Do you work or do you study?", - *questions - ] - ) - elif part == 2: - # Removed as the examiner should not say what is on the card. - # question = question + " In your answer you should consider: " + " ".join(prompts) + suffix - part_questions.append(f'{questions[0]}\nYou have 1 minute to take notes.') - elif part == 3: - part_questions = questions - - return part_questions - - @staticmethod - def _get_part_response( - part: int, - topic: str, - videos: list[dict], - second_topic: Optional[str], - prompts: Optional[list[str]], - suffix: Optional[str] - ): - response = {} - if part == 1: - response = { - "prompts": videos, - "first_title": topic, - "second_title": second_topic, - "type": "interactiveSpeaking" - } - if part == 2: - response = { - "prompts": prompts, - "title": topic, - "suffix": suffix, - "type": "speaking", - # includes text, video_url and video_path - **videos[0] - } - if part == 3: - response = { - "prompts": videos, - "title": topic, - "type": "interactiveSpeaking", - } - - response["id"] = str(uuid.uuid4()) - return response - - async def _create_video(self, question: str, avatar: str, error_message: str): - result = await self._vid_gen.create_video(question, avatar) - if result is not None: - sound_file_path = FilePaths.VIDEO_FILES_PATH + result - 
firebase_file_path = FilePaths.FIREBASE_SPEAKING_VIDEO_FILES_PATH + result - url = await self._file_storage.upload_file_firebase_get_url(firebase_file_path, sound_file_path) - return { - "video_path": firebase_file_path, - "video_url": url - } - self._logger.error(error_message) - return None - - @staticmethod - def _grade_template(): - return { - "comment": "extensive comment about answer quality", - "overall": 0.0, - "task_response": { - "Fluency and Coherence": { - "grade": 0.0, - "comment": ( - "extensive comment about fluency and coherence, use examples to justify the grade awarded." - ) - }, - "Lexical Resource": { - "grade": 0.0, - "comment": "extensive comment about lexical resource, use examples to justify the grade awarded." - }, - "Grammatical Range and Accuracy": { - "grade": 0.0, - "comment": ( - "extensive comment about grammatical range and accuracy, use examples to justify the " - "grade awarded." - ) - }, - "Pronunciation": { - "grade": 0.0, - "comment": ( - "extensive comment about pronunciation on the transcribed answer, use examples to justify the " - "grade awarded." 
- ) - } - } +import logging +import os +import re +import uuid +import random +from typing import Dict, List, Optional + +from app.repositories.abc import IFileStorage, IDocumentStore +from app.services.abc import ISpeakingService, ILLMService, IVideoGeneratorService, ISpeechToTextService +from app.configs.constants import ( + FieldsAndExercises, GPTModels, TemperatureSettings, + AvatarEnum, FilePaths +) +from app.helpers import TextHelper + + +class SpeakingService(ISpeakingService): + + def __init__( + self, llm: ILLMService, vid_gen: IVideoGeneratorService, + file_storage: IFileStorage, document_store: IDocumentStore, + stt: ISpeechToTextService + ): + self._llm = llm + self._vid_gen = vid_gen + self._file_storage = file_storage + self._document_store = document_store + self._stt = stt + self._logger = logging.getLogger(__name__) + + # TODO: Is the difficulty in the prompts supposed to be hardcoded? The response is set with + # either the difficulty in the request or a random one yet the prompt doesn't change + self._tasks = { + "task_1": { + "get": { + "json_template": { + "first_topic": "topic 1", + "second_topic": "topic 2", + "questions": [ + ( + "Introductory question about the first topic, starting the topic with " + "'Let's talk about x' and then the question." + ), + "Follow up question about the first topic", + "Follow up question about the first topic", + "Question about second topic", + "Follow up question about the second topic", + ] + }, + "prompt": ( + 'Craft 5 simple and single questions of easy difficulty for IELTS Speaking Part 1 ' + 'that encourages candidates to delve deeply into personal experiences, preferences, or ' + 'insights on the topic of "{first_topic}" and the topic of "{second_topic}". ' + 'Make sure that the generated question does not contain forbidden subjects in ' + 'muslim countries.' 
+ ) + } + }, + "task_2": { + "get": { + "json_template": { + "topic": "topic", + "question": "question", + "prompts": [ + "prompt_1", + "prompt_2", + "prompt_3" + ], + "suffix": "And explain why..." + }, + "prompt": ( + 'Create a question of medium difficulty for IELTS Speaking Part 2 ' + 'that encourages candidates to narrate a personal experience or story related to the topic ' + 'of "{topic}". Include 3 prompts that guide the candidate to describe ' + 'specific aspects of the experience, such as details about the situation, ' + 'their actions, and the reasons it left a lasting impression. Make sure that the ' + 'generated question does not contain forbidden subjects in muslim countries.' + ) + } + }, + "task_3": { + "get": { + "json_template": { + "topic": "topic", + "questions": [ + "Introductory question about the topic.", + "Follow up question about the topic", + "Follow up question about the topic", + "Follow up question about the topic", + "Follow up question about the topic" + ] + }, + "prompt": ( + 'Formulate a set of 5 single questions of hard difficulty for IELTS Speaking Part 3' + 'that encourage candidates to engage in a meaningful discussion on the topic of "{topic}". ' + 'Provide inquiries, ensuring they explore various aspects, perspectives, and implications ' + 'related to the topic. Make sure that the generated question does not contain forbidden ' + 'subjects in muslim countries.' 
+ ) + } + }, + } + + async def get_speaking_part( + self, part: int, topic: str, difficulty: str, second_topic: Optional[str] = None + ) -> Dict: + task_values = self._tasks[f'task_{part}']['get'] + + if part == 1: + task_prompt = task_values["prompt"].format(first_topic=topic, second_topic=second_topic) + else: + task_prompt = task_values["prompt"].format(topic=topic) + + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + f'{task_values["json_template"]}' + ) + }, + { + "role": "user", + "content": task_prompt + } + ] + + part_specific = { + "1": 'The questions should lead to the usage of 4 verb tenses (present perfect, present, past and future).', + "2": ( + 'The prompts must not be questions. Also include a suffix like the ones in the IELTS exams ' + 'that start with "And explain why".' + ) + } + + if part in {1, 2}: + messages.append({ + "role": "user", + "content": part_specific[str(part)] + }) + + if part in {1, 3}: + messages.append({ + "role": "user", + "content": 'They must be 1 single question each and not be double-barreled questions.' + }) + + fields_to_check = ["first_topic"] if part == 1 else FieldsAndExercises.GEN_FIELDS + + response = await self._llm.prediction( + GPTModels.GPT_4_O, messages, fields_to_check, TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + if part == 3: + # Remove the numbers from the questions only if the string starts with a number + response["questions"] = [ + re.sub(r"^\d+\.\s*", "", question) + if re.match(r"^\d+\.", question) else question + for question in response["questions"] + ] + + response["type"] = part + response["difficulty"] = difficulty + + if part in {2, 3}: + response["topic"] = topic + + return response + + async def grade_speaking_task(self, task: int, answers: List[Dict]) -> Dict: + request_id = uuid.uuid4() + self._logger.info( + f'POST - speaking_task_{task} - Received request to grade speaking task {task}. 
' + f'Use this id to track the logs: {str(request_id)} - Request data: {str(answers)}' + ) + + text_answers = [] + perfect_answers = [] + + if task != 2: + self._logger.info( + f'POST - speaking_task_{task} - {str(request_id)} - Received {str(len(answers))} total answers.' + ) + + for item in answers: + sound_file_name = FilePaths.AUDIO_FILES_PATH + str(uuid.uuid4()) + + self._logger.info(f'POST - speaking_task_{task} - {request_id} - Downloading file {item["answer"]}') + + await self._file_storage.download_firebase_file(item["answer"], sound_file_name) + + self._logger.info( + f'POST - speaking_task_{task} - {request_id} - ' + f'Downloaded file {item["answer"]} to {sound_file_name}' + ) + + answer_text = await self._stt.speech_to_text(sound_file_name) + self._logger.info(f'POST - speaking_task_{task} - {request_id} - Transcripted answer: {answer_text}') + + text_answers.append(answer_text) + item["answer"] = answer_text + os.remove(sound_file_name) + + # TODO: This will end the grading of all answers if a single one does not have enough words + # don't know if this is intended + if not TextHelper.has_x_words(answer_text, 20): + self._logger.info( + f'POST - speaking_task_{task} - {request_id} - ' + f'The answer had less words than threshold 20 to be graded. Answer: {answer_text}' + ) + return self._zero_rating("The audio recorded does not contain enough english words to be graded.") + + self._logger.info( + f'POST - speaking_task_{task} - {request_id} - ' + f'Requesting perfect answer for question: {item["question"]}' + ) + perfect_answers.append(await self._get_perfect_answer(task, item["question"])) + + if task in {1, 3}: + self._logger.info( + f'POST - speaking_task_{task} - {request_id} - Formatting answers and questions for prompt.' 
+ ) + + formatted_text = "" + for i, entry in enumerate(answers, start=1): + formatted_text += f"**Question {i}:**\n{entry['question']}\n\n" + formatted_text += f"**Answer {i}:**\n{entry['answer']}\n\n" + + self._logger.info( + f'POST - speaking_task_{task} - {request_id} - ' + f'Formatted answers and questions for prompt: {formatted_text}' + ) + questions_and_answers = f'\n\n The questions and answers are: \n\n{formatted_text}' + else: + questions_and_answers = f'\n Question: "{answers[0]["question"]}" \n Answer: "{answers[0]["answer"]}"' + + self._logger.info(f'POST - speaking_task_{task} - {request_id} - Requesting grading of the answer(s).') + response = await self._grade_task(task, questions_and_answers) + + self._logger.info(f'POST - speaking_task_{task} - {request_id} - Answer(s) graded: {response}') + + if task in {1, 3}: + self._logger.info( + f'POST - speaking_task_{task} - {request_id} - Adding perfect answer(s) to response.') + + # TODO: check if it is answer["answer"] instead + for i, answer in enumerate(perfect_answers, start=1): + response['perfect_answer_' + str(i)] = answer + + self._logger.info( + f'POST - speaking_task_{task} - {request_id} - Adding transcript and fixed texts to response.' 
+ ) + + for i, answer in enumerate(text_answers, start=1): + response['transcript_' + str(i)] = answer + response['fixed_text_' + str(i)] = await self._get_speaking_corrections(answer) + else: + response['transcript'] = answers[0]["answer"] + + self._logger.info(f'POST - speaking_task_{task} - {request_id} - Requesting fixed text.') + response['fixed_text'] = await self._get_speaking_corrections(answers[0]["answer"]) + self._logger.info(f'POST - speaking_task_{task} - {request_id} - Fixed text: {response["fixed_text"]}') + + response['perfect_answer'] = perfect_answers[0]["answer"] + + response["overall"] = self._fix_speaking_overall(response["overall"], response["task_response"]) + self._logger.info(f'POST - speaking_task_{task} - {request_id} - Final response: {response}') + return response + + # ================================================================================================================== + # grade_speaking_task helpers + # ================================================================================================================== + + async def _get_perfect_answer(self, task: int, question: str): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: {"answer": "perfect answer"}' + ) + }, + { + "role": "user", + "content": ( + 'Provide a perfect answer according to ielts grading system to the following ' + f'Speaking Part {task} question: "{question}"' + ) + } + ] + + if task == 1: + messages.append({ + "role": "user", + "content": 'The answer must be 2 or 3 sentences long.' 
+ }) + + gpt_model = GPTModels.GPT_4_O if task == 1 else GPTModels.GPT_3_5_TURBO + + return await self._llm.prediction( + gpt_model, messages, ["answer"], TemperatureSettings.GRADING_TEMPERATURE + ) + + async def _grade_task(self, task: int, questions_and_answers: str) -> Dict: + messages = [ + { + "role": "system", + "content": ( + f'You are a helpful assistant designed to output JSON on this format: {self._grade_template()}' + ) + }, + { + "role": "user", + "content": ( + f'Evaluate the given Speaking Part {task} response based on the IELTS grading system, ensuring a ' + 'strict assessment that penalizes errors. Deduct points for deviations from the task, and ' + 'assign a score of 0 if the response fails to address the question. Additionally, provide ' + 'detailed commentary highlighting both strengths and weaknesses in the response.' + ) + questions_and_answers + } + ] + + task_specific = { + "1": ( + 'Address the student as "you". If the answers are not 2 or 3 sentences long, warn the ' + 'student that they should be.' + ), + "2": 'Address the student as "you"', + "3": 'Address the student as "you" and pay special attention to coherence between the answers.' + } + + messages.append({ + "role": "user", + "content": task_specific[str(task)] + }) + + if task in {1, 3}: + messages.extend([ + { + "role": "user", + "content": ( + 'For pronunciations act as if you heard the answers and they were transcripted ' + 'as you heard them.' + ) + }, + { + "role": "user", + "content": 'The comments must be long, detailed, justify the grading and suggest improvements.' 
+ } + ]) + + return await self._llm.prediction( + GPTModels.GPT_4_O, messages, ["comment"], TemperatureSettings.GRADING_TEMPERATURE + ) + + @staticmethod + def _fix_speaking_overall(overall: float, task_response: dict): + grades = [category["grade"] for category in task_response.values()] + + if overall > max(grades) or overall < min(grades): + total_sum = sum(grades) + average = total_sum / len(grades) + rounded_average = round(average, 0) + return rounded_average + + return overall + + @staticmethod + def _zero_rating(comment: str): + return { + "comment": comment, + "overall": 0, + "task_response": { + "Fluency and Coherence": { + "grade": 0.0, + "comment": "" + }, + "Lexical Resource": { + "grade": 0.0, + "comment": "" + }, + "Grammatical Range and Accuracy": { + "grade": 0.0, + "comment": "" + }, + "Pronunciation": { + "grade": 0.0, + "comment": "" + } + } + } + + async def _get_speaking_corrections(self, text): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"fixed_text": "fixed transcription with no misspelling errors"}' + ) + }, + { + "role": "user", + "content": ( + 'Fix the errors in the provided transcription and put it in a JSON. ' + f'Do not complete the answer, only replace what is wrong. 
\n The text: "{text}"' + ) + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_3_5_TURBO, + messages, + ["fixed_text"], + 0.2, + False + ) + return response["fixed_text"] + + async def create_videos_and_save_to_db(self, exercises, template, req_id): + template = await self._create_video_per_part(exercises, template, 1) + template = await self._create_video_per_part(exercises, template, 2) + template = await self._create_video_per_part(exercises, template, 3) + + await self._document_store.save_to_db_with_id("speaking", template, req_id) + self._logger.info(f'Saved speaking to DB with id {req_id} : {str(template)}') + + async def _create_video_per_part(self, exercises: List[Dict], template: Dict, part: int): + avatar = (random.choice(list(AvatarEnum))).value + template_index = part - 1 + + # Using list comprehension to find the element with the desired value in the 'type' field + found_exercises = [element for element in exercises if element.get('type') == part] + + # Check if any elements were found + if found_exercises: + exercise = found_exercises[0] + self._logger.info(f'Creating video for speaking part {part}') + if part in {1, 3}: + questions = [] + for question in exercise["questions"]: + result = await self._create_video( + question, + avatar, + f'Failed to create video for part {part} question: {str(exercise["question"])}' + ) + if result is not None: + video = { + "text": question, + "video_path": result["video_path"], + "video_url": result["video_url"] + } + questions.append(video) + + template["exercises"][template_index]["prompts"] = questions + if part == 1: + template["exercises"][template_index]["first_title"] = exercise["first_topic"] + template["exercises"][template_index]["second_title"] = exercise["second_topic"] + else: + template["exercises"][template_index]["title"] = exercise["topic"] + else: + result = await self._create_video( + exercise["question"], + avatar, + f'Failed to create video for part {part} question: 
{str(exercise["question"])}' + ) + if result is not None: + template["exercises"][template_index]["prompts"] = exercise["prompts"] + template["exercises"][template_index]["text"] = exercise["question"] + template["exercises"][template_index]["title"] = exercise["topic"] + template["exercises"][template_index]["video_url"] = result["video_url"] + template["exercises"][template_index]["video_path"] = result["video_path"] + + if not found_exercises: + template["exercises"].pop(template_index) + + return template + + async def generate_video( + self, part: int, avatar: str, topic: str, questions: list[str], + *, + second_topic: Optional[str] = None, + prompts: Optional[list[str]] = None, + suffix: Optional[str] = None, + ): + params = locals() + params.pop('self') + + request_id = str(uuid.uuid4()) + self._logger.info( + f'POST - generate_video_{part} - Received request to generate video {part}. ' + f'Use this id to track the logs: {request_id} - Request data: " + {params}' + ) + + part_questions = self._get_part_questions(part, questions, avatar) + videos = [] + + self._logger.info(f'POST - generate_video_{part} - {request_id} - Creating videos for speaking part {part}.') + for question in part_questions: + self._logger.info(f'POST - generate_video_{part} - {request_id} - Creating video for question: {question}') + result = await self._create_video( + question, + avatar, + 'POST - generate_video_{p} - {r} - Failed to create video for part {p} question: {q}'.format( + p=part, r=request_id, q=question + ) + ) + if result is not None: + self._logger.info(f'POST - generate_video_{part} - {request_id} - Video created') + self._logger.info( + f'POST - generate_video_{part} - {request_id} - Uploaded video to firebase: {result["video_url"]}' + ) + video = { + "text": question, + "video_path": result["video_path"], + "video_url": result["video_url"] + } + videos.append(video) + + if part == 2 and len(videos) == 0: + raise Exception(f'Failed to create video for part 2 question: 
{questions[0]}') + + return self._get_part_response(part, topic, videos, second_topic, prompts, suffix) + + @staticmethod + def _get_part_questions(part: int, questions: list[str], avatar: str): + part_questions: list[str] = [] + + if part == 1: + id_to_name = { + "5912afa7c77c47d3883af3d874047aaf": "MATTHEW", + "9e58d96a383e4568a7f1e49df549e0e4": "VERA", + "d2cdd9c0379a4d06ae2afb6e5039bd0c": "EDWARD", + "045cb5dcd00042b3a1e4f3bc1c12176b": "TANYA", + "1ae1e5396cc444bfad332155fdb7a934": "KAYLA", + "0ee6aa7cc1084063a630ae514fccaa31": "JEROME", + "5772cff935844516ad7eeff21f839e43": "TYLER", + + } + part_questions.extend( + [ + "Hello my name is " + id_to_name.get(avatar) + ", what is yours?", + "Do you work or do you study?", + *questions + ] + ) + elif part == 2: + # Removed as the examiner should not say what is on the card. + # question = question + " In your answer you should consider: " + " ".join(prompts) + suffix + part_questions.append(f'{questions[0]}\nYou have 1 minute to take notes.') + elif part == 3: + part_questions = questions + + return part_questions + + @staticmethod + def _get_part_response( + part: int, + topic: str, + videos: list[dict], + second_topic: Optional[str], + prompts: Optional[list[str]], + suffix: Optional[str] + ): + response = {} + if part == 1: + response = { + "prompts": videos, + "first_title": topic, + "second_title": second_topic, + "type": "interactiveSpeaking" + } + if part == 2: + response = { + "prompts": prompts, + "title": topic, + "suffix": suffix, + "type": "speaking", + # includes text, video_url and video_path + **videos[0] + } + if part == 3: + response = { + "prompts": videos, + "title": topic, + "type": "interactiveSpeaking", + } + + response["id"] = str(uuid.uuid4()) + return response + + async def _create_video(self, question: str, avatar: str, error_message: str): + result = await self._vid_gen.create_video(question, avatar) + if result is not None: + sound_file_path = FilePaths.VIDEO_FILES_PATH + result + 
firebase_file_path = FilePaths.FIREBASE_SPEAKING_VIDEO_FILES_PATH + result + url = await self._file_storage.upload_file_firebase_get_url(firebase_file_path, sound_file_path) + return { + "video_path": firebase_file_path, + "video_url": url + } + self._logger.error(error_message) + return None + + @staticmethod + def _grade_template(): + return { + "comment": "extensive comment about answer quality", + "overall": 0.0, + "task_response": { + "Fluency and Coherence": { + "grade": 0.0, + "comment": ( + "extensive comment about fluency and coherence, use examples to justify the grade awarded." + ) + }, + "Lexical Resource": { + "grade": 0.0, + "comment": "extensive comment about lexical resource, use examples to justify the grade awarded." + }, + "Grammatical Range and Accuracy": { + "grade": 0.0, + "comment": ( + "extensive comment about grammatical range and accuracy, use examples to justify the " + "grade awarded." + ) + }, + "Pronunciation": { + "grade": 0.0, + "comment": ( + "extensive comment about pronunciation on the transcribed answer, use examples to justify the " + "grade awarded." 
+ ) + } + } } \ No newline at end of file diff --git a/app/services/impl/writing.py b/app/services/impl/exam/writing.py similarity index 97% rename from app/services/impl/writing.py rename to app/services/impl/exam/writing.py index 9bf19ff..ca7e10a 100644 --- a/app/services/impl/writing.py +++ b/app/services/impl/exam/writing.py @@ -1,248 +1,248 @@ -from typing import List, Dict - -from app.services.abc import IWritingService, ILLMService, IAIDetectorService -from app.configs.constants import GPTModels, TemperatureSettings, FieldsAndExercises -from app.helpers import TextHelper, ExercisesHelper - - -class WritingService(IWritingService): - - def __init__(self, llm: ILLMService, ai_detector: IAIDetectorService): - self._llm = llm - self._ai_detector = ai_detector - - async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: {"prompt": "prompt content"}' - ) - }, - *self._get_writing_messages(task, topic, difficulty) - ] - - llm_model = GPTModels.GPT_3_5_TURBO if task == 1 else GPTModels.GPT_4_O - - response = await self._llm.prediction( - llm_model, - messages, - ["prompt"], - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - question = response["prompt"].strip() - - return { - "question": self._add_newline_before_hyphen(question) if task == 1 else question, - "difficulty": difficulty, - "topic": topic - } - - @staticmethod - def _get_writing_messages(task: int, topic: str, difficulty: str) -> List[Dict]: - # TODO: Should the muslim disclaimer be added to task 2? - task_prompt = ( - 'Craft a prompt for an IELTS Writing Task 1 General Training exercise that instructs the ' - 'student to compose a letter. The prompt should present a specific scenario or situation, ' - f'based on the topic of "{topic}", requiring the student to provide information, ' - 'advice, or instructions within the letter. 
Make sure that the generated prompt is ' - f'of {difficulty} difficulty and does not contain forbidden subjects in muslim countries.' - ) if task == 1 else ( - f'Craft a comprehensive question of {difficulty} difficulty like the ones for IELTS ' - 'Writing Task 2 General Training that directs the candidate to delve into an in-depth ' - f'analysis of contrasting perspectives on the topic of "{topic}".' - ) - - task_instructions = ( - 'The prompt should end with "In the letter you should" followed by 3 bullet points of what ' - 'the answer should include.' - ) if task == 1 else ( - 'The question should lead to an answer with either "theories", "complicated information" or ' - 'be "very descriptive" on the topic.' - ) - - messages = [ - { - "role": "user", - "content": task_prompt - }, - { - "role": "user", - "content": task_instructions - } - ] - - return messages - - async def grade_writing_task(self, task: int, question: str, answer: str): - bare_minimum = 100 if task == 1 else 180 - - if not TextHelper.has_words(answer): - return self._zero_rating("The answer does not contain enough english words.") - elif not TextHelper.has_x_words(answer, bare_minimum): - return self._zero_rating("The answer is insufficient and too small to be graded.") - else: - template = self._get_writing_template() - messages = [ - { - "role": "system", - "content": ( - f'You are a helpful assistant designed to output JSON on this format: {template}' - ) - }, - { - "role": "user", - "content": ( - f'Evaluate the given Writing Task {task} response based on the IELTS grading system, ' - 'ensuring a strict assessment that penalizes errors. Deduct points for deviations ' - 'from the task, and assign a score of 0 if the response fails to address the question. ' - 'Additionally, provide a detailed commentary highlighting both strengths and ' - 'weaknesses in the response. 
' - f'\n Question: "{question}" \n Answer: "{answer}"') - } - ] - - if task == 1: - messages.append({ - "role": "user", - "content": ( - 'Refer to the parts of the letter as: "Greeting Opener", "bullet 1", "bullet 2", ' - '"bullet 3", "closer (restate the purpose of the letter)", "closing greeting"' - ) - }) - - llm_model = GPTModels.GPT_3_5_TURBO if task == 1 else GPTModels.GPT_4_O - temperature = ( - TemperatureSettings.GRADING_TEMPERATURE - if task == 1 else - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - response = await self._llm.prediction( - llm_model, - messages, - ["comment"], - temperature - ) - - perfect_answer_minimum = 150 if task == 1 else 250 - perfect_answer = await self._get_perfect_answer(question, perfect_answer_minimum) - - response["perfect_answer"] = perfect_answer["perfect_answer"] - response["overall"] = ExercisesHelper.fix_writing_overall(response["overall"], response["task_response"]) - response['fixed_text'] = await self._get_fixed_text(answer) - - ai_detection = await self._ai_detector.run_detection(answer) - if ai_detection is not None: - response['ai_detection'] = ai_detection - - return response - - async def _get_fixed_text(self, text): - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"fixed_text": "fixed test with no misspelling errors"}' - ) - }, - { - "role": "user", - "content": ( - 'Fix the errors in the given text and put it in a JSON. ' - f'Do not complete the answer, only replace what is wrong. 
\n The text: "{text}"' - ) - } - ] - - response = await self._llm.prediction( - GPTModels.GPT_3_5_TURBO, - messages, - ["fixed_text"], - 0.2, - False - ) - return response["fixed_text"] - - async def _get_perfect_answer(self, question: str, size: int) -> Dict: - messages = [ - { - "role": "system", - "content": ( - 'You are a helpful assistant designed to output JSON on this format: ' - '{"perfect_answer": "perfect answer for the question"}' - ) - }, - { - "role": "user", - "content": f'Write a perfect answer for this writing exercise of a IELTS exam. Question: {question}' - - }, - { - "role": "user", - "content": f'The answer must have at least {size} words' - } - ] - return await self._llm.prediction( - GPTModels.GPT_4_O, - messages, - ["perfect_answer"], - TemperatureSettings.GEN_QUESTION_TEMPERATURE - ) - - @staticmethod - def _zero_rating(comment: str): - return { - 'comment': comment, - 'overall': 0, - 'task_response': { - 'Task Achievement': { - "grade": 0.0, - "comment": "" - }, - 'Coherence and Cohesion': { - "grade": 0.0, - "comment": "" - }, - 'Lexical Resource': { - "grade": 0.0, - "comment": "" - }, - 'Grammatical Range and Accuracy': { - "grade": 0.0, - "comment": "" - } - } - } - - @staticmethod - def _get_writing_template(): - return { - "comment": "comment about student's response quality", - "overall": 0.0, - "task_response": { - "Task Achievement": { - "grade": 0.0, - "comment": "comment about Task Achievement of the student's response" - }, - "Coherence and Cohesion": { - "grade": 0.0, - "comment": "comment about Coherence and Cohesion of the student's response" - }, - "Lexical Resource": { - "grade": 0.0, - "comment": "comment about Lexical Resource of the student's response" - }, - "Grammatical Range and Accuracy": { - "grade": 0.0, - "comment": "comment about Grammatical Range and Accuracy of the student's response" - } - } - } - - @staticmethod - def _add_newline_before_hyphen(s): - return s.replace(" -", "\n-") - +from typing import List, 
Dict + +from app.services.abc import IWritingService, ILLMService, IAIDetectorService +from app.configs.constants import GPTModels, TemperatureSettings, FieldsAndExercises +from app.helpers import TextHelper, ExercisesHelper + + +class WritingService(IWritingService): + + def __init__(self, llm: ILLMService, ai_detector: IAIDetectorService): + self._llm = llm + self._ai_detector = ai_detector + + async def get_writing_task_general_question(self, task: int, topic: str, difficulty: str): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: {"prompt": "prompt content"}' + ) + }, + *self._get_writing_messages(task, topic, difficulty) + ] + + llm_model = GPTModels.GPT_3_5_TURBO if task == 1 else GPTModels.GPT_4_O + + response = await self._llm.prediction( + llm_model, + messages, + ["prompt"], + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + question = response["prompt"].strip() + + return { + "question": self._add_newline_before_hyphen(question) if task == 1 else question, + "difficulty": difficulty, + "topic": topic + } + + @staticmethod + def _get_writing_messages(task: int, topic: str, difficulty: str) -> List[Dict]: + # TODO: Should the muslim disclaimer be added to task 2? + task_prompt = ( + 'Craft a prompt for an IELTS Writing Task 1 General Training exercise that instructs the ' + 'student to compose a letter. The prompt should present a specific scenario or situation, ' + f'based on the topic of "{topic}", requiring the student to provide information, ' + 'advice, or instructions within the letter. Make sure that the generated prompt is ' + f'of {difficulty} difficulty and does not contain forbidden subjects in muslim countries.' 
+ ) if task == 1 else ( + f'Craft a comprehensive question of {difficulty} difficulty like the ones for IELTS ' + 'Writing Task 2 General Training that directs the candidate to delve into an in-depth ' + f'analysis of contrasting perspectives on the topic of "{topic}".' + ) + + task_instructions = ( + 'The prompt should end with "In the letter you should" followed by 3 bullet points of what ' + 'the answer should include.' + ) if task == 1 else ( + 'The question should lead to an answer with either "theories", "complicated information" or ' + 'be "very descriptive" on the topic.' + ) + + messages = [ + { + "role": "user", + "content": task_prompt + }, + { + "role": "user", + "content": task_instructions + } + ] + + return messages + + async def grade_writing_task(self, task: int, question: str, answer: str): + bare_minimum = 100 if task == 1 else 180 + + if not TextHelper.has_words(answer): + return self._zero_rating("The answer does not contain enough english words.") + elif not TextHelper.has_x_words(answer, bare_minimum): + return self._zero_rating("The answer is insufficient and too small to be graded.") + else: + template = self._get_writing_template() + messages = [ + { + "role": "system", + "content": ( + f'You are a helpful assistant designed to output JSON on this format: {template}' + ) + }, + { + "role": "user", + "content": ( + f'Evaluate the given Writing Task {task} response based on the IELTS grading system, ' + 'ensuring a strict assessment that penalizes errors. Deduct points for deviations ' + 'from the task, and assign a score of 0 if the response fails to address the question. ' + 'Additionally, provide a detailed commentary highlighting both strengths and ' + 'weaknesses in the response. 
' + f'\n Question: "{question}" \n Answer: "{answer}"') + } + ] + + if task == 1: + messages.append({ + "role": "user", + "content": ( + 'Refer to the parts of the letter as: "Greeting Opener", "bullet 1", "bullet 2", ' + '"bullet 3", "closer (restate the purpose of the letter)", "closing greeting"' + ) + }) + + llm_model = GPTModels.GPT_3_5_TURBO if task == 1 else GPTModels.GPT_4_O + temperature = ( + TemperatureSettings.GRADING_TEMPERATURE + if task == 1 else + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + response = await self._llm.prediction( + llm_model, + messages, + ["comment"], + temperature + ) + + perfect_answer_minimum = 150 if task == 1 else 250 + perfect_answer = await self._get_perfect_answer(question, perfect_answer_minimum) + + response["perfect_answer"] = perfect_answer["perfect_answer"] + response["overall"] = ExercisesHelper.fix_writing_overall(response["overall"], response["task_response"]) + response['fixed_text'] = await self._get_fixed_text(answer) + + ai_detection = await self._ai_detector.run_detection(answer) + if ai_detection is not None: + response['ai_detection'] = ai_detection + + return response + + async def _get_fixed_text(self, text): + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"fixed_text": "fixed test with no misspelling errors"}' + ) + }, + { + "role": "user", + "content": ( + 'Fix the errors in the given text and put it in a JSON. ' + f'Do not complete the answer, only replace what is wrong. 
\n The text: "{text}"' + ) + } + ] + + response = await self._llm.prediction( + GPTModels.GPT_3_5_TURBO, + messages, + ["fixed_text"], + 0.2, + False + ) + return response["fixed_text"] + + async def _get_perfect_answer(self, question: str, size: int) -> Dict: + messages = [ + { + "role": "system", + "content": ( + 'You are a helpful assistant designed to output JSON on this format: ' + '{"perfect_answer": "perfect answer for the question"}' + ) + }, + { + "role": "user", + "content": f'Write a perfect answer for this writing exercise of a IELTS exam. Question: {question}' + + }, + { + "role": "user", + "content": f'The answer must have at least {size} words' + } + ] + return await self._llm.prediction( + GPTModels.GPT_4_O, + messages, + ["perfect_answer"], + TemperatureSettings.GEN_QUESTION_TEMPERATURE + ) + + @staticmethod + def _zero_rating(comment: str): + return { + 'comment': comment, + 'overall': 0, + 'task_response': { + 'Task Achievement': { + "grade": 0.0, + "comment": "" + }, + 'Coherence and Cohesion': { + "grade": 0.0, + "comment": "" + }, + 'Lexical Resource': { + "grade": 0.0, + "comment": "" + }, + 'Grammatical Range and Accuracy': { + "grade": 0.0, + "comment": "" + } + } + } + + @staticmethod + def _get_writing_template(): + return { + "comment": "comment about student's response quality", + "overall": 0.0, + "task_response": { + "Task Achievement": { + "grade": 0.0, + "comment": "comment about Task Achievement of the student's response" + }, + "Coherence and Cohesion": { + "grade": 0.0, + "comment": "comment about Coherence and Cohesion of the student's response" + }, + "Lexical Resource": { + "grade": 0.0, + "comment": "comment about Lexical Resource of the student's response" + }, + "Grammatical Range and Accuracy": { + "grade": 0.0, + "comment": "comment about Grammatical Range and Accuracy of the student's response" + } + } + } + + @staticmethod + def _add_newline_before_hyphen(s): + return s.replace(" -", "\n-") + diff --git 
a/app/services/impl/third_parties/__init__.py b/app/services/impl/third_parties/__init__.py index de0420b..d8675cb 100644 --- a/app/services/impl/third_parties/__init__.py +++ b/app/services/impl/third_parties/__init__.py @@ -1,13 +1,13 @@ -from .aws_polly import AWSPolly -from .heygen import Heygen -from .openai import OpenAI -from .whisper import OpenAIWhisper -from .gpt_zero import GPTZero - -__all__ = [ - "AWSPolly", - "Heygen", - "OpenAI", - "OpenAIWhisper", - "GPTZero" -] +from .aws_polly import AWSPolly +from .heygen import Heygen +from .openai import OpenAI +from .whisper import OpenAIWhisper +from .gpt_zero import GPTZero + +__all__ = [ + "AWSPolly", + "Heygen", + "OpenAI", + "OpenAIWhisper", + "GPTZero" +] diff --git a/app/services/impl/third_parties/aws_polly.py b/app/services/impl/third_parties/aws_polly.py index 23ad1eb..559e8d1 100644 --- a/app/services/impl/third_parties/aws_polly.py +++ b/app/services/impl/third_parties/aws_polly.py @@ -1,87 +1,87 @@ -import random -from typing import Union - -import aiofiles -from aiobotocore.client import BaseClient - -from app.services.abc import ITextToSpeechService -from app.configs.constants import NeuralVoices - - -class AWSPolly(ITextToSpeechService): - - def __init__(self, client: BaseClient): - self._client = client - - async def synthesize_speech(self, text: str, voice: str, engine: str = "neural", output_format: str = "mp3"): - tts_response = await self._client.synthesize_speech( - Engine=engine, - Text=text, - OutputFormat=output_format, - VoiceId=voice - ) - return await tts_response['AudioStream'].read() - - async def text_to_speech(self, text: Union[list[str], str], file_name: str): - if isinstance(text, str): - audio_segments = await self._text_to_speech(text) - elif isinstance(text, list): - audio_segments = await self._conversation_to_speech(text) - else: - raise ValueError("Unsupported argument for text_to_speech") - - final_message = await self.synthesize_speech( - "This audio recording, for the 
listening exercise, has finished.", - "Stephen" - ) - - # Add finish message - audio_segments.append(final_message) - - # Combine the audio segments into a single audio file - combined_audio = b"".join(audio_segments) - # Save the combined audio to a single file - async with aiofiles.open(file_name, "wb") as f: - await f.write(combined_audio) - - print("Speech segments saved to " + file_name) - - async def _text_to_speech(self, text: str): - voice = random.choice(NeuralVoices.ALL_NEURAL_VOICES)['Id'] - # Initialize an empty list to store audio segments - audio_segments = [] - for part in self._divide_text(text): - audio_segments.append(await self.synthesize_speech(part, voice)) - - return audio_segments - - async def _conversation_to_speech(self, conversation: list): - # Initialize an empty list to store audio segments - audio_segments = [] - # Iterate through the text segments, convert to audio segments, and store them - for segment in conversation: - audio_segments.append(await self.synthesize_speech(segment["text"], segment["voice"])) - - return audio_segments - - @staticmethod - def _divide_text(text, max_length=3000): - if len(text) <= max_length: - return [text] - - divisions = [] - current_position = 0 - - while current_position < len(text): - next_position = min(current_position + max_length, len(text)) - next_period_position = text.rfind('.', current_position, next_position) - - if next_period_position != -1 and next_period_position > current_position: - divisions.append(text[current_position:next_period_position + 1]) - current_position = next_period_position + 1 - else: - # If no '.' 
found in the next chunk, split at max_length - divisions.append(text[current_position:next_position]) - current_position = next_position - - return divisions +import random +from typing import Union + +import aiofiles +from aiobotocore.client import BaseClient + +from app.services.abc import ITextToSpeechService +from app.configs.constants import NeuralVoices + + +class AWSPolly(ITextToSpeechService): + + def __init__(self, client: BaseClient): + self._client = client + + async def synthesize_speech(self, text: str, voice: str, engine: str = "neural", output_format: str = "mp3"): + tts_response = await self._client.synthesize_speech( + Engine=engine, + Text=text, + OutputFormat=output_format, + VoiceId=voice + ) + return await tts_response['AudioStream'].read() + + async def text_to_speech(self, text: Union[list[str], str], file_name: str): + if isinstance(text, str): + audio_segments = await self._text_to_speech(text) + elif isinstance(text, list): + audio_segments = await self._conversation_to_speech(text) + else: + raise ValueError("Unsupported argument for text_to_speech") + + final_message = await self.synthesize_speech( + "This audio recording, for the listening exercise, has finished.", + "Stephen" + ) + + # Add finish message + audio_segments.append(final_message) + + # Combine the audio segments into a single audio file + combined_audio = b"".join(audio_segments) + # Save the combined audio to a single file + async with aiofiles.open(file_name, "wb") as f: + await f.write(combined_audio) + + print("Speech segments saved to " + file_name) + + async def _text_to_speech(self, text: str): + voice = random.choice(NeuralVoices.ALL_NEURAL_VOICES)['Id'] + # Initialize an empty list to store audio segments + audio_segments = [] + for part in self._divide_text(text): + audio_segments.append(await self.synthesize_speech(part, voice)) + + return audio_segments + + async def _conversation_to_speech(self, conversation: list): + # Initialize an empty list to store audio 
segments + audio_segments = [] + # Iterate through the text segments, convert to audio segments, and store them + for segment in conversation: + audio_segments.append(await self.synthesize_speech(segment["text"], segment["voice"])) + + return audio_segments + + @staticmethod + def _divide_text(text, max_length=3000): + if len(text) <= max_length: + return [text] + + divisions = [] + current_position = 0 + + while current_position < len(text): + next_position = min(current_position + max_length, len(text)) + next_period_position = text.rfind('.', current_position, next_position) + + if next_period_position != -1 and next_period_position > current_position: + divisions.append(text[current_position:next_period_position + 1]) + current_position = next_period_position + 1 + else: + # If no '.' found in the next chunk, split at max_length + divisions.append(text[current_position:next_position]) + current_position = next_position + + return divisions diff --git a/app/services/impl/third_parties/gpt_zero.py b/app/services/impl/third_parties/gpt_zero.py index bee10db..051bbcf 100644 --- a/app/services/impl/third_parties/gpt_zero.py +++ b/app/services/impl/third_parties/gpt_zero.py @@ -1,52 +1,52 @@ -from logging import getLogger -from typing import Dict, Optional - -from httpx import AsyncClient - -from app.services.abc.third_parties.ai_detector import IAIDetectorService - - -class GPTZero(IAIDetectorService): - - _GPT_ZERO_ENDPOINT = 'https://api.gptzero.me/v2/predict/text' - - def __init__(self, client: AsyncClient, gpt_zero_key: str): - self._header = { - 'x-api-key': gpt_zero_key - } - self._http_client = client - self._logger = getLogger(__name__) - - async def run_detection(self, text: str): - data = { - 'document': text, - 'version': '', - 'multilingual': False - } - - response = await self._http_client.post(self._GPT_ZERO_ENDPOINT, headers=self._header, json=data) - if response.status_code != 200: - return None - return self._parse_detection(response.json()) - - def 
_parse_detection(self, response: Dict) -> Optional[Dict]: - try: - text_scan = response["documents"][0] - - filtered_sentences = [ - { - "sentence": item["sentence"], - "highlight_sentence_for_ai": item["highlight_sentence_for_ai"] - } - for item in text_scan["sentences"] - ] - - return { - "class_probabilities": text_scan["class_probabilities"], - "confidence_category": text_scan["confidence_category"], - "predicted_class": text_scan["predicted_class"], - "sentences": filtered_sentences - } - except Exception as e: - self._logger.error(f'Failed to parse GPT\'s Zero response: {str(e)}') - return None +from logging import getLogger +from typing import Dict, Optional + +from httpx import AsyncClient + +from app.services.abc.third_parties.ai_detector import IAIDetectorService + + +class GPTZero(IAIDetectorService): + + _GPT_ZERO_ENDPOINT = 'https://api.gptzero.me/v2/predict/text' + + def __init__(self, client: AsyncClient, gpt_zero_key: str): + self._header = { + 'x-api-key': gpt_zero_key + } + self._http_client = client + self._logger = getLogger(__name__) + + async def run_detection(self, text: str): + data = { + 'document': text, + 'version': '', + 'multilingual': False + } + + response = await self._http_client.post(self._GPT_ZERO_ENDPOINT, headers=self._header, json=data) + if response.status_code != 200: + return None + return self._parse_detection(response.json()) + + def _parse_detection(self, response: Dict) -> Optional[Dict]: + try: + text_scan = response["documents"][0] + + filtered_sentences = [ + { + "sentence": item["sentence"], + "highlight_sentence_for_ai": item["highlight_sentence_for_ai"] + } + for item in text_scan["sentences"] + ] + + return { + "class_probabilities": text_scan["class_probabilities"], + "confidence_category": text_scan["confidence_category"], + "predicted_class": text_scan["predicted_class"], + "sentences": filtered_sentences + } + except Exception as e: + self._logger.error(f'Failed to parse GPT\'s Zero response: {str(e)}') + 
return None diff --git a/app/services/impl/third_parties/heygen.py b/app/services/impl/third_parties/heygen.py index 31361bf..6427673 100644 --- a/app/services/impl/third_parties/heygen.py +++ b/app/services/impl/third_parties/heygen.py @@ -1,90 +1,90 @@ -import asyncio -import os -import logging -import aiofiles - -from httpx import AsyncClient - -from app.services.abc import IVideoGeneratorService - - -class Heygen(IVideoGeneratorService): - - # TODO: Not used, remove if not necessary - # CREATE_VIDEO_URL = 'https://api.heygen.com/v1/template.generate' - - _GET_VIDEO_URL = 'https://api.heygen.com/v1/video_status.get' - - def __init__(self, client: AsyncClient, heygen_token: str): - self._get_header = { - 'X-Api-Key': heygen_token - } - self._post_header = { - 'X-Api-Key': heygen_token, - 'Content-Type': 'application/json' - } - self._http_client = client - self._logger = logging.getLogger(__name__) - - async def create_video(self, text: str, avatar: str): - # POST TO CREATE VIDEO - create_video_url = 'https://api.heygen.com/v2/template/' + avatar + '/generate' - data = { - "test": False, - "caption": False, - "title": "video_title", - "variables": { - "script_here": { - "name": "script_here", - "type": "text", - "properties": { - "content": text - } - } - } - } - response = await self._http_client.post(create_video_url, headers=self._post_header, json=data) - self._logger.info(response.status_code) - self._logger.info(response.json()) - - # GET TO CHECK STATUS AND GET VIDEO WHEN READY - video_id = response.json()["data"]["video_id"] - params = { - 'video_id': response.json()["data"]["video_id"] - } - response = {} - status = "processing" - error = None - - while status != "completed" and error is None: - response = await self._http_client.get(self._GET_VIDEO_URL, headers=self._get_header, params=params) - response_data = response.json() - - status = response_data["data"]["status"] - error = response_data["data"]["error"] - - if status != "completed" and error is 
None: - self._logger.info(f"Status: {status}") - await asyncio.sleep(10) # Wait for 10 second before the next request - - self._logger.info(response.status_code) - self._logger.info(response.json()) - - # DOWNLOAD VIDEO - download_url = response.json()['data']['video_url'] - output_directory = 'download-video/' - output_filename = video_id + '.mp4' - - response = await self._http_client.get(download_url) - - if response.status_code == 200: - os.makedirs(output_directory, exist_ok=True) # Create the directory if it doesn't exist - output_path = os.path.join(output_directory, output_filename) - async with aiofiles.open(output_path, 'wb') as f: - await f.write(response.content) - self._logger.info(f"File '{output_filename}' downloaded successfully.") - return output_filename - else: - self._logger.error(f"Failed to download file. Status code: {response.status_code}") - return None - +import asyncio +import os +import logging +import aiofiles + +from httpx import AsyncClient + +from app.services.abc import IVideoGeneratorService + + +class Heygen(IVideoGeneratorService): + + # TODO: Not used, remove if not necessary + # CREATE_VIDEO_URL = 'https://api.heygen.com/v1/template.generate' + + _GET_VIDEO_URL = 'https://api.heygen.com/v1/video_status.get' + + def __init__(self, client: AsyncClient, heygen_token: str): + self._get_header = { + 'X-Api-Key': heygen_token + } + self._post_header = { + 'X-Api-Key': heygen_token, + 'Content-Type': 'application/json' + } + self._http_client = client + self._logger = logging.getLogger(__name__) + + async def create_video(self, text: str, avatar: str): + # POST TO CREATE VIDEO + create_video_url = 'https://api.heygen.com/v2/template/' + avatar + '/generate' + data = { + "test": False, + "caption": False, + "title": "video_title", + "variables": { + "script_here": { + "name": "script_here", + "type": "text", + "properties": { + "content": text + } + } + } + } + response = await self._http_client.post(create_video_url, 
headers=self._post_header, json=data) + self._logger.info(response.status_code) + self._logger.info(response.json()) + + # GET TO CHECK STATUS AND GET VIDEO WHEN READY + video_id = response.json()["data"]["video_id"] + params = { + 'video_id': response.json()["data"]["video_id"] + } + response = {} + status = "processing" + error = None + + while status != "completed" and error is None: + response = await self._http_client.get(self._GET_VIDEO_URL, headers=self._get_header, params=params) + response_data = response.json() + + status = response_data["data"]["status"] + error = response_data["data"]["error"] + + if status != "completed" and error is None: + self._logger.info(f"Status: {status}") + await asyncio.sleep(10) # Wait for 10 second before the next request + + self._logger.info(response.status_code) + self._logger.info(response.json()) + + # DOWNLOAD VIDEO + download_url = response.json()['data']['video_url'] + output_directory = 'download-video/' + output_filename = video_id + '.mp4' + + response = await self._http_client.get(download_url) + + if response.status_code == 200: + os.makedirs(output_directory, exist_ok=True) # Create the directory if it doesn't exist + output_path = os.path.join(output_directory, output_filename) + async with aiofiles.open(output_path, 'wb') as f: + await f.write(response.content) + self._logger.info(f"File '{output_filename}' downloaded successfully.") + return output_filename + else: + self._logger.error(f"Failed to download file. 
Status code: {response.status_code}") + return None + diff --git a/app/services/impl/third_parties/openai.py b/app/services/impl/third_parties/openai.py index e049d93..4b9d246 100644 --- a/app/services/impl/third_parties/openai.py +++ b/app/services/impl/third_parties/openai.py @@ -1,150 +1,150 @@ -import json -import re -import logging -from typing import List, Optional, Callable, TypeVar -from openai import AsyncOpenAI -from openai.types.chat import ChatCompletionMessageParam - -from app.services.abc import ILLMService -from app.helpers import count_tokens -from app.configs.constants import BLACKLISTED_WORDS -from pydantic import BaseModel - -T = TypeVar('T', bound=BaseModel) - - -class OpenAI(ILLMService): - - MAX_TOKENS = 4097 - TRY_LIMIT = 2 - - def __init__(self, client: AsyncOpenAI): - self._client = client - self._logger = logging.getLogger(__name__) - self._default_model = "gpt-4o-2024-08-06" - - async def prediction( - self, - model: str, - messages: List[ChatCompletionMessageParam], - fields_to_check: Optional[List[str]], - temperature: float, - check_blacklisted: bool = True, - token_count: int = -1 - ): - if token_count == -1: - token_count = self._count_total_tokens(messages) - return await self._prediction(model, messages, token_count, fields_to_check, temperature, 0, check_blacklisted) - - async def _prediction( - self, - model: str, - messages: List[ChatCompletionMessageParam], - token_count: int, - fields_to_check: Optional[List[str]], - temperature: float, - try_count: int, - check_blacklisted: bool, - ): - result = await self._client.chat.completions.create( - model=model, - max_tokens=int(self.MAX_TOKENS - token_count - 300), - temperature=float(temperature), - messages=messages, - response_format={"type": "json_object"} - ) - result = result.choices[0].message.content - - if check_blacklisted: - found_blacklisted_word = self._get_found_blacklisted_words(result) - - if found_blacklisted_word is not None and try_count < self.TRY_LIMIT: - 
self._logger.warning("Result contains blacklisted words: " + str(found_blacklisted_word)) - return await self._prediction( - model, messages, token_count, fields_to_check, temperature, (try_count + 1), check_blacklisted - ) - elif found_blacklisted_word is not None and try_count >= self.TRY_LIMIT: - return "" - - if fields_to_check is None: - return json.loads(result) - - if not self._check_fields(result, fields_to_check) and try_count < self.TRY_LIMIT: - return await self._prediction( - model, messages, token_count, fields_to_check, temperature, (try_count + 1), check_blacklisted - ) - - return json.loads(result) - - async def prediction_override(self, **kwargs): - return await self._client.chat.completions.create( - **kwargs - ) - - @staticmethod - def _get_found_blacklisted_words(text: str): - text_lower = text.lower() - for word in BLACKLISTED_WORDS: - if re.search(r'\b' + re.escape(word) + r'\b', text_lower): - return word - return None - - @staticmethod - def _count_total_tokens(messages): - total_tokens = 0 - for message in messages: - total_tokens += count_tokens(message["content"])["n_tokens"] - return total_tokens - - @staticmethod - def _check_fields(obj, fields): - return all(field in obj for field in fields) - - async def pydantic_prediction( - self, - messages: List[ChatCompletionMessageParam], - map_to_model: Callable, - json_scheme: str, - *, - model: Optional[str] = None, - temperature: Optional[float] = None, - max_retries: int = 3 - ) -> List[T] | T | None: - params = { - "messages": messages, - "response_format": {"type": "json_object"}, - "model": model if model else self._default_model - } - - if temperature: - params["temperature"] = temperature - - attempt = 0 - while attempt < max_retries: - result = await self._client.chat.completions.create(**params) - result_content = result.choices[0].message.content - try: - result_json = json.loads(result_content) - return map_to_model(result_json) - except Exception as e: - attempt += 1 - 
self._logger.info(f"GPT returned malformed response: {result_content}\n {str(e)}") - params["messages"] = [ - { - "role": "user", - "content": ( - "Your previous response wasn't in the json format I've explicitly told you to output. " - f"In your next response, you will fix it and return me just the json I've asked." - ) - }, - { - "role": "user", - "content": ( - f"Previous response: {result_content}\n" - f"JSON format: {json_scheme}" - ) - } - ] - if attempt >= max_retries: - self._logger.error(f"Max retries exceeded!") - return None +import json +import re +import logging +from typing import List, Optional, Callable, TypeVar +from openai import AsyncOpenAI +from openai.types.chat import ChatCompletionMessageParam + +from app.services.abc import ILLMService +from app.helpers import count_tokens +from app.configs.constants import BLACKLISTED_WORDS +from pydantic import BaseModel + +T = TypeVar('T', bound=BaseModel) + + +class OpenAI(ILLMService): + + MAX_TOKENS = 4097 + TRY_LIMIT = 2 + + def __init__(self, client: AsyncOpenAI): + self._client = client + self._logger = logging.getLogger(__name__) + self._default_model = "gpt-4o-2024-08-06" + + async def prediction( + self, + model: str, + messages: List[ChatCompletionMessageParam], + fields_to_check: Optional[List[str]], + temperature: float, + check_blacklisted: bool = True, + token_count: int = -1 + ): + if token_count == -1: + token_count = self._count_total_tokens(messages) + return await self._prediction(model, messages, token_count, fields_to_check, temperature, 0, check_blacklisted) + + async def _prediction( + self, + model: str, + messages: List[ChatCompletionMessageParam], + token_count: int, + fields_to_check: Optional[List[str]], + temperature: float, + try_count: int, + check_blacklisted: bool, + ): + result = await self._client.chat.completions.create( + model=model, + max_tokens=int(self.MAX_TOKENS - token_count - 300), + temperature=float(temperature), + messages=messages, + response_format={"type": 
"json_object"} + ) + result = result.choices[0].message.content + + if check_blacklisted: + found_blacklisted_word = self._get_found_blacklisted_words(result) + + if found_blacklisted_word is not None and try_count < self.TRY_LIMIT: + self._logger.warning("Result contains blacklisted words: " + str(found_blacklisted_word)) + return await self._prediction( + model, messages, token_count, fields_to_check, temperature, (try_count + 1), check_blacklisted + ) + elif found_blacklisted_word is not None and try_count >= self.TRY_LIMIT: + return "" + + if fields_to_check is None: + return json.loads(result) + + if not self._check_fields(result, fields_to_check) and try_count < self.TRY_LIMIT: + return await self._prediction( + model, messages, token_count, fields_to_check, temperature, (try_count + 1), check_blacklisted + ) + + return json.loads(result) + + async def prediction_override(self, **kwargs): + return await self._client.chat.completions.create( + **kwargs + ) + + @staticmethod + def _get_found_blacklisted_words(text: str): + text_lower = text.lower() + for word in BLACKLISTED_WORDS: + if re.search(r'\b' + re.escape(word) + r'\b', text_lower): + return word + return None + + @staticmethod + def _count_total_tokens(messages): + total_tokens = 0 + for message in messages: + total_tokens += count_tokens(message["content"])["n_tokens"] + return total_tokens + + @staticmethod + def _check_fields(obj, fields): + return all(field in obj for field in fields) + + async def pydantic_prediction( + self, + messages: List[ChatCompletionMessageParam], + map_to_model: Callable, + json_scheme: str, + *, + model: Optional[str] = None, + temperature: Optional[float] = None, + max_retries: int = 3 + ) -> List[T] | T | None: + params = { + "messages": messages, + "response_format": {"type": "json_object"}, + "model": model if model else self._default_model + } + + if temperature: + params["temperature"] = temperature + + attempt = 0 + while attempt < max_retries: + result = await 
self._client.chat.completions.create(**params) + result_content = result.choices[0].message.content + try: + result_json = json.loads(result_content) + return map_to_model(result_json) + except Exception as e: + attempt += 1 + self._logger.info(f"GPT returned malformed response: {result_content}\n {str(e)}") + params["messages"] = [ + { + "role": "user", + "content": ( + "Your previous response wasn't in the json format I've explicitly told you to output. " + f"In your next response, you will fix it and return me just the json I've asked." + ) + }, + { + "role": "user", + "content": ( + f"Previous response: {result_content}\n" + f"JSON format: {json_scheme}" + ) + } + ] + if attempt >= max_retries: + self._logger.error(f"Max retries exceeded!") + return None diff --git a/app/services/impl/third_parties/whisper.py b/app/services/impl/third_parties/whisper.py index c379288..ca87070 100644 --- a/app/services/impl/third_parties/whisper.py +++ b/app/services/impl/third_parties/whisper.py @@ -1,22 +1,22 @@ -import os - -from fastapi.concurrency import run_in_threadpool - -from whisper import Whisper -from app.services.abc import ISpeechToTextService - - -class OpenAIWhisper(ISpeechToTextService): - - def __init__(self, model: Whisper): - self._model = model - - async def speech_to_text(self, file_path): - if os.path.exists(file_path): - result = await run_in_threadpool( - self._model.transcribe, file_path, fp16=False, language='English', verbose=False - ) - return result["text"] - else: - print("File not found:", file_path) - raise Exception("File " + file_path + " not found.") +import os + +from fastapi.concurrency import run_in_threadpool + +from whisper import Whisper +from app.services.abc import ISpeechToTextService + + +class OpenAIWhisper(ISpeechToTextService): + + def __init__(self, model: Whisper): + self._model = model + + async def speech_to_text(self, file_path): + if os.path.exists(file_path): + result = await run_in_threadpool( + self._model.transcribe, 
file_path, fp16=False, language='English', verbose=False + ) + return result["text"] + else: + print("File not found:", file_path) + raise Exception("File " + file_path + " not found.") diff --git a/app/services/impl/training/__init__.py b/app/services/impl/training/__init__.py index 8ea231b..e0523c0 100644 --- a/app/services/impl/training/__init__.py +++ b/app/services/impl/training/__init__.py @@ -1,7 +1,7 @@ -from .training import TrainingService -from .kb import TrainingContentKnowledgeBase - -__all__ = [ - "TrainingService", - "TrainingContentKnowledgeBase" -] +from .training import TrainingService +from .kb import TrainingContentKnowledgeBase + +__all__ = [ + "TrainingService", + "TrainingContentKnowledgeBase" +] diff --git a/app/services/impl/training/kb.py b/app/services/impl/training/kb.py index dce316e..a19ce7b 100644 --- a/app/services/impl/training/kb.py +++ b/app/services/impl/training/kb.py @@ -1,88 +1,88 @@ -import json -import os -from logging import getLogger -from typing import Dict, List - -import faiss -import pickle - -from app.services.abc import IKnowledgeBase - - -class TrainingContentKnowledgeBase(IKnowledgeBase): - - def __init__(self, embeddings, path: str = 'pathways_2_rw_with_ids.json'): - self._embedding_model = embeddings - self._tips = None # self._read_json(path) - self._category_metadata = None - self._indices = None - self.load_indices_and_metadata() - self._logger = getLogger(__name__) - - @staticmethod - def _read_json(path: str) -> Dict[str, any]: - with open(path, 'r', encoding="utf-8") as json_file: - return json.loads(json_file.read()) - - def print_category_count(self): - category_tips = {} - for unit in self._tips['units']: - for page in unit['pages']: - for tip in page['tips']: - category = tip['category'].lower().replace(" ", "_") - if category not in category_tips: - category_tips[category] = 0 - else: - category_tips[category] = category_tips[category] + 1 - print(category_tips) - - def 
create_embeddings_and_save_them(self) -> None: - category_embeddings = {} - category_metadata = {} - - for unit in self._tips['units']: - for page in unit['pages']: - for tip in page['tips']: - category = tip['category'].lower().replace(" ", "_") - if category not in category_embeddings: - category_embeddings[category] = [] - category_metadata[category] = [] - - category_embeddings[category].append(tip['embedding']) - category_metadata[category].append({"id": tip['id'], "text": tip['text']}) - - category_indices = {} - for category, embeddings in category_embeddings.items(): - embeddings_array = self._embedding_model.encode(embeddings) - index = faiss.IndexFlatL2(embeddings_array.shape[1]) - index.add(embeddings_array) - category_indices[category] = index - - faiss.write_index(index, f"./faiss/{category}_tips_index.faiss") - - with open("./faiss/tips_metadata.pkl", "wb") as f: - pickle.dump(category_metadata, f) - - def load_indices_and_metadata( - self, - directory: str = './faiss', - suffix: str = '_tips_index.faiss', - metadata_path: str = './faiss/tips_metadata.pkl' - ): - files = os.listdir(directory) - self._indices = {} - for file in files: - if file.endswith(suffix): - self._indices[file[:-len(suffix)]] = faiss.read_index(f'{directory}/{file}') - self._logger.info(f'Loaded embeddings for {file[:-len(suffix)]} category.') - - with open(metadata_path, 'rb') as f: - self._category_metadata = pickle.load(f) - self._logger.info("Loaded tips metadata") - - def query_knowledge_base(self, query: str, category: str, top_k: int = 5) -> List[Dict[str, str]]: - query_embedding = self._embedding_model.encode([query]) - index = self._indices[category] - D, I = index.search(query_embedding, top_k) - results = [self._category_metadata[category][i] for i in I[0]] - return results +import json +import os +from logging import getLogger +from typing import Dict, List + +import faiss +import pickle + +from app.services.abc import IKnowledgeBase + + +class 
TrainingContentKnowledgeBase(IKnowledgeBase): + + def __init__(self, embeddings, path: str = 'pathways_2_rw_with_ids.json'): + self._embedding_model = embeddings + self._tips = None # self._read_json(path) + self._category_metadata = None + self._indices = None + self.load_indices_and_metadata() + self._logger = getLogger(__name__) + + @staticmethod + def _read_json(path: str) -> Dict[str, any]: + with open(path, 'r', encoding="utf-8") as json_file: + return json.loads(json_file.read()) + + def print_category_count(self): + category_tips = {} + for unit in self._tips['units']: + for page in unit['pages']: + for tip in page['tips']: + category = tip['category'].lower().replace(" ", "_") + if category not in category_tips: + category_tips[category] = 0 + else: + category_tips[category] = category_tips[category] + 1 + print(category_tips) + + def create_embeddings_and_save_them(self) -> None: + category_embeddings = {} + category_metadata = {} + + for unit in self._tips['units']: + for page in unit['pages']: + for tip in page['tips']: + category = tip['category'].lower().replace(" ", "_") + if category not in category_embeddings: + category_embeddings[category] = [] + category_metadata[category] = [] + + category_embeddings[category].append(tip['embedding']) + category_metadata[category].append({"id": tip['id'], "text": tip['text']}) + + category_indices = {} + for category, embeddings in category_embeddings.items(): + embeddings_array = self._embedding_model.encode(embeddings) + index = faiss.IndexFlatL2(embeddings_array.shape[1]) + index.add(embeddings_array) + category_indices[category] = index + + faiss.write_index(index, f"./faiss/{category}_tips_index.faiss") + + with open("./faiss/tips_metadata.pkl", "wb") as f: + pickle.dump(category_metadata, f) + + def load_indices_and_metadata( + self, + directory: str = './faiss', + suffix: str = '_tips_index.faiss', + metadata_path: str = './faiss/tips_metadata.pkl' + ): + files = os.listdir(directory) + self._indices = 
{} + for file in files: + if file.endswith(suffix): + self._indices[file[:-len(suffix)]] = faiss.read_index(f'{directory}/{file}') + self._logger.info(f'Loaded embeddings for {file[:-len(suffix)]} category.') + + with open(metadata_path, 'rb') as f: + self._category_metadata = pickle.load(f) + self._logger.info("Loaded tips metadata") + + def query_knowledge_base(self, query: str, category: str, top_k: int = 5) -> List[Dict[str, str]]: + query_embedding = self._embedding_model.encode([query]) + index = self._indices[category] + D, I = index.search(query_embedding, top_k) + results = [self._category_metadata[category][i] for i in I[0]] + return results diff --git a/app/services/impl/training/training.py b/app/services/impl/training/training.py index 53f897b..7e5c8dd 100644 --- a/app/services/impl/training/training.py +++ b/app/services/impl/training/training.py @@ -1,459 +1,459 @@ -import re -from datetime import datetime -from functools import reduce -from logging import getLogger - -from typing import Dict, List - -from app.configs.constants import TemperatureSettings, GPTModels -from app.helpers import count_tokens -from app.repositories.abc import IDocumentStore -from app.services.abc import ILLMService, ITrainingService, IKnowledgeBase -from app.dtos.training import * - - -class TrainingService(ITrainingService): - TOOLS = [ - 'critical_thinking', - 'language_for_writing', - 'reading_skills', - 'strategy', - 'words', - 'writing_skills' - ] - # strategy word_link ct_focus reading_skill word_partners writing_skill language_for_writing - - def __init__(self, llm: ILLMService, firestore: IDocumentStore, training_kb: IKnowledgeBase): - self._llm = llm - self._db = firestore - self._kb = training_kb - self._logger = getLogger(__name__) - - async def fetch_tips(self, context: str, question: str, answer: str, correct_answer: str): - messages = self._get_question_tips(question, answer, correct_answer, context) - - token_count = reduce(lambda count, item: count + 
count_tokens(item)['n_tokens'], - map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) - - response = await self._llm.prediction( - GPTModels.GPT_3_5_TURBO, - messages, - None, - TemperatureSettings.TIPS_TEMPERATURE, - token_count=token_count - ) - - if isinstance(response, str): - response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response) - - return response - - @staticmethod - def _get_question_tips(question: str, answer: str, correct_answer: str, context: str = None): - messages = [ - { - "role": "user", - "content": ( - "You are a IELTS exam program that analyzes incorrect answers to questions and gives tips to " - "help students understand why it was a wrong answer and gives helpful insight for the future. " - "The tip should refer to the context and question." - ), - } - ] - - if not (context is None or context == ""): - messages.append({ - "role": "user", - "content": f"This is the context for the question: {context}", - }) - - messages.extend([ - { - "role": "user", - "content": f"This is the question: {question}", - }, - { - "role": "user", - "content": f"This is the answer: {answer}", - }, - { - "role": "user", - "content": f"This is the correct answer: {correct_answer}", - } - ]) - - return messages - - async def get_training_content(self, training_content: Dict) -> Dict: - user, stats = training_content["userID"], training_content["stats"] - exam_data, exam_map = await self._sort_out_solutions(stats) - training_content = await self._get_exam_details_and_tips(exam_data) - tips = self._query_kb(training_content.queries) - usefull_tips = await self._get_usefull_tips(exam_data, tips) - exam_map = self._merge_exam_map_with_details(exam_map, training_content.details) - - weak_areas = {"weak_areas": []} - for area in training_content.weak_areas: - weak_areas["weak_areas"].append(area.dict()) - - training_doc = { - 'created_at': int(datetime.now().timestamp() * 1000), - **exam_map, - **usefull_tips.dict(), - **weak_areas, - "user": user - } 
- doc_id = await self._db.save_to_db('training', training_doc) - return { - "id": doc_id - } - - @staticmethod - def _merge_exam_map_with_details(exam_map: Dict[str, any], details: List[DetailsDTO]): - new_exam_map = {"exams": []} - for detail in details: - new_exam_map["exams"].append({ - "id": detail.exam_id, - "date": detail.date, - "performance_comment": detail.performance_comment, - "detailed_summary": detail.detailed_summary, - **exam_map[detail.exam_id] - }) - return new_exam_map - - def _query_kb(self, queries: List[QueryDTO]): - map_categories = { - "critical_thinking": "ct_focus", - "language_for_writing": "language_for_writing", - "reading_skills": "reading_skill", - "strategy": "strategy", - "writing_skills": "writing_skill" - } - - tips = {"tips": []} - for query in queries: - if query.category == "words": - tips["tips"].extend( - self._kb.query_knowledge_base(query.text, "word_link") - ) - tips["tips"].extend( - self._kb.query_knowledge_base(query.text, "word_partners") - ) - else: - if query.category in map_categories: - tips["tips"].extend( - self._kb.query_knowledge_base(query.text, map_categories[query.category]) - ) - else: - self._logger.info(f"GTP tried to query knowledge base for {query.category} and it doesn't exist.") - return tips - - async def _get_exam_details_and_tips(self, exam_data: Dict[str, any]) -> TrainingContentDTO: - json_schema = ( - '{ "details": [{"exam_id": "", "date": 0, "performance_comment": "", "detailed_summary": ""}],' - ' "weak_areas": [{"area": "", "comment": ""}], "queries": [{"text": "", "category": ""}] }' - ) - messages = [ - { - "role": "user", - "content": ( - f"I'm going to provide you with exam data, you will take the exam data and fill this json " - f'schema : {json_schema}. 
"performance_comment" is a short sentence that describes the ' - 'students\'s performance and main mistakes in a single exam, "detailed_summary" is a detailed ' - 'summary of the student\'s performance, "weak_areas" are identified areas' - ' across all exams which need to be improved upon, for example, area "Grammar and Syntax" comment "Issues' - ' with sentence structure and punctuation.", the "queries" field is where you will write queries ' - 'for tips that will be displayed to the student, the category attribute is a collection of ' - 'embeddings and the text will be the text used to query the knowledge base. The categories are ' - f'the following [{", ".join(self.TOOLS)}]. The exam data will be a json where the key of the field ' - '"exams" is the exam id, an exam can be composed of multiple modules or single modules. The student' - ' will see your response so refrain from using phrasing like "The student" did x, y and z. If the ' - 'field "answer" in a question is an empty array "[]", then the student didn\'t answer any question ' - 'and you must address that in your response. Also questions aren\'t modules, the only modules are: ' - 'level, speaking, writing, reading and listening. The details array needs to be tailored to the ' - 'exam attempt, even if you receive the same exam you must treat as different exams by their id.' - 'Don\'t make references to an exam by it\'s id, the GUI will handle that so the student knows ' - 'which is the exam your comments and summary are referencing too. Even if the student hasn\'t ' - 'submitted no answers for an exam, you must still fill the details structure addressing that fact.' 
- ) - }, - { - "role": "user", - "content": f'Exam Data: {str(exam_data)}' - } - ] - return await self._llm.pydantic_prediction(messages, self._map_gpt_response, json_schema) - - async def _get_usefull_tips(self, exam_data: Dict[str, any], tips: Dict[str, any]) -> TipsDTO: - json_schema = ( - '{ "tip_ids": [] }' - ) - messages = [ - { - "role": "user", - "content": ( - f"I'm going to provide you with tips and I want you to return to me the tips that " - f"can be usefull for the student that made the exam that I'm going to send you, return " - f"me the tip ids in this json format {json_schema}." - ) - }, - { - "role": "user", - "content": f'Exam Data: {str(exam_data)}' - }, - { - "role": "user", - "content": f'Tips: {str(tips)}' - } - ] - return await self._llm.pydantic_prediction(messages, lambda response: TipsDTO(**response), json_schema) - - @staticmethod - def _map_gpt_response(response: Dict[str, any]) -> TrainingContentDTO: - parsed_response = { - "details": [DetailsDTO(**detail) for detail in response["details"]], - "weak_areas": [WeakAreaDTO(**area) for area in response["weak_areas"]], - "queries": [QueryDTO(**query) for query in response["queries"]] - } - return TrainingContentDTO(**parsed_response) - - async def _sort_out_solutions(self, stats): - grouped_stats = {} - for stat in stats: - session_key = f'{str(stat["date"])}-{stat["user"]}' - module = stat["module"] - exam_id = stat["exam"] - - if session_key not in grouped_stats: - grouped_stats[session_key] = {} - if module not in grouped_stats[session_key]: - grouped_stats[session_key][module] = { - "stats": [], - "exam_id": exam_id - } - grouped_stats[session_key][module]["stats"].append(stat) - - exercises = {} - exam_map = {} - for session_key, modules in grouped_stats.items(): - exercises[session_key] = {} - for module, module_stats in modules.items(): - exercises[session_key][module] = {} - - exam_id = module_stats["exam_id"] - if exam_id not in exercises[session_key][module]: - 
exercises[session_key][module][exam_id] = {"date": None, "exercises": []} - - exam_total_questions = 0 - exam_total_correct = 0 - - for stat in module_stats["stats"]: - exam_total_questions += stat["score"]["total"] - exam_total_correct += stat["score"]["correct"] - exercises[session_key][module][exam_id]["date"] = stat["date"] - - if session_key not in exam_map: - exam_map[session_key] = {"stat_ids": [], "score": 0} - exam_map[session_key]["stat_ids"].append(stat["id"]) - - exam = await self._db.get_doc_by_id(module, exam_id) - if module == "listening": - exercises[session_key][module][exam_id]["exercises"].extend( - self._get_listening_solutions(stat, exam)) - elif module == "reading": - exercises[session_key][module][exam_id]["exercises"].extend( - self._get_reading_solutions(stat, exam)) - elif module == "writing": - exercises[session_key][module][exam_id]["exercises"].extend( - self._get_writing_prompts_and_answers(stat, exam) - ) - elif module == "speaking": - exercises[session_key][module][exam_id]["exercises"].extend( - self._get_speaking_solutions(stat, exam) - ) - elif module == "level": - exercises[session_key][module][exam_id]["exercises"].extend( - self._get_level_solutions(stat, exam) - ) - - exam_map[session_key]["score"] = round((exam_total_correct / exam_total_questions) * 100) - exam_map[session_key]["module"] = module - - return {"exams": exercises}, exam_map - - def _get_writing_prompts_and_answers(self, stat, exam): - result = [] - try: - exercises = [] - for solution in stat['solutions']: - answer = solution['solution'] - exercise_id = solution['id'] - exercises.append({ - "exercise_id": exercise_id, - "answer": answer - }) - for exercise in exercises: - for exam_exercise in exam["exercises"]: - if exam_exercise["id"] == exercise["exercise_id"]: - result.append({ - "exercise": exam_exercise["prompt"], - "answer": exercise["answer"] - }) - - except KeyError as e: - self._logger.warning(f"Malformed stat object: {str(e)}") - - return result - - 
@staticmethod - def _get_mc_question(exercise, stat): - shuffle_maps = stat.get("shuffleMaps", []) - answer = stat["solutions"] if len(shuffle_maps) == 0 else [] - if len(shuffle_maps) != 0: - for solution in stat["solutions"]: - shuffle_map = [ - item["map"] for item in shuffle_maps - if item["questionID"] == solution["question"] - ] - answer.append({ - "question": solution["question"], - "option": shuffle_map[solution["option"]] - }) - return { - "question": exercise["prompt"], - "exercise": exercise["questions"], - "answer": stat["solutions"] - } - - @staticmethod - def _swap_key_name(d, original_key, new_key): - d[new_key] = d.pop(original_key) - return d - - def _get_level_solutions(self, stat, exam): - result = [] - try: - for part in exam["parts"]: - for exercise in part["exercises"]: - if exercise["id"] == stat["exercise"]: - if stat["type"] == "fillBlanks": - result.append({ - "prompt": exercise["prompt"], - "template": exercise["text"], - "words": exercise["words"], - "solutions": exercise["solutions"], - "answer": [ - self._swap_key_name(item, 'solution', 'option') - for item in stat["solutions"] - ] - }) - elif stat["type"] == "multipleChoice": - result.append(self._get_mc_question(exercise, stat)) - except KeyError as e: - self._logger.warning(f"Malformed stat object: {str(e)}") - return result - - def _get_listening_solutions(self, stat, exam): - result = [] - try: - for part in exam["parts"]: - for exercise in part["exercises"]: - if exercise["id"] == stat["exercise"]: - if stat["type"] == "writeBlanks": - result.append({ - "question": exercise["prompt"], - "template": exercise["text"], - "solution": exercise["solutions"], - "answer": stat["solutions"] - }) - elif stat["type"] == "fillBlanks": - result.append({ - "question": exercise["prompt"], - "template": exercise["text"], - "words": exercise["words"], - "solutions": exercise["solutions"], - "answer": stat["solutions"] - }) - elif stat["type"] == "multipleChoice": - 
result.append(self._get_mc_question(exercise, stat)) - - except KeyError as e: - self._logger.warning(f"Malformed stat object: {str(e)}") - return result - - @staticmethod - def _find_shuffle_map(shuffle_maps, question_id): - return next((item["map"] for item in shuffle_maps if item["questionID"] == question_id), None) - - def _get_speaking_solutions(self, stat, exam): - result = {} - try: - result = { - "comments": { - key: value['comment'] for key, value in stat['solutions'][0]['evaluation']['task_response'].items()} - , - "exercises": {} - } - - for exercise in exam["exercises"]: - if exercise["id"] == stat["exercise"]: - if stat["type"] == "interactiveSpeaking": - for i in range(len(exercise["prompts"])): - result["exercises"][f"exercise_{i+1}"] = { - "question": exercise["prompts"][i]["text"] - } - for i in range(len(exercise["prompts"])): - answer = stat['solutions'][0]["evaluation"].get(f'transcript_{i+1}', '') - result["exercises"][f"exercise_{i+1}"]["answer"] = answer - elif stat["type"] == "speaking": - result["exercises"]["exercise_1"] = { - "question": exercise["text"], - "answer": stat['solutions'][0]["evaluation"].get(f'transcript', '') - } - except KeyError as e: - self._logger.warning(f"Malformed stat object: {str(e)}") - return [result] - - def _get_reading_solutions(self, stat, exam): - result = [] - try: - for part in exam["parts"]: - text = part["text"] - for exercise in part["exercises"]: - if exercise["id"] == stat["exercise"]: - if stat["type"] == "fillBlanks": - result.append({ - "text": text, - "question": exercise["prompt"], - "template": exercise["text"], - "words": exercise["words"], - "solutions": exercise["solutions"], - "answer": stat["solutions"] - }) - elif stat["type"] == "writeBlanks": - result.append({ - "text": text, - "question": exercise["prompt"], - "template": exercise["text"], - "solutions": exercise["solutions"], - "answer": stat["solutions"] - }) - elif stat["type"] == "trueFalse": - result.append({ - "text": text, - 
"questions": exercise["questions"], - "answer": stat["solutions"] - }) - elif stat["type"] == "matchSentences": - result.append({ - "text": text, - "question": exercise["prompt"], - "sentences": exercise["sentences"], - "options": exercise["options"], - "answer": stat["solutions"] - }) - except KeyError as e: - self._logger.warning(f"Malformed stat object: {str(e)}") - return result - - +import re +from datetime import datetime +from functools import reduce +from logging import getLogger + +from typing import Dict, List + +from app.configs.constants import TemperatureSettings, GPTModels +from app.helpers import count_tokens +from app.repositories.abc import IDocumentStore +from app.services.abc import ILLMService, ITrainingService, IKnowledgeBase +from app.dtos.training import * + + +class TrainingService(ITrainingService): + TOOLS = [ + 'critical_thinking', + 'language_for_writing', + 'reading_skills', + 'strategy', + 'words', + 'writing_skills' + ] + # strategy word_link ct_focus reading_skill word_partners writing_skill language_for_writing + + def __init__(self, llm: ILLMService, firestore: IDocumentStore, training_kb: IKnowledgeBase): + self._llm = llm + self._db = firestore + self._kb = training_kb + self._logger = getLogger(__name__) + + async def fetch_tips(self, context: str, question: str, answer: str, correct_answer: str): + messages = self._get_question_tips(question, answer, correct_answer, context) + + token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], + map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) + + response = await self._llm.prediction( + GPTModels.GPT_3_5_TURBO, + messages, + None, + TemperatureSettings.TIPS_TEMPERATURE, + token_count=token_count + ) + + if isinstance(response, str): + response = re.sub(r"^[a-zA-Z0-9_]+\:\s*", "", response) + + return response + + @staticmethod + def _get_question_tips(question: str, answer: str, correct_answer: str, context: str = None): + messages = [ 
+ { + "role": "user", + "content": ( + "You are a IELTS exam program that analyzes incorrect answers to questions and gives tips to " + "help students understand why it was a wrong answer and gives helpful insight for the future. " + "The tip should refer to the context and question." + ), + } + ] + + if not (context is None or context == ""): + messages.append({ + "role": "user", + "content": f"This is the context for the question: {context}", + }) + + messages.extend([ + { + "role": "user", + "content": f"This is the question: {question}", + }, + { + "role": "user", + "content": f"This is the answer: {answer}", + }, + { + "role": "user", + "content": f"This is the correct answer: {correct_answer}", + } + ]) + + return messages + + async def get_training_content(self, training_content: Dict) -> Dict: + user, stats = training_content["userID"], training_content["stats"] + exam_data, exam_map = await self._sort_out_solutions(stats) + training_content = await self._get_exam_details_and_tips(exam_data) + tips = self._query_kb(training_content.queries) + usefull_tips = await self._get_usefull_tips(exam_data, tips) + exam_map = self._merge_exam_map_with_details(exam_map, training_content.details) + + weak_areas = {"weak_areas": []} + for area in training_content.weak_areas: + weak_areas["weak_areas"].append(area.dict()) + + training_doc = { + 'created_at': int(datetime.now().timestamp() * 1000), + **exam_map, + **usefull_tips.dict(), + **weak_areas, + "user": user + } + doc_id = await self._db.save_to_db('training', training_doc) + return { + "id": doc_id + } + + @staticmethod + def _merge_exam_map_with_details(exam_map: Dict[str, any], details: List[DetailsDTO]): + new_exam_map = {"exams": []} + for detail in details: + new_exam_map["exams"].append({ + "id": detail.exam_id, + "date": detail.date, + "performance_comment": detail.performance_comment, + "detailed_summary": detail.detailed_summary, + **exam_map[detail.exam_id] + }) + return new_exam_map + + def 
_query_kb(self, queries: List[QueryDTO]): + map_categories = { + "critical_thinking": "ct_focus", + "language_for_writing": "language_for_writing", + "reading_skills": "reading_skill", + "strategy": "strategy", + "writing_skills": "writing_skill" + } + + tips = {"tips": []} + for query in queries: + if query.category == "words": + tips["tips"].extend( + self._kb.query_knowledge_base(query.text, "word_link") + ) + tips["tips"].extend( + self._kb.query_knowledge_base(query.text, "word_partners") + ) + else: + if query.category in map_categories: + tips["tips"].extend( + self._kb.query_knowledge_base(query.text, map_categories[query.category]) + ) + else: + self._logger.info(f"GTP tried to query knowledge base for {query.category} and it doesn't exist.") + return tips + + async def _get_exam_details_and_tips(self, exam_data: Dict[str, any]) -> TrainingContentDTO: + json_schema = ( + '{ "details": [{"exam_id": "", "date": 0, "performance_comment": "", "detailed_summary": ""}],' + ' "weak_areas": [{"area": "", "comment": ""}], "queries": [{"text": "", "category": ""}] }' + ) + messages = [ + { + "role": "user", + "content": ( + f"I'm going to provide you with exam data, you will take the exam data and fill this json " + f'schema : {json_schema}. "performance_comment" is a short sentence that describes the ' + 'students\'s performance and main mistakes in a single exam, "detailed_summary" is a detailed ' + 'summary of the student\'s performance, "weak_areas" are identified areas' + ' across all exams which need to be improved upon, for example, area "Grammar and Syntax" comment "Issues' + ' with sentence structure and punctuation.", the "queries" field is where you will write queries ' + 'for tips that will be displayed to the student, the category attribute is a collection of ' + 'embeddings and the text will be the text used to query the knowledge base. The categories are ' + f'the following [{", ".join(self.TOOLS)}]. 
The exam data will be a json where the key of the field ' + '"exams" is the exam id, an exam can be composed of multiple modules or single modules. The student' + ' will see your response so refrain from using phrasing like "The student" did x, y and z. If the ' + 'field "answer" in a question is an empty array "[]", then the student didn\'t answer any question ' + 'and you must address that in your response. Also questions aren\'t modules, the only modules are: ' + 'level, speaking, writing, reading and listening. The details array needs to be tailored to the ' + 'exam attempt, even if you receive the same exam you must treat as different exams by their id.' + 'Don\'t make references to an exam by it\'s id, the GUI will handle that so the student knows ' + 'which is the exam your comments and summary are referencing too. Even if the student hasn\'t ' + 'submitted no answers for an exam, you must still fill the details structure addressing that fact.' + ) + }, + { + "role": "user", + "content": f'Exam Data: {str(exam_data)}' + } + ] + return await self._llm.pydantic_prediction(messages, self._map_gpt_response, json_schema) + + async def _get_usefull_tips(self, exam_data: Dict[str, any], tips: Dict[str, any]) -> TipsDTO: + json_schema = ( + '{ "tip_ids": [] }' + ) + messages = [ + { + "role": "user", + "content": ( + f"I'm going to provide you with tips and I want you to return to me the tips that " + f"can be usefull for the student that made the exam that I'm going to send you, return " + f"me the tip ids in this json format {json_schema}." 
+ ) + }, + { + "role": "user", + "content": f'Exam Data: {str(exam_data)}' + }, + { + "role": "user", + "content": f'Tips: {str(tips)}' + } + ] + return await self._llm.pydantic_prediction(messages, lambda response: TipsDTO(**response), json_schema) + + @staticmethod + def _map_gpt_response(response: Dict[str, any]) -> TrainingContentDTO: + parsed_response = { + "details": [DetailsDTO(**detail) for detail in response["details"]], + "weak_areas": [WeakAreaDTO(**area) for area in response["weak_areas"]], + "queries": [QueryDTO(**query) for query in response["queries"]] + } + return TrainingContentDTO(**parsed_response) + + async def _sort_out_solutions(self, stats): + grouped_stats = {} + for stat in stats: + session_key = f'{str(stat["date"])}-{stat["user"]}' + module = stat["module"] + exam_id = stat["exam"] + + if session_key not in grouped_stats: + grouped_stats[session_key] = {} + if module not in grouped_stats[session_key]: + grouped_stats[session_key][module] = { + "stats": [], + "exam_id": exam_id + } + grouped_stats[session_key][module]["stats"].append(stat) + + exercises = {} + exam_map = {} + for session_key, modules in grouped_stats.items(): + exercises[session_key] = {} + for module, module_stats in modules.items(): + exercises[session_key][module] = {} + + exam_id = module_stats["exam_id"] + if exam_id not in exercises[session_key][module]: + exercises[session_key][module][exam_id] = {"date": None, "exercises": []} + + exam_total_questions = 0 + exam_total_correct = 0 + + for stat in module_stats["stats"]: + exam_total_questions += stat["score"]["total"] + exam_total_correct += stat["score"]["correct"] + exercises[session_key][module][exam_id]["date"] = stat["date"] + + if session_key not in exam_map: + exam_map[session_key] = {"stat_ids": [], "score": 0} + exam_map[session_key]["stat_ids"].append(stat["id"]) + + exam = await self._db.get_doc_by_id(module, exam_id) + if module == "listening": + 
exercises[session_key][module][exam_id]["exercises"].extend( + self._get_listening_solutions(stat, exam)) + elif module == "reading": + exercises[session_key][module][exam_id]["exercises"].extend( + self._get_reading_solutions(stat, exam)) + elif module == "writing": + exercises[session_key][module][exam_id]["exercises"].extend( + self._get_writing_prompts_and_answers(stat, exam) + ) + elif module == "speaking": + exercises[session_key][module][exam_id]["exercises"].extend( + self._get_speaking_solutions(stat, exam) + ) + elif module == "level": + exercises[session_key][module][exam_id]["exercises"].extend( + self._get_level_solutions(stat, exam) + ) + + exam_map[session_key]["score"] = round((exam_total_correct / exam_total_questions) * 100) + exam_map[session_key]["module"] = module + + return {"exams": exercises}, exam_map + + def _get_writing_prompts_and_answers(self, stat, exam): + result = [] + try: + exercises = [] + for solution in stat['solutions']: + answer = solution['solution'] + exercise_id = solution['id'] + exercises.append({ + "exercise_id": exercise_id, + "answer": answer + }) + for exercise in exercises: + for exam_exercise in exam["exercises"]: + if exam_exercise["id"] == exercise["exercise_id"]: + result.append({ + "exercise": exam_exercise["prompt"], + "answer": exercise["answer"] + }) + + except KeyError as e: + self._logger.warning(f"Malformed stat object: {str(e)}") + + return result + + @staticmethod + def _get_mc_question(exercise, stat): + shuffle_maps = stat.get("shuffleMaps", []) + answer = stat["solutions"] if len(shuffle_maps) == 0 else [] + if len(shuffle_maps) != 0: + for solution in stat["solutions"]: + shuffle_map = [ + item["map"] for item in shuffle_maps + if item["questionID"] == solution["question"] + ] + answer.append({ + "question": solution["question"], + "option": shuffle_map[solution["option"]] + }) + return { + "question": exercise["prompt"], + "exercise": exercise["questions"], + "answer": stat["solutions"] + } + + 
@staticmethod + def _swap_key_name(d, original_key, new_key): + d[new_key] = d.pop(original_key) + return d + + def _get_level_solutions(self, stat, exam): + result = [] + try: + for part in exam["parts"]: + for exercise in part["exercises"]: + if exercise["id"] == stat["exercise"]: + if stat["type"] == "fillBlanks": + result.append({ + "prompt": exercise["prompt"], + "template": exercise["text"], + "words": exercise["words"], + "solutions": exercise["solutions"], + "answer": [ + self._swap_key_name(item, 'solution', 'option') + for item in stat["solutions"] + ] + }) + elif stat["type"] == "multipleChoice": + result.append(self._get_mc_question(exercise, stat)) + except KeyError as e: + self._logger.warning(f"Malformed stat object: {str(e)}") + return result + + def _get_listening_solutions(self, stat, exam): + result = [] + try: + for part in exam["parts"]: + for exercise in part["exercises"]: + if exercise["id"] == stat["exercise"]: + if stat["type"] == "writeBlanks": + result.append({ + "question": exercise["prompt"], + "template": exercise["text"], + "solution": exercise["solutions"], + "answer": stat["solutions"] + }) + elif stat["type"] == "fillBlanks": + result.append({ + "question": exercise["prompt"], + "template": exercise["text"], + "words": exercise["words"], + "solutions": exercise["solutions"], + "answer": stat["solutions"] + }) + elif stat["type"] == "multipleChoice": + result.append(self._get_mc_question(exercise, stat)) + + except KeyError as e: + self._logger.warning(f"Malformed stat object: {str(e)}") + return result + + @staticmethod + def _find_shuffle_map(shuffle_maps, question_id): + return next((item["map"] for item in shuffle_maps if item["questionID"] == question_id), None) + + def _get_speaking_solutions(self, stat, exam): + result = {} + try: + result = { + "comments": { + key: value['comment'] for key, value in stat['solutions'][0]['evaluation']['task_response'].items()} + , + "exercises": {} + } + + for exercise in exam["exercises"]: + 
if exercise["id"] == stat["exercise"]: + if stat["type"] == "interactiveSpeaking": + for i in range(len(exercise["prompts"])): + result["exercises"][f"exercise_{i+1}"] = { + "question": exercise["prompts"][i]["text"] + } + for i in range(len(exercise["prompts"])): + answer = stat['solutions'][0]["evaluation"].get(f'transcript_{i+1}', '') + result["exercises"][f"exercise_{i+1}"]["answer"] = answer + elif stat["type"] == "speaking": + result["exercises"]["exercise_1"] = { + "question": exercise["text"], + "answer": stat['solutions'][0]["evaluation"].get(f'transcript', '') + } + except KeyError as e: + self._logger.warning(f"Malformed stat object: {str(e)}") + return [result] + + def _get_reading_solutions(self, stat, exam): + result = [] + try: + for part in exam["parts"]: + text = part["text"] + for exercise in part["exercises"]: + if exercise["id"] == stat["exercise"]: + if stat["type"] == "fillBlanks": + result.append({ + "text": text, + "question": exercise["prompt"], + "template": exercise["text"], + "words": exercise["words"], + "solutions": exercise["solutions"], + "answer": stat["solutions"] + }) + elif stat["type"] == "writeBlanks": + result.append({ + "text": text, + "question": exercise["prompt"], + "template": exercise["text"], + "solutions": exercise["solutions"], + "answer": stat["solutions"] + }) + elif stat["type"] == "trueFalse": + result.append({ + "text": text, + "questions": exercise["questions"], + "answer": stat["solutions"] + }) + elif stat["type"] == "matchSentences": + result.append({ + "text": text, + "question": exercise["prompt"], + "sentences": exercise["sentences"], + "options": exercise["options"], + "answer": stat["solutions"] + }) + except KeyError as e: + self._logger.warning(f"Malformed stat object: {str(e)}") + return result + + diff --git a/app/services/impl/user.py b/app/services/impl/user.py new file mode 100644 index 0000000..bf64bd4 --- /dev/null +++ b/app/services/impl/user.py @@ -0,0 +1,262 @@ +import os +import subprocess 
+import time +import uuid +import pandas as pd +import shortuuid + +from datetime import datetime +from logging import getLogger +from pymongo.database import Database + +from app.dtos.user_batch import BatchUsersDTO, UserDTO +from app.helpers import FileHelper +from app.services.abc import IUserService + + +class UserService(IUserService): + _DEFAULT_DESIRED_LEVELS = { + "reading": 9, + "listening": 9, + "writing": 9, + "speaking": 9, + } + + _DEFAULT_LEVELS = { + "reading": 0, + "listening": 0, + "writing": 0, + "speaking": 0, + } + + def __init__(self, mongo: Database): + self._db: Database = mongo + self._logger = getLogger(__name__) + + def fetch_tips(self, batch: BatchUsersDTO): + file_name = f'{uuid.uuid4()}.csv' + path = f'./tmp/{file_name}' + self._generate_firebase_auth_csv(batch, path) + + result = self._upload_users('./tmp', file_name) + if result.returncode != 0: + error_msg = f"Couldn't upload users. Failed to run command firebase auth import -> ```cmd {result.stdout}```" + self._logger.error(error_msg) + return error_msg + + self._init_users(batch) + + FileHelper.remove_file(path) + return {"ok": True} + + @staticmethod + def _generate_firebase_auth_csv(batch_dto: BatchUsersDTO, path: str): + # https://firebase.google.com/docs/cli/auth#file_format + columns = [ + 'UID', 'Email', 'Email Verified', 'Password Hash', 'Password Salt', 'Name', + 'Photo URL', 'Google ID', 'Google Email', 'Google Display Name', 'Google Photo URL', + 'Facebook ID', 'Facebook Email', 'Facebook Display Name', 'Facebook Photo URL', + 'Twitter ID', 'Twitter Email', 'Twitter Display Name', 'Twitter Photo URL', + 'GitHub ID', 'GitHub Email', 'GitHub Display Name', 'GitHub Photo URL', + 'User Creation Time', 'Last Sign-In Time', 'Phone Number' + ] + users_data = [] + + current_time = int(time.time() * 1000) + + for user in batch_dto.users: + user_data = { + 'UID': str(user.id), + 'Email': user.email, + 'Email Verified': False, + 'Password Hash': user.passwordHash, + 'Password Salt': 
user.passwordSalt, + 'Name': '', + 'Photo URL': '', + 'Google ID': '', + 'Google Email': '', + 'Google Display Name': '', + 'Google Photo URL': '', + 'Facebook ID': '', + 'Facebook Email': '', + 'Facebook Display Name': '', + 'Facebook Photo URL': '', + 'Twitter ID': '', + 'Twitter Email': '', + 'Twitter Display Name': '', + 'Twitter Photo URL': '', + 'GitHub ID': '', + 'GitHub Email': '', + 'GitHub Display Name': '', + 'GitHub Photo URL': '', + 'User Creation Time': current_time, + 'Last Sign-In Time': '', + 'Phone Number': '' + } + users_data.append(user_data) + + df = pd.DataFrame(users_data, columns=columns) + df.to_csv(path, index=False, header=False) + + @staticmethod + def _upload_users(directory: str, file_name: str): + command = ( + f'firebase auth:import {file_name} ' + f'--hash-algo=SCRYPT ' + f'--hash-key={os.getenv("FIREBASE_SCRYPT_B64_SIGNER_KEY")} ' + f'--salt-separator={os.getenv("FIREBASE_SCRYPT_B64_SALT_SEPARATOR")} ' + f'--rounds={os.getenv("FIREBASE_SCRYPT_ROUNDS")} ' + f'--mem-cost={os.getenv("FIREBASE_SCRYPT_MEM_COST")} ' + f'--project={os.getenv("FIREBASE_PROJECT_ID")} ' + ) + + result = subprocess.run(command, shell=True, cwd=directory, capture_output=True, text=True) + return result + + def _init_users(self, batch_users: BatchUsersDTO): + maker_id = batch_users.makerID + for user in batch_users.users: + self._insert_new_user(user) + code = self._create_code(user, maker_id) + + if user.type == "corporate": + self._set_corporate_default_groups(user) + + if user.corporate: + self._assign_corporate_to_user(user, code) + + if user.groupName and len(user.groupName.strip()) > 0: + self._assign_user_to_group_by_name(user, maker_id) + + def _insert_new_user(self, user: UserDTO): + new_user = { + **user.dict(exclude={ + 'passport_id', 'groupName', 'expiryDate', + 'corporate', 'passwordHash', 'passwordSalt' + }), + 'id': str(user.id), + 'bio': "", + 'focus': "academic", + 'status': "active", + 'desiredLevels': self._DEFAULT_DESIRED_LEVELS, + 
'profilePicture': "/defaultAvatar.png", + 'levels': self._DEFAULT_LEVELS, + 'isFirstLogin': False, + 'isVerified': True, + 'registrationDate': datetime.now(), + 'subscriptionExpirationDate': user.expiryDate + } + self._db.users.insert_one(new_user) + + def _create_code(self, user: UserDTO, maker_id: str) -> str: + code = shortuuid.ShortUUID().random(length=6) + self._db.codes.insert_one({ + 'id': code, + 'code': code, + 'creator': maker_id, + 'expiryDate': user.expiryDate, + 'type': user.type, + 'creationDate': datetime.now(), + 'userId': str(user.id), + 'email': user.email, + 'name': user.name, + 'passport_id': user.passport_id + }) + return code + + def _set_corporate_default_groups(self, user: UserDTO): + user_id = str(user.id) + default_groups = [ + { + 'admin': user_id, + 'id': str(uuid.uuid4()), + 'name': "Teachers", + 'participants': [], + 'disableEditing': True, + }, + { + 'admin': user_id, + 'id': str(uuid.uuid4()), + 'name': "Students", + 'participants': [], + 'disableEditing': True, + }, + { + 'admin': user_id, + 'id': str(uuid.uuid4()), + 'name': "Corporate", + 'participants': [], + 'disableEditing': True, + } + ] + for group in default_groups: + self._db.groups.insert_one(group) + + def _assign_corporate_to_user(self, user: UserDTO, code: str): + user_id = str(user.id) + corporate_user = self._db.users.find_one( + {"email": user.corporate} + ) + if corporate_user: + self._db.codes.update_one( + {"id": code}, + {"$set": {"creator": corporate_user["id"]}}, + upsert=True + ) + group_type = "Students" if user.type == "student" else "Teachers" + + group = self._db.groups.find_one( + { + "admin": corporate_user["id"], + "name": group_type + } + ) + + if group: + participants = group['participants'] + if user_id not in participants: + participants.append(user_id) + self._db.groups.update_one( + {"id": group["id"]}, + {"$set": {"participants": participants}} + ) + + else: + group = { + 'admin': corporate_user["id"], + 'id': str(uuid.uuid4()), + 'name': 
group_type, + 'participants': [user_id], + 'disableEditing': True, + } + + self._db.groups.insert_one(group) + + def _assign_user_to_group_by_name(self, user: UserDTO, maker_id: str): + user_id = str(user.id) + + groups = list(self._db.groups.find( + { + "admin": maker_id, + "name": user.groupName.strip() + } + )) + + if len(groups) == 0: + new_group = { + 'id': str(uuid.uuid4()), + 'admin': maker_id, + 'name': user.groupName.strip(), + 'participants': [user_id], + 'disableEditing': False, + } + self._db.groups.insert_one(new_group) + else: + group = groups[0] + participants = group["participants"] + if user_id not in participants: + participants.append(user_id) + self._db.groups.update_one( + {"id": group["id"]}, + {"$set": {"participants": participants}} + ) diff --git a/app/utils/__init__.py b/app/utils/__init__.py index f366ec6..8851c66 100644 --- a/app/utils/__init__.py +++ b/app/utils/__init__.py @@ -1,5 +1,5 @@ -from .handle_exception import handle_exception - -__all__ = [ - "handle_exception" -] +from .handle_exception import handle_exception + +__all__ = [ + "handle_exception" +] diff --git a/app/utils/handle_exception.py b/app/utils/handle_exception.py index 1c340d6..f1adae7 100644 --- a/app/utils/handle_exception.py +++ b/app/utils/handle_exception.py @@ -1,15 +1,15 @@ -import functools -from typing import Callable, Any -from fastapi import Response - - -def handle_exception(status_code: int = 500): - def decorator(func: Callable) -> Callable: - @functools.wraps(func) - async def wrapper(*args: Any, **kwargs: Any) -> Any: - try: - return await func(*args, **kwargs) - except Exception as e: - return Response(content=str(e), status_code=status_code) - return wrapper - return decorator +import functools +from typing import Callable, Any +from fastapi import Response + + +def handle_exception(status_code: int = 500): + def decorator(func: Callable) -> Callable: + @functools.wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return 
await func(*args, **kwargs) + except Exception as e: + return Response(content=str(e), status_code=status_code) + return wrapper + return decorator diff --git a/docker-compose.yml b/docker-compose.yml index 9423cc6..7848999 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,10 +1,10 @@ -version: "3" - -services: - ielts-be: - container_name: ielts-be - build: . - image: ecrop/ielts-be:latest - ports: - - 8080:8000 - restart: unless-stopped +version: "3" + +services: + ielts-be: + container_name: ielts-be + build: . + image: ecrop/ielts-be:latest + ports: + - 8080:8000 + restart: unless-stopped diff --git a/firebase-configs/encoach-staging.json b/firebase-configs/encoach-staging.json index f1489a6..63e4f60 100644 --- a/firebase-configs/encoach-staging.json +++ b/firebase-configs/encoach-staging.json @@ -1,13 +1,13 @@ -{ - "type": "service_account", - "project_id": "encoach-staging", - "private_key_id": "5718a649419776df9637589f8696a258a6a70f6c", - "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC2C6Es2gY8lLvH\ndVilNtRNm9glSaPXMNw2PzZZbSGuG1uGPFaCzlq1lOb2u17YfMG4GriKIMjIQKXF\nqdvxA8CAmAFRuDjUGmpbO/X1ZW7amOs5Bjed2BYmL01dEqzzwwh7rEfNDjeghRPx\n1uKzH8A6TLT5xq+74I5K1CIgiljBpZimsERu2SDawjkdtZfA7qoylA46Nq66LuwQ\nVyv9CK2SZNpBcT3sunCmRsrCzmSTzKdbcqRPdqUKgZOH/Rjp0sw9VuUgwoxdGZV3\n5SJjObo5ceZ1OSiJm7GwLzp7uq16sqycgSYwppNLI5OtzOfSuWbGD4+a044t2Mlq\n9PHXv7H/AgMBAAECggEAAfhKlFwq8MaL6PggRJq9HbaKgQ4fcOmCmy8AQmPNF1UM\nyVKSKGndjxUfPLCWsaaunUnjZlHoKkvndKXxDyttuVaBE9EiWEqNjRLZ3KpuJ9Jm\nH+CtLbmUCnISQb1n1AlvvZAwhLZbLBL/PhYyWiLapybZAdJAaOWLVKGgBD8gVRQW\nJFCqnszX1O2YlpWHutb979R4qoY/XAf94gyMkTpXZwuETvFqZbau2vxRZ8qARix3\nmic881PwiF6Cod8UPCS9yMK+Q+Se6SomwXU9PCmlummn9xmQBAxYy8gIAVs/J9Fg\n5SvhnImAPDd+zIzzw2cHCiruNWIhroMVZDZJgWdY1QKBgQDjTKKeFOur3ijJJL2/\nWg1SE2jLP0GpXzM5YMx6jdOCNDCzugPngRucRXiTkJ2FnUgyMcQyi6hyrbWXN/6z\nXhx5fwLB4tnTcqOMvNfcay5mDk3RW9ZZJxayB54Sf1Nm/4xiDBnGPT+iHQvK+/pT\nwScWznFkmk60E796o76OLn3PEwKBgQDNCC2uPq+uOcCopIO8HH88gqdxTvpbeHUU\nrdJOmr1VtGNuvay/mfpva9+VEtGbZTFzjhfvfCEIjpj3Llh8Flb9EYa6BmscBiyp\ngszEeFuB3zHndlSCZPnGJ7JiRAdPAEgG3Gl/r9th6PDaEMq0MFS5i7GGhPBIRYCG\nUtmY5eVy5QKBgH5Nuls/YsnJFD7ZNLscziQadvPhvZnhNbSfjmBXaP2EBMAKEFtX\nCcGndN4C0RVLFbAWqWAw7LR0xGA4FEcVd5snsZ+Nb98oZ6sv0H9B67F4J1O7xXsa\n1mitBPBgYjbsr9RXxwa6SB7MJx5vMGXUAeWRZ78wY6V7B76dOKkHOo+TAoGBAJf5\nBOsPueZZFm2qK58GPGVcrsI0+StNuPLP+H+dANQC9mTCIMaQWmm2Oq5jmYwmUKZH\nX4R6rH2MPOOSrbGkWWwRTpyaX1ARX49xzVefoqw8BOB8/Bz+vYjcKcPeitBK9Bhp\nzaUAc4s6PzRTl/xBirtRSQ/df8ECC0cFKBbF6PHlAoGAGqnlpo+k8vAtg6ulCuGu\nx2Y/c5UmvXGHk60pccnW3UtENSDnl99OgMfBz8/qLAMWs6DUQ/kvSlHQPmMBHRWZ\nNTr6ceGXyNs4KdYoj1K7AU3c0Lm0wyQ2giQMoOOUQAm98Xr8z5aiihj10hHPmzzL\n9kwpOmZpjNmC/ERD69imWhY=\n-----END PRIVATE KEY-----\n", - "client_email": "firebase-adminsdk-8rs9e@encoach-staging.iam.gserviceaccount.com", - "client_id": "108221424237414412378", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-8rs9e%40encoach-staging.iam.gserviceaccount.com", - "universe_domain": "googleapis.com" +{ + "type": "service_account", + "project_id": "encoach-staging", + "private_key_id": "5718a649419776df9637589f8696a258a6a70f6c", + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC2C6Es2gY8lLvH\ndVilNtRNm9glSaPXMNw2PzZZbSGuG1uGPFaCzlq1lOb2u17YfMG4GriKIMjIQKXF\nqdvxA8CAmAFRuDjUGmpbO/X1ZW7amOs5Bjed2BYmL01dEqzzwwh7rEfNDjeghRPx\n1uKzH8A6TLT5xq+74I5K1CIgiljBpZimsERu2SDawjkdtZfA7qoylA46Nq66LuwQ\nVyv9CK2SZNpBcT3sunCmRsrCzmSTzKdbcqRPdqUKgZOH/Rjp0sw9VuUgwoxdGZV3\n5SJjObo5ceZ1OSiJm7GwLzp7uq16sqycgSYwppNLI5OtzOfSuWbGD4+a044t2Mlq\n9PHXv7H/AgMBAAECggEAAfhKlFwq8MaL6PggRJq9HbaKgQ4fcOmCmy8AQmPNF1UM\nyVKSKGndjxUfPLCWsaaunUnjZlHoKkvndKXxDyttuVaBE9EiWEqNjRLZ3KpuJ9Jm\nH+CtLbmUCnISQb1n1AlvvZAwhLZbLBL/PhYyWiLapybZAdJAaOWLVKGgBD8gVRQW\nJFCqnszX1O2YlpWHutb979R4qoY/XAf94gyMkTpXZwuETvFqZbau2vxRZ8qARix3\nmic881PwiF6Cod8UPCS9yMK+Q+Se6SomwXU9PCmlummn9xmQBAxYy8gIAVs/J9Fg\n5SvhnImAPDd+zIzzw2cHCiruNWIhroMVZDZJgWdY1QKBgQDjTKKeFOur3ijJJL2/\nWg1SE2jLP0GpXzM5YMx6jdOCNDCzugPngRucRXiTkJ2FnUgyMcQyi6hyrbWXN/6z\nXhx5fwLB4tnTcqOMvNfcay5mDk3RW9ZZJxayB54Sf1Nm/4xiDBnGPT+iHQvK+/pT\nwScWznFkmk60E796o76OLn3PEwKBgQDNCC2uPq+uOcCopIO8HH88gqdxTvpbeHUU\nrdJOmr1VtGNuvay/mfpva9+VEtGbZTFzjhfvfCEIjpj3Llh8Flb9EYa6BmscBiyp\ngszEeFuB3zHndlSCZPnGJ7JiRAdPAEgG3Gl/r9th6PDaEMq0MFS5i7GGhPBIRYCG\nUtmY5eVy5QKBgH5Nuls/YsnJFD7ZNLscziQadvPhvZnhNbSfjmBXaP2EBMAKEFtX\nCcGndN4C0RVLFbAWqWAw7LR0xGA4FEcVd5snsZ+Nb98oZ6sv0H9B67F4J1O7xXsa\n1mitBPBgYjbsr9RXxwa6SB7MJx5vMGXUAeWRZ78wY6V7B76dOKkHOo+TAoGBAJf5\nBOsPueZZFm2qK58GPGVcrsI0+StNuPLP+H+dANQC9mTCIMaQWmm2Oq5jmYwmUKZH\nX4R6rH2MPOOSrbGkWWwRTpyaX1ARX49xzVefoqw8BOB8/Bz+vYjcKcPeitBK9Bhp\nzaUAc4s6PzRTl/xBirtRSQ/df8ECC0cFKBbF6PHlAoGAGqnlpo+k8vAtg6ulCuGu\nx2Y/c5UmvXGHk60pccnW3UtENSDnl99OgMfBz8/qLAMWs6DUQ/kvSlHQPmMBHRWZ\nNTr6ceGXy
Ns4KdYoj1K7AU3c0Lm0wyQ2giQMoOOUQAm98Xr8z5aiihj10hHPmzzL\n9kwpOmZpjNmC/ERD69imWhY=\n-----END PRIVATE KEY-----\n", + "client_email": "firebase-adminsdk-8rs9e@encoach-staging.iam.gserviceaccount.com", + "client_id": "108221424237414412378", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-8rs9e%40encoach-staging.iam.gserviceaccount.com", + "universe_domain": "googleapis.com" } \ No newline at end of file diff --git a/firebase-configs/mti-ielts-626a2dcf6091.json b/firebase-configs/mti-ielts-626a2dcf6091.json index 3bf3594..fc7a898 100644 --- a/firebase-configs/mti-ielts-626a2dcf6091.json +++ b/firebase-configs/mti-ielts-626a2dcf6091.json @@ -1,13 +1,13 @@ -{ - "type": "service_account", - "project_id": "mti-ielts", - "private_key_id": "626a2dcf60916a1b5011f388495b8f9c4fc065ef", - "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDuaLgLNa5yb5LI\nPZYa7qav0URgCF7miK3dUXIBoABQ+U6y1LwdsIiJqHZ4Cm2lotTqeTGOIV83PuA6\n9H/TwnvsHH8jilmsPxO5OX7AyZSDPvN45nJrgQ21RKZCYQGVetBMGhclCRbYFraS\nE6X/p6gSOpSqZ5fLz8BbdCMfib6HSfDmBkYTK42X6d2eNNwLM1wLbE8RmCGwRATC\nQFfMhjlvQcSJ1EDMfkMUUE9U/ux77wfHqs1d+7utVcQTIMFAP9fo1ynJlwp8D1HQ\ntalB6kkpuDQetUR0A1FHMMJekhmuRDUMfokX1F9JfUjR0OetuD3KEH5y2asxC2+0\n8JYcwbvlAgMBAAECggEAKaaW3LJ8rxZp/NyxkDP4YAf9248q0Ti4s00qzzjeRUdA\n5gI/eSphuDb7t34O6NyZOPuCWlPfOB4ee35CpMK59qaF2bYuc2azseznBZRSA1no\nnEsaW0i5Fd2P9FHRPoWtxVXbjEdZu9e//qY7Hn5yYPjmBx1BCkTZ1MBl8HkWlbjR\nbu18uveg5Vg6Wc+rnPmH/gMRLLpq9iQBpzXWT8Mj+k48O8GnW6v8S3R027ymqUou\n3W5b69xDGn0nwxgLIVzdxjoo7RnpjD3mP0x4faiBhScVgFhwZP8hqBeVyqbV5dMh\nfF+p9zLOeilFLJEjH1lZbZAb8wwP23LozIXJWFG3oQKBgQD6COCJ7hNSx9/AzDhO\nh73hKH/KSOJtxHc8795hcZjy9HJkoM45Fm7o2QGZzsZmV+N6VU0BjoDQAyftCq+G\ndIX0wcAGJIsLuQ9K00WI2hn7Uq1gjUl0d9XEorogKa1ZNTLL/9By/xnA7sEpI6Ng\nIsKQ4R2CfqNFU4bs1nyKWCWudQKBgQD0GNYwZt3xV2YBATVYsrvg1OGO/tmkCJ8Y\nLOdM0L+8WMCgw0uQcNFF9uqq6/oFgq7tOvpeZDsY8onRy55saaMT+Lr4xs0sj5B0\ns5Hqc0L37tdXXXXEne8WABMBF9injNgNbAm9W0kqME2Stc53OJQPj2DBdYxWSr8v\n36imCwoJsQKBgH0BBSlQQo7naKFeOGRijvbLpZ//clzIlYh8r+Rtw7brqWlPz+pQ\noeB95cP80coG9K6LiPVXRmU4vrRO3FRPW01ztEod6PpSaifRmnkB+W1h91ZHLMsy\nwkgNxxofXBA2fY/p9FAZ48lGVIH51EtS9Y0zTuqX347gZJtx3E/aI/SlAoGBAJer\nCwM+F2+K352GM7BuNiDoBVLFdVPf64Ko+/sVxdzwxJffYQdZoh634m3bfBmKbsiG\nmeSmoLXKlenefAxewu544SwM0pV6isaIgQTNI3JMXE8ziiZl/5WK7EQEniDVebU1\nSQP4QYjORJUBFE2twQm+C9+I+27uuMa1UOQC/fSxAoGBANuWloacqGfws6nbHvqF\nLZKlkKNPI/0sC+6VlqjoHn5LQz3lcFM1+iKSQIGJvJyru2ODgv2Lmq2W+cx+HMeq\n0BSetK4XtalmO9YflH7uMgvOEVewf4uJ2d+4I1pbY9aI1gHaZ1EUiiy6Ds4kAK8s\nTQqp88pfTbOnkdJBVi0AWs5B\n-----END PRIVATE KEY-----\n", - "client_email": "firebase-adminsdk-dyg6p@mti-ielts.iam.gserviceaccount.com", - "client_id": "104980563453519094431", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-dyg6p%40mti-ielts.iam.gserviceaccount.com", - "universe_domain": "googleapis.com" -} +{ + "type": "service_account", + "project_id": "mti-ielts", + "private_key_id": "626a2dcf60916a1b5011f388495b8f9c4fc065ef", + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDuaLgLNa5yb5LI\nPZYa7qav0URgCF7miK3dUXIBoABQ+U6y1LwdsIiJqHZ4Cm2lotTqeTGOIV83PuA6\n9H/TwnvsHH8jilmsPxO5OX7AyZSDPvN45nJrgQ21RKZCYQGVetBMGhclCRbYFraS\nE6X/p6gSOpSqZ5fLz8BbdCMfib6HSfDmBkYTK42X6d2eNNwLM1wLbE8RmCGwRATC\nQFfMhjlvQcSJ1EDMfkMUUE9U/ux77wfHqs1d+7utVcQTIMFAP9fo1ynJlwp8D1HQ\ntalB6kkpuDQetUR0A1FHMMJekhmuRDUMfokX1F9JfUjR0OetuD3KEH5y2asxC2+0\n8JYcwbvlAgMBAAECggEAKaaW3LJ8rxZp/NyxkDP4YAf9248q0Ti4s00qzzjeRUdA\n5gI/eSphuDb7t34O6NyZOPuCWlPfOB4ee35CpMK59qaF2bYuc2azseznBZRSA1no\nnEsaW0i5Fd2P9FHRPoWtxVXbjEdZu9e//qY7Hn5yYPjmBx1BCkTZ1MBl8HkWlbjR\nbu18uveg5Vg6Wc+rnPmH/gMRLLpq9iQBpzXWT8Mj+k48O8GnW6v8S3R027ymqUou\n3W5b69xDGn0nwxgLIVzdxjoo7RnpjD3mP0x4faiBhScVgFhwZP8hqBeVyqbV5dMh\nfF+p9zLOeilFLJEjH1lZbZAb8wwP23LozIXJWFG3oQKBgQD6COCJ7hNSx9/AzDhO\nh73hKH/KSOJtxHc8795hcZjy9HJkoM45Fm7o2QGZzsZmV+N6VU0BjoDQAyftCq+G\ndIX0wcAGJIsLuQ9K00WI2hn7Uq1gjUl0d9XEorogKa1ZNTLL/9By/xnA7sEpI6Ng\nIsKQ4R2CfqNFU4bs1nyKWCWudQKBgQD0GNYwZt3xV2YBATVYsrvg1OGO/tmkCJ8Y\nLOdM0L+8WMCgw0uQcNFF9uqq6/oFgq7tOvpeZDsY8onRy55saaMT+Lr4xs0sj5B0\ns5Hqc0L37tdXXXXEne8WABMBF9injNgNbAm9W0kqME2Stc53OJQPj2DBdYxWSr8v\n36imCwoJsQKBgH0BBSlQQo7naKFeOGRijvbLpZ//clzIlYh8r+Rtw7brqWlPz+pQ\noeB95cP80coG9K6LiPVXRmU4vrRO3FRPW01ztEod6PpSaifRmnkB+W1h91ZHLMsy\nwkgNxxofXBA2fY/p9FAZ48lGVIH51EtS9Y0zTuqX347gZJtx3E/aI/SlAoGBAJer\nCwM+F2+K352GM7BuNiDoBVLFdVPf64Ko+/sVxdzwxJffYQdZoh634m3bfBmKbsiG\nmeSmoLXKlenefAxewu544SwM0pV6isaIgQTNI3JMXE8ziiZl/5WK7EQEniDVebU1\nSQP4QYjORJUBFE2twQm+C9+I+27uuMa1UOQC/fSxAoGBANuWloacqGfws6nbHvqF\nLZKlkKNPI/0sC+6VlqjoHn5LQz3lcFM1+iKSQIGJvJyru2ODgv2Lmq2W+cx+HMeq\n0BSetK4XtalmO9YflH
7uMgvOEVewf4uJ2d+4I1pbY9aI1gHaZ1EUiiy6Ds4kAK8s\nTQqp88pfTbOnkdJBVi0AWs5B\n-----END PRIVATE KEY-----\n", + "client_email": "firebase-adminsdk-dyg6p@mti-ielts.iam.gserviceaccount.com", + "client_id": "104980563453519094431", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-dyg6p%40mti-ielts.iam.gserviceaccount.com", + "universe_domain": "googleapis.com" +} diff --git a/firebase-configs/storied-phalanx-349916.json b/firebase-configs/storied-phalanx-349916.json index 71aab34..83da44e 100644 --- a/firebase-configs/storied-phalanx-349916.json +++ b/firebase-configs/storied-phalanx-349916.json @@ -1,13 +1,13 @@ -{ - "type": "service_account", - "project_id": "storied-phalanx-349916", - "private_key_id": "c9e05f6fe413b1031a71f981160075ff4b044444", - "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDdgavFB63nMHyb\n38ncwijTrUmqU9UyzNJ8wlZCWAWuoz25Gng988fkKNDXnHY+ap9esHyNYg9IdSA7\nAuZeHpzTZmKiWZzFWq61KWSTgIn1JwKHGHJJdmVhTYfCe9I51cFLa5q2lTFzJ0ce\nbP7/X/7kw53odgva+M8AhDTbe60akpemgZc+LFwO0Abm7erH2HiNyjoNZzNw525L\n933PCaQwhZan04s1u0oRdVlBIBwMk+J0ojgVEpUiJOzF7gkN+UpDXujalLYdlR4q\nhkGgScXQhDYJkECC3GuvOnEo1YXGNjW9D73S6sSH+Lvqta4wW1+sTn0kB6goiQBI\n7cA1G6x3AgMBAAECggEAZPMwAX/adb7XS4LWUNH8IVyccg/63kgSteErxtiu3kRv\nYOj7W+C6fPVNGLap/RBCybjNSvIh3PfkVICh1MtG1eGXmj4VAKyvaskOmVq/hQbe\nVAuEKo7W7V2UPcKIsOsGSQUlYYjlHIIOG4O5Q1HQrRmp4cPK62Txkl6uaEkZPz4u\nbvIK2BJI8aHRwxE3Phw09blwlLqQQQ8nrhK29x5puaN+ft++IlzIOVsLz+n4kTdB\n6qkG/dhenn3K8o3+NkmSN6eNRbdJd36zXTo4Oatbvqb7r0E8vYn/3Llawo2X75zn\nec7jMHrOmcwtiu9H3PsrTWtzdSjxPHy0UtEn1HWK4QKBgQD+c/V8tAvbaUGVoZf6\ntKtDSKF6IHuY2vUO33v950mVdjrTursqOG2d+SLfSnKpc+sjDlj7/S5u4uRP+qUN\ng1rb2U7oIA7tsDa2ZTSkIx6HkPUzS+fBOxELLrbgMoJ2RLzgkiPhS95YgXJ/rYG5\nWQTehzCT5roes0RvtgM0gl3EhQKBgQDe2m7PRIU4g3RJ8HTx92B4ja8W9FVCYDG5\nPOAdZB8WB6Bvu4BJHBDLr8vDi930pKj+vYObRqBDQuILW4t8wZQJ834dnoq6EpUz\nhbVEURVBP4A/nEHrQHfq0Lp+cxThy2rw7obRQOLPETtC7p3WFgSHT6PRTcpGzCCX\n+76a30yrywKBgC/5JNtyBppDaf4QDVtTHMb+tpMT9LmI7pLzR6lDJfhr5gNtPURk\nhyY1hoGaw6t3E2n0lopL3alCVdFObDfz//lbKylQggAGLQqOYjJf/K2KgvA862Df\nBgOZtxjl7PrnUsT0SJd9elotbazsxXxwcB6UVnBMG+MV4V0+b7RCr/MRAoGBAIfp\nTcVIs7roqOZjKN9dEE/VkR/9uXW2tvyS/NfP9Ql5c0ZRYwazgCbJOwsyZRZLyek6\naWYsp5b91mA435QhdwiuoI6t30tmA+qdNBTLIpxdfvjMcoNoGPpzfBmcU/L1HW58\n+mnqGalRiAPlBQvI99ASKQWAXMnaulIWrYNEhj0LAoGBALi+QZ2pp+hDeC59ezWr\nbP1zbbONceHKGgJcevChP2k1OJyIOIqmBYeTuM4cPc5ofZYQNaMC31cs8SVeSRX1\nNTxQZmvCjMyTe/WYWYNFXdgkVz4egFXbeochCGzMYo57HV1PCkPBrARRZO8OfdDD\n8sDu//ohb7nCzceEI0DnWs13\n-----END PRIVATE KEY-----\n", - "client_email": "firebase-adminsdk-3ml0u@storied-phalanx-349916.iam.gserviceaccount.com", - "client_id": "114163760341944984396", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-3ml0u%40storied-phalanx-349916.iam.gserviceaccount.com", - "universe_domain": "googleapis.com" +{ + "type": "service_account", + "project_id": "storied-phalanx-349916", + "private_key_id": "c9e05f6fe413b1031a71f981160075ff4b044444", + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDdgavFB63nMHyb\n38ncwijTrUmqU9UyzNJ8wlZCWAWuoz25Gng988fkKNDXnHY+ap9esHyNYg9IdSA7\nAuZeHpzTZmKiWZzFWq61KWSTgIn1JwKHGHJJdmVhTYfCe9I51cFLa5q2lTFzJ0ce\nbP7/X/7kw53odgva+M8AhDTbe60akpemgZc+LFwO0Abm7erH2HiNyjoNZzNw525L\n933PCaQwhZan04s1u0oRdVlBIBwMk+J0ojgVEpUiJOzF7gkN+UpDXujalLYdlR4q\nhkGgScXQhDYJkECC3GuvOnEo1YXGNjW9D73S6sSH+Lvqta4wW1+sTn0kB6goiQBI\n7cA1G6x3AgMBAAECggEAZPMwAX/adb7XS4LWUNH8IVyccg/63kgSteErxtiu3kRv\nYOj7W+C6fPVNGLap/RBCybjNSvIh3PfkVICh1MtG1eGXmj4VAKyvaskOmVq/hQbe\nVAuEKo7W7V2UPcKIsOsGSQUlYYjlHIIOG4O5Q1HQrRmp4cPK62Txkl6uaEkZPz4u\nbvIK2BJI8aHRwxE3Phw09blwlLqQQQ8nrhK29x5puaN+ft++IlzIOVsLz+n4kTdB\n6qkG/dhenn3K8o3+NkmSN6eNRbdJd36zXTo4Oatbvqb7r0E8vYn/3Llawo2X75zn\nec7jMHrOmcwtiu9H3PsrTWtzdSjxPHy0UtEn1HWK4QKBgQD+c/V8tAvbaUGVoZf6\ntKtDSKF6IHuY2vUO33v950mVdjrTursqOG2d+SLfSnKpc+sjDlj7/S5u4uRP+qUN\ng1rb2U7oIA7tsDa2ZTSkIx6HkPUzS+fBOxELLrbgMoJ2RLzgkiPhS95YgXJ/rYG5\nWQTehzCT5roes0RvtgM0gl3EhQKBgQDe2m7PRIU4g3RJ8HTx92B4ja8W9FVCYDG5\nPOAdZB8WB6Bvu4BJHBDLr8vDi930pKj+vYObRqBDQuILW4t8wZQJ834dnoq6EpUz\nhbVEURVBP4A/nEHrQHfq0Lp+cxThy2rw7obRQOLPETtC7p3WFgSHT6PRTcpGzCCX\n+76a30yrywKBgC/5JNtyBppDaf4QDVtTHMb+tpMT9LmI7pLzR6lDJfhr5gNtPURk\nhyY1hoGaw6t3E2n0lopL3alCVdFObDfz//lbKylQggAGLQqOYjJf/K2KgvA862Df\nBgOZtxjl7PrnUsT0SJd9elotbazsxXxwcB6UVnBMG+MV4V0+b7RCr/MRAoGBAIfp\nTcVIs7roqOZjKN9dEE/VkR/9uXW2tvyS/NfP9Ql5c0ZRYwazgCbJOwsyZRZLyek6\naWYsp5b91mA435QhdwiuoI6t30tmA+qdNBTLIpxdfvjMcoNoGPpzfBmcU/L1HW58\n+mnqGalRiAPlBQvI99ASKQWAXMnaulIWrYNEhj0LAoGBALi+QZ2pp+hDeC59ezWr\nbP1zbbONceHKGgJcevChP2k1OJyIOIqmBYeTuM4cPc5ofZYQNaMC31cs8SVeS
RX1\nNTxQZmvCjMyTe/WYWYNFXdgkVz4egFXbeochCGzMYo57HV1PCkPBrARRZO8OfdDD\n8sDu//ohb7nCzceEI0DnWs13\n-----END PRIVATE KEY-----\n", + "client_email": "firebase-adminsdk-3ml0u@storied-phalanx-349916.iam.gserviceaccount.com", + "client_id": "114163760341944984396", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-3ml0u%40storied-phalanx-349916.iam.gserviceaccount.com", + "universe_domain": "googleapis.com" } \ No newline at end of file diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..d7c55e9 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,4343 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "aioboto3" +version = "13.1.1" +description = "Async boto3 wrapper" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "aioboto3-13.1.1-py3-none-any.whl", hash = "sha256:4b44a7c1317a51479b92ee57a2fea2cdef6bea2c3669870830b3f4dec6be7ca0"}, + {file = "aioboto3-13.1.1.tar.gz", hash = "sha256:7def49471b7b79b7dfe3859acac01423e241b5d69abf0a5f2bcfd2c64855b2ab"}, +] + +[package.dependencies] +aiobotocore = {version = "2.13.1", extras = ["boto3"]} +aiofiles = ">=23.2.1" + +[package.extras] +chalice = ["chalice (>=1.24.0)"] +s3cse = ["cryptography (>=2.3.1)"] + +[[package]] +name = "aiobotocore" +version = "2.13.1" +description = "Async client for aws services using botocore and aiohttp" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiobotocore-2.13.1-py3-none-any.whl", hash = "sha256:1bef121b99841ee3cc788e4ed97c332ba32353b1f00e886d1beb3aae95520858"}, + {file = "aiobotocore-2.13.1.tar.gz", hash = "sha256:134f9606c2f91abde38cbc61c3241113e26ff244633e0c31abb7e09da3581c9b"}, +] + +[package.dependencies] +aiohttp = ">=3.9.2,<4.0.0" 
+aioitertools = ">=0.5.1,<1.0.0" +boto3 = {version = ">=1.34.70,<1.34.132", optional = true, markers = "extra == \"boto3\""} +botocore = ">=1.34.70,<1.34.132" +wrapt = ">=1.10.10,<2.0.0" + +[package.extras] +awscli = ["awscli (>=1.32.70,<1.33.14)"] +boto3 = ["boto3 (>=1.34.70,<1.34.132)"] + +[[package]] +name = "aiofiles" +version = "24.1.0" +description = "File support for asyncio." +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"}, + {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"}, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.2" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohappyeyeballs-2.4.2-py3-none-any.whl", hash = "sha256:8522691d9a154ba1145b157d6d5c15e5c692527ce6a53c5e5f9876977f6dab2f"}, + {file = "aiohappyeyeballs-2.4.2.tar.gz", hash = "sha256:4ca893e6c5c1f5bf3888b04cb5a3bee24995398efef6e0b9f747b5e89d84fd74"}, +] + +[[package]] +name = "aiohttp" +version = "3.10.8" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.10.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a1ba7bc139592339ddeb62c06486d0fa0f4ca61216e14137a40d626c81faf10c"}, + {file = "aiohttp-3.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85e4d7bd05d18e4b348441e7584c681eff646e3bf38f68b2626807f3add21aa2"}, + {file = "aiohttp-3.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:69de056022e7abf69cb9fec795515973cc3eeaff51e3ea8d72a77aa933a91c52"}, + {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee3587506898d4a404b33bd19689286ccf226c3d44d7a73670c8498cd688e42c"}, + {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:fe285a697c851734285369614443451462ce78aac2b77db23567507484b1dc6f"}, + {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10c7932337285a6bfa3a5fe1fd4da90b66ebfd9d0cbd1544402e1202eb9a8c3e"}, + {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd9716ef0224fe0d0336997eb242f40619f9f8c5c57e66b525a1ebf9f1d8cebe"}, + {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceacea31f8a55cdba02bc72c93eb2e1b77160e91f8abd605969c168502fd71eb"}, + {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9721554bfa9e15f6e462da304374c2f1baede3cb06008c36c47fa37ea32f1dc4"}, + {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22cdeb684d8552490dd2697a5138c4ecb46f844892df437aaf94f7eea99af879"}, + {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e56bb7e31c4bc79956b866163170bc89fd619e0581ce813330d4ea46921a4881"}, + {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3a95d2686bc4794d66bd8de654e41b5339fab542b2bca9238aa63ed5f4f2ce82"}, + {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d82404a0e7b10e0d7f022cf44031b78af8a4f99bd01561ac68f7c24772fed021"}, + {file = "aiohttp-3.10.8-cp310-cp310-win32.whl", hash = "sha256:4e10b04542d27e21538e670156e88766543692a0a883f243ba8fad9ddea82e53"}, + {file = "aiohttp-3.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:680dbcff5adc7f696ccf8bf671d38366a1f620b5616a1d333d0cb33956065395"}, + {file = "aiohttp-3.10.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:33a68011a38020ed4ff41ae0dbf4a96a202562ecf2024bdd8f65385f1d07f6ef"}, + {file = "aiohttp-3.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c7efa6616a95e3bd73b8a69691012d2ef1f95f9ea0189e42f338fae080c2fc6"}, + {file = "aiohttp-3.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:ddb9b9764cfb4459acf01c02d2a59d3e5066b06a846a364fd1749aa168efa2be"}, + {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7f270f4ca92760f98a42c45a58674fff488e23b144ec80b1cc6fa2effed377"}, + {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6984dda9d79064361ab58d03f6c1e793ea845c6cfa89ffe1a7b9bb400dfd56bd"}, + {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f6d47e392c27206701565c8df4cac6ebed28fdf6dcaea5b1eea7a4631d8e6db"}, + {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a72f89aea712c619b2ca32c6f4335c77125ede27530ad9705f4f349357833695"}, + {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36074b26f3263879ba8e4dbd33db2b79874a3392f403a70b772701363148b9f"}, + {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e32148b4a745e70a255a1d44b5664de1f2e24fcefb98a75b60c83b9e260ddb5b"}, + {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5aa1a073514cf59c81ad49a4ed9b5d72b2433638cd53160fd2f3a9cfa94718db"}, + {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d3a79200a9d5e621c4623081ddb25380b713c8cf5233cd11c1aabad990bb9381"}, + {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e45fdfcb2d5bcad83373e4808825b7512953146d147488114575780640665027"}, + {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f78e2a78432c537ae876a93013b7bc0027ba5b93ad7b3463624c4b6906489332"}, + {file = "aiohttp-3.10.8-cp311-cp311-win32.whl", hash = "sha256:f8179855a4e4f3b931cb1764ec87673d3fbdcca2af496c8d30567d7b034a13db"}, + {file = "aiohttp-3.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:ef9b484604af05ca745b6108ca1aaa22ae1919037ae4f93aaf9a37ba42e0b835"}, + {file = 
"aiohttp-3.10.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ab2d6523575fc98896c80f49ac99e849c0b0e69cc80bf864eed6af2ae728a52b"}, + {file = "aiohttp-3.10.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f5d5d5401744dda50b943d8764508d0e60cc2d3305ac1e6420935861a9d544bc"}, + {file = "aiohttp-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de23085cf90911600ace512e909114385026b16324fa203cc74c81f21fd3276a"}, + {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4618f0d2bf523043866a9ff8458900d8eb0a6d4018f251dae98e5f1fb699f3a8"}, + {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21c1925541ca84f7b5e0df361c0a813a7d6a56d3b0030ebd4b220b8d232015f9"}, + {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:497a7d20caea8855c5429db3cdb829385467217d7feb86952a6107e033e031b9"}, + {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c887019dbcb4af58a091a45ccf376fffe800b5531b45c1efccda4bedf87747ea"}, + {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40d2d719c3c36a7a65ed26400e2b45b2d9ed7edf498f4df38b2ae130f25a0d01"}, + {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57359785f27394a8bcab0da6dcd46706d087dfebf59a8d0ad2e64a4bc2f6f94f"}, + {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a961ee6f2cdd1a2be4735333ab284691180d40bad48f97bb598841bfcbfb94ec"}, + {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:fe3d79d6af839ffa46fdc5d2cf34295390894471e9875050eafa584cb781508d"}, + {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9a281cba03bdaa341c70b7551b2256a88d45eead149f48b75a96d41128c240b3"}, + {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:c6769d71bfb1ed60321363a9bc05e94dcf05e38295ef41d46ac08919e5b00d19"}, + {file = "aiohttp-3.10.8-cp312-cp312-win32.whl", hash = "sha256:a3081246bab4d419697ee45e555cef5cd1def7ac193dff6f50be761d2e44f194"}, + {file = "aiohttp-3.10.8-cp312-cp312-win_amd64.whl", hash = "sha256:ab1546fc8e00676febc81c548a876c7bde32f881b8334b77f84719ab2c7d28dc"}, + {file = "aiohttp-3.10.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:b1a012677b8e0a39e181e218de47d6741c5922202e3b0b65e412e2ce47c39337"}, + {file = "aiohttp-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2df786c96c57cd6b87156ba4c5f166af7b88f3fc05f9d592252fdc83d8615a3c"}, + {file = "aiohttp-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8885ca09d3a9317219c0831276bfe26984b17b2c37b7bf70dd478d17092a4772"}, + {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4dbf252ac19860e0ab56cd480d2805498f47c5a2d04f5995d8d8a6effd04b48c"}, + {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b2036479b6b94afaaca7d07b8a68dc0e67b0caf5f6293bb6a5a1825f5923000"}, + {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:365783e1b7c40b59ed4ce2b5a7491bae48f41cd2c30d52647a5b1ee8604c68ad"}, + {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:270e653b5a4b557476a1ed40e6b6ce82f331aab669620d7c95c658ef976c9c5e"}, + {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8960fabc20bfe4fafb941067cda8e23c8c17c98c121aa31c7bf0cdab11b07842"}, + {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f21e8f2abed9a44afc3d15bba22e0dfc71e5fa859bea916e42354c16102b036f"}, + {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fecd55e7418fabd297fd836e65cbd6371aa4035a264998a091bbf13f94d9c44d"}, + {file = 
"aiohttp-3.10.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:badb51d851358cd7535b647bb67af4854b64f3c85f0d089c737f75504d5910ec"}, + {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e860985f30f3a015979e63e7ba1a391526cdac1b22b7b332579df7867848e255"}, + {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:71462f8eeca477cbc0c9700a9464e3f75f59068aed5e9d4a521a103692da72dc"}, + {file = "aiohttp-3.10.8-cp313-cp313-win32.whl", hash = "sha256:177126e971782769b34933e94fddd1089cef0fe6b82fee8a885e539f5b0f0c6a"}, + {file = "aiohttp-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:98a4eb60e27033dee9593814ca320ee8c199489fbc6b2699d0f710584db7feb7"}, + {file = "aiohttp-3.10.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ffef3d763e4c8fc97e740da5b4d0f080b78630a3914f4e772a122bbfa608c1db"}, + {file = "aiohttp-3.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:597128cb7bc5f068181b49a732961f46cb89f85686206289d6ccb5e27cb5fbe2"}, + {file = "aiohttp-3.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f23a6c1d09de5de89a33c9e9b229106cb70dcfdd55e81a3a3580eaadaa32bc92"}, + {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da57af0c54a302b7c655fa1ccd5b1817a53739afa39924ef1816e7b7c8a07ccb"}, + {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e7a6af57091056a79a35104d6ec29d98ec7f1fb7270ad9c6fff871b678d1ff8"}, + {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32710d6b3b6c09c60c794d84ca887a3a2890131c0b02b3cefdcc6709a2260a7c"}, + {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b91f4f62ad39a8a42d511d66269b46cb2fb7dea9564c21ab6c56a642d28bff5"}, + {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:471a8c47344b9cc309558b3fcc469bd2c12b49322b4b31eb386c4a2b2d44e44a"}, + {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc0e7f91705445d79beafba9bb3057dd50830e40fe5417017a76a214af54e122"}, + {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:85431c9131a9a0f65260dc7a65c800ca5eae78c4c9931618f18c8e0933a0e0c1"}, + {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:b91557ee0893da52794b25660d4f57bb519bcad8b7df301acd3898f7197c5d81"}, + {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:4954e6b06dd0be97e1a5751fc606be1f9edbdc553c5d9b57d72406a8fbd17f9d"}, + {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a087c84b4992160ffef7afd98ef24177c8bd4ad61c53607145a8377457385100"}, + {file = "aiohttp-3.10.8-cp38-cp38-win32.whl", hash = "sha256:e1f0f7b27171b2956a27bd8f899751d0866ddabdd05cbddf3520f945130a908c"}, + {file = "aiohttp-3.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:c4916070e12ae140110aa598031876c1bf8676a36a750716ea0aa5bd694aa2e7"}, + {file = "aiohttp-3.10.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5284997e3d88d0dfb874c43e51ae8f4a6f4ca5b90dcf22995035187253d430db"}, + {file = "aiohttp-3.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9443d9ebc5167ce1fbb552faf2d666fb22ef5716a8750be67efd140a7733738c"}, + {file = "aiohttp-3.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b667e2a03407d79a76c618dc30cedebd48f082d85880d0c9c4ec2faa3e10f43e"}, + {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98fae99d5c2146f254b7806001498e6f9ffb0e330de55a35e72feb7cb2fa399b"}, + {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8296edd99d0dd9d0eb8b9e25b3b3506eef55c1854e9cc230f0b3f885f680410b"}, + {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1ce46dfb49cfbf9e92818be4b761d4042230b1f0e05ffec0aad15b3eb162b905"}, + {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c38cfd355fd86c39b2d54651bd6ed7d63d4fe3b5553f364bae3306e2445f847"}, + {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:713dff3f87ceec3bde4f3f484861464e722cf7533f9fa6b824ec82bb5a9010a7"}, + {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:21a72f4a9c69a8567a0aca12042f12bba25d3139fd5dd8eeb9931f4d9e8599cd"}, + {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6d1ad868624f6cea77341ef2877ad4e71f7116834a6cd7ec36ec5c32f94ee6ae"}, + {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a78ba86d5a08207d1d1ad10b97aed6ea48b374b3f6831d02d0b06545ac0f181e"}, + {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:aff048793d05e1ce05b62e49dccf81fe52719a13f4861530706619506224992b"}, + {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d088ca05381fd409793571d8e34eca06daf41c8c50a05aeed358d2d340c7af81"}, + {file = "aiohttp-3.10.8-cp39-cp39-win32.whl", hash = "sha256:ee97c4e54f457c366e1f76fbbf3e8effee9de57dae671084a161c00f481106ce"}, + {file = "aiohttp-3.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:d95ae4420669c871667aad92ba8cce6251d61d79c1a38504621094143f94a8b4"}, + {file = "aiohttp-3.10.8.tar.gz", hash = "sha256:21f8225f7dc187018e8433c9326be01477fb2810721e048b33ac49091b19fb4a"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.12.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] + +[[package]] +name = "aioitertools" +version = "0.12.0" +description = "itertools and builtins for AsyncIO and mixed iterables" +optional = false +python-versions = ">=3.8" +files = [ + 
{file = "aioitertools-0.12.0-py3-none-any.whl", hash = "sha256:fc1f5fac3d737354de8831cbba3eb04f79dd649d8f3afb4c5b114925e662a796"}, + {file = "aioitertools-0.12.0.tar.gz", hash = "sha256:c2a9055b4fbb7705f561b9d86053e8af5d10cc845d22c32008c43490b2d8dd6b"}, +] + +[package.extras] +dev = ["attribution (==1.8.0)", "black (==24.8.0)", "build (>=1.2)", "coverage (==7.6.1)", "flake8 (==7.1.1)", "flit (==3.9.0)", "mypy (==1.11.2)", "ufmt (==2.7.1)", "usort (==1.0.8.post1)"] +docs = ["sphinx (==8.0.2)", "sphinx-mdinclude (==0.6.2)"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.6.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +files = [ + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = 
["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "attrs" +version = "24.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, +] + +[package.extras] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] + +[[package]] +name = "boto3" +version = "1.34.131" +description = "The AWS SDK for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "boto3-1.34.131-py3-none-any.whl", hash = "sha256:05e388cb937e82be70bfd7eb0c84cf8011ff35cf582a593873ac21675268683b"}, + {file = "boto3-1.34.131.tar.gz", hash = "sha256:dab8f72a6c4e62b4fd70da09e08a6b2a65ea2115b27dd63737142005776ef216"}, +] + +[package.dependencies] +botocore = ">=1.34.131,<1.35.0" +jmespath = 
">=0.7.1,<2.0.0" +s3transfer = ">=0.10.0,<0.11.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.34.131" +description = "Low-level, data-driven core of boto 3." +optional = false +python-versions = ">=3.8" +files = [ + {file = "botocore-1.34.131-py3-none-any.whl", hash = "sha256:13b011d7b206ce00727dcee26548fa3b550db9046d5a0e90ac25a6e6c8fde6ef"}, + {file = "botocore-1.34.131.tar.gz", hash = "sha256:502ddafe1d627fcf1e4c007c86454e5dd011dba7c58bd8e8a5368a79f3e387dc"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} + +[package.extras] +crt = ["awscrt (==0.20.11)"] + +[[package]] +name = "cachecontrol" +version = "0.14.0" +description = "httplib2 caching for requests" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachecontrol-0.14.0-py3-none-any.whl", hash = "sha256:f5bf3f0620c38db2e5122c0726bdebb0d16869de966ea6a2befe92470b740ea0"}, + {file = "cachecontrol-0.14.0.tar.gz", hash = "sha256:7db1195b41c81f8274a7bbd97c956f44e8348265a1bc7641c37dfebc39f0c938"}, +] + +[package.dependencies] +msgpack = ">=0.5.2,<2.0.0" +requests = ">=2.16.0" + +[package.extras] +dev = ["CacheControl[filecache,redis]", "black", "build", "cherrypy", "furo", "mypy", "pytest", "pytest-cov", "sphinx", "sphinx-copybutton", "tox", "types-redis", "types-requests"] +filecache = ["filelock (>=3.8.0)"] +redis = ["redis (>=2.10.5)"] + +[[package]] +name = "cachetools" +version = "5.5.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, +] + +[[package]] +name = "certifi" 
+version = "2024.8.30" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = 
"cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = 
"cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "cryptography" +version = "43.0.1" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, + {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, + {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, + {file = 
"cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, + {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, + {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, + {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "dependency-injector" +version = "4.42.0" +description = "Dependency injection framework for Python" +optional = false +python-versions = "*" +files = [ + {file = "dependency_injector-4.42.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0bb90f064970366acddc6e946626dd9b59505dc0f798459fe31fce458a8d0fc5"}, + {file = "dependency_injector-4.42.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf96588c58790768c9ebb2f61532dc847236bdf7317b07cfdf75a14d63c3114e"}, + {file = 
"dependency_injector-4.42.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54b3ef487584bf2f9945b3f2f97e0ada8933e773960e0a3c4b40d369e37003fb"}, + {file = "dependency_injector-4.42.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0625b993dc3c58c35b613d9ccdf0eb8420f978f62d19d71820ea7630326972d"}, + {file = "dependency_injector-4.42.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4a4eb3356e26273f8065266fab4fd51c863afafe10e586d3bfc67340677e2674"}, + {file = "dependency_injector-4.42.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dffba9e2864b5d002f5b3bd89df4cde4f20dec4c2cd073ce0bd460229ed0afdb"}, + {file = "dependency_injector-4.42.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:19b50b48030dfa7cd3488253cbe7ee69c9b737275ac1aa184d9e995f44bb8317"}, + {file = "dependency_injector-4.42.0-cp310-cp310-win32.whl", hash = "sha256:68af4878040573a710202e171ecc1cec2c47910783f59d14c299f68439f8fe0b"}, + {file = "dependency_injector-4.42.0-cp310-cp310-win_amd64.whl", hash = "sha256:8eb65b102b36d171dfdf6c9b06766797d97d535b83a61859d1d91092b960c05a"}, + {file = "dependency_injector-4.42.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:952565b29186ce59f6972eb4cd37a927b3f3b61a2715345f0d6f4a7c01305ebb"}, + {file = "dependency_injector-4.42.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae4593738906f8b2ec3e61fda30e6b7dc24f34b83dad5d4ac0d8a07c4be6c3e8"}, + {file = "dependency_injector-4.42.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffddebe6d22394b0e8b3a41ef8d140d2bd829cbcc39fc48e5f560fe3db8ac3f5"}, + {file = "dependency_injector-4.42.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8271ffd0c95c598c9340b8f4c21478a7029c4dcba85d377fcbedf708f3f1564a"}, + {file = "dependency_injector-4.42.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:8796683cce845204ebe6594b617ce2407742534fce4635db5c30e8dcf4a0e2f0"}, + {file = "dependency_injector-4.42.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8fc9aedb66a26b6fc0cd6065e52c45e9197cec6ab0b32b9e565421ad2be66a88"}, + {file = "dependency_injector-4.42.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:36f78f7e3fe5c95f11757928d7b93c0d4a5fd38be45172fa1a9eb1653d3261de"}, + {file = "dependency_injector-4.42.0-cp311-cp311-win32.whl", hash = "sha256:9eb1dcb897c00b853e3843ba15947cc2b25b6af947077ea65d7d5bef84d0d0d8"}, + {file = "dependency_injector-4.42.0-cp311-cp311-win_amd64.whl", hash = "sha256:cd794587a8b71cda35231b3351977765ffd3f09dd8b8be1f981726c76b44742f"}, + {file = "dependency_injector-4.42.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f283c0c67cd71723744b6016f6f55b6e2ee790059816b0cc1d6792ec236e62e9"}, + {file = "dependency_injector-4.42.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1e9016f864fb8f58a53a7ce13ddccb3b44d5333b205101301d42168dfbada5e"}, + {file = "dependency_injector-4.42.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c5a39274cd130f9adc6beb7b9488c7a2faf55f9d894124908f1209d9c84c3b7"}, + {file = "dependency_injector-4.42.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b478f65b20844ed3f9fda5edb1f09e34575e006c439c387283fd833053e3bd1"}, + {file = "dependency_injector-4.42.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:524654f11c839b2e8ea0b217f49f762182daf244a5ecdf7339b664d9d77be7f1"}, + {file = "dependency_injector-4.42.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:386bc36d337c17e149a11f89d33e6383b9d05cff18c68d8e95a50f3483b03ff1"}, + {file = "dependency_injector-4.42.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c2d632b9ff85cc9158c4f815109faedbd4c384bab839aefc288c3249024a2a66"}, + {file = "dependency_injector-4.42.0-cp312-cp312-win32.whl", hash = 
"sha256:ecfca509e0108fbb85c2af3ebaa8eaa4a6dcacafc93d0ad8e0ca46b74c8d0df7"}, + {file = "dependency_injector-4.42.0-cp312-cp312-win_amd64.whl", hash = "sha256:34d506b31b150ed2f8191ec0b7f9f7e67f8d8ba90414d23c14c4e3515ac9d0c2"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bda68a7f6eef700c493de94daf532bd021aa65458209c11f8db6b0012aa1b32f"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578707e06c45b46a8db7eb5ebcd7a566bd0602197322184cf4a4e8c23a513ce4"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89fd5f7937977b2deb8454bf4c7f3ffa9c630d4e893d99b1ceac9ebb075a5527"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:5997868c51898951abd1e606dfd993ed87df49beafa04652dfc6482e64f79771"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:07d262d8438d79bc4d4fe500d6bbffaf24c7b3b5f2c87973de6dba7b33851f20"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:ac755e5aaf268edc00590bd4c8b6618f6e5dfdedbc417dd532ea8d61f6d372b3"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-win32.whl", hash = "sha256:d2ff45f5ddcab3c833e685c8851f03b7bb7360911db39f1960bd8a8f7ef7e515"}, + {file = "dependency_injector-4.42.0-cp36-cp36m-win_amd64.whl", hash = "sha256:656d89135118b31ac8e49c4fae09ae31119f50a61ba6af0da121aea774210d70"}, + {file = "dependency_injector-4.42.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8797cfe97bd8cb3672286a9c759244032b46c25e4360d91f710b348c2d0605bc"}, + {file = "dependency_injector-4.42.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e071f40a9d911b16555171a5030e27c6c3b379d984fff2b4a78a10183108557"}, + {file = 
"dependency_injector-4.42.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6204e5da66e92e51df6df363dce98ad994c969631535909014df3e4e5c8a3b23"}, + {file = "dependency_injector-4.42.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:86814cba9a39658b3632b40ae93e5dc44a7de7214b9f23bf3311a216bdf59526"}, + {file = "dependency_injector-4.42.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:d7c26d3de4fb98594c23bc8813933ede4f624544ab7f5b4f13d75a672ed3f276"}, + {file = "dependency_injector-4.42.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:82cfa80b6c57463314e18aa3d01fba62131aa98f15bad55f185eef7e101a6f34"}, + {file = "dependency_injector-4.42.0-cp37-cp37m-win32.whl", hash = "sha256:690d3c0cafc2e7ada536255f093fe05aff8bac6d5b46c9ebe144ff004c509498"}, + {file = "dependency_injector-4.42.0-cp37-cp37m-win_amd64.whl", hash = "sha256:14b6e91997691b26b62b39dfd50b6246765274ce2768a9bb491bcd77b994700b"}, + {file = "dependency_injector-4.42.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5437faac11595c0d50275d520554dc85d9e9909d9d1c7eb8c56cf28b91869f1a"}, + {file = "dependency_injector-4.42.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:611cb623399aac15e356a68ad5d8ee9e01c02b47f19b81c061f510c6e624bed7"}, + {file = "dependency_injector-4.42.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8507e2c7c769ff6b74d5cbca58cc6d08c34d09c5972c890b2514fb653c930e8"}, + {file = "dependency_injector-4.42.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51cccaf884d62953a688da483494b113e670598c4591e99701638db0baa8a5fe"}, + {file = "dependency_injector-4.42.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:96e10090424415d6762da4945ec06b3391ce03a7da43e01a64b09e44cd25563e"}, + {file = "dependency_injector-4.42.0-cp38-cp38-musllinux_1_2_i686.whl", hash = 
"sha256:39c21064fc042a809dfc5f93cd149ecb893be9eaf8f839d147ea2d1d19507b19"}, + {file = "dependency_injector-4.42.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9eebc5775de27eb33b994aa864f81061b39007b4370fbde3525749acf5fc2d68"}, + {file = "dependency_injector-4.42.0-cp38-cp38-win32.whl", hash = "sha256:1e65c16fe88ec4bdb80dff01874bc3447809f78942c7c77293d0ffdf15e2cddf"}, + {file = "dependency_injector-4.42.0-cp38-cp38-win_amd64.whl", hash = "sha256:8d198e25ecbfbef4c1c5f331d0630bfcb1252534dcf32600e0e72d0aab292213"}, + {file = "dependency_injector-4.42.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b285f2399594bdb7770fe1e7d9dd3180cabfefcac93e64ed61cda0cf73be943"}, + {file = "dependency_injector-4.42.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6e52a74f4d06dcde0033e92f6a29fa5f91b4a81be5f99795315e5f945bdff8b"}, + {file = "dependency_injector-4.42.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28bb5b94adc35d997d1e34b829c03104ff3756024ded6da767198f7291c4e959"}, + {file = "dependency_injector-4.42.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f38c9ddfb9160aee62cbe424d7bf67099b7f21c7777cdbbad7c1106dc35f667f"}, + {file = "dependency_injector-4.42.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a158bf48ebbd04ae97a69ffbf00e3a45c6f6879886b23583eeee655a544838a3"}, + {file = "dependency_injector-4.42.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:435bed281c68146e789970938ca6576ac999cd4e09f7f64ddc7674158bc62d42"}, + {file = "dependency_injector-4.42.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:43a70b098626eb2e72c77a819c31641de5e96b87573c37f459ec580035390868"}, + {file = "dependency_injector-4.42.0-cp39-cp39-win32.whl", hash = "sha256:6524d1403a589f3eebdf925aef95f350cf7b86f72e09c798ec0a1e6464435e1a"}, + {file = "dependency_injector-4.42.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:932b0c6cccfb336cf7d0b8d8fce36f58983d543a3a4e81309d6190732160c3ad"}, + {file = "dependency_injector-4.42.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:e040496d6d22ed0bab2ab96a13734a22bd4f60b74b73a4a682688fc0e615841c"}, + {file = "dependency_injector-4.42.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2507d1de74314340479b57e31c103e0fcfa5135de03b07a774794cfb244f6098"}, + {file = "dependency_injector-4.42.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67f52a45a00fd93e33b3d5c45181298ae2f09c56640e41524eec517ceccaf2fc"}, + {file = "dependency_injector-4.42.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b20c4d1be03eac0ab8e7ec91604f58a9fae705f6a4e228802c6302464cc1701"}, + {file = "dependency_injector-4.42.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0d11dcd7461747ae968e5e459eb5389652e6f49ab00eecfe05b19249f6d5aebd"}, + {file = "dependency_injector-4.42.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51f6fce317725cdc738ef45e5a56c28c51d15b060a346324ed8c547d9965035f"}, + {file = "dependency_injector-4.42.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f677d62b6de82a7e6c14db1f40831a80cd54324d20bb1c053f200860202b5ec1"}, + {file = "dependency_injector-4.42.0-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45498d2bc1b801686f3123d753b47310876dab34642bcda283df774dcfd1a2de"}, + {file = "dependency_injector-4.42.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1b99e832919d784a7979bef3afa7ba10653c24e4ea55177578e87fb645bbf43"}, + {file = "dependency_injector-4.42.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a3fcdb443222b03770c6dbee0feb425e2d633099bde9ece758136cca4d73fc6"}, + {file = 
"dependency_injector-4.42.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c1a121d37f9511604afbcbc8fc30c322fb3161e71178c240afaad521fa1916a"}, + {file = "dependency_injector-4.42.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1464bc2c707a4657c31188bf8fda45b107cb67c324dde829b69db7f1d4a458f"}, + {file = "dependency_injector-4.42.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d83f3a38be04d54c9abda8d95bf26ef6f4f913c54bcc0511e17c65d4a605954"}, + {file = "dependency_injector-4.42.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:c57016c155071b88d8f14c79e7c06557b5518ce4df2c57c7e7f214118b6cdeda"}, + {file = "dependency_injector-4.42.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b6ce807fec00c9ef93c318ce28f641f40a01e76df2d4505f2ba16b2dd3f8943"}, + {file = "dependency_injector-4.42.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee595345d77a2375eaf3fb5c75d099c7593b6efd3088d7a44f5553241f66194"}, + {file = "dependency_injector-4.42.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:502c9dedda77f35154d0ff934d2368e90749aa03198a9356081b82b63c387fcd"}, + {file = "dependency_injector-4.42.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2048628b98c61ec96fe2c2022b175cd1f629516bae635f2e4a9bca33aa9fa85"}, + {file = "dependency_injector-4.42.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b29610964250ee1f532c079112e1a58e41f27670b02a5890433ed8ca1313a9ed"}, + {file = "dependency_injector-4.42.0.tar.gz", hash = "sha256:7057fc07b89aa09bc1c75e4190a8a6b86a3038d91a6d8302aea4f8094b184cd0"}, +] + +[package.dependencies] +six = ">=1.7.0,<=1.16.0" + +[package.extras] +aiohttp = ["aiohttp"] +flask = ["flask"] +pydantic = ["pydantic"] 
+yaml = ["pyyaml"] + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "dnspython" +version = "2.6.1" +description = "DNS toolkit" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, + {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, +] + +[package.extras] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=41)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=0.9.25)"] +idna = ["idna (>=3.6)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "email-validator" +version = "2.2.0" +description = "A robust email address syntax and deliverability validation library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"}, + {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +idna = ">=2.0.0" + +[[package]] +name = "faiss-cpu" +version = "1.8.0.post1" +description = "A library for efficient similarity search and clustering of dense vectors." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "faiss_cpu-1.8.0.post1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:fd84721eb599aa1da19b1b36345bb8705a60bb1d2887bbbc395a29e3d36a1a62"}, + {file = "faiss_cpu-1.8.0.post1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b78ff9079d15fd0f156bf5dd8a2975a8abffac1854a86ece263eec1500a2e836"}, + {file = "faiss_cpu-1.8.0.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9de25c943d1789e35fe06a20884c88cd32aedbb1a33bb8da2238cdea7bd9633f"}, + {file = "faiss_cpu-1.8.0.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adae0f1b144e7216da696f14bc4991ca4300c94baaa59247c3d322588e661c95"}, + {file = "faiss_cpu-1.8.0.post1-cp310-cp310-win_amd64.whl", hash = "sha256:00345290680a444a4b4cb2d98a3844bb5c401a2160fee547c7631d759fd2ec3e"}, + {file = "faiss_cpu-1.8.0.post1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:8d4bade10cb63e9f9ff261751edd7eb097b1f4bf30be4d0d25d6f688559d795e"}, + {file = "faiss_cpu-1.8.0.post1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:20bd43eca3b7d77e71ea56b7a558cc28e900d8abff417eb285e2d92e95d934d4"}, + {file = "faiss_cpu-1.8.0.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8542a87743a7f94ac656fd3e9592ad57e58b04d961ad2fe654a22a8ca59defdb"}, + {file = "faiss_cpu-1.8.0.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed46928de3dc20170b10fec89c54075a11383c2aaf4f119c63e0f6ae5a507d74"}, + {file = "faiss_cpu-1.8.0.post1-cp311-cp311-win_amd64.whl", hash = "sha256:4fa5fc8ea210b919aa469e27d6687e50052db906e7fec3f2257178b1384fa18b"}, + {file = "faiss_cpu-1.8.0.post1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:96aec0d08a3099883af3a9b6356cfe736e8bd879318a940a27e9d1ae6f33d788"}, + {file = "faiss_cpu-1.8.0.post1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:92b06147fa84732ecdc965922e8ef50dc7011ef8be65821ff4abb2118cb5dce0"}, + {file = 
"faiss_cpu-1.8.0.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:709ef9394d1148aef70dbe890edbde8c282a4a2e06a8b69ab64f65e90f5ba572"}, + {file = "faiss_cpu-1.8.0.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:327a9c30971bf72cd8392b15eb4aff5d898c453212eae656dfaa3ba555b9ca0c"}, + {file = "faiss_cpu-1.8.0.post1-cp312-cp312-win_amd64.whl", hash = "sha256:8756f1d93faba56349883fa2f5d47fe36bb2f11f789200c6b1c691ef805485f2"}, + {file = "faiss_cpu-1.8.0.post1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:f4a3045909c447bf1955b70083891e80f2c87c5427f20cae25245e08ec5c9e52"}, + {file = "faiss_cpu-1.8.0.post1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8842b7fc921ca1fafdb0845f2ba029e79df04eebae72ab135239f93478a9b7a2"}, + {file = "faiss_cpu-1.8.0.post1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d5a9799634e32c3862d5436d1e78112ed9a38f319e4523f5916e55d86adda8f"}, + {file = "faiss_cpu-1.8.0.post1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a70923b0fbbb40f647e20bcbcbfd472277e6d84bb23ff12d2a94b6841806b55"}, + {file = "faiss_cpu-1.8.0.post1-cp38-cp38-win_amd64.whl", hash = "sha256:ce652df3c4dd50c88ac9235d072f30ce60694dc422c5f523bbbcab320e8f3097"}, + {file = "faiss_cpu-1.8.0.post1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:83ef04b17b19189dd6601a941bdf4bfa9de0740dbcd80305aeba51a1b1955f80"}, + {file = "faiss_cpu-1.8.0.post1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c50c8697077470ede7f1939ef8dc8a846ec19cf1893b543f6b67f9af03b0a122"}, + {file = "faiss_cpu-1.8.0.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ce428a7a67fe5c64047280e5e12a8dbdecf7002f9d127b26cf1db354e9fe76"}, + {file = "faiss_cpu-1.8.0.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f3b36b80380bae523e3198cfb4a137867055945ce7bf10d18fe9f0284f2fb47"}, + {file = "faiss_cpu-1.8.0.post1-cp39-cp39-win_amd64.whl", 
hash = "sha256:4fcc67a2353f08a20c1ab955de3cde14ef3b447761b26244a5aa849c15cbc9b3"}, + {file = "faiss_cpu-1.8.0.post1.tar.gz", hash = "sha256:5686af34414678c3d49c4fa8d774df7156e9cb48d7029071e56230e74b01cc13"}, +] + +[package.dependencies] +numpy = ">=1.0,<2.0" +packaging = "*" + +[[package]] +name = "fastapi" +version = "0.111.1" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.111.1-py3-none-any.whl", hash = "sha256:4f51cfa25d72f9fbc3280832e84b32494cf186f50158d364a8765aabf22587bf"}, + {file = "fastapi-0.111.1.tar.gz", hash = "sha256:ddd1ac34cb1f76c2e2d7f8545a4bcb5463bce4834e81abf0b189e0c359ab2413"}, +] + +[package.dependencies] +email_validator = ">=2.0.0" +fastapi-cli = ">=0.0.2" +httpx = ">=0.23.0" +jinja2 = ">=2.11.2" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +python-multipart = ">=0.0.7" +starlette = ">=0.37.2,<0.38.0" +typing-extensions = ">=4.8.0" +uvicorn = {version = ">=0.12.0", extras = ["standard"]} + +[package.extras] +all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "fastapi-cli" +version = "0.0.5" +description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 
🚀" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi_cli-0.0.5-py3-none-any.whl", hash = "sha256:e94d847524648c748a5350673546bbf9bcaeb086b33c24f2e82e021436866a46"}, + {file = "fastapi_cli-0.0.5.tar.gz", hash = "sha256:d30e1239c6f46fcb95e606f02cdda59a1e2fa778a54b64686b3ff27f6211ff9f"}, +] + +[package.dependencies] +typer = ">=0.12.3" +uvicorn = {version = ">=0.15.0", extras = ["standard"]} + +[package.extras] +standard = ["uvicorn[standard] (>=0.15.0)"] + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] + +[[package]] +name = "firebase-admin" +version = "6.5.0" +description = "Firebase Admin Python SDK" +optional = false +python-versions = ">=3.7" +files = [ + {file = "firebase_admin-6.5.0-py3-none-any.whl", hash = "sha256:fe34ee3ca0e625c5156b3931ca4b4b69b5fc344dbe51bba9706ff674ce277898"}, + {file = "firebase_admin-6.5.0.tar.gz", hash = "sha256:e716dde1447f0a1cd1523be76ff872df33c4e1a3c079564ace033b2ad60bcc4f"}, +] + +[package.dependencies] +cachecontrol = ">=0.12.6" +google-api-core = {version = ">=1.22.1,<3.0.0dev", extras = ["grpc"], markers = "platform_python_implementation != \"PyPy\""} +google-api-python-client = ">=1.7.8" +google-cloud-firestore = {version = ">=2.9.1", markers = "platform_python_implementation != 
\"PyPy\""} +google-cloud-storage = ">=1.37.1" +pyjwt = {version = ">=2.5.0", extras = ["crypto"]} + +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = 
"frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = 
"frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = 
"frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = 
"sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + +[[package]] +name = "fsspec" +version = "2024.9.0" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"}, + {file = "fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp 
(!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + +[[package]] +name = "google-api-core" +version = "2.20.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_core-2.20.0-py3-none-any.whl", hash = "sha256:ef0591ef03c30bb83f79b3d0575c3f31219001fc9c5cf37024d08310aeffed8a"}, + {file = "google_api_core-2.20.0.tar.gz", hash = "sha256:f74dff1889ba291a4b76c5079df0711810e2d9da81abfdc99957bc961c1eb28f"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +grpcio = {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""} +grpcio-status = {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""} +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", 
"grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-api-python-client" +version = "2.147.0" +description = "Google API Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_python_client-2.147.0-py2.py3-none-any.whl", hash = "sha256:c6ecfa193c695baa41e84562d8f8f244fcd164419eca3fc9fd7565646668f9b2"}, + {file = "google_api_python_client-2.147.0.tar.gz", hash = "sha256:e864c2cf61d34c00f05278b8bdb72b93b6fa34f0de9ead51d20435f3b65f91be"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0" +google-auth = ">=1.32.0,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0.dev0" +google-auth-httplib2 = ">=0.2.0,<1.0.0" +httplib2 = ">=0.19.0,<1.dev0" +uritemplate = ">=3.0.1,<5" + +[[package]] +name = "google-auth" +version = "2.35.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.35.0-py2.py3-none-any.whl", hash = "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f"}, + {file = "google_auth-2.35.0.tar.gz", hash = "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-httplib2" +version = "0.2.0" +description = "Google Authentication Library: httplib2 transport" +optional = false +python-versions = "*" +files = [ + {file = "google-auth-httplib2-0.2.0.tar.gz", hash = 
"sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"}, + {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"}, +] + +[package.dependencies] +google-auth = "*" +httplib2 = ">=0.19.0" + +[[package]] +name = "google-cloud-core" +version = "2.4.1" +description = "Google Cloud API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"}, + {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] + +[[package]] +name = "google-cloud-firestore" +version = "2.19.0" +description = "Google Cloud Firestore API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_firestore-2.19.0-py2.py3-none-any.whl", hash = "sha256:b49f0019d7bd0d4ab5972a4cff13994b0aabe72d24242200d904db2fb49df7f7"}, + {file = "google_cloud_firestore-2.19.0.tar.gz", hash = "sha256:1b2ce6e0b791aee89a1e4f072beba1012247e89baca361eed721fb467fe054b0"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +google-cloud-core = ">=1.4.1,<3.0.0dev" +proto-plus = {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""} +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-storage" +version = "2.18.2" +description = "Google Cloud Storage API 
client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_storage-2.18.2-py2.py3-none-any.whl", hash = "sha256:97a4d45c368b7d401ed48c4fdfe86e1e1cb96401c9e199e419d289e2c0370166"}, + {file = "google_cloud_storage-2.18.2.tar.gz", hash = "sha256:aaf7acd70cdad9f274d29332673fcab98708d0e1f4dceb5a5356aaef06af4d99"}, +] + +[package.dependencies] +google-api-core = ">=2.15.0,<3.0.0dev" +google-auth = ">=2.26.1,<3.0dev" +google-cloud-core = ">=2.3.0,<3.0dev" +google-crc32c = ">=1.0,<2.0dev" +google-resumable-media = ">=2.7.2" +requests = ">=2.18.0,<3.0.0dev" + +[package.extras] +protobuf = ["protobuf (<6.0.0dev)"] +tracing = ["opentelemetry-api (>=1.1.0)"] + +[[package]] +name = "google-crc32c" +version = "1.6.0" +description = "A python wrapper of the C library 'Google CRC32C'" +optional = false +python-versions = ">=3.9" +files = [ + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa"}, + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc"}, + {file = "google_crc32c-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42"}, + {file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4"}, + 
{file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f"}, + {file = "google_crc32c-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760"}, + {file = "google_crc32c-1.6.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205"}, + {file = "google_crc32c-1.6.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57"}, + {file = "google_crc32c-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c"}, + {file = "google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc"}, + {file = "google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d"}, + {file = "google_crc32c-1.6.0.tar.gz", hash = "sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc"}, +] + +[package.extras] +testing = ["pytest"] + +[[package]] +name = "google-resumable-media" +version = "2.7.2" +description = "Utilities for Google Media Downloads and Resumable Uploads" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa"}, + {file = "google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0"}, +] + +[package.dependencies] +google-crc32c = ">=1.0,<2.0dev" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + +[[package]] +name = 
"googleapis-common-protos" +version = "1.65.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, + {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, +] + +[package.dependencies] +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "grpcio" +version = "1.66.2" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.8" +files = [ + {file = "grpcio-1.66.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:fe96281713168a3270878255983d2cb1a97e034325c8c2c25169a69289d3ecfa"}, + {file = "grpcio-1.66.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:73fc8f8b9b5c4a03e802b3cd0c18b2b06b410d3c1dcbef989fdeb943bd44aff7"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:03b0b307ba26fae695e067b94cbb014e27390f8bc5ac7a3a39b7723fed085604"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d69ce1f324dc2d71e40c9261d3fdbe7d4c9d60f332069ff9b2a4d8a257c7b2b"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05bc2ceadc2529ab0b227b1310d249d95d9001cd106aa4d31e8871ad3c428d73"}, + {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ac475e8da31484efa25abb774674d837b343afb78bb3bcdef10f81a93e3d6bf"}, + {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0be4e0490c28da5377283861bed2941d1d20ec017ca397a5df4394d1c31a9b50"}, + {file = "grpcio-1.66.2-cp310-cp310-win32.whl", hash = 
"sha256:4e504572433f4e72b12394977679161d495c4c9581ba34a88d843eaf0f2fbd39"}, + {file = "grpcio-1.66.2-cp310-cp310-win_amd64.whl", hash = "sha256:2018b053aa15782db2541ca01a7edb56a0bf18c77efed975392583725974b249"}, + {file = "grpcio-1.66.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:2335c58560a9e92ac58ff2bc5649952f9b37d0735608242973c7a8b94a6437d8"}, + {file = "grpcio-1.66.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45a3d462826f4868b442a6b8fdbe8b87b45eb4f5b5308168c156b21eca43f61c"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a9539f01cb04950fd4b5ab458e64a15f84c2acc273670072abe49a3f29bbad54"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce89f5876662f146d4c1f695dda29d4433a5d01c8681fbd2539afff535da14d4"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25a14af966438cddf498b2e338f88d1c9706f3493b1d73b93f695c99c5f0e2a"}, + {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6001e575b8bbd89eee11960bb640b6da6ae110cf08113a075f1e2051cc596cae"}, + {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4ea1d062c9230278793820146c95d038dc0f468cbdd172eec3363e42ff1c7d01"}, + {file = "grpcio-1.66.2-cp311-cp311-win32.whl", hash = "sha256:38b68498ff579a3b1ee8f93a05eb48dc2595795f2f62716e797dc24774c1aaa8"}, + {file = "grpcio-1.66.2-cp311-cp311-win_amd64.whl", hash = "sha256:6851de821249340bdb100df5eacfecfc4e6075fa85c6df7ee0eb213170ec8e5d"}, + {file = "grpcio-1.66.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:802d84fd3d50614170649853d121baaaa305de7b65b3e01759247e768d691ddf"}, + {file = "grpcio-1.66.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:80fd702ba7e432994df208f27514280b4b5c6843e12a48759c9255679ad38db8"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:12fda97ffae55e6526825daf25ad0fa37483685952b5d0f910d6405c87e3adb6"}, + {file = 
"grpcio-1.66.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:950da58d7d80abd0ea68757769c9db0a95b31163e53e5bb60438d263f4bed7b7"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e636ce23273683b00410f1971d209bf3689238cf5538d960adc3cdfe80dd0dbd"}, + {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a917d26e0fe980b0ac7bfcc1a3c4ad6a9a4612c911d33efb55ed7833c749b0ee"}, + {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49f0ca7ae850f59f828a723a9064cadbed90f1ece179d375966546499b8a2c9c"}, + {file = "grpcio-1.66.2-cp312-cp312-win32.whl", hash = "sha256:31fd163105464797a72d901a06472860845ac157389e10f12631025b3e4d0453"}, + {file = "grpcio-1.66.2-cp312-cp312-win_amd64.whl", hash = "sha256:ff1f7882e56c40b0d33c4922c15dfa30612f05fb785074a012f7cda74d1c3679"}, + {file = "grpcio-1.66.2-cp313-cp313-linux_armv7l.whl", hash = "sha256:3b00efc473b20d8bf83e0e1ae661b98951ca56111feb9b9611df8efc4fe5d55d"}, + {file = "grpcio-1.66.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1caa38fb22a8578ab8393da99d4b8641e3a80abc8fd52646f1ecc92bcb8dee34"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c408f5ef75cfffa113cacd8b0c0e3611cbfd47701ca3cdc090594109b9fcbaed"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c806852deaedee9ce8280fe98955c9103f62912a5b2d5ee7e3eaa284a6d8d8e7"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f145cc21836c332c67baa6fc81099d1d27e266401565bf481948010d6ea32d46"}, + {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:73e3b425c1e155730273f73e419de3074aa5c5e936771ee0e4af0814631fb30a"}, + {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:9c509a4f78114cbc5f0740eb3d7a74985fd2eff022971bc9bc31f8bc93e66a3b"}, + {file = "grpcio-1.66.2-cp313-cp313-win32.whl", 
hash = "sha256:20657d6b8cfed7db5e11b62ff7dfe2e12064ea78e93f1434d61888834bc86d75"}, + {file = "grpcio-1.66.2-cp313-cp313-win_amd64.whl", hash = "sha256:fb70487c95786e345af5e854ffec8cb8cc781bcc5df7930c4fbb7feaa72e1cdf"}, + {file = "grpcio-1.66.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:a18e20d8321c6400185b4263e27982488cb5cdd62da69147087a76a24ef4e7e3"}, + {file = "grpcio-1.66.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:02697eb4a5cbe5a9639f57323b4c37bcb3ab2d48cec5da3dc2f13334d72790dd"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:99a641995a6bc4287a6315989ee591ff58507aa1cbe4c2e70d88411c4dcc0839"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ed71e81782966ffead60268bbda31ea3f725ebf8aa73634d5dda44f2cf3fb9c"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbd27c24a4cc5e195a7f56cfd9312e366d5d61b86e36d46bbe538457ea6eb8dd"}, + {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9a9724a156c8ec6a379869b23ba3323b7ea3600851c91489b871e375f710bc8"}, + {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d8d4732cc5052e92cea2f78b233c2e2a52998ac40cd651f40e398893ad0d06ec"}, + {file = "grpcio-1.66.2-cp38-cp38-win32.whl", hash = "sha256:7b2c86457145ce14c38e5bf6bdc19ef88e66c5fee2c3d83285c5aef026ba93b3"}, + {file = "grpcio-1.66.2-cp38-cp38-win_amd64.whl", hash = "sha256:e88264caad6d8d00e7913996030bac8ad5f26b7411495848cc218bd3a9040b6c"}, + {file = "grpcio-1.66.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:c400ba5675b67025c8a9f48aa846f12a39cf0c44df5cd060e23fda5b30e9359d"}, + {file = "grpcio-1.66.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:66a0cd8ba6512b401d7ed46bb03f4ee455839957f28b8d61e7708056a806ba6a"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:06de8ec0bd71be123eec15b0e0d457474931c2c407869b6c349bd9bed4adbac3"}, + {file = 
"grpcio-1.66.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb57870449dfcfac428afbb5a877829fcb0d6db9d9baa1148705739e9083880e"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b672abf90a964bfde2d0ecbce30f2329a47498ba75ce6f4da35a2f4532b7acbc"}, + {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ad2efdbe90c73b0434cbe64ed372e12414ad03c06262279b104a029d1889d13e"}, + {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c3a99c519f4638e700e9e3f83952e27e2ea10873eecd7935823dab0c1c9250e"}, + {file = "grpcio-1.66.2-cp39-cp39-win32.whl", hash = "sha256:78fa51ebc2d9242c0fc5db0feecc57a9943303b46664ad89921f5079e2e4ada7"}, + {file = "grpcio-1.66.2-cp39-cp39-win_amd64.whl", hash = "sha256:728bdf36a186e7f51da73be7f8d09457a03061be848718d0edf000e709418987"}, + {file = "grpcio-1.66.2.tar.gz", hash = "sha256:563588c587b75c34b928bc428548e5b00ea38c46972181a4d8b75ba7e3f24231"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.66.2)"] + +[[package]] +name = "grpcio-status" +version = "1.66.2" +description = "Status proto mapping for gRPC" +optional = false +python-versions = ">=3.8" +files = [ + {file = "grpcio_status-1.66.2-py3-none-any.whl", hash = "sha256:e5fe189f6897d12aa9cd74408a17ca41e44fad30871cf84f5cbd17bd713d2455"}, + {file = "grpcio_status-1.66.2.tar.gz", hash = "sha256:fb55cbb5c2e67062f7a4d5c99e489d074fb57e98678d5c3c6692a2d74d89e9ae"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.5.5" +grpcio = ">=1.66.2" +protobuf = ">=5.26.1,<6.0dev" + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = 
"sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httplib2" +version = "0.22.0" +description = "A comprehensive HTTP client library." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, + {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, +] + +[package.dependencies] +pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} + +[[package]] +name = "httptools" +version = "0.6.1" +description = "A collection of framework independent HTTP protocol utils." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"}, + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"}, + {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"}, + {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"}, + {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8e216a038d2d52ea13fdd9b9c9c7459fb80d78302b257828285eca1c773b99b3"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3e802e0b2378ade99cd666b5bffb8b2a7cc8f3d28988685dc300469ea8dd86cb"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd3e488b447046e386a30f07af05f9b38d3d368d1f7b4d8f7e10af85393db97"}, + {file = 
"httptools-0.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe467eb086d80217b7584e61313ebadc8d187a4d95bb62031b7bab4b205c3ba3"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3c3b214ce057c54675b00108ac42bacf2ab8f85c58e3f324a4e963bbc46424f4"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ae5b97f690badd2ca27cbf668494ee1b6d34cf1c464271ef7bfa9ca6b83ffaf"}, + {file = "httptools-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:405784577ba6540fa7d6ff49e37daf104e04f4b4ff2d1ac0469eaa6a20fde084"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:95fb92dd3649f9cb139e9c56604cc2d7c7bf0fc2e7c8d7fbd58f96e35eddd2a3"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dcbab042cc3ef272adc11220517278519adf8f53fd3056d0e68f0a6f891ba94e"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf2372e98406efb42e93bfe10f2948e467edfd792b015f1b4ecd897903d3e8d"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678fcbae74477a17d103b7cae78b74800d795d702083867ce160fc202104d0da"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e0b281cf5a125c35f7f6722b65d8542d2e57331be573e9e88bc8b0115c4a7a81"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:95658c342529bba4e1d3d2b1a874db16c7cca435e8827422154c9da76ac4e13a"}, + {file = "httptools-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ebaec1bf683e4bf5e9fbb49b8cc36da482033596a415b3e4ebab5a4c0d7ec5e"}, + {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"}, +] + +[package.extras] +test = ["Cython (>=0.29.24,<0.30.0)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next 
generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "huggingface-hub" +version = "0.25.1" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "huggingface_hub-0.25.1-py3-none-any.whl", hash = "sha256:a5158ded931b3188f54ea9028097312cb0acd50bffaaa2612014c3c526b44972"}, + {file = "huggingface_hub-0.25.1.tar.gz", hash = "sha256:9ff7cb327343211fbd06e2b149b8f362fd1e389454f3f14c6db75a4999ee20ff"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", 
"pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +inference = ["aiohttp", "minijinja (>=1.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "intel-openmp" +version = "2021.4.0" +description = "Intel OpenMP* Runtime Library" +optional = false +python-versions = "*" +files = [ + {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = 
"sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, +] + +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jiter" +version = "0.5.0" +description = "Fast iterable JSON parser." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"}, + {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc0d5b8b3dd12e91dd184b87273f864b363dfabc90ef29a1092d269f18c7e28"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c22541f0b672f4d741382a97c65609332a783501551445ab2df137ada01e019e"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63314832e302cc10d8dfbda0333a384bf4bcfce80d65fe99b0f3c0da8945a91a"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a25fbd8a5a58061e433d6fae6d5298777c0814a8bcefa1e5ecfff20c594bd749"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503b2c27d87dfff5ab717a8200fbbcf4714516c9d85558048b1fc14d2de7d8dc"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d1f3d27cce923713933a844872d213d244e09b53ec99b7a7fdf73d543529d6d"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c95980207b3998f2c3b3098f357994d3fd7661121f30669ca7cb945f09510a87"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:afa66939d834b0ce063f57d9895e8036ffc41c4bd90e4a99631e5f261d9b518e"}, + {file = "jiter-0.5.0-cp310-none-win32.whl", hash = "sha256:f16ca8f10e62f25fd81d5310e852df6649af17824146ca74647a018424ddeccf"}, + {file = "jiter-0.5.0-cp310-none-win_amd64.whl", hash = "sha256:b2950e4798e82dd9176935ef6a55cf6a448b5c71515a556da3f6b811a7844f1e"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646"}, + {file = "jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb"}, + {file = "jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338"}, + {file = "jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4"}, + {file = "jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f04bc2fc50dc77be9d10f73fcc4e39346402ffe21726ff41028f36e179b587e6"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6f433a4169ad22fcb550b11179bb2b4fd405de9b982601914ef448390b2954f3"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ad4a6398c85d3a20067e6c69890ca01f68659da94d74c800298581724e426c7e"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6baa88334e7af3f4d7a5c66c3a63808e5efbc3698a1c57626541ddd22f8e4fbf"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ece0a115c05efca597c6d938f88c9357c843f8c245dbbb53361a1c01afd7148"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:335942557162ad372cc367ffaf93217117401bf930483b4b3ebdb1223dbddfa7"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649b0ee97a6e6da174bffcb3c8c051a5935d7d4f2f52ea1583b5b3e7822fbf14"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4be354c5de82157886ca7f5925dbda369b77344b4b4adf2723079715f823989"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5206144578831a6de278a38896864ded4ed96af66e1e63ec5dd7f4a1fce38a3a"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8120c60f8121ac3d6f072b97ef0e71770cc72b3c23084c72c4189428b1b1d3b6"}, + {file = "jiter-0.5.0-cp38-none-win32.whl", hash = "sha256:6f1223f88b6d76b519cb033a4d3687ca157c272ec5d6015c322fc5b3074d8a5e"}, + {file = "jiter-0.5.0-cp38-none-win_amd64.whl", hash = "sha256:c59614b225d9f434ea8fc0d0bec51ef5fa8c83679afedc0433905994fb36d631"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0af3838cfb7e6afee3f00dc66fa24695199e20ba87df26e942820345b0afc566"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:550b11d669600dbc342364fd4adbe987f14d0bbedaf06feb1b983383dcc4b961"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:489875bf1a0ffb3cb38a727b01e6673f0f2e395b2aad3c9387f94187cb214bbf"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:b250ca2594f5599ca82ba7e68785a669b352156260c5362ea1b4e04a0f3e2389"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ea18e01f785c6667ca15407cd6dabbe029d77474d53595a189bdc813347218e"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:462a52be85b53cd9bffd94e2d788a09984274fe6cebb893d6287e1c296d50653"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92cc68b48d50fa472c79c93965e19bd48f40f207cb557a8346daa020d6ba973b"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c834133e59a8521bc87ebcad773608c6fa6ab5c7a022df24a45030826cf10bc"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab3a71ff31cf2d45cb216dc37af522d335211f3a972d2fe14ea99073de6cb104"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cccd3af9c48ac500c95e1bcbc498020c87e1781ff0345dd371462d67b76643eb"}, + {file = "jiter-0.5.0-cp39-none-win32.whl", hash = "sha256:368084d8d5c4fc40ff7c3cc513c4f73e02c85f6009217922d0823a48ee7adf61"}, + {file = "jiter-0.5.0-cp39-none-win_amd64.whl", hash = "sha256:ce03f7b4129eb72f1687fa11300fbf677b02990618428934662406d2a76742a1"}, + {file = "jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a"}, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "joblib" +version = "1.4.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, + {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, +] + +[[package]] +name = "llvmlite" +version = "0.43.0" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.9" +files = [ + {file = "llvmlite-0.43.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a289af9a1687c6cf463478f0fa8e8aa3b6fb813317b0d70bf1ed0759eab6f761"}, + {file = "llvmlite-0.43.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d4fd101f571a31acb1559ae1af30f30b1dc4b3186669f92ad780e17c81e91bc"}, + {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d434ec7e2ce3cc8f452d1cd9a28591745de022f931d67be688a737320dfcead"}, + {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6912a87782acdff6eb8bf01675ed01d60ca1f2551f8176a300a886f09e836a6a"}, + {file = "llvmlite-0.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:14f0e4bf2fd2d9a75a3534111e8ebeb08eda2f33e9bdd6dfa13282afacdde0ed"}, + {file = "llvmlite-0.43.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8d0618cb9bfe40ac38a9633f2493d4d4e9fcc2f438d39a4e854f39cc0f5f98"}, + {file = "llvmlite-0.43.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0a9a1a39d4bf3517f2af9d23d479b4175ead205c592ceeb8b89af48a327ea57"}, + {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1da416ab53e4f7f3bc8d4eeba36d801cc1894b9fbfbf2022b29b6bad34a7df2"}, + {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977525a1e5f4059316b183fb4fd34fa858c9eade31f165427a3977c95e3ee749"}, + {file = "llvmlite-0.43.0-cp311-cp311-win_amd64.whl", hash = "sha256:d5bd550001d26450bd90777736c69d68c487d17bf371438f975229b2b8241a91"}, + {file = 
"llvmlite-0.43.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f99b600aa7f65235a5a05d0b9a9f31150c390f31261f2a0ba678e26823ec38f7"}, + {file = "llvmlite-0.43.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:35d80d61d0cda2d767f72de99450766250560399edc309da16937b93d3b676e7"}, + {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eccce86bba940bae0d8d48ed925f21dbb813519169246e2ab292b5092aba121f"}, + {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6509e1507ca0760787a199d19439cc887bfd82226f5af746d6977bd9f66844"}, + {file = "llvmlite-0.43.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a2872ee80dcf6b5dbdc838763d26554c2a18aa833d31a2635bff16aafefb9c9"}, + {file = "llvmlite-0.43.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cd2a7376f7b3367019b664c21f0c61766219faa3b03731113ead75107f3b66c"}, + {file = "llvmlite-0.43.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18e9953c748b105668487b7c81a3e97b046d8abf95c4ddc0cd3c94f4e4651ae8"}, + {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74937acd22dc11b33946b67dca7680e6d103d6e90eeaaaf932603bec6fe7b03a"}, + {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9efc739cc6ed760f795806f67889923f7274276f0eb45092a1473e40d9b867"}, + {file = "llvmlite-0.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:47e147cdda9037f94b399bf03bfd8a6b6b1f2f90be94a454e3386f006455a9b4"}, + {file = "llvmlite-0.43.0.tar.gz", hash = "sha256:ae2b5b5c3ef67354824fb75517c8db5fbe93bc02cd9671f3c62271626bc041d5"}, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = 
"MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + 
+[[package]] +name = "mkl" +version = "2021.4.0" +description = "Intel® oneAPI Math Kernel Library" +optional = false +python-versions = "*" +files = [ + {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"}, + {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"}, + {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"}, +] + +[package.dependencies] +intel-openmp = "==2021.*" +tbb = "==2021.*" + +[[package]] +name = "more-itertools" +version = "10.5.0" +description = "More routines for operating on iterables, beyond itertools" +optional = false +python-versions = ">=3.8" +files = [ + {file = "more-itertools-10.5.0.tar.gz", hash = "sha256:5482bfef7849c25dc3c6dd53a6173ae4795da2a41a80faea6700d9f5846c5da6"}, + {file = "more_itertools-10.5.0-py3-none-any.whl", hash = "sha256:037b0d3203ce90cca8ab1defbbdac29d5f993fc20131f3664dc8d6acfa872aef"}, +] + +[[package]] +name = "motor" +version = "3.6.0" +description = "Non-blocking MongoDB driver for Tornado or asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "motor-3.6.0-py3-none-any.whl", hash = "sha256:9f07ed96f1754963d4386944e1b52d403a5350c687edc60da487d66f98dbf894"}, + {file = "motor-3.6.0.tar.gz", hash = "sha256:0ef7f520213e852bf0eac306adf631aabe849227d8aec900a2612512fb9c5b8d"}, +] + +[package.dependencies] +pymongo = ">=4.9,<4.10" + +[package.extras] +aws = ["pymongo[aws] (>=4.5,<5)"] +docs = ["aiohttp", "furo (==2024.8.6)", "readthedocs-sphinx-search 
(>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-rtd-theme (>=2,<3)", "tornado"] +encryption = ["pymongo[encryption] (>=4.5,<5)"] +gssapi = ["pymongo[gssapi] (>=4.5,<5)"] +ocsp = ["pymongo[ocsp] (>=4.5,<5)"] +snappy = ["pymongo[snappy] (>=4.5,<5)"] +test = ["aiohttp (>=3.8.7)", "cffi (>=1.17.0rc1)", "mockupdb", "pymongo[encryption] (>=4.5,<5)", "pytest (>=7)", "pytest-asyncio", "tornado (>=5)"] +zstd = ["pymongo[zstd] (>=4.5,<5)"] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "msgpack" +version = "1.1.0" +description = "MessagePack serializer" +optional = false +python-versions = ">=3.8" +files = [ + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"}, + {file = 
"msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"}, + {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"}, + {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"}, + {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"}, + {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, + {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, + {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"}, + {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"}, + {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"}, + {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"}, + {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"}, + {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"}, + {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"}, + {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, +] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash 
= "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = 
"multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = 
"multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[[package]] +name = "networkx" +version = "3.3" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.10" +files = [ + {file = "networkx-3.3-py3-none-any.whl", hash = 
"sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2"}, + {file = "networkx-3.3.tar.gz", hash = "sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9"}, +] + +[package.extras] +default = ["matplotlib (>=3.6)", "numpy (>=1.23)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.5)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["myst-nb (>=1.0)", "numpydoc (>=1.7)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=2.0)", "pygraphviz (>=1.12)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "nltk" +version = "3.9.1" +description = "Natural Language Toolkit" +optional = false +python-versions = ">=3.8" +files = [ + {file = "nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1"}, + {file = "nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868"}, +] + +[package.dependencies] +click = "*" +joblib = "*" +regex = ">=2021.8.3" +tqdm = "*" + +[package.extras] +all = ["matplotlib", "numpy", "pyparsing", "python-crfsuite", "requests", "scikit-learn", "scipy", "twython"] +corenlp = ["requests"] +machine-learning = ["numpy", "python-crfsuite", "scikit-learn", "scipy"] +plot = ["matplotlib"] +tgrep = ["pyparsing"] +twitter = ["twython"] + +[[package]] +name = "numba" +version = "0.60.0" +description = "compiling Python code using LLVM" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numba-0.60.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d761de835cd38fb400d2c26bb103a2726f548dc30368853121d66201672e651"}, + {file = "numba-0.60.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:159e618ef213fba758837f9837fb402bbe65326e60ba0633dbe6c7f274d42c1b"}, + {file = "numba-0.60.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", 
hash = "sha256:1527dc578b95c7c4ff248792ec33d097ba6bef9eda466c948b68dfc995c25781"}, + {file = "numba-0.60.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe0b28abb8d70f8160798f4de9d486143200f34458d34c4a214114e445d7124e"}, + {file = "numba-0.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:19407ced081d7e2e4b8d8c36aa57b7452e0283871c296e12d798852bc7d7f198"}, + {file = "numba-0.60.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a17b70fc9e380ee29c42717e8cc0bfaa5556c416d94f9aa96ba13acb41bdece8"}, + {file = "numba-0.60.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fb02b344a2a80efa6f677aa5c40cd5dd452e1b35f8d1c2af0dfd9ada9978e4b"}, + {file = "numba-0.60.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f4fde652ea604ea3c86508a3fb31556a6157b2c76c8b51b1d45eb40c8598703"}, + {file = "numba-0.60.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4142d7ac0210cc86432b818338a2bc368dc773a2f5cf1e32ff7c5b378bd63ee8"}, + {file = "numba-0.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:cac02c041e9b5bc8cf8f2034ff6f0dbafccd1ae9590dc146b3a02a45e53af4e2"}, + {file = "numba-0.60.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7da4098db31182fc5ffe4bc42c6f24cd7d1cb8a14b59fd755bfee32e34b8404"}, + {file = "numba-0.60.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38d6ea4c1f56417076ecf8fc327c831ae793282e0ff51080c5094cb726507b1c"}, + {file = "numba-0.60.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:62908d29fb6a3229c242e981ca27e32a6e606cc253fc9e8faeb0e48760de241e"}, + {file = "numba-0.60.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0ebaa91538e996f708f1ab30ef4d3ddc344b64b5227b67a57aa74f401bb68b9d"}, + {file = "numba-0.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:f75262e8fe7fa96db1dca93d53a194a38c46da28b112b8a4aca168f0df860347"}, + {file = "numba-0.60.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:01ef4cd7d83abe087d644eaa3d95831b777aa21d441a23703d649e06b8e06b74"}, + {file = "numba-0.60.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:819a3dfd4630d95fd574036f99e47212a1af41cbcb019bf8afac63ff56834449"}, + {file = "numba-0.60.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b983bd6ad82fe868493012487f34eae8bf7dd94654951404114f23c3466d34b"}, + {file = "numba-0.60.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c151748cd269ddeab66334bd754817ffc0cabd9433acb0f551697e5151917d25"}, + {file = "numba-0.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:3031547a015710140e8c87226b4cfe927cac199835e5bf7d4fe5cb64e814e3ab"}, + {file = "numba-0.60.0.tar.gz", hash = "sha256:5df6158e5584eece5fc83294b949fd30b9f1125df7708862205217e068aabf16"}, +] + +[package.dependencies] +llvmlite = "==0.43.*" +numpy = ">=1.22,<2.1" + +[[package]] +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = 
"numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = 
"numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." 
+optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.20.5" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = 
false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.6.68" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_aarch64.whl", hash = "sha256:b3fd0779845f68b92063ab1393abab1ed0a23412fc520df79a8190d098b5cd6b"}, + {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_x86_64.whl", hash = "sha256:125a6c2a44e96386dda634e13d944e60b07a0402d391a070e8fb4104b34ea1ab"}, + {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-win_amd64.whl", hash = "sha256:a55744c98d70317c5e23db14866a8cc2b733f7324509e941fc96276f9f37801d"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, +] + +[[package]] +name = "openai" +version = "1.50.2" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.7.1" +files = [ + {file = "openai-1.50.2-py3-none-any.whl", hash = "sha256:822dd2051baa3393d0d5406990611975dd6f533020dc9375a34d4fe67e8b75f7"}, + {file = "openai-1.50.2.tar.gz", hash = "sha256:3987ae027152fc8bea745d60b02c8f4c4a76e1b5c70e73565fa556db6f78c9e6"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = 
">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.11,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] + +[[package]] +name = "openai-whisper" +version = "20231117" +description = "Robust Speech Recognition via Large-Scale Weak Supervision" +optional = false +python-versions = ">=3.8" +files = [ + {file = "openai-whisper-20231117.tar.gz", hash = "sha256:7af424181436f1800cc0b7d75cf40ede34e9ddf1ba4983a910832fcf4aade4a4"}, +] + +[package.dependencies] +more-itertools = "*" +numba = "*" +numpy = "*" +tiktoken = "*" +torch = "*" +tqdm = "*" +triton = ">=2.0.0,<3" + +[package.extras] +dev = ["black", "flake8", "isort", "pytest", "scipy"] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pandas" +version = "2.2.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = 
"pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = 
"pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = 
"pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, +] +python-dateutil = ">=2.8.2" +pytz = 
">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy 
(>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pdfminer-six" +version = "20231228" +description = "PDF parser and analyzer" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pdfminer.six-20231228-py3-none-any.whl", hash = "sha256:e8d3c3310e6fbc1fe414090123ab01351634b4ecb021232206c4c9a8ca3e3b8f"}, + {file = "pdfminer.six-20231228.tar.gz", hash = "sha256:6004da3ad1a7a4d45930cb950393df89b068e73be365a6ff64a838d37bcb08c4"}, +] + +[package.dependencies] +charset-normalizer = ">=2.0.0" +cryptography = ">=36.0.0" + +[package.extras] +dev = ["black", "mypy (==0.931)", "nox", "pytest"] +docs = ["sphinx", "sphinx-argparse"] +image = ["Pillow"] + +[[package]] +name = "pdfplumber" +version = "0.11.3" +description = "Plumb a PDF for detailed information about each char, rectangle, and line." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pdfplumber-0.11.3-py3-none-any.whl", hash = "sha256:4f3e13795d18b2e53dfc4cd667a3bc2478cd6975fc9a188881376265d599c5a6"}, + {file = "pdfplumber-0.11.3.tar.gz", hash = "sha256:43a3cac33d2135ce00ac59ad5bc3813a33afe0f513d9284c0e8cb6e447ed6e53"}, +] + +[package.dependencies] +"pdfminer.six" = "20231228" +Pillow = ">=9.1" +pypdfium2 = ">=4.18.0" + +[[package]] +name = "pillow" +version = "10.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + 
{file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = 
"pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = 
"pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + +[[package]] +name = "proto-plus" 
+version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "5.28.2" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"}, + {file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"}, + {file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"}, + {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"}, + {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"}, + {file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"}, + {file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"}, + {file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"}, + {file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"}, + {file = "protobuf-5.28.2-py3-none-any.whl", hash = 
"sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"}, + {file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"}, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pydantic" +version = "2.9.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" 
+pydantic-core = "2.23.4" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.23.4" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = 
"pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = 
"pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = 
"pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = 
"pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + 
{file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = 
"pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.18.0" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pyjwt" +version = "2.9.0" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"}, + {file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"}, +] + +[package.dependencies] +cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pymongo" +version = "4.9.1" +description = "Python driver for MongoDB " +optional = false +python-versions = ">=3.8" +files = [ + {file = "pymongo-4.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dc3d070d746ab79e9b393a5c236df20e56607389af2b79bf1bfe9a841117558e"}, + {file = "pymongo-4.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fe709d05654c12fc513617c8d5c8d05b7e9cf1d5d94ada68add4e89530c867d2"}, + {file = "pymongo-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa4493f304b33c5d2ecee3055c98889ac6724d56f5f922d47420a45d0d4099c9"}, + {file = "pymongo-4.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f8e8b8deba6a4bff3dd5421071083219521c74d2acae0322de5c06f1a66c56af"}, + {file = "pymongo-4.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3645aff8419ca60f9ccd08966b2f6b0d78053f9f98a814d025426f1d874c19a"}, + {file = "pymongo-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51dbc6251c6783dfcc7d657c346986d8bad7210989b2fe15de16db5204a8e7ae"}, + {file = "pymongo-4.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d7aa9cc2d92e73bdb036c578ba019da94ea165eb147e691cd910a6fab7ce3b7"}, + {file = "pymongo-4.9.1-cp310-cp310-win32.whl", hash = "sha256:8b632e01617f2608880f7b9926f54a5f5ebb51631996e0540fff7fc7980663c9"}, + {file = "pymongo-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:f05e34d401be871d7c87cb10727d49315444e4ded07ff876a595e4c23b7436da"}, + {file = "pymongo-4.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6bb3d5282278594753089dc7da48bfae4a7f337a2dd4d397eabb591c649e58d0"}, + {file = "pymongo-4.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f0d5258bc85a4e6b5bcae8160628168e71ec4625a58ceb53327c3280a0b6914"}, + {file = "pymongo-4.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96462fb2175f740701d229f52018ea6e4adc4148c4112e6628bb359dd534a3df"}, + {file = "pymongo-4.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:286fb275267f0293364ba579f6354452599161f1902ad411061c7f744ab88328"}, + {file = "pymongo-4.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cddb51cead9700c4dccc916952bc0321b8d766bf782d374bfa0e93ef47c1d20"}, + {file = "pymongo-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d79f20f9c7cbc1c708fb80b648b6fbd3220fd3437a9bd6017c1eb592e03b361"}, + {file = "pymongo-4.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:dd3352eaf578f8e9bdea7a5692910eedad1e8680f60726fc70e99c8af51a5449"}, + {file = "pymongo-4.9.1-cp311-cp311-win32.whl", hash = "sha256:ea3f0196e7c311b9944a609ac175bd91ab97952164a1246716fdd38d53ca3bcc"}, + {file = "pymongo-4.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:b4c793db8457c856f333f396798470b9bfe405e17c307d581532c74cec70150c"}, + {file = "pymongo-4.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:47b4896544095d172c366dd4d4ea1da6b0ab1a77d8416897cc1801e2421b1e67"}, + {file = "pymongo-4.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fbb1c7dfcf6c44e9e1928290631c7603817991cdf570691c9e15fca594918435"}, + {file = "pymongo-4.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7689da1d1b444284e4ea9ab2eb64a15307b6b795918c0f3cd7774dd1d8a7556"}, + {file = "pymongo-4.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f962d74201c772555f7a78792fed820a5ea76db5c7ee6cf43748e411b44e430"}, + {file = "pymongo-4.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08fbab69f3fb6f8088c81f4c4a8abd84a99c132034f5e27e47f894bbcb6bf439"}, + {file = "pymongo-4.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4327c0d9bd616b8289691360f2d4a09a72fe35479795832eae0d4ff78af53923"}, + {file = "pymongo-4.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34e4993ae78be56f9e27a141168a1ab78253576fa3e893fa335a719ce204c3ef"}, + {file = "pymongo-4.9.1-cp312-cp312-win32.whl", hash = "sha256:e1f346811d4a2369f88ab7a6f886fa9c3bbc9ed4e4f4a3becca8717a73d465cb"}, + {file = "pymongo-4.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:a2b12c74cfd90147babb77f9728646bcedfdbd2bd2a5b4130a00e3a0af1a3d34"}, + {file = "pymongo-4.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a40ea8bc9cffb61c5c9c426c430d22235e085e610ee81ae075ddf51f12f76236"}, + {file = "pymongo-4.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:75d5974f874acdb2f125bdbe785045b23a39ecce1d3143dd5712800c7b6d25eb"}, + {file = "pymongo-4.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f23a046531030318622414f21198e232cf93c5640da9a80b45596a059c8cc090"}, + {file = "pymongo-4.9.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91b1a92214c3912af5467f77c2f6435cd76f6de64c70cba7bb4ee43eba7f459e"}, + {file = "pymongo-4.9.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a846423c4535428f69a90a1451df3718bc59f0c4ab685b9e96d3071951e0be4"}, + {file = "pymongo-4.9.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d476d91a5c9e6c37bc8ec3fb294e1c01d95736ccf01a59bb1540fe2f710f826e"}, + {file = "pymongo-4.9.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:172d8ba0f567e351a18765db23dab7dbcfdffd91a8788d90d46b350f80a40781"}, + {file = "pymongo-4.9.1-cp313-cp313-win32.whl", hash = "sha256:95418e334629440f70fe5ceeefc6cbbd50defb566901c8d68179ffbaec8d5f01"}, + {file = "pymongo-4.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:1dfd2aa30174d36a3ef1dae4ee4c89710c2d65cac52ce6e13f17c710edbd61cf"}, + {file = "pymongo-4.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c4204fad54830a3173a5c939cd052d0561fba03dba7e0ff6852fd631f3314aa4"}, + {file = "pymongo-4.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:375765ec81b1f0a26d08928afea0c3dff897c36080a090be53fc7b70cc51d497"}, + {file = "pymongo-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d1b959a3dda0775d9111622ee47ad47772aed3a9da2e7d5f2f513fa68175dea"}, + {file = "pymongo-4.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42c19d2b094cdd0ead7dbb38860bbe8268c140334ce55d8b39204ddb4ebd4904"}, + {file = "pymongo-4.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1fac1def9e9073f1c80198c99f0ec39c2528236c8912d96d7fd3b0237f4c523a"}, + {file = "pymongo-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b347052d510989d1f52b8553b31297f21cf74bd9f6aed71ee84e563492f4ff17"}, + {file = "pymongo-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b4b961fce213f2bcdc92268f85111a3668c61b9b4d4e7ece27dce3a137cfcbd"}, + {file = "pymongo-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0b10cf51ec14a487c94709d294c00e1fb6a0a4c38cdc3acfb2ced5ef60972a0"}, + {file = "pymongo-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:679b8d55854da7c7fdb82aa5e092ab4de0144daf6758defed8ab00ff9ce05360"}, + {file = "pymongo-4.9.1-cp38-cp38-win32.whl", hash = "sha256:432ad395d2233056b042ccc73234e7136aa65d944d6bd8b5138394bd38aaff79"}, + {file = "pymongo-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:9fbe9fad27619ac4cfda5df0ade26a99906da7dfe7b01deddc25997eb1804e4c"}, + {file = "pymongo-4.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:99b611ff75b5d9e17183dcf9584a7b04f9db07e51a162f23ea05e485e0735c0a"}, + {file = "pymongo-4.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8089003a99127f917bdbeec177d41cef019cda8ec70534c1018cb60aacd23c2a"}, + {file = "pymongo-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d78adf25967c06298c7e488f4cfab79a390fc32c2b1d428613976f99031603d"}, + {file = "pymongo-4.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56877cfcdf7dfc5c6408e4551ec0d6d65ebbca4d744a0bc90400f09ef6bbcc8a"}, + {file = "pymongo-4.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d2efe559d0d96bc0b74b3ff76701ad6f6e1a65f6581b573dcacc29158131c8"}, + {file = "pymongo-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f838f613e74b4dad8ace0d90f42346005bece4eda5bf6d389cfadb8322d39316"}, + {file = 
"pymongo-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db5b299e11284f8d82ce2983d8e19fcc28f98f902a179709ef1982b4cca6f8b8"}, + {file = "pymongo-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b23211c031b45d0f32de83ab7d77f9c26f1025c2d2c91463a5d8594a16103655"}, + {file = "pymongo-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:687cf70e096381bc65b4273a6a9319617618f7ace65caffc356e1099c4a68511"}, + {file = "pymongo-4.9.1-cp39-cp39-win32.whl", hash = "sha256:e02b03e3815b80a63e773e4c32aed3cf5633d406f376477be74550295c211256"}, + {file = "pymongo-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:0492ef43f3342354cf581712e431621c221f60c877ebded84e3f3e53b71bbbe0"}, + {file = "pymongo-4.9.1.tar.gz", hash = "sha256:b7f2d34390acf60e229c30037d1473fcf69f4536cd7f48f6f78c0c931c61c505"}, +] + +[package.dependencies] +dnspython = ">=1.16.0,<3.0.0" + +[package.extras] +aws = ["pymongo-auth-aws (>=1.1.0,<2.0.0)"] +docs = ["furo (==2023.9.10)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-autobuild (>=2020.9.1)", "sphinx-rtd-theme (>=2,<3)", "sphinxcontrib-shellcheck (>=1,<2)"] +encryption = ["certifi", "pymongo-auth-aws (>=1.1.0,<2.0.0)", "pymongocrypt (>=1.10.0,<2.0.0)"] +gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] +ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] +snappy = ["python-snappy"] +test = ["pytest (>=8.2)", "pytest-asyncio (>=0.24.0)"] +zstd = ["zstandard"] + +[[package]] +name = "pypandoc" +version = "1.13" +description = "Thin wrapper for pandoc." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "pypandoc-1.13-py3-none-any.whl", hash = "sha256:4c7d71bf2f1ed122aac287113b5c4d537a33bbc3c1df5aed11a7d4a7ac074681"}, + {file = "pypandoc-1.13.tar.gz", hash = "sha256:31652073c7960c2b03570bd1e94f602ca9bc3e70099df5ead4cea98ff5151c1e"}, +] + +[[package]] +name = "pyparsing" +version = "3.1.4" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, + {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pypdfium2" +version = "4.30.0" +description = "Python bindings to PDFium" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pypdfium2-4.30.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:b33ceded0b6ff5b2b93bc1fe0ad4b71aa6b7e7bd5875f1ca0cdfb6ba6ac01aab"}, + {file = "pypdfium2-4.30.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:4e55689f4b06e2d2406203e771f78789bd4f190731b5d57383d05cf611d829de"}, + {file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e6e50f5ce7f65a40a33d7c9edc39f23140c57e37144c2d6d9e9262a2a854854"}, + {file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3d0dd3ecaffd0b6dbda3da663220e705cb563918249bda26058c6036752ba3a2"}, + {file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc3bf29b0db8c76cdfaac1ec1cde8edf211a7de7390fbf8934ad2aa9b4d6dfad"}, + {file = "pypdfium2-4.30.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1f78d2189e0ddf9ac2b7a9b9bd4f0c66f54d1389ff6c17e9fd9dc034d06eb3f"}, + {file = 
"pypdfium2-4.30.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:5eda3641a2da7a7a0b2f4dbd71d706401a656fea521b6b6faa0675b15d31a163"}, + {file = "pypdfium2-4.30.0-py3-none-musllinux_1_1_i686.whl", hash = "sha256:0dfa61421b5eb68e1188b0b2231e7ba35735aef2d867d86e48ee6cab6975195e"}, + {file = "pypdfium2-4.30.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:f33bd79e7a09d5f7acca3b0b69ff6c8a488869a7fab48fdf400fec6e20b9c8be"}, + {file = "pypdfium2-4.30.0-py3-none-win32.whl", hash = "sha256:ee2410f15d576d976c2ab2558c93d392a25fb9f6635e8dd0a8a3a5241b275e0e"}, + {file = "pypdfium2-4.30.0-py3-none-win_amd64.whl", hash = "sha256:90dbb2ac07be53219f56be09961eb95cf2473f834d01a42d901d13ccfad64b4c"}, + {file = "pypdfium2-4.30.0-py3-none-win_arm64.whl", hash = "sha256:119b2969a6d6b1e8d55e99caaf05290294f2d0fe49c12a3f17102d01c441bd29"}, + {file = "pypdfium2-4.30.0.tar.gz", hash = "sha256:48b5b7e5566665bc1015b9d69c1ebabe21f6aee468b509531c3c8318eeee2e16"}, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click 
(>=5.0)"] + +[[package]] +name = "python-multipart" +version = "0.0.9" +description = "A streaming multipart parser for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, + {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, +] + +[package.extras] +dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = 
"PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = 
"sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file 
= "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "regex" +version = "2024.9.11" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, + {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, + {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, + {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, + {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, + {file = 
"regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, + {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, + {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, + {file = 
"regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, + {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, + {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, + 
{file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, + {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, + {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, + {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, + {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, + {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, + {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, + {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "13.8.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.8.1-py3-none-any.whl", hash = "sha256:1760a3c0848469b97b558fc61c85233e3dafb69c7a071b4d60c38099d3cd4c06"}, + {file = "rich-13.8.1.tar.gz", hash = "sha256:8260cda28e3db6bf04d2d1ef4dbc03ba80a824c88b0e7668a0f23126a424844a"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "s3transfer" +version = "0.10.2" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">=3.8" +files = [ + {file = "s3transfer-0.10.2-py3-none-any.whl", hash = "sha256:eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69"}, + {file = "s3transfer-0.10.2.tar.gz", hash = "sha256:0711534e9356d3cc692fdde846b4a1e4b0cb6519971860796e6bc4c7aea00ef6"}, +] + 
+[package.dependencies] +botocore = ">=1.33.2,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] + +[[package]] +name = "safetensors" +version = "0.4.5" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "safetensors-0.4.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a63eaccd22243c67e4f2b1c3e258b257effc4acd78f3b9d397edc8cf8f1298a7"}, + {file = "safetensors-0.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:23fc9b4ec7b602915cbb4ec1a7c1ad96d2743c322f20ab709e2c35d1b66dad27"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6885016f34bef80ea1085b7e99b3c1f92cb1be78a49839203060f67b40aee761"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:133620f443450429322f238fda74d512c4008621227fccf2f8cf4a76206fea7c"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4fb3e0609ec12d2a77e882f07cced530b8262027f64b75d399f1504ffec0ba56"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0f1dd769f064adc33831f5e97ad07babbd728427f98e3e1db6902e369122737"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6d156bdb26732feada84f9388a9f135528c1ef5b05fae153da365ad4319c4c5"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e347d77e2c77eb7624400ccd09bed69d35c0332f417ce8c048d404a096c593b"}, + {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9f556eea3aec1d3d955403159fe2123ddd68e880f83954ee9b4a3f2e15e716b6"}, + {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9483f42be3b6bc8ff77dd67302de8ae411c4db39f7224dec66b0eb95822e4163"}, + {file = "safetensors-0.4.5-cp310-none-win32.whl", hash = 
"sha256:7389129c03fadd1ccc37fd1ebbc773f2b031483b04700923c3511d2a939252cc"}, + {file = "safetensors-0.4.5-cp310-none-win_amd64.whl", hash = "sha256:e98ef5524f8b6620c8cdef97220c0b6a5c1cef69852fcd2f174bb96c2bb316b1"}, + {file = "safetensors-0.4.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:21f848d7aebd5954f92538552d6d75f7c1b4500f51664078b5b49720d180e47c"}, + {file = "safetensors-0.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb07000b19d41e35eecef9a454f31a8b4718a185293f0d0b1c4b61d6e4487971"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09dedf7c2fda934ee68143202acff6e9e8eb0ddeeb4cfc24182bef999efa9f42"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:59b77e4b7a708988d84f26de3ebead61ef1659c73dcbc9946c18f3b1786d2688"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d3bc83e14d67adc2e9387e511097f254bd1b43c3020440e708858c684cbac68"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39371fc551c1072976073ab258c3119395294cf49cdc1f8476794627de3130df"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6c19feda32b931cae0acd42748a670bdf56bee6476a046af20181ad3fee4090"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a659467495de201e2f282063808a41170448c78bada1e62707b07a27b05e6943"}, + {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bad5e4b2476949bcd638a89f71b6916fa9a5cae5c1ae7eede337aca2100435c0"}, + {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a3a315a6d0054bc6889a17f5668a73f94f7fe55121ff59e0a199e3519c08565f"}, + {file = "safetensors-0.4.5-cp311-none-win32.whl", hash = "sha256:a01e232e6d3d5cf8b1667bc3b657a77bdab73f0743c26c1d3c5dd7ce86bd3a92"}, + {file = 
"safetensors-0.4.5-cp311-none-win_amd64.whl", hash = "sha256:cbd39cae1ad3e3ef6f63a6f07296b080c951f24cec60188378e43d3713000c04"}, + {file = "safetensors-0.4.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:473300314e026bd1043cef391bb16a8689453363381561b8a3e443870937cc1e"}, + {file = "safetensors-0.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:801183a0f76dc647f51a2d9141ad341f9665602a7899a693207a82fb102cc53e"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1524b54246e422ad6fb6aea1ac71edeeb77666efa67230e1faf6999df9b2e27f"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3139098e3e8b2ad7afbca96d30ad29157b50c90861084e69fcb80dec7430461"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65573dc35be9059770808e276b017256fa30058802c29e1038eb1c00028502ea"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd33da8e9407559f8779c82a0448e2133737f922d71f884da27184549416bfed"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3685ce7ed036f916316b567152482b7e959dc754fcc4a8342333d222e05f407c"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dde2bf390d25f67908278d6f5d59e46211ef98e44108727084d4637ee70ab4f1"}, + {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7469d70d3de970b1698d47c11ebbf296a308702cbaae7fcb993944751cf985f4"}, + {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a6ba28118636a130ccbb968bc33d4684c48678695dba2590169d5ab03a45646"}, + {file = "safetensors-0.4.5-cp312-none-win32.whl", hash = "sha256:c859c7ed90b0047f58ee27751c8e56951452ed36a67afee1b0a87847d065eec6"}, + {file = "safetensors-0.4.5-cp312-none-win_amd64.whl", hash = 
"sha256:b5a8810ad6a6f933fff6c276eae92c1da217b39b4d8b1bc1c0b8af2d270dc532"}, + {file = "safetensors-0.4.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:25e5f8e2e92a74f05b4ca55686234c32aac19927903792b30ee6d7bd5653d54e"}, + {file = "safetensors-0.4.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:81efb124b58af39fcd684254c645e35692fea81c51627259cdf6d67ff4458916"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:585f1703a518b437f5103aa9cf70e9bd437cb78eea9c51024329e4fb8a3e3679"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b99fbf72e3faf0b2f5f16e5e3458b93b7d0a83984fe8d5364c60aa169f2da89"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b17b299ca9966ca983ecda1c0791a3f07f9ca6ab5ded8ef3d283fff45f6bcd5f"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76ded72f69209c9780fdb23ea89e56d35c54ae6abcdec67ccb22af8e696e449a"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2783956926303dcfeb1de91a4d1204cd4089ab441e622e7caee0642281109db3"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d94581aab8c6b204def4d7320f07534d6ee34cd4855688004a4354e63b639a35"}, + {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:67e1e7cb8678bb1b37ac48ec0df04faf689e2f4e9e81e566b5c63d9f23748523"}, + {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbd280b07e6054ea68b0cb4b16ad9703e7d63cd6890f577cb98acc5354780142"}, + {file = "safetensors-0.4.5-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:77d9b228da8374c7262046a36c1f656ba32a93df6cc51cd4453af932011e77f1"}, + {file = "safetensors-0.4.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = 
"sha256:500cac01d50b301ab7bb192353317035011c5ceeef0fca652f9f43c000bb7f8d"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75331c0c746f03158ded32465b7d0b0e24c5a22121743662a2393439c43a45cf"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670e95fe34e0d591d0529e5e59fd9d3d72bc77b1444fcaa14dccda4f36b5a38b"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:098923e2574ff237c517d6e840acada8e5b311cb1fa226019105ed82e9c3b62f"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ca0902d2648775089fa6a0c8fc9e6390c5f8ee576517d33f9261656f851e3f"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f0032bedc869c56f8d26259fe39cd21c5199cd57f2228d817a0e23e8370af25"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4b15f51b4f8f2a512341d9ce3475cacc19c5fdfc5db1f0e19449e75f95c7dc8"}, + {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f6594d130d0ad933d885c6a7b75c5183cb0e8450f799b80a39eae2b8508955eb"}, + {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:60c828a27e852ded2c85fc0f87bf1ec20e464c5cd4d56ff0e0711855cc2e17f8"}, + {file = "safetensors-0.4.5-cp37-none-win32.whl", hash = "sha256:6d3de65718b86c3eeaa8b73a9c3d123f9307a96bbd7be9698e21e76a56443af5"}, + {file = "safetensors-0.4.5-cp37-none-win_amd64.whl", hash = "sha256:5a2d68a523a4cefd791156a4174189a4114cf0bf9c50ceb89f261600f3b2b81a"}, + {file = "safetensors-0.4.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:e7a97058f96340850da0601a3309f3d29d6191b0702b2da201e54c6e3e44ccf0"}, + {file = "safetensors-0.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:63bfd425e25f5c733f572e2246e08a1c38bd6f2e027d3f7c87e2e43f228d1345"}, + {file = 
"safetensors-0.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3664ac565d0e809b0b929dae7ccd74e4d3273cd0c6d1220c6430035befb678e"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:313514b0b9b73ff4ddfb4edd71860696dbe3c1c9dc4d5cc13dbd74da283d2cbf"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31fa33ee326f750a2f2134a6174773c281d9a266ccd000bd4686d8021f1f3dac"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09566792588d77b68abe53754c9f1308fadd35c9f87be939e22c623eaacbed6b"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309aaec9b66cbf07ad3a2e5cb8a03205663324fea024ba391594423d0f00d9fe"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53946c5813b8f9e26103c5efff4a931cc45d874f45229edd68557ffb35ffb9f8"}, + {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:868f9df9e99ad1e7f38c52194063a982bc88fedc7d05096f4f8160403aaf4bd6"}, + {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9cc9449bd0b0bc538bd5e268221f0c5590bc5c14c1934a6ae359d44410dc68c4"}, + {file = "safetensors-0.4.5-cp38-none-win32.whl", hash = "sha256:83c4f13a9e687335c3928f615cd63a37e3f8ef072a3f2a0599fa09f863fb06a2"}, + {file = "safetensors-0.4.5-cp38-none-win_amd64.whl", hash = "sha256:b98d40a2ffa560653f6274e15b27b3544e8e3713a44627ce268f419f35c49478"}, + {file = "safetensors-0.4.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cf727bb1281d66699bef5683b04d98c894a2803442c490a8d45cd365abfbdeb2"}, + {file = "safetensors-0.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96f1d038c827cdc552d97e71f522e1049fef0542be575421f7684756a748e457"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:139fbee92570ecea774e6344fee908907db79646d00b12c535f66bc78bd5ea2c"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c36302c1c69eebb383775a89645a32b9d266878fab619819ce660309d6176c9b"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d641f5b8149ea98deb5ffcf604d764aad1de38a8285f86771ce1abf8e74c4891"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4db6a61d968de73722b858038c616a1bebd4a86abe2688e46ca0cc2d17558f2"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b75a616e02f21b6f1d5785b20cecbab5e2bd3f6358a90e8925b813d557666ec1"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:788ee7d04cc0e0e7f944c52ff05f52a4415b312f5efd2ee66389fb7685ee030c"}, + {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:87bc42bd04fd9ca31396d3ca0433db0be1411b6b53ac5a32b7845a85d01ffc2e"}, + {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4037676c86365a721a8c9510323a51861d703b399b78a6b4486a54a65a975fca"}, + {file = "safetensors-0.4.5-cp39-none-win32.whl", hash = "sha256:1500418454529d0ed5c1564bda376c4ddff43f30fce9517d9bee7bcce5a8ef50"}, + {file = "safetensors-0.4.5-cp39-none-win_amd64.whl", hash = "sha256:9d1a94b9d793ed8fe35ab6d5cea28d540a46559bafc6aae98f30ee0867000cab"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdadf66b5a22ceb645d5435a0be7a0292ce59648ca1d46b352f13cff3ea80410"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d42ffd4c2259f31832cb17ff866c111684c87bd930892a1ba53fed28370c918c"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd8a1f6d2063a92cd04145c7fd9e31a1c7d85fbec20113a14b487563fdbc0597"}, + {file = 
"safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:951d2fcf1817f4fb0ef0b48f6696688a4e852a95922a042b3f96aaa67eedc920"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ac85d9a8c1af0e3132371d9f2d134695a06a96993c2e2f0bbe25debb9e3f67a"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e3cec4a29eb7fe8da0b1c7988bc3828183080439dd559f720414450de076fcab"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:21742b391b859e67b26c0b2ac37f52c9c0944a879a25ad2f9f9f3cd61e7fda8f"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c7db3006a4915151ce1913652e907cdede299b974641a83fbc092102ac41b644"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f68bf99ea970960a237f416ea394e266e0361895753df06e3e06e6ea7907d98b"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8158938cf3324172df024da511839d373c40fbfaa83e9abf467174b2910d7b4c"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:540ce6c4bf6b58cb0fd93fa5f143bc0ee341c93bb4f9287ccd92cf898cc1b0dd"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bfeaa1a699c6b9ed514bd15e6a91e74738b71125a9292159e3d6b7f0a53d2cde"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:01c8f00da537af711979e1b42a69a8ec9e1d7112f208e0e9b8a35d2c381085ef"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a0dd565f83b30f2ca79b5d35748d0d99dd4b3454f80e03dfb41f0038e3bdf180"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:023b6e5facda76989f4cba95a861b7e656b87e225f61811065d5c501f78cdb3f"}, + {file = 
"safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9633b663393d5796f0b60249549371e392b75a0b955c07e9c6f8708a87fc841f"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78dd8adfb48716233c45f676d6e48534d34b4bceb50162c13d1f0bdf6f78590a"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e8deb16c4321d61ae72533b8451ec4a9af8656d1c61ff81aa49f966406e4b68"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:52452fa5999dc50c4decaf0c53aa28371f7f1e0fe5c2dd9129059fbe1e1599c7"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d5f23198821e227cfc52d50fa989813513db381255c6d100927b012f0cfec63d"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f4beb84b6073b1247a773141a6331117e35d07134b3bb0383003f39971d414bb"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:68814d599d25ed2fdd045ed54d370d1d03cf35e02dce56de44c651f828fb9b7b"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b6453c54c57c1781292c46593f8a37254b8b99004c68d6c3ce229688931a22"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adaa9c6dead67e2dd90d634f89131e43162012479d86e25618e821a03d1eb1dc"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73e7d408e9012cd17511b382b43547850969c7979efc2bc353f317abaf23c84c"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:775409ce0fcc58b10773fdb4221ed1eb007de10fe7adbdf8f5e8a56096b6f0bc"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:834001bed193e4440c4a3950a31059523ee5090605c907c66808664c932b549c"}, + {file = 
"safetensors-0.4.5.tar.gz", hash = "sha256:d73de19682deabb02524b3d5d1f8b3aaba94c72f1bbfc7911b9b9d5d391c0310"}, +] + +[package.extras] +all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] +dev = ["safetensors[all]"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] +mlx = ["mlx (>=0.0.9)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] +tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +torch = ["safetensors[numpy]", "torch (>=1.10)"] + +[[package]] +name = "scikit-learn" +version = "1.5.2" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, + {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, + {file = 
"scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, + {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, + {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, + {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, +] + +[package.dependencies] +joblib = ">=1.2.0" +numpy = ">=1.19.5" +scipy = ">=1.6.0" +threadpoolctl = ">=3.1.0" + +[package.extras] +benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] +examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] +maintenance = ["conda-lock (==2.5.6)"] +tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] + +[[package]] +name = "scipy" +version = "1.14.1" +description = 
"Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "scipy-1.14.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:b28d2ca4add7ac16ae8bb6632a3c86e4b9e4d52d3e34267f6e1b0c1f8d87e389"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0d2821003174de06b69e58cef2316a6622b60ee613121199cb2852a873f8cf3"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8bddf15838ba768bb5f5083c1ea012d64c9a444e16192762bd858f1e126196d0"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97c5dddd5932bd2a1a31c927ba5e1463a53b87ca96b5c9bdf5dfd6096e27efc3"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ff0a7e01e422c15739ecd64432743cf7aae2b03f3084288f399affcefe5222d"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69"}, + {file = "scipy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8426251ad1e4ad903a4514712d2fa8fdd5382c978010d1c6f5f37ef286a713ad"}, + {file = "scipy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a49f6ed96f83966f576b33a44257d869756df6cf1ef4934f59dd58b25e0327e5"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2"}, + {file = "scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2"}, + {file = "scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066"}, + {file = "scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1"}, + {file = "scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = 
"sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e"}, + {file = "scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06"}, + {file = "scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84"}, + {file = "scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<=7.3.7)", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "sentence-transformers" +version = "3.0.1" +description = "Multilingual text embeddings" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = 
"sentence_transformers-3.0.1-py3-none-any.whl", hash = "sha256:01050cc4053c49b9f5b78f6980b5a72db3fd3a0abb9169b1792ac83875505ee6"}, + {file = "sentence_transformers-3.0.1.tar.gz", hash = "sha256:8a3d2c537cc4d1014ccc20ac92be3d6135420a3bc60ae29a3a8a9b4bb35fbff6"}, +] + +[package.dependencies] +huggingface-hub = ">=0.15.1" +numpy = "*" +Pillow = "*" +scikit-learn = "*" +scipy = "*" +torch = ">=1.11.0" +tqdm = "*" +transformers = ">=4.34.0,<5.0.0" + +[package.extras] +dev = ["accelerate (>=0.20.3)", "datasets", "pre-commit", "pytest", "ruff (>=0.3.0)"] +train = ["accelerate (>=0.20.3)", "datasets"] + +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "shortuuid" +version = "1.0.13" +description = "A generator library for concise, unambiguous and URL-safe UUIDs." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "shortuuid-1.0.13-py3-none-any.whl", hash = "sha256:a482a497300b49b4953e15108a7913244e1bb0d41f9d332f5e9925dba33a3c5a"}, + {file = "shortuuid-1.0.13.tar.gz", hash = "sha256:3bb9cf07f606260584b1df46399c0b87dd84773e7b25912b7e391e30797c5e72"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "starlette" +version = "0.37.2" +description = "The little ASGI library that shines." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "starlette-0.37.2-py3-none-any.whl", hash = "sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee"}, + {file = "starlette-0.37.2.tar.gz", hash = "sha256:9af890290133b79fc3db55474ade20f6220a364a0402e0b556e7cd5e1e093823"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] + +[[package]] +name = "sympy" +version = "1.13.3" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + +[[package]] +name = "tbb" +version = "2021.13.1" +description = "Intel® oneAPI Threading Building Blocks (oneTBB)" +optional = false +python-versions = "*" +files = [ + {file = "tbb-2021.13.1-py2.py3-none-manylinux1_i686.whl", hash = "sha256:bb5bdea0c0e9e6ad0739e7a8796c2635ce9eccca86dd48c426cd8027ac70fb1d"}, + {file = "tbb-2021.13.1-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:d916359dc685579d09e4b344241550afc1cc034f7f5ec7234c258b6680912d70"}, + {file = "tbb-2021.13.1-py3-none-win32.whl", hash = "sha256:00f5e5a70051650ddd0ab6247c0549521968339ec21002e475cd23b1cbf46d66"}, + {file = "tbb-2021.13.1-py3-none-win_amd64.whl", hash = "sha256:cbf024b2463fdab3ebe3fa6ff453026358e6b903839c80d647e08ad6d0796ee9"}, +] + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = 
"sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, + {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, +] + +[[package]] +name = "tiktoken" +version = "0.7.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, + {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, + {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"}, + {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"}, + {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"}, + {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"}, + {file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"}, + {file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"}, + {file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"}, + {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"}, + {file = 
"tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"}, + {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"}, + {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"}, + {file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"}, + {file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"}, + {file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"}, + {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"}, + {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"}, + {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"}, + {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"}, + {file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"}, + {file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"}, + {file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"}, + {file = 
"tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"}, + {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"}, + {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"}, + {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"}, + {file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"}, + {file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"}, + {file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"}, + {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"}, + {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"}, + {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"}, + {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"}, + {file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"}, + {file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = 
">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tokenizers" +version = "0.20.0" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tokenizers-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6cff5c5e37c41bc5faa519d6f3df0679e4b37da54ea1f42121719c5e2b4905c0"}, + {file = "tokenizers-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:62a56bf75c27443432456f4ca5ca055befa95e25be8a28141cc495cac8ae4d6d"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68cc7de6a63f09c4a86909c2597b995aa66e19df852a23aea894929c74369929"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:053c37ecee482cc958fdee53af3c6534286a86f5d35aac476f7c246830e53ae5"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d7074aaabc151a6363fa03db5493fc95b423b2a1874456783989e96d541c7b6"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a11435780f2acd89e8fefe5e81cecf01776f6edb9b3ac95bcb76baee76b30b90"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a81cd2712973b007d84268d45fc3f6f90a79c31dfe7f1925e6732f8d2959987"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7dfd796ab9d909f76fb93080e1c7c8309f196ecb316eb130718cd5e34231c69"}, + {file = "tokenizers-0.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8029ad2aa8cb00605c9374566034c1cc1b15130713e0eb5afcef6cface8255c9"}, + {file = "tokenizers-0.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ca4d54260ebe97d59dfa9a30baa20d0c4dd9137d99a8801700055c561145c24e"}, + {file = "tokenizers-0.20.0-cp310-none-win32.whl", hash = "sha256:95ee16b57cec11b86a7940174ec5197d506439b0f415ab3859f254b1dffe9df0"}, + {file = 
"tokenizers-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:0a61a11e93eeadbf02aea082ffc75241c4198e0608bbbac4f65a9026851dcf37"}, + {file = "tokenizers-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6636b798b3c4d6c9b1af1a918bd07c867808e5a21c64324e95318a237e6366c3"}, + {file = "tokenizers-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ec603e42eaf499ffd58b9258162add948717cf21372458132f14e13a6bc7172"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cce124264903a8ea6f8f48e1cc7669e5ef638c18bd4ab0a88769d5f92debdf7f"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07bbeba0231cf8de07aa6b9e33e9779ff103d47042eeeb859a8c432e3292fb98"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:06c0ca8397b35d38b83a44a9c6929790c1692957d88541df061cb34d82ebbf08"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ca6557ac3b83d912dfbb1f70ab56bd4b0594043916688e906ede09f42e192401"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a5ad94c9e80ac6098328bee2e3264dbced4c6faa34429994d473f795ec58ef4"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b5c7f906ee6bec30a9dc20268a8b80f3b9584de1c9f051671cb057dc6ce28f6"}, + {file = "tokenizers-0.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:31e087e9ee1b8f075b002bfee257e858dc695f955b43903e1bb4aa9f170e37fe"}, + {file = "tokenizers-0.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c3124fb6f3346cb3d8d775375d3b429bf4dcfc24f739822702009d20a4297990"}, + {file = "tokenizers-0.20.0-cp311-none-win32.whl", hash = "sha256:a4bb8b40ba9eefa621fdcabf04a74aa6038ae3be0c614c6458bd91a4697a452f"}, + {file = "tokenizers-0.20.0-cp311-none-win_amd64.whl", hash = 
"sha256:2b709d371f1fe60a28ef0c5c67815952d455ca7f34dbe7197eaaed3cc54b658e"}, + {file = "tokenizers-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:15c81a17d0d66f4987c6ca16f4bea7ec253b8c7ed1bb00fdc5d038b1bb56e714"}, + {file = "tokenizers-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a531cdf1fb6dc41c984c785a3b299cb0586de0b35683842a3afbb1e5207f910"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06caabeb4587f8404e0cd9d40f458e9cba3e815c8155a38e579a74ff3e2a4301"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8768f964f23f5b9f50546c0369c75ab3262de926983888bbe8b98be05392a79c"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:626403860152c816f97b649fd279bd622c3d417678c93b4b1a8909b6380b69a8"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c1b88fa9e5ff062326f4bf82681da5a96fca7104d921a6bd7b1e6fcf224af26"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7e559436a07dc547f22ce1101f26d8b2fad387e28ec8e7e1e3b11695d681d8"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48afb75e50449848964e4a67b0da01261dd3aa8df8daecf10db8fd7f5b076eb"}, + {file = "tokenizers-0.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:baf5d0e1ff44710a95eefc196dd87666ffc609fd447c5e5b68272a7c3d342a1d"}, + {file = "tokenizers-0.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e5e56df0e8ed23ba60ae3848c3f069a0710c4b197218fe4f89e27eba38510768"}, + {file = "tokenizers-0.20.0-cp312-none-win32.whl", hash = "sha256:ec53e5ecc142a82432f9c6c677dbbe5a2bfee92b8abf409a9ecb0d425ee0ce75"}, + {file = "tokenizers-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:f18661ece72e39c0dfaa174d6223248a15b457dbd4b0fc07809b8e6d3ca1a234"}, + {file = 
"tokenizers-0.20.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:f7065b1084d8d1a03dc89d9aad69bcbc8415d4bc123c367063eb32958cd85054"}, + {file = "tokenizers-0.20.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e5d4069e4714e3f7ba0a4d3d44f9d84a432cd4e4aa85c3d7dd1f51440f12e4a1"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:799b808529e54b7e1a36350bda2aeb470e8390e484d3e98c10395cee61d4e3c6"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f9baa027cc8a281ad5f7725a93c204d7a46986f88edbe8ef7357f40a23fb9c7"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:010ec7f3f7a96adc4c2a34a3ada41fa14b4b936b5628b4ff7b33791258646c6b"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98d88f06155335b14fd78e32ee28ca5b2eb30fced4614e06eb14ae5f7fba24ed"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e13eb000ef540c2280758d1b9cfa5fe424b0424ae4458f440e6340a4f18b2638"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fab3cf066ff426f7e6d70435dc28a9ff01b2747be83810e397cba106f39430b0"}, + {file = "tokenizers-0.20.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:39fa3761b30a89368f322e5daf4130dce8495b79ad831f370449cdacfb0c0d37"}, + {file = "tokenizers-0.20.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c8da0fba4d179ddf2607821575998df3c294aa59aa8df5a6646dc64bc7352bce"}, + {file = "tokenizers-0.20.0-cp37-none-win32.whl", hash = "sha256:fada996d6da8cf213f6e3c91c12297ad4f6cdf7a85c2fadcd05ec32fa6846fcd"}, + {file = "tokenizers-0.20.0-cp37-none-win_amd64.whl", hash = "sha256:7d29aad702279e0760c265fcae832e89349078e3418dd329732d4503259fd6bd"}, + {file = "tokenizers-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:099c68207f3ef0227ecb6f80ab98ea74de559f7b124adc7b17778af0250ee90a"}, + {file = "tokenizers-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:68012d8a8cddb2eab3880870d7e2086cb359c7f7a2b03f5795044f5abff4e850"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9253bdd209c6aee168deca7d0e780581bf303e0058f268f9bb06859379de19b6"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f868600ddbcb0545905ed075eb7218a0756bf6c09dae7528ea2f8436ebd2c93"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9643d9c8c5f99b6aba43fd10034f77cc6c22c31f496d2f0ee183047d948fa0"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c375c6a889aeab44734028bc65cc070acf93ccb0f9368be42b67a98e1063d3f6"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e359f852328e254f070bbd09a19a568421d23388f04aad9f2fb7da7704c7228d"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d98b01a309d4387f3b1c1dd68a8b8136af50376cf146c1b7e8d8ead217a5be4b"}, + {file = "tokenizers-0.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:459f7537119554c2899067dec1ac74a00d02beef6558f4ee2e99513bf6d568af"}, + {file = "tokenizers-0.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:392b87ec89452628c045c9f2a88bc2a827f4c79e7d84bc3b72752b74c2581f70"}, + {file = "tokenizers-0.20.0-cp38-none-win32.whl", hash = "sha256:55a393f893d2ed4dd95a1553c2e42d4d4086878266f437b03590d3f81984c4fe"}, + {file = "tokenizers-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:30ffe33c5c2f2aab8e9a3340d0110dd9f7ace7eec7362e20a697802306bd8068"}, + {file = "tokenizers-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aa2d4a6fed2a7e3f860c7fc9d48764bb30f2649d83915d66150d6340e06742b8"}, + {file = 
"tokenizers-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b5ef0f814084a897e9071fc4a868595f018c5c92889197bdc4bf19018769b148"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc1e1b791e8c3bf4c4f265f180dadaff1c957bf27129e16fdd5e5d43c2d3762c"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b69e55e481459c07885263743a0d3c18d52db19bae8226a19bcca4aaa213fff"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4806b4d82e27a2512bc23057b2986bc8b85824914286975b84d8105ff40d03d9"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9859e9ef13adf5a473ccab39d31bff9c550606ae3c784bf772b40f615742a24f"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef703efedf4c20488a8eb17637b55973745b27997ff87bad88ed499b397d1144"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6eec0061bab94b1841ab87d10831fdf1b48ebaed60e6d66d66dbe1d873f92bf5"}, + {file = "tokenizers-0.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:980f3d0d7e73f845b69087f29a63c11c7eb924c4ad6b358da60f3db4cf24bdb4"}, + {file = "tokenizers-0.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c157550a2f3851b29d7fdc9dc059fcf81ff0c0fc49a1e5173a89d533ed043fa"}, + {file = "tokenizers-0.20.0-cp39-none-win32.whl", hash = "sha256:8a3d2f4d08608ec4f9895ec25b4b36a97f05812543190a5f2c3cd19e8f041e5a"}, + {file = "tokenizers-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:d90188d12afd0c75e537f9a1d92f9c7375650188ee4f48fdc76f9e38afbd2251"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d68e15f1815357b059ec266062340c343ea7f98f7f330602df81ffa3474b6122"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:23f9ecec637b9bc80da5f703808d29ed5329e56b5aa8d791d1088014f48afadc"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f830b318ee599e3d0665b3e325f85bc75ee2d2ca6285f52e439dc22b64691580"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3dc750def789cb1de1b5a37657919545e1d9ffa667658b3fa9cb7862407a1b8"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e26e6c755ae884c2ea6135cd215bdd0fccafe4ee62405014b8c3cd19954e3ab9"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a1158c7174f427182e08baa2a8ded2940f2b4a3e94969a85cc9cfd16004cbcea"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:6324826287a3fc198898d3dcf758fe4a8479e42d6039f4c59e2cedd3cf92f64e"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7d8653149405bb0c16feaf9cfee327fdb6aaef9dc2998349fec686f35e81c4e2"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8a2dc1e402a155e97309287ca085c80eb1b7fab8ae91527d3b729181639fa51"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07bef67b20aa6e5f7868c42c7c5eae4d24f856274a464ae62e47a0f2cccec3da"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da06e397182ff53789c506c7833220c192952c57e1581a53f503d8d953e2d67e"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:302f7e11a14814028b7fc88c45a41f1bbe9b5b35fd76d6869558d1d1809baa43"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:055ec46e807b875589dfbe3d9259f9a6ee43394fb553b03b3d1e9541662dbf25"}, + {file = 
"tokenizers-0.20.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e3144b8acebfa6ae062e8f45f7ed52e4b50fb6c62f93afc8871b525ab9fdcab3"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b52aa3fd14b2a07588c00a19f66511cff5cca8f7266ca3edcdd17f3512ad159f"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b8cf52779ffc5d4d63a0170fbeb512372bad0dd014ce92bbb9149756c831124"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:983a45dd11a876124378dae71d6d9761822199b68a4c73f32873d8cdaf326a5b"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6b819c9a19831ebec581e71a7686a54ab45d90faf3842269a10c11d746de0c"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e738cfd80795fcafcef89c5731c84b05638a4ab3f412f97d5ed7765466576eb1"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c8842c7be2fadb9c9edcee233b1b7fe7ade406c99b0973f07439985c1c1d0683"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e47a82355511c373a4a430c4909dc1e518e00031207b1fec536c49127388886b"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9afbf359004551179a5db19424180c81276682773cff2c5d002f6eaaffe17230"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07eaa8799a92e6af6f472c21a75bf71575de2af3c0284120b7a09297c0de2f3"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0994b2e5fc53a301071806bc4303e4bc3bdc3f490e92a21338146a36746b0872"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6466e0355b603d10e3cc3d282d350b646341b601e50969464a54939f9848d0"}, + {file = 
"tokenizers-0.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:1e86594c2a433cb1ea09cfbe596454448c566e57ee8905bd557e489d93e89986"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3e14cdef1efa96ecead6ea64a891828432c3ebba128bdc0596e3059fea104ef3"}, + {file = "tokenizers-0.20.0.tar.gz", hash = "sha256:39d7acc43f564c274085cafcd1dae9d36f332456de1a31970296a6b8da4eac8d"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] + +[[package]] +name = "torch" +version = "2.3.1" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.3.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:605a25b23944be5ab7c3467e843580e1d888b8066e5aaf17ff7bf9cc30001cc3"}, + {file = "torch-2.3.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f2357eb0965583a0954d6f9ad005bba0091f956aef879822274b1bcdb11bd308"}, + {file = "torch-2.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:32b05fe0d1ada7f69c9f86c14ff69b0ef1957a5a54199bacba63d22d8fab720b"}, + {file = "torch-2.3.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:7c09a94362778428484bcf995f6004b04952106aee0ef45ff0b4bab484f5498d"}, + {file = "torch-2.3.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:b2ec81b61bb094ea4a9dee1cd3f7b76a44555375719ad29f05c0ca8ef596ad39"}, + {file = "torch-2.3.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:490cc3d917d1fe0bd027057dfe9941dc1d6d8e3cae76140f5dd9a7e5bc7130ab"}, + {file = "torch-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:5802530783bd465fe66c2df99123c9a54be06da118fbd785a25ab0a88123758a"}, + {file = "torch-2.3.1-cp311-none-macosx_11_0_arm64.whl", hash = 
"sha256:a7dd4ed388ad1f3d502bf09453d5fe596c7b121de7e0cfaca1e2017782e9bbac"}, + {file = "torch-2.3.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:a486c0b1976a118805fc7c9641d02df7afbb0c21e6b555d3bb985c9f9601b61a"}, + {file = "torch-2.3.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:224259821fe3e4c6f7edf1528e4fe4ac779c77addaa74215eb0b63a5c474d66c"}, + {file = "torch-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:e5fdccbf6f1334b2203a61a0e03821d5845f1421defe311dabeae2fc8fbeac2d"}, + {file = "torch-2.3.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:3c333dc2ebc189561514eda06e81df22bf8fb64e2384746b2cb9f04f96d1d4c8"}, + {file = "torch-2.3.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:07e9ba746832b8d069cacb45f312cadd8ad02b81ea527ec9766c0e7404bb3feb"}, + {file = "torch-2.3.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:462d1c07dbf6bb5d9d2f3316fee73a24f3d12cd8dacf681ad46ef6418f7f6626"}, + {file = "torch-2.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:ff60bf7ce3de1d43ad3f6969983f321a31f0a45df3690921720bcad6a8596cc4"}, + {file = "torch-2.3.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:bee0bd33dc58aa8fc8a7527876e9b9a0e812ad08122054a5bff2ce5abf005b10"}, + {file = "torch-2.3.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:aaa872abde9a3d4f91580f6396d54888620f4a0b92e3976a6034759df4b961ad"}, + {file = "torch-2.3.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:3d7a7f7ef21a7520510553dc3938b0c57c116a7daee20736a9e25cbc0e832bdc"}, + {file = "torch-2.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:4777f6cefa0c2b5fa87223c213e7b6f417cf254a45e5829be4ccd1b2a4ee1011"}, + {file = "torch-2.3.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:2bb5af780c55be68fe100feb0528d2edebace1d55cb2e351de735809ba7391eb"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +mkl = {version = ">=2021.1.1,<=2021.4.0", markers = "platform_system == \"Windows\""} +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system 
== \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} +typing-extensions = ">=4.8.0" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.9.1)"] + +[[package]] +name = "tqdm" +version = "4.66.5" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, +] + +[package.dependencies] +colorama = 
{version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "transformers" +version = "4.45.1" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "transformers-4.45.1-py3-none-any.whl", hash = "sha256:21e3f47aa7256dbbfb5215937a3168a984c94432ce3a16b7908265807d62aee8"}, + {file = "transformers-4.45.1.tar.gz", hash = "sha256:9cace11072172df05ca6a694fcd1f5064a55b63285e492bd88f0ad1cec270f02"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.23.2,<1.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.4.1" +tokenizers = ">=0.20,<0.21" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.26.0)"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +benchmark = ["optimum-benchmark (>=0.3.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.26.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = 
["GitPython (<3.1.19)", "accelerate (>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", 
"faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.20,<0.21)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "libcst", "librosa", "nltk (<=3.8.1)", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = 
["optuna", "ray[tune] (>=2.7.0)", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6,<0.15.0)"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "libcst", "rich", "ruff (==0.5.1)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +ruff = ["ruff (==0.5.1)"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +tiktoken = ["blobfile", "tiktoken"] +timm = ["timm (<=0.9.16)"] +tokenizers = ["tokenizers 
(>=0.20,<0.21)"] +torch = ["accelerate (>=0.26.0)", "torch"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.20,<0.21)", "torch", "tqdm (>=4.27)"] +video = ["av (==9.2.0)", "decord (==0.6.0)"] +vision = ["Pillow (>=10.0.1,<=15.0)"] + +[[package]] +name = "triton" +version = "2.3.1" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +files = [ + {file = "triton-2.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c84595cbe5e546b1b290d2a58b1494df5a2ef066dd890655e5b8a8a92205c33"}, + {file = "triton-2.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d64ae33bcb3a7a18081e3a746e8cf87ca8623ca13d2c362413ce7a486f893e"}, + {file = "triton-2.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaf80e8761a9e3498aa92e7bf83a085b31959c61f5e8ac14eedd018df6fccd10"}, + {file = "triton-2.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b13bf35a2b659af7159bf78e92798dc62d877aa991de723937329e2d382f1991"}, + {file = "triton-2.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63381e35ded3304704ea867ffde3b7cfc42c16a55b3062d41e017ef510433d66"}, + {file = "triton-2.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d968264523c7a07911c8fb51b4e0d1b920204dae71491b1fe7b01b62a31e124"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.20)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"] +tutorials = ["matplotlib", "pandas", "tabulate", "torch"] + 
+[[package]] +name = "typer" +version = "0.12.5" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.7" +files = [ + {file = "typer-0.12.5-py3-none-any.whl", hash = "sha256:62fe4e471711b147e3365034133904df3e235698399bc4de2b36c8579298d52b"}, + {file = "typer-0.12.5.tar.gz", hash = "sha256:f592f089bedcc8ec1b974125d64851029c3b1af145f04aca64d69410f0c9b722"}, +] + +[package.dependencies] +click = ">=8.0.0" +rich = ">=10.11.0" +shellingham = ">=1.3.0" +typing-extensions = ">=3.7.4.3" + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "tzdata" +version = "2024.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, +] + +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +optional = false +python-versions = ">=3.6" +files = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.30.6" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.30.6-py3-none-any.whl", hash = "sha256:65fd46fe3fda5bdc1b03b94eb634923ff18cd35b2f084813ea79d1f103f711b5"}, + {file = "uvicorn-0.30.6.tar.gz", hash = "sha256:4b15decdda1e72be08209e860a1e10e92439ad5b97cf44cc945fcbee66fc5788"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} +h11 = ">=0.8" +httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "uvloop" +version = "0.20.0" +description = "Fast 
implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9ebafa0b96c62881d5cafa02d9da2e44c23f9f0cd829f3a32a6aff771449c996"}, + {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:35968fc697b0527a06e134999eef859b4034b37aebca537daeb598b9d45a137b"}, + {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b16696f10e59d7580979b420eedf6650010a4a9c3bd8113f24a103dfdb770b10"}, + {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b04d96188d365151d1af41fa2d23257b674e7ead68cfd61c725a422764062ae"}, + {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:94707205efbe809dfa3a0d09c08bef1352f5d3d6612a506f10a319933757c006"}, + {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89e8d33bb88d7263f74dc57d69f0063e06b5a5ce50bb9a6b32f5fcbe655f9e73"}, + {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e50289c101495e0d1bb0bfcb4a60adde56e32f4449a67216a1ab2750aa84f037"}, + {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e237f9c1e8a00e7d9ddaa288e535dc337a39bcbf679f290aee9d26df9e72bce9"}, + {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:746242cd703dc2b37f9d8b9f173749c15e9a918ddb021575a0205ec29a38d31e"}, + {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82edbfd3df39fb3d108fc079ebc461330f7c2e33dbd002d146bf7c445ba6e756"}, + {file = "uvloop-0.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80dc1b139516be2077b3e57ce1cb65bfed09149e1d175e0478e7a987863b68f0"}, + {file = "uvloop-0.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f44af67bf39af25db4c1ac27e82e9665717f9c26af2369c404be865c8818dcf"}, + {file = 
"uvloop-0.20.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4b75f2950ddb6feed85336412b9a0c310a2edbcf4cf931aa5cfe29034829676d"}, + {file = "uvloop-0.20.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:77fbc69c287596880ecec2d4c7a62346bef08b6209749bf6ce8c22bbaca0239e"}, + {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6462c95f48e2d8d4c993a2950cd3d31ab061864d1c226bbf0ee2f1a8f36674b9"}, + {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649c33034979273fa71aa25d0fe120ad1777c551d8c4cd2c0c9851d88fcb13ab"}, + {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a609780e942d43a275a617c0839d85f95c334bad29c4c0918252085113285b5"}, + {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aea15c78e0d9ad6555ed201344ae36db5c63d428818b4b2a42842b3870127c00"}, + {file = "uvloop-0.20.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0e94b221295b5e69de57a1bd4aeb0b3a29f61be6e1b478bb8a69a73377db7ba"}, + {file = "uvloop-0.20.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fee6044b64c965c425b65a4e17719953b96e065c5b7e09b599ff332bb2744bdf"}, + {file = "uvloop-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:265a99a2ff41a0fd56c19c3838b29bf54d1d177964c300dad388b27e84fd7847"}, + {file = "uvloop-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10c2956efcecb981bf9cfb8184d27d5d64b9033f917115a960b83f11bfa0d6b"}, + {file = "uvloop-0.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e7d61fe8e8d9335fac1bf8d5d82820b4808dd7a43020c149b63a1ada953d48a6"}, + {file = "uvloop-0.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2beee18efd33fa6fdb0976e18475a4042cd31c7433c866e8a09ab604c7c22ff2"}, + {file = "uvloop-0.20.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8c36fdf3e02cec92aed2d44f63565ad1522a499c654f07935c8f9d04db69e95"}, + {file = 
"uvloop-0.20.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0fac7be202596c7126146660725157d4813aa29a4cc990fe51346f75ff8fde7"}, + {file = "uvloop-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d0fba61846f294bce41eb44d60d58136090ea2b5b99efd21cbdf4e21927c56a"}, + {file = "uvloop-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95720bae002ac357202e0d866128eb1ac82545bcf0b549b9abe91b5178d9b541"}, + {file = "uvloop-0.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:36c530d8fa03bfa7085af54a48f2ca16ab74df3ec7108a46ba82fd8b411a2315"}, + {file = "uvloop-0.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e97152983442b499d7a71e44f29baa75b3b02e65d9c44ba53b10338e98dedb66"}, + {file = "uvloop-0.20.0.tar.gz", hash = "sha256:4603ca714a754fc8d9b197e325db25b2ea045385e8a3ad05d3463de725fdf469"}, +] + +[package.extras] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "watchfiles" +version = "0.24.0" +description = "Simple, modern and high performance file watching and code reload in python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, + {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82ae557a8c037c42a6ef26c494d0631cacca040934b101d001100ed93d43f361"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acbfa31e315a8f14fe33e3542cbcafc55703b8f5dcbb7c1eecd30f141df50db3"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b74fdffce9dfcf2dc296dec8743e5b0332d15df19ae464f0e249aa871fc1c571"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:449f43f49c8ddca87c6b3980c9284cab6bd1f5c9d9a2b00012adaaccd5e7decd"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abf4ad269856618f82dee296ac66b0cd1d71450fc3c98532d93798e73399b7a"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f895d785eb6164678ff4bb5cc60c5996b3ee6df3edb28dcdeba86a13ea0465e"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ae3e208b31be8ce7f4c2c0034f33406dd24fbce3467f77223d10cd86778471c"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2efec17819b0046dde35d13fb8ac7a3ad877af41ae4640f4109d9154ed30a188"}, + {file = "watchfiles-0.24.0-cp310-none-win32.whl", hash = "sha256:6bdcfa3cd6fdbdd1a068a52820f46a815401cbc2cb187dd006cb076675e7b735"}, + {file = "watchfiles-0.24.0-cp310-none-win_amd64.whl", hash = "sha256:54ca90a9ae6597ae6dc00e7ed0a040ef723f84ec517d3e7ce13e63e4bc82fa04"}, + {file = 
"watchfiles-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bdcd5538e27f188dd3c804b4a8d5f52a7fc7f87e7fd6b374b8e36a4ca03db428"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2dadf8a8014fde6addfd3c379e6ed1a981c8f0a48292d662e27cabfe4239c83c"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6509ed3f467b79d95fc62a98229f79b1a60d1b93f101e1c61d10c95a46a84f43"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8360f7314a070c30e4c976b183d1d8d1585a4a50c5cb603f431cebcbb4f66327"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:316449aefacf40147a9efaf3bd7c9bdd35aaba9ac5d708bd1eb5763c9a02bef5"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73bde715f940bea845a95247ea3e5eb17769ba1010efdc938ffcb967c634fa61"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3770e260b18e7f4e576edca4c0a639f704088602e0bc921c5c2e721e3acb8d15"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa0fd7248cf533c259e59dc593a60973a73e881162b1a2f73360547132742823"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d7a2e3b7f5703ffbd500dabdefcbc9eafeff4b9444bbdd5d83d79eedf8428fab"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d831ee0a50946d24a53821819b2327d5751b0c938b12c0653ea5be7dea9c82ec"}, + {file = "watchfiles-0.24.0-cp311-none-win32.whl", hash = "sha256:49d617df841a63b4445790a254013aea2120357ccacbed00253f9c2b5dc24e2d"}, + {file = "watchfiles-0.24.0-cp311-none-win_amd64.whl", hash = "sha256:d3dcb774e3568477275cc76554b5a565024b8ba3a0322f77c246bc7111c5bb9c"}, + {file = "watchfiles-0.24.0-cp311-none-win_arm64.whl", hash = 
"sha256:9301c689051a4857d5b10777da23fafb8e8e921bcf3abe6448a058d27fb67633"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7211b463695d1e995ca3feb38b69227e46dbd03947172585ecb0588f19b0d87a"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b8693502d1967b00f2fb82fc1e744df128ba22f530e15b763c8d82baee15370"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdab9555053399318b953a1fe1f586e945bc8d635ce9d05e617fd9fe3a4687d6"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34e19e56d68b0dad5cff62273107cf5d9fbaf9d75c46277aa5d803b3ef8a9e9b"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41face41f036fee09eba33a5b53a73e9a43d5cb2c53dad8e61fa6c9f91b5a51e"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5148c2f1ea043db13ce9b0c28456e18ecc8f14f41325aa624314095b6aa2e9ea"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e4bd963a935aaf40b625c2499f3f4f6bbd0c3776f6d3bc7c853d04824ff1c9f"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c79d7719d027b7a42817c5d96461a99b6a49979c143839fc37aa5748c322f234"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:32aa53a9a63b7f01ed32e316e354e81e9da0e6267435c7243bf8ae0f10b428ef"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce72dba6a20e39a0c628258b5c308779b8697f7676c254a845715e2a1039b968"}, + {file = "watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444"}, + {file = "watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896"}, + {file = 
"watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = "sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2e3ab79a1771c530233cadfd277fcc762656d50836c77abb2e5e72b88e3a48"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327763da824817b38ad125dcd97595f942d720d32d879f6c4ddf843e3da3fe90"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd82010f8ab451dabe36054a1622870166a67cf3fce894f68895db6f74bbdc94"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d64ba08db72e5dfd5c33be1e1e687d5e4fcce09219e8aee893a4862034081d4e"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1cf1f6dd7825053f3d98f6d33f6464ebdd9ee95acd74ba2c34e183086900a827"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43e3e37c15a8b6fe00c1bce2473cfa8eb3484bbeecf3aefbf259227e487a03df"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88bcd4d0fe1d8ff43675360a72def210ebad3f3f72cabfeac08d825d2639b4ab"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:999928c6434372fde16c8f27143d3e97201160b48a614071261701615a2a156f"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:30bbd525c3262fd9f4b1865cb8d88e21161366561cd7c9e1194819e0a33ea86b"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18"}, + {file = "watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07"}, + {file = "watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = 
"sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ee82c98bed9d97cd2f53bdb035e619309a098ea53ce525833e26b93f673bc318"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd92bbaa2ecdb7864b7600dcdb6f2f1db6e0346ed425fbd01085be04c63f0b05"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f83df90191d67af5a831da3a33dd7628b02a95450e168785586ed51e6d28943c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fca9433a45f18b7c779d2bae7beeec4f740d28b788b117a48368d95a3233ed83"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b995bfa6bf01a9e09b884077a6d37070464b529d8682d7691c2d3b540d357a0c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed9aba6e01ff6f2e8285e5aa4154e2970068fe0fc0998c4380d0e6278222269b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5171ef898299c657685306d8e1478a45e9303ddcd8ac5fed5bd52ad4ae0b69b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4933a508d2f78099162da473841c652ad0de892719043d3f07cc83b33dfd9d91"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95cf3b95ea665ab03f5a54765fa41abf0529dbaf372c3b83d91ad2cfa695779b"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:01def80eb62bd5db99a798d5e1f5f940ca0a05986dcfae21d833af7a46f7ee22"}, + {file = "watchfiles-0.24.0-cp38-none-win32.whl", hash = "sha256:4d28cea3c976499475f5b7a2fec6b3a36208656963c1a856d328aeae056fc5c1"}, + {file = "watchfiles-0.24.0-cp38-none-win_amd64.whl", hash = "sha256:21ab23fdc1208086d99ad3f69c231ba265628014d4aed31d4e8746bd59e88cd1"}, + {file = 
"watchfiles-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b665caeeda58625c3946ad7308fbd88a086ee51ccb706307e5b1fa91556ac886"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5c51749f3e4e269231510da426ce4a44beb98db2dce9097225c338f815b05d4f"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b2509f08761f29a0fdad35f7e1638b8ab1adfa2666d41b794090361fb8b855"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a60e2bf9dc6afe7f743e7c9b149d1fdd6dbf35153c78fe3a14ae1a9aee3d98b"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7d9b87c4c55e3ea8881dfcbf6d61ea6775fffed1fedffaa60bd047d3c08c430"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78470906a6be5199524641f538bd2c56bb809cd4bf29a566a75051610bc982c3"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07cdef0c84c03375f4e24642ef8d8178e533596b229d32d2bbd69e5128ede02a"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d337193bbf3e45171c8025e291530fb7548a93c45253897cd764a6a71c937ed9"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ec39698c45b11d9694a1b635a70946a5bad066b593af863460a8e600f0dff1ca"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e28d91ef48eab0afb939fa446d8ebe77e2f7593f5f463fd2bb2b14132f95b6e"}, + {file = "watchfiles-0.24.0-cp39-none-win32.whl", hash = "sha256:7138eff8baa883aeaa074359daabb8b6c1e73ffe69d5accdc907d62e50b1c0da"}, + {file = "watchfiles-0.24.0-cp39-none-win_amd64.whl", hash = "sha256:b3ef2c69c655db63deb96b3c3e587084612f9b1fa983df5e0c3379d41307467f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01550ccf1d0aed6ea375ef259706af76ad009ef5b0203a3a4cce0f6024f9b68a"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:96619302d4374de5e2345b2b622dc481257a99431277662c30f606f3e22f42be"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:85d5f0c7771dcc7a26c7a27145059b6bb0ce06e4e751ed76cdf123d7039b60b5"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951088d12d339690a92cef2ec5d3cfd957692834c72ffd570ea76a6790222777"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fb58bcaa343fedc6a9e91f90195b20ccb3135447dc9e4e2570c3a39565853e"}, + {file = "watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = "websockets" +version = "13.1" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + 
{file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = 
"websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = 
"websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = 
"sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", 
hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = 
"websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, +] + +[[package]] +name = "wonderwords" +version = "2.2.0" +description = "A python package for random words and sentences in the english language" +optional = false +python-versions = ">=3.6" +files = [ + {file = "wonderwords-2.2.0-py3-none-any.whl", hash = "sha256:65fc665f1f5590e98f6d9259414ea036bf1b6dd83e51aa6ba44473c99ca92da1"}, + {file = 
"wonderwords-2.2.0.tar.gz", hash = "sha256:0b7ec6f591062afc55603bfea71463afbab06794b3064d9f7b04d0ce251a13d0"}, +] + +[package.extras] +cli = ["rich (==9.10.0)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." +optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = 
"wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = 
"wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = 
"wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "yarl" +version = "1.13.1" +description = "Yet another URL library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "yarl-1.13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:82e692fb325013a18a5b73a4fed5a1edaa7c58144dc67ad9ef3d604eccd451ad"}, + {file = "yarl-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df4e82e68f43a07735ae70a2d84c0353e58e20add20ec0af611f32cd5ba43fb4"}, + {file = "yarl-1.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec9dd328016d8d25702a24ee274932aebf6be9787ed1c28d021945d264235b3c"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:5820bd4178e6a639b3ef1db8b18500a82ceab6d8b89309e121a6859f56585b05"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86c438ce920e089c8c2388c7dcc8ab30dfe13c09b8af3d306bcabb46a053d6f7"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3de86547c820e4f4da4606d1c8ab5765dd633189791f15247706a2eeabc783ae"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca53632007c69ddcdefe1e8cbc3920dd88825e618153795b57e6ebcc92e752a"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4ee1d240b84e2f213565f0ec08caef27a0e657d4c42859809155cf3a29d1735"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c49f3e379177f4477f929097f7ed4b0622a586b0aa40c07ac8c0f8e40659a1ac"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5c5e32fef09ce101fe14acd0f498232b5710effe13abac14cd95de9c274e689e"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab9524e45ee809a083338a749af3b53cc7efec458c3ad084361c1dbf7aaf82a2"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:b1481c048fe787f65e34cb06f7d6824376d5d99f1231eae4778bbe5c3831076d"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:31497aefd68036d8e31bfbacef915826ca2e741dbb97a8d6c7eac66deda3b606"}, + {file = "yarl-1.13.1-cp310-cp310-win32.whl", hash = "sha256:1fa56f34b2236f5192cb5fceba7bbb09620e5337e0b6dfe2ea0ddbd19dd5b154"}, + {file = "yarl-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:1bbb418f46c7f7355084833051701b2301092e4611d9e392360c3ba2e3e69f88"}, + {file = "yarl-1.13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:216a6785f296169ed52cd7dcdc2612f82c20f8c9634bf7446327f50398732a51"}, + {file = "yarl-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:40c6e73c03a6befb85b72da213638b8aaa80fe4136ec8691560cf98b11b8ae6e"}, + {file = "yarl-1.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2430cf996113abe5aee387d39ee19529327205cda975d2b82c0e7e96e5fdabdc"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fb4134cc6e005b99fa29dbc86f1ea0a298440ab6b07c6b3ee09232a3b48f495"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:309c104ecf67626c033845b860d31594a41343766a46fa58c3309c538a1e22b2"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f90575e9fe3aae2c1e686393a9689c724cd00045275407f71771ae5d690ccf38"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d2e1626be8712333a9f71270366f4a132f476ffbe83b689dd6dc0d114796c74"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b66c87da3c6da8f8e8b648878903ca54589038a0b1e08dde2c86d9cd92d4ac9"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cf1ad338620249f8dd6d4b6a91a69d1f265387df3697ad5dc996305cf6c26fb2"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9915300fe5a0aa663c01363db37e4ae8e7c15996ebe2c6cce995e7033ff6457f"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:703b0f584fcf157ef87816a3c0ff868e8c9f3c370009a8b23b56255885528f10"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1d8e3ca29f643dd121f264a7c89f329f0fcb2e4461833f02de6e39fef80f89da"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7055bbade838d68af73aea13f8c86588e4bcc00c2235b4b6d6edb0dbd174e246"}, + {file = "yarl-1.13.1-cp311-cp311-win32.whl", hash = "sha256:a3442c31c11088e462d44a644a454d48110f0588de830921fd201060ff19612a"}, + {file = "yarl-1.13.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:81bad32c8f8b5897c909bf3468bf601f1b855d12f53b6af0271963ee67fff0d2"}, + {file = "yarl-1.13.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f452cc1436151387d3d50533523291d5f77c6bc7913c116eb985304abdbd9ec9"}, + {file = "yarl-1.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9cec42a20eae8bebf81e9ce23fb0d0c729fc54cf00643eb251ce7c0215ad49fe"}, + {file = "yarl-1.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d959fe96e5c2712c1876d69af0507d98f0b0e8d81bee14cfb3f6737470205419"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8c837ab90c455f3ea8e68bee143472ee87828bff19ba19776e16ff961425b57"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94a993f976cdcb2dc1b855d8b89b792893220db8862d1a619efa7451817c836b"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b2442a415a5f4c55ced0fade7b72123210d579f7d950e0b5527fc598866e62c"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fdbf0418489525231723cdb6c79e7738b3cbacbaed2b750cb033e4ea208f220"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b7f6e699304717fdc265a7e1922561b02a93ceffdaefdc877acaf9b9f3080b8"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bcd5bf4132e6a8d3eb54b8d56885f3d3a38ecd7ecae8426ecf7d9673b270de43"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2a93a4557f7fc74a38ca5a404abb443a242217b91cd0c4840b1ebedaad8919d4"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:22b739f99c7e4787922903f27a892744189482125cc7b95b747f04dd5c83aa9f"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2db874dd1d22d4c2c657807562411ffdfabec38ce4c5ce48b4c654be552759dc"}, + {file = 
"yarl-1.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4feaaa4742517eaceafcbe74595ed335a494c84634d33961214b278126ec1485"}, + {file = "yarl-1.13.1-cp312-cp312-win32.whl", hash = "sha256:bbf9c2a589be7414ac4a534d54e4517d03f1cbb142c0041191b729c2fa23f320"}, + {file = "yarl-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:d07b52c8c450f9366c34aa205754355e933922c79135125541daae6cbf31c799"}, + {file = "yarl-1.13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:95c6737f28069153c399d875317f226bbdea939fd48a6349a3b03da6829fb550"}, + {file = "yarl-1.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cd66152561632ed4b2a9192e7f8e5a1d41e28f58120b4761622e0355f0fe034c"}, + {file = "yarl-1.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6a2acde25be0cf9be23a8f6cbd31734536a264723fca860af3ae5e89d771cd71"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a18595e6a2ee0826bf7dfdee823b6ab55c9b70e8f80f8b77c37e694288f5de1"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a31d21089894942f7d9a8df166b495101b7258ff11ae0abec58e32daf8088813"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45f209fb4bbfe8630e3d2e2052535ca5b53d4ce2d2026bed4d0637b0416830da"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f722f30366474a99745533cc4015b1781ee54b08de73260b2bbe13316079851"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3bf60444269345d712838bb11cc4eadaf51ff1a364ae39ce87a5ca8ad3bb2c8"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:942c80a832a79c3707cca46bd12ab8aa58fddb34b1626d42b05aa8f0bcefc206"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:44b07e1690f010c3c01d353b5790ec73b2f59b4eae5b0000593199766b3f7a5c"}, + {file = 
"yarl-1.13.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:396e59b8de7e4d59ff5507fb4322d2329865b909f29a7ed7ca37e63ade7f835c"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:3bb83a0f12701c0b91112a11148b5217617982e1e466069d0555be9b372f2734"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c92b89bffc660f1274779cb6fbb290ec1f90d6dfe14492523a0667f10170de26"}, + {file = "yarl-1.13.1-cp313-cp313-win32.whl", hash = "sha256:269c201bbc01d2cbba5b86997a1e0f73ba5e2f471cfa6e226bcaa7fd664b598d"}, + {file = "yarl-1.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:1d0828e17fa701b557c6eaed5edbd9098eb62d8838344486248489ff233998b8"}, + {file = "yarl-1.13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8be8cdfe20787e6a5fcbd010f8066227e2bb9058331a4eccddec6c0db2bb85b2"}, + {file = "yarl-1.13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08d7148ff11cb8e886d86dadbfd2e466a76d5dd38c7ea8ebd9b0e07946e76e4b"}, + {file = "yarl-1.13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4afdf84610ca44dcffe8b6c22c68f309aff96be55f5ea2fa31c0c225d6b83e23"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0d12fe78dcf60efa205e9a63f395b5d343e801cf31e5e1dda0d2c1fb618073d"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298c1eecfd3257aa16c0cb0bdffb54411e3e831351cd69e6b0739be16b1bdaa8"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c14c16831b565707149c742d87a6203eb5597f4329278446d5c0ae7a1a43928e"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a9bacedbb99685a75ad033fd4de37129449e69808e50e08034034c0bf063f99"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:658e8449b84b92a4373f99305de042b6bd0d19bf2080c093881e0516557474a5"}, + {file = 
"yarl-1.13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:373f16f38721c680316a6a00ae21cc178e3a8ef43c0227f88356a24c5193abd6"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:45d23c4668d4925688e2ea251b53f36a498e9ea860913ce43b52d9605d3d8177"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f7917697bcaa3bc3e83db91aa3a0e448bf5cde43c84b7fc1ae2427d2417c0224"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5989a38ba1281e43e4663931a53fbf356f78a0325251fd6af09dd03b1d676a09"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:11b3ca8b42a024513adce810385fcabdd682772411d95bbbda3b9ed1a4257644"}, + {file = "yarl-1.13.1-cp38-cp38-win32.whl", hash = "sha256:dcaef817e13eafa547cdfdc5284fe77970b891f731266545aae08d6cce52161e"}, + {file = "yarl-1.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:7addd26594e588503bdef03908fc207206adac5bd90b6d4bc3e3cf33a829f57d"}, + {file = "yarl-1.13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a0ae6637b173d0c40b9c1462e12a7a2000a71a3258fa88756a34c7d38926911c"}, + {file = "yarl-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:576365c9f7469e1f6124d67b001639b77113cfd05e85ce0310f5f318fd02fe85"}, + {file = "yarl-1.13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78f271722423b2d4851cf1f4fa1a1c4833a128d020062721ba35e1a87154a049"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d74f3c335cfe9c21ea78988e67f18eb9822f5d31f88b41aec3a1ec5ecd32da5"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1891d69a6ba16e89473909665cd355d783a8a31bc84720902c5911dbb6373465"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb382fd7b4377363cc9f13ba7c819c3c78ed97c36a82f16f3f92f108c787cbbf"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9c8854b9f80693d20cec797d8e48a848c2fb273eb6f2587b57763ccba3f3bd4b"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbf2c3f04ff50f16404ce70f822cdc59760e5e2d7965905f0e700270feb2bbfc"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fb9f59f3848edf186a76446eb8bcf4c900fe147cb756fbbd730ef43b2e67c6a7"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ef9b85fa1bc91c4db24407e7c4da93a5822a73dd4513d67b454ca7064e8dc6a3"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:098b870c18f1341786f290b4d699504e18f1cd050ed179af8123fd8232513424"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8c723c91c94a3bc8033dd2696a0f53e5d5f8496186013167bddc3fb5d9df46a3"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:44a4c40a6f84e4d5955b63462a0e2a988f8982fba245cf885ce3be7618f6aa7d"}, + {file = "yarl-1.13.1-cp39-cp39-win32.whl", hash = "sha256:84bbcdcf393139f0abc9f642bf03f00cac31010f3034faa03224a9ef0bb74323"}, + {file = "yarl-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:fc2931ac9ce9c61c9968989ec831d3a5e6fcaaff9474e7cfa8de80b7aff5a093"}, + {file = "yarl-1.13.1-py3-none-any.whl", hash = "sha256:6a5185ad722ab4dd52d5fb1f30dcc73282eb1ed494906a92d1a228d3f89607b0"}, + {file = "yarl-1.13.1.tar.gz", hash = "sha256:ec8cfe2295f3e5e44c51f57272afbd69414ae629ec7c6b27f5a410efc78b70a0"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[metadata] +lock-version = "2.0" +python-versions = "^3.11" +content-hash = "3622a3048a7a8743d796a7f46c2eb1c62fd5bbb0df6a6ad4001d46866c09d4df" diff --git a/postman/ielts-be.postman_environment.json b/postman/ielts-be.postman_environment.json index f5f9262..2ad6732 100644 --- a/postman/ielts-be.postman_environment.json +++ b/postman/ielts-be.postman_environment.json @@ -1,15 +1,15 @@ -{ - "id": "e841db7c-7a8e-46ab-b199-6a14a1ec175b", - 
"name": "ielts-be", - "values": [ - { - "key": "jwt_token", - "value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0In0.Emrs2D3BmMP4b3zMjw0fJTPeyMwWEBDbxx2vvaWguO0", - "type": "secret", - "enabled": true - } - ], - "_postman_variable_scope": "environment", - "_postman_exported_at": "2023-06-20T21:57:42.427Z", - "_postman_exported_using": "Postman/10.15.1" +{ + "id": "e841db7c-7a8e-46ab-b199-6a14a1ec175b", + "name": "ielts-be", + "values": [ + { + "key": "jwt_token", + "value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0In0.Emrs2D3BmMP4b3zMjw0fJTPeyMwWEBDbxx2vvaWguO0", + "type": "secret", + "enabled": true + } + ], + "_postman_variable_scope": "environment", + "_postman_exported_at": "2023-06-20T21:57:42.427Z", + "_postman_exported_using": "Postman/10.15.1" } \ No newline at end of file diff --git a/postman/ielts.postman_collection.json b/postman/ielts.postman_collection.json index 78a4003..4cb86eb 100644 --- a/postman/ielts.postman_collection.json +++ b/postman/ielts.postman_collection.json @@ -1,1156 +1,1156 @@ -{ - "info": { - "_postman_id": "9905f8e4-f3b9-45e4-8ede-434c5de11eca", - "name": "ielts", - "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", - "_exporter_id": "29491168" - }, - "item": [ - { - "name": "Writing", - "item": [ - { - "name": "Gen Question Writing Task 1", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/writing_task1_general", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "writing_task1_general" - ] - } - }, - "response": [] - }, - { - "name": "Grade Answer Writing Task 1 With Context", - "request": { - 
"auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"question\": \"The chart below shows the amount of money per week spent on fast foods in Britain. The graph shows the trends in consumption of fast-foods. Write a report for a university lecturer describing the information shown below.\",\r\n \"answer\": \"The chart shows that high income earners consumed considerably more fast foods than the other income groups, spending more than twice as much on hamburgers (43 pence per person per week) than on fish and chips or pizza (both under 20 pence). Average income earners also favoured hamburgers, spending 33 pence per person per week, followed by fish and chips at 24 pence, then pizza at 11 pence. Low income earners appear to spend less than other income groups on fast foods, though fish and chips remains their most popular fast food, followed by hamburgers and then pizza. From the graph we can see that in 1970, fish and chips were twice as popular as burgers, pizza being at that time the least popular fast food. 
The consumption of hamburgers and pizza has risen steadily over the 20 year period to 1990 while the consumption of fish and chips has been in decline over that same period with a slight increase in popularity since 1985.\"\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/writing_task1", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "writing_task1" - ] - } - }, - "response": [] - }, - { - "name": "Gen Question Writing Task 2", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/writing_task2_general", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "writing_task2_general" - ] - } - }, - "response": [] - }, - { - "name": "Grade Answer Writing Task 2", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"question\": \"The average standard of people's health is likely to be lower in the future than it is now. To what extent do you agree or disagree with this statement?\",\r\n \"answer\": \"I completly disagree with the written statment. I believe that most of the people in the world have more information about their health and also about how they can improve their healthy conditions. Nowadays, information about how harmful is to smoke for our bodies can be seen in many packets of cigars. This is a clear example how things can change from our recent past. 
There is a clear trend in the diminishing of smokers and if this continues it will have a positive impact in our health. On the other hand, the alimentation habbits are changing all over the world and this can affect people’s health. However every one can choose what to eat every day. Mostly everybody, from developed societies, know the importance of having a healthy diet. Advances such as the information showed in the menus of fast food restaurants will help people to have a clever choice before they choose what to eat. Another important issue that I would like to mention is how medicine is changing. There are new discovers and treatments almost every week and that is an inequivoque sintom of how things are changing in order to improve the world’s health.\"\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/writing_task2", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "writing_task2" - ] - } - }, - "response": [] - }, - { - "name": "Save Writing", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"exercises\": [\r\n \"You recently attended a friend's wedding and were impressed by their wedding planner. Write a letter to your friend, advising them on the best wedding planner for their upcoming wedding. In your letter, include information about the planner's services, pricing, and any personal experiences you had with them. Provide your friend with recommendations and tips on how to make the most out of their wedding planning experience.\",\r\n \"To what extent do you agree or disagree with the statement that technology has had a positive impact on modern society? 
In your response, critically examine the opposing perspectives on this issue, considering both the benefits and drawbacks of technological advancements. Support your arguments with relevant examples and evidence, and conclude with your own stance on the matter.\"\r\n ]\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/writing", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "writing" - ] - } - }, - "response": [] - } - ] - }, - { - "name": "Speaking", - "item": [ - { - "name": "Gen Question Speaking Part 1", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/speaking_task_1", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "speaking_task_1" - ] - } - }, - "response": [] - }, - { - "name": "Gen Question Speaking Part 2", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/speaking_task_2", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "speaking_task_2" - ] - } - }, - "response": [] - }, - { - "name": "Gen Question Speaking Part 3", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": 
"{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/speaking_task_3", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "speaking_task_3" - ] - } - }, - "response": [] - }, - { - "name": "Grade Answer Speaking Task", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": " {\r\n \"answers\": [\r\n {\r\n \"question\": \"How do you think technology has affected the way people communicate with each other in today's society?\",\r\n \"answer\": \"speaking_recordings/speaking_tech_1.m4a\"\r\n },\r\n {\r\n \"question\": \"In what ways has the use of smartphones and social media platforms changed the dynamics of personal relationships?\",\r\n \"answer\": \"speaking_recordings/speaking_tech_2.m4a\"\r\n },\r\n {\r\n \"question\": \"Some argue that technology has made communication more convenient, while others worry that it has led to a decline in face-to-face interactions. 
What's your perspective on this matter, and how do you think it might impact future generations?\",\r\n \"answer\": \"speaking_recordings/speaking_tech_3.m4a\"\r\n }\r\n ]\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/speaking_task_3", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "speaking_task_3" - ] - } - }, - "response": [] - }, - { - "name": "Save Speaking", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"exercises\": [\r\n {\r\n \"question\": \"What is the most impactful book you have ever read and how has it influenced your perspective on life? Please share specific examples from the book that resonated with you on a personal level.\",\r\n \"topic\": \"Books\"\r\n },\r\n {\r\n \"prompts\": [\r\n \"Where did you go?\",\r\n \"What did you do there?\",\r\n \"Why was it a memorable experience for you?\"\r\n ],\r\n \"question\": \"Tell me about a memorable travel experience you have had.\",\r\n \"topic\": \"Travel\"\r\n },\r\n {\r\n \"questions\": [\r\n \"In what ways has technology improved our lives?\",\r\n \"What are some potential negative effects of technology on society?\",\r\n \"How can we strike a balance between the use of technology and maintaining healthy relationships?\"\r\n ],\r\n \"topic\": \"Technology and Society\"\r\n }\r\n ]\r\n }", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/speaking", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "speaking" - ] - } - }, - "response": [] - } - ] - }, - { - "name": "Reading", - "item": [ - { - "name": "Gen Question Reading Passage 1", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": 
{ - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/reading_passage_1?topic=football manager video game&exercises=multipleChoice&exercises=trueFalse&exercises=fillBlanks&exercises=writeBlanks", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "reading_passage_1" - ], - "query": [ - { - "key": "topic", - "value": "football manager video game" - }, - { - "key": "exercises", - "value": "multipleChoice" - }, - { - "key": "exercises", - "value": "trueFalse" - }, - { - "key": "exercises", - "value": "fillBlanks" - }, - { - "key": "exercises", - "value": "writeBlanks" - } - ] - } - }, - "response": [] - }, - { - "name": "Gen Question Reading Passage 2", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/reading_passage_2?topic=football manager video game&exercises=multipleChoice&exercises=trueFalse&exercises=fillBlanks&exercises=writeBlanks", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "reading_passage_2" - ], - "query": [ - { - "key": "topic", - "value": "football manager video game" - }, - { - "key": "exercises", - "value": "multipleChoice" - }, - { - "key": "exercises", - "value": "trueFalse" - }, - { - "key": "exercises", - "value": "fillBlanks" - }, - { - "key": "exercises", - "value": "writeBlanks" - } - ] - } - }, - "response": [] - }, - { - "name": "Gen Question Reading 
Passage 3", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/reading_passage_3?topic=football manager video game&exercises=multipleChoice&exercises=trueFalse&exercises=fillBlanks&exercises=writeBlanks", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "reading_passage_3" - ], - "query": [ - { - "key": "topic", - "value": "football manager video game" - }, - { - "key": "exercises", - "value": "multipleChoice" - }, - { - "key": "exercises", - "value": "trueFalse" - }, - { - "key": "exercises", - "value": "fillBlanks" - }, - { - "key": "exercises", - "value": "writeBlanks" - } - ] - } - }, - "response": [] - }, - { - "name": "Save Reading", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"parts\": [\r\n {\r\n \"exercises\": [\r\n {\r\n \"id\": \"cbd08cdd-5850-40a8-b6e2-6021c04474ad\",\r\n \"prompt\": \"Do the following statements agree with the information given in the Reading Passage?\",\r\n \"questions\": [\r\n {\r\n \"id\": \"1\",\r\n \"prompt\": \"Technology is constantly evolving and shaping our world.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"prompt\": \"The use of artificial intelligence (AI) has only recently become popular.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"3\",\r\n \"prompt\": \"5G technology offers slower speeds and higher latency than its predecessors.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"4\",\r\n \"prompt\": \"Social media has 
had a minimal impact on our society.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"5\",\r\n \"prompt\": \"Cybersecurity is not a growing concern as technology becomes more integrated into our lives.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"6\",\r\n \"prompt\": \"Technology has not had a significant impact on the education sector.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"7\",\r\n \"prompt\": \"Automation and AI are not causing shifts in the job market.\",\r\n \"solution\": \"false\"\r\n }\r\n ],\r\n \"type\": \"trueFalse\"\r\n },\r\n {\r\n \"allowRepetition\": true,\r\n \"id\": \"b88f3eb5-11b7-4a8e-bb1a-4e96215b34bf\",\r\n \"prompt\": \"Complete the summary below. Click a blank to select the corresponding word(s) for it.\\\\nThere are more words than spaces so you will not use them all. You may use any of the words more than once.\",\r\n \"solutions\": [\r\n {\r\n \"id\": \"8\",\r\n \"solution\": \"smartphones\"\r\n },\r\n {\r\n \"id\": \"9\",\r\n \"solution\": \"artificial intelligence\"\r\n },\r\n {\r\n \"id\": \"10\",\r\n \"solution\": \"5G technology\"\r\n },\r\n {\r\n \"id\": \"11\",\r\n \"solution\": \"virtual reality\"\r\n },\r\n {\r\n \"id\": \"12\",\r\n \"solution\": \"cybersecurity\"\r\n },\r\n {\r\n \"id\": \"13\",\r\n \"solution\": \"telemedicine\"\r\n }\r\n ],\r\n \"text\": \"\\n\\nTechnology has become an integral part of our daily lives, from {{8}} to smart homes. The rise of {{9}} (AI) and the Internet of Things (IoT) are two major trends that are revolutionizing the way we live and work. {{10}} is also gaining popularity, enabling advancements in areas like {{11}} and self-driving cars. Cloud computing, virtual and augmented reality (VR and AR), and blockchain technology are also on the rise, impacting industries such as finance, education, and healthcare. Social media has changed the way we communicate and raised concerns about privacy and mental health. 
With the increase in data breaches and cyber attacks, {{12}} has become a growing concern. {{13}} and online learning have made healthcare and education more accessible and efficient. However, there are concerns about the impact of technology on the job market, leading to discussions about the need for reskilling and upskilling. As technology continues to advance, it is crucial to understand its impact and consequences on our society.\",\r\n \"type\": \"fillBlanks\",\r\n \"words\": [\r\n \"speaking\",\r\n \"5G technology\",\r\n \"telemedicine\",\r\n \"virtual reality\",\r\n \"antechamber\",\r\n \"smartphones\",\r\n \"kitsch\",\r\n \"devilish\",\r\n \"parent\",\r\n \"artificial intelligence\",\r\n \"cybersecurity\"\r\n ]\r\n }\r\n ],\r\n \"text\": {\r\n \"content\": \"In today's society, technology has become an integral part of our daily lives. From smartphones to smart homes, we are constantly surrounded by the latest and most advanced technological devices. As technology continues to evolve and improve, it is important to understand the current trends and how they are shaping our world. One of the biggest technology trends in recent years is the rise of artificial intelligence (AI). AI is the simulation of human intelligence processes by machines, particularly computer systems. This technology has been around for decades, but with the advancement of computing power and big data, AI has become more sophisticated and prevalent. From virtual personal assistants like Siri and Alexa to self-driving cars, AI is revolutionizing the way we live and work. Another trend that has gained widespread popularity is the Internet of Things (IoT). This refers to the interconnection of everyday objects via the internet, allowing them to send and receive data. Smart homes, wearable devices, and even smart cities are all examples of IoT. With IoT, our devices and appliances can communicate with each other, making our lives more convenient and efficient. 
The use of 5G technology is also on the rise. 5G is the fifth generation of wireless technology, offering faster speeds and lower latency than its predecessors. With 5G, we can expect to see advancements in areas like virtual reality, self-driving cars, and remote surgery. It will also enable the development of smart cities and the Internet of Things to reach its full potential. Cloud computing is another trend that has been steadily growing. Cloud computing involves the delivery of computing services over the internet, such as storage, servers, and databases. This allows for easy access to data and applications from anywhere, at any time. Many businesses and individuals are utilizing cloud computing to streamline their operations and increase efficiency. Virtual and augmented reality (VR and AR) are becoming more prevalent in various industries, from gaming and entertainment to healthcare and education. VR immerses the user in a simulated environment, while AR overlays digital information onto the real world. These technologies have the potential to change the way we learn, work, and entertain ourselves. Blockchain technology is also gaining traction, particularly in the financial sector. Blockchain is a decentralized digital ledger that records transactions across a network of computers. It allows for secure and transparent transactions without the need for intermediaries. This technology has the potential to disrupt traditional banking and financial systems. Social media has been a dominant force in the technology world for some time now, and it continues to evolve and shape our society. With the rise of platforms like Facebook, Twitter, and Instagram, we are more connected than ever before. Social media has changed the way we communicate, share information, and even do business. It has also raised concerns about privacy and the impact of social media on mental health. Cybersecurity is a growing concern as technology becomes more integrated into our lives. 
With the increase in data breaches and cyber attacks, the need for strong cybersecurity measures is greater than ever. Companies and individuals are investing in better security protocols to protect their sensitive information. The healthcare industry is also experiencing technological advancements with the introduction of telemedicine. This allows patients to receive medical care remotely, without having to visit a physical doctor's office. Telemedicine has become increasingly popular, especially during the COVID-19 pandemic, as it allows for safe and convenient access to healthcare. In the education sector, technology has brought about significant changes as well. Online learning platforms and digital tools have made education more accessible and efficient. With the rise of e-learning, students can access education from anywhere in the world and at their own pace. As technology continues to advance, concerns about its impact on the job market have also arisen. Automation and AI are replacing human workers in many industries, leading to job loss and shifts in the workforce. This trend has sparked discussions about the need for reskilling and upskilling to adapt to the changing job market. In conclusion, the world is constantly evolving and adapting to the latest technology trends. From AI and IoT to 5G and blockchain, these advancements are shaping the way we live, work, and interact with each other. As society continues to embrace and integrate technology into our daily lives, it is crucial to understand its impact and potential consequences. 
Whether it be in the fields of healthcare, education, or finance, technology is undoubtedly transforming the world as we know it.\",\r\n \"title\": \"Modern Technology Trends\"\r\n }\r\n },\r\n {\r\n \"exercises\": [\r\n {\r\n \"id\": \"f2daa91a-3e92-4c07-aefd-719bcf22bac7\",\r\n \"prompt\": \"Do the following statements agree with the information given in the Reading Passage?\",\r\n \"questions\": [\r\n {\r\n \"id\": \"14\",\r\n \"prompt\": \"Yoga and meditation have been gaining popularity in recent years.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"15\",\r\n \"prompt\": \"Yoga and meditation originated in ancient India.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"16\",\r\n \"prompt\": \"Yoga is a system that combines physical postures, breathing techniques, and meditation.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"17\",\r\n \"prompt\": \"Meditation involves training the mind to achieve a state of inner peace and relaxation.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"18\",\r\n \"prompt\": \"Yoga and meditation can reduce stress and improve mental health.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"19\",\r\n \"prompt\": \"Yoga and meditation can improve physical health.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"20\",\r\n \"prompt\": \"Yoga and meditation are only suitable for people who are physically fit.\",\r\n \"solution\": \"false\"\r\n }\r\n ],\r\n \"type\": \"trueFalse\"\r\n },\r\n {\r\n \"id\": \"b500cb69-843d-4430-a544-924c514ea12a\",\r\n \"maxWords\": 3,\r\n \"prompt\": \"Choose no more than three words and/or a number from the passage for each answer.\",\r\n \"solutions\": [\r\n {\r\n \"id\": \"21\",\r\n \"solution\": [\r\n \"physical, mental, emotional\"\r\n ]\r\n },\r\n {\r\n \"id\": \"22\",\r\n \"solution\": [\r\n \"ancient India\"\r\n ]\r\n },\r\n {\r\n \"id\": \"23\",\r\n \"solution\": [\r\n \"physical postures, breathing techniques\"\r\n ]\r\n },\r\n {\r\n \"id\": 
\"24\",\r\n \"solution\": [\r\n \"reduce stress, improve mindfulness\"\r\n ]\r\n },\r\n {\r\n \"id\": \"25\",\r\n \"solution\": [\r\n \"improve, promote relaxation\"\r\n ]\r\n },\r\n {\r\n \"id\": \"26\",\r\n \"solution\": [\r\n \"anyone, all ages and backgrounds\"\r\n ]\r\n }\r\n ],\r\n \"text\": \"What are the three main benefits of yoga and meditation?{{21}}\\\\nWhere did yoga originate?{{22}}\\\\nWhat are the two components of yoga?{{23}}\\\\nHow do yoga and meditation improve mental health?{{24}}\\\\nWhat is the impact of yoga and meditation on sleep quality?{{25}}\\\\nWho can practice yoga and meditation?{{26}}\\\\n\",\r\n \"type\": \"writeBlanks\"\r\n }\r\n ],\r\n \"text\": {\r\n \"content\": \"Yoga and meditation have been gaining popularity in recent years as more and more people recognize the physical, mental, and emotional benefits of these ancient practices. Originating in ancient India, yoga is a holistic system that combines physical postures, breathing techniques, and meditation to promote overall well-being. Similarly, meditation is a mental practice that involves training the mind to achieve a state of inner peace and relaxation. One of the main benefits of yoga and meditation is their ability to reduce stress and improve mental health. In today's fast-paced world, stress has become a common problem for many people, leading to various physical and mental health issues. However, studies have shown that practicing yoga and meditation can significantly reduce stress levels and improve overall mental health. This is because these practices focus on deep breathing and mindfulness, which can help individuals to calm their minds and relax their bodies. As a result, many people who regularly practice yoga and meditation report feeling more peaceful, centered, and less stressed in their daily lives. Furthermore, yoga and meditation have been shown to have a positive impact on physical health. 
The physical postures and movements in yoga help to improve flexibility, strength, and balance. These postures also work to stretch and strengthen different muscles in the body, which can alleviate tension and prevent injuries. Additionally, the controlled breathing in yoga helps to increase oxygen flow throughout the body, which can improve cardiovascular health. As for meditation, studies have shown that it can lower blood pressure and reduce the risk of heart disease. These physical benefits make yoga and meditation an excellent form of exercise for people of all ages and fitness levels. Apart from the physical and mental benefits, yoga and meditation also have a positive impact on emotional well-being. The practice of mindfulness in these practices helps individuals to become more aware of their thoughts and emotions, allowing them to better manage and process them. This can result in improved emotional regulation and a greater sense of self-awareness. As a result, individuals who practice yoga and meditation often report feeling more positive, content, and emotionally stable. Another significant benefit of yoga and meditation is their ability to improve overall concentration and focus. In today's digital age, our minds are constantly bombarded with information and distractions, making it challenging to stay focused on tasks. However, regular practice of yoga and meditation can improve concentration and enhance cognitive function. This is because these practices require individuals to focus their minds on their breath, movements, or a specific mantra, helping to train the brain to stay focused for longer periods. Moreover, yoga and meditation have been shown to have a positive impact on sleep quality. Many people struggle with insomnia or other sleep-related issues, which can have a significant impact on their overall health and well-being. However, studies have shown that yoga and meditation can improve sleep quality and help individuals fall asleep faster. 
This is because these practices promote relaxation and reduce stress, which are common causes of sleep issues. As a result, individuals who practice yoga and meditation often report feeling more rested and rejuvenated after a good night's sleep. In addition to the physical, mental, emotional, and cognitive benefits, yoga and meditation also have a spiritual component. These practices are deeply rooted in ancient Indian spirituality and have been used for centuries to connect individuals with their inner selves and the universe. While the spiritual aspect may not be for everyone, many people find that it adds a deeper level of meaning and purpose to their practice. Furthermore, yoga and meditation are accessible to people of all ages and backgrounds. Whether you are young or old, fit or not, religious or not, yoga and meditation can be practiced by anyone. There are many different styles and forms of yoga and meditation, allowing individuals to choose the practice that best suits their needs and preferences. This inclusivity is what makes yoga and meditation such powerful and universal practices. In conclusion, the benefits of yoga and meditation are numerous and far-reaching. From reducing stress and improving mental health to promoting physical strength and emotional well-being, these ancient practices offer a holistic approach to overall health and wellness. Whether you are looking to improve your physical fitness, manage stress, or connect with your inner self, yoga and meditation are powerful tools that can help you achieve these goals. So why not give it a try and experience the transformative power of yoga and meditation for yourself?\",\r\n \"title\": \"The Benefits of Yoga and Meditation\"\r\n }\r\n },\r\n {\r\n \"exercises\": [\r\n {\r\n \"allowRepetition\": true,\r\n \"id\": \"1035c153-d38a-4f27-a14e-0ce63184ff82\",\r\n \"prompt\": \"Complete the summary below. 
Click a blank to select the corresponding word(s) for it.\\\\nThere are more words than spaces so you will not use them all. You may use any of the words more than once.\",\r\n \"solutions\": [\r\n {\r\n \"id\": \"27\",\r\n \"solution\": \"Cultural diversity\"\r\n },\r\n {\r\n \"id\": \"28\",\r\n \"solution\": \"Variety\"\r\n },\r\n {\r\n \"id\": \"29\",\r\n \"solution\": \"Multicultural\"\r\n },\r\n {\r\n \"id\": \"30\",\r\n \"solution\": \"Tolerance\"\r\n },\r\n {\r\n \"id\": \"31\",\r\n \"solution\": \"Unity\"\r\n },\r\n {\r\n \"id\": \"32\",\r\n \"solution\": \"Challenges\"\r\n },\r\n {\r\n \"id\": \"33\",\r\n \"solution\": \"Celebrated\"\r\n }\r\n ],\r\n \"text\": \"\\n\\n{{27}} refers to the {{28}} of cultures, traditions, beliefs, and lifestyles that exist within a society. In today's interconnected world, the movement of people, goods, and ideas has led to a more diverse and {{29}} society. The exchange of ideas and knowledge, {{30}} and understanding, and promoting peace and {{31}} are some of the benefits of cultural diversity. However, it also poses {{32}} such as potential clashes and the marginalization of certain groups. To address these challenges, it is important for societies to promote cultural competency and sensitivity, as well as for individuals to embrace diversity and participate in cultural events. 
Overall, cultural diversity is a crucial aspect of our global society that needs to be preserved and {{33}}.\",\r\n \"type\": \"fillBlanks\",\r\n \"words\": [\r\n \"Tolerance\",\r\n \"Cultural diversity\",\r\n \"penny\",\r\n \"Multicultural\",\r\n \"shrill\",\r\n \"Celebrated\",\r\n \"Variety\",\r\n \"query\",\r\n \"Challenges\",\r\n \"wont\",\r\n \"Unity\",\r\n \"chemical\"\r\n ]\r\n },\r\n {\r\n \"questions\": [\r\n {\r\n \"id\": \"34\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"The variety of cultures, traditions, beliefs, and lifestyles within a society\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"The number of countries in the world\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"The different types of technology used in different cultures\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"The number of languages spoken in a society\"\r\n }\r\n ],\r\n \"prompt\": \"What is the main definition of cultural diversity?\",\r\n \"solution\": \"A\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"35\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"By making it easier for people to travel\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"By increasing the number of countries in the world\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"By creating more jobs for people from different cultures\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"By promoting a single global culture\"\r\n }\r\n ],\r\n \"prompt\": \"How has technology contributed to an increase in cultural diversity?\",\r\n \"solution\": \"A\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"36\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"Increased economic opportunities\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"Higher levels of education\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"Improved transportation systems\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"The exchange of ideas and knowledge\"\r\n }\r\n ],\r\n \"prompt\": 
\"What is one of the key benefits of cultural diversity?\",\r\n \"solution\": \"D\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"37\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"By forcing people to conform to a single culture\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"By exposing people to different perspectives and experiences\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"By creating a homogenous society\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"By limiting the movement of people between countries\"\r\n }\r\n ],\r\n \"prompt\": \"How does cultural diversity promote tolerance and understanding?\",\r\n \"solution\": \"B\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"38\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"Increased discrimination\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"A decline in technological advancements\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"A decrease in the number of countries\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"A lack of cultural exchange\"\r\n }\r\n ],\r\n \"prompt\": \"What is one challenge posed by cultural diversity?\",\r\n \"solution\": \"A\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"39\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"By promoting a single global culture\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"By creating barriers between different groups\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"By promoting cultural competency and sensitivity\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"By limiting the number of countries in the world\"\r\n }\r\n ],\r\n \"prompt\": \"What is one way that societies can address the challenges of cultural diversity?\",\r\n \"solution\": \"C\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"40\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"To ignore cultural differences\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"To actively 
participate in cultural events and activities\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"To only embrace their own culture\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"To avoid learning about other cultures\"\r\n }\r\n ],\r\n \"prompt\": \"What is the responsibility of individuals in promoting and preserving cultural diversity?\",\r\n \"solution\": \"B\",\r\n \"variant\": \"text\"\r\n }\r\n ]\r\n }\r\n ],\r\n \"text\": {\r\n \"content\": \"Cultural diversity is a term that is often used in today's world, but what does it really mean? Simply put, cultural diversity refers to the variety of cultures, traditions, beliefs, and lifestyles that exist within a society. It is a reflection of the different backgrounds, experiences, and identities of individuals and groups. In this IELTS Reading Passage, we will explore the concept of cultural diversity and its significance in our global society. The world we live in today is more interconnected and interdependent than ever before. With the advancements in technology, transportation, and communication, people from different parts of the world can easily connect and interact with one another. This has led to an increase in the movement of people, goods, and ideas, resulting in a more diverse and multicultural society. In fact, it is estimated that over 190 countries exist in the world, each with its unique culture and traditions. One of the key benefits of cultural diversity is the exchange of ideas and knowledge. When people from different backgrounds come together, they bring with them their unique perspectives and experiences. This leads to a rich exchange of ideas, which can result in the development of new innovations and solutions to various problems. For example, the fusion of different cuisines has led to the creation of new and delicious dishes, and the blending of different musical styles has given birth to new genres of music. 
Moreover, cultural diversity also promotes tolerance and understanding among individuals and groups. When people are exposed to different cultures, they learn to appreciate and respect the differences that exist. This, in turn, leads to a more inclusive and harmonious society, where people from diverse backgrounds can coexist peacefully. In a world that is plagued by conflicts and discrimination, cultural diversity plays a crucial role in promoting peace and unity. However, despite its numerous benefits, cultural diversity also poses some challenges. One of the main challenges is the potential for cultural clashes. As individuals from different cultures interact, conflicts can arise due to differences in values, beliefs, and customs. This can lead to misunderstandings and even discrimination. For instance, a person from a collectivist culture may struggle to understand the individualistic values of someone from a Western culture. Furthermore, cultural diversity can also lead to the marginalization of certain groups within a society. In some cases, minority cultures may face discrimination and exclusion, which can result in social and economic disadvantages. This is often seen in the case of migrant communities, where they may struggle to fully integrate into the host society due to cultural barriers. To address these challenges, it is important for societies to promote cultural competency and sensitivity. This means educating individuals about different cultures and encouraging them to embrace diversity. It also involves creating policies and programs that promote inclusivity and equality for all groups within a society. For example, many countries have implemented diversity training programs in schools and workplaces to promote understanding and respect for different cultures. In addition, governments play a crucial role in promoting and preserving cultural diversity. 
They can do this by protecting the cultural heritage of different groups and promoting cultural events and festivals. This not only helps in preserving the unique identities of different cultures but also promotes cultural exchange and understanding. On an individual level, there are also steps that we can take to embrace cultural diversity. This includes being open-minded and respectful towards different cultures, being willing to learn about other cultures, and actively participating in cultural events and activities. By doing so, we can break down barriers and promote a more inclusive and harmonious society. In conclusion, cultural diversity is a key aspect of our global society. It brings numerous benefits such as the exchange of ideas and promoting tolerance, but it also poses challenges that need to be addressed. As individuals and societies, it is our responsibility to promote and preserve cultural diversity and create a world where everyone is embraced and valued for their unique identities and backgrounds. 
By doing so, we can create a more peaceful and prosperous world for all.\",\r\n \"title\": \"Cultural Diversity: A Key Aspect of Our Global Society\"\r\n }\r\n }\r\n ]\r\n }", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/reading", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "reading" - ] - } - }, - "response": [] - } - ] - }, - { - "name": "Listening", - "item": [ - { - "name": "Gen Question Listening Section 1", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/listening_section_1?topic=book hotel room for convention&exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksForm&exercises=writeBlanksFill", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "listening_section_1" - ], - "query": [ - { - "key": "topic", - "value": "book hotel room for convention" - }, - { - "key": "exercises", - "value": "multipleChoice" - }, - { - "key": "exercises", - "value": "writeBlanksQuestions" - }, - { - "key": "exercises", - "value": "writeBlanksForm" - }, - { - "key": "exercises", - "value": "writeBlanksFill" - } - ] - } - }, - "response": [] - }, - { - "name": "Gen Question Listening Section 2", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "\r", - "pm.test(\"Response status code is 200\", function () {\r", - " pm.expect(pm.response.code).to.equal(200);\r", - "});\r", - "\r", - "\r", - "pm.test(\"Validate the 'exercises' array is present and contains the expected number of elements\", function () {\r", - " const responseData = 
pm.response.json();\r", - " \r", - " pm.expect(responseData).to.be.an('object');\r", - " pm.expect(responseData.exercises).to.exist.and.to.be.an('array');\r", - " pm.expect(responseData.exercises).to.have.lengthOf(3, \"Expected 'exercises' array to have 3 elements\");\r", - "});\r", - "\r", - "" - ], - "type": "text/javascript" - } - } - ], - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/listening_section_2?topic=football manager video game&exercises=writeBlanksForm&exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksFill", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "listening_section_2" - ], - "query": [ - { - "key": "topic", - "value": "football manager video game" - }, - { - "key": "exercises", - "value": "writeBlanksForm" - }, - { - "key": "exercises", - "value": "multipleChoice" - }, - { - "key": "exercises", - "value": "writeBlanksQuestions" - }, - { - "key": "exercises", - "value": "writeBlanksFill" - } - ] - }, - "description": "\nThis API endpoint allows you to retrieve a listening section for a specific exercise. The endpoint uses an HTTP GET request to the URL `http://127.0.0.1:5000/listening_section_2`. \n\nTo specify the exercises you want to retrieve, you can include the `exercises` query parameter multiple times with different values. In this example, the query parameters are set to `multipleChoice`, `writeBlanksQuestions`, and `writeBlanksFill`.\n\nThe response from the last execution of this request had a status code of 200, indicating a successful response. 
The response body contained a JSON object with the following structure:\n\n```json\n{\n \"exercises\": [\n {\n \"questions\": [\n {\n \"id\": \"\",\n \"options\": [\n {\n \"id\": \"\",\n \"text\": \"\"\n }\n ],\n \"prompt\": \"\",\n \"solution\": \"\",\n \"variant\": \"\"\n }\n ]\n }\n ],\n \"text\": \"\"\n}\n```\n\nThe `exercises` field in the response contains an array of exercise objects. Each exercise object has a `questions` field, which is an array of question objects. Each question object has an `id`, `options`, `prompt`, `solution`, and `variant` field.\n\nThe `text` field in the response is an empty string.\n\nPlease note that the specific values for `id`, `text`, `prompt`, `solution`, and `variant` were not provided in the response and have been omitted for privacy reasons.\n\nMake sure to include the desired exercises in the `exercises` query parameter to retrieve the listening section for those exercises.\n" - }, - "response": [] - }, - { - "name": "Gen Question Listening Section 3", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/listening_section_3?topic=discuss new assignment with the teacher &exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksFill&exercises=writeBlanksForm", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "listening_section_3" - ], - "query": [ - { - "key": "topic", - "value": "discuss new assignment with the teacher " - }, - { - "key": "exercises", - "value": "multipleChoice" - }, - { - "key": "exercises", - "value": "writeBlanksQuestions" - }, - { - "key": "exercises", - "value": "writeBlanksFill" - }, - { - "key": 
"exercises", - "value": "writeBlanksForm" - } - ] - } - }, - "response": [] - }, - { - "name": "Gen Question Listening Section 4", - "protocolProfileBehavior": { - "disableBodyPruning": true - }, - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "body": { - "mode": "raw", - "raw": "", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/listening_section_4?topic=football manager video game&exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksFill&exercises=writeBlanksForm", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "listening_section_4" - ], - "query": [ - { - "key": "topic", - "value": "football manager video game" - }, - { - "key": "exercises", - "value": "multipleChoice" - }, - { - "key": "exercises", - "value": "writeBlanksQuestions" - }, - { - "key": "exercises", - "value": "writeBlanksFill" - }, - { - "key": "exercises", - "value": "writeBlanksForm" - } - ] - } - }, - "response": [] - }, - { - "name": "Save Listening Section", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"parts\": [\r\n {\r\n \"text\": {\r\n \"conversation\": {\r\n \"conversation\": \"dummy\"\r\n }\r\n },\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n },\r\n {\r\n \"text\": \"monologue_listening_2\",\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n },\r\n {\r\n \"text\": {\r\n \"conversation\": {\r\n \"conversation\": \"dummy\"\r\n }\r\n },\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n },\r\n {\r\n \"text\": \"monologue_listening_4\",\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n }\r\n ]\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - 
"url": { - "raw": "http://127.0.0.1:5000/listening", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "listening" - ] - } - }, - "response": [] - } - ] - }, - { - "name": "Level", - "item": [ - { - "name": "Gen Level", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "http://127.0.0.1:5000/level", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "level" - ] - } - }, - "response": [] - } - ] - }, - { - "name": "Fetch Answer Tips", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"question\": \"When did Kendrick Lamar sign for TDE?\",\n \"answer\": \"Hello GPT.\",\n\t\t\"correct_answer\": \"2005\",\n \"context\": \"Kendrick Lamar Duckworth (born June 17, 1987) is an American rapper and songwriter. Known for his progressive musical styles and socially conscious songwriting, he is often considered one of the most influential hip hop artists of his generation. Born and raised in Compton, California, Lamar began his career as a teenager performing under the stage name K.Dot. 
He quickly garnered local attention which led to him signing a recording contract with Top Dawg Entertainment (TDE) in 2005.\"\n}\n", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/fetch_tips", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "fetch_tips" - ] - } - }, - "response": [] - }, - { - "name": "Get Grading Summary", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{jwt_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json", - "type": "text" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"question\": \"When did Kendrick Lamar sign for TDE?\",\n \"answer\": \"Hello GPT.\",\n\t\t\"correct_answer\": \"2005\",\n \"context\": \"Kendrick Lamar Duckworth (born June 17, 1987) is an American rapper and songwriter. Known for his progressive musical styles and socially conscious songwriting, he is often considered one of the most influential hip hop artists of his generation. Born and raised in Compton, California, Lamar began his career as a teenager performing under the stage name K.Dot. 
He quickly garnered local attention which led to him signing a recording contract with Top Dawg Entertainment (TDE) in 2005.\"\n}\n", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "http://127.0.0.1:5000/fetch_tips", - "protocol": "http", - "host": [ - "127", - "0", - "0", - "1" - ], - "port": "5000", - "path": [ - "fetch_tips" - ] - } - }, - "response": [] - } - ] +{ + "info": { + "_postman_id": "9905f8e4-f3b9-45e4-8ede-434c5de11eca", + "name": "ielts", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "29491168" + }, + "item": [ + { + "name": "Writing", + "item": [ + { + "name": "Gen Question Writing Task 1", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/writing_task1_general", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "writing_task1_general" + ] + } + }, + "response": [] + }, + { + "name": "Grade Answer Writing Task 1 With Context", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"question\": \"The chart below shows the amount of money per week spent on fast foods in Britain. The graph shows the trends in consumption of fast-foods. 
Write a report for a university lecturer describing the information shown below.\",\r\n \"answer\": \"The chart shows that high income earners consumed considerably more fast foods than the other income groups, spending more than twice as much on hamburgers (43 pence per person per week) than on fish and chips or pizza (both under 20 pence). Average income earners also favoured hamburgers, spending 33 pence per person per week, followed by fish and chips at 24 pence, then pizza at 11 pence. Low income earners appear to spend less than other income groups on fast foods, though fish and chips remains their most popular fast food, followed by hamburgers and then pizza. From the graph we can see that in 1970, fish and chips were twice as popular as burgers, pizza being at that time the least popular fast food. The consumption of hamburgers and pizza has risen steadily over the 20 year period to 1990 while the consumption of fish and chips has been in decline over that same period with a slight increase in popularity since 1985.\"\r\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/writing_task1", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "writing_task1" + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Writing Task 2", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/writing_task2_general", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "writing_task2_general" + ] + } + }, + "response": [] + }, + { + "name": "Grade Answer Writing Task 2", + "request": { + 
"auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"question\": \"The average standard of people's health is likely to be lower in the future than it is now. To what extent do you agree or disagree with this statement?\",\r\n \"answer\": \"I completly disagree with the written statment. I believe that most of the people in the world have more information about their health and also about how they can improve their healthy conditions. Nowadays, information about how harmful is to smoke for our bodies can be seen in many packets of cigars. This is a clear example how things can change from our recent past. There is a clear trend in the diminishing of smokers and if this continues it will have a positive impact in our health. On the other hand, the alimentation habbits are changing all over the world and this can affect people’s health. However every one can choose what to eat every day. Mostly everybody, from developed societies, know the importance of having a healthy diet. Advances such as the information showed in the menus of fast food restaurants will help people to have a clever choice before they choose what to eat. Another important issue that I would like to mention is how medicine is changing. 
There are new discovers and treatments almost every week and that is an inequivoque sintom of how things are changing in order to improve the world’s health.\"\r\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/writing_task2", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "writing_task2" + ] + } + }, + "response": [] + }, + { + "name": "Save Writing", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"exercises\": [\r\n \"You recently attended a friend's wedding and were impressed by their wedding planner. Write a letter to your friend, advising them on the best wedding planner for their upcoming wedding. In your letter, include information about the planner's services, pricing, and any personal experiences you had with them. Provide your friend with recommendations and tips on how to make the most out of their wedding planning experience.\",\r\n \"To what extent do you agree or disagree with the statement that technology has had a positive impact on modern society? In your response, critically examine the opposing perspectives on this issue, considering both the benefits and drawbacks of technological advancements. 
Support your arguments with relevant examples and evidence, and conclude with your own stance on the matter.\"\r\n ]\r\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/writing", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "writing" + ] + } + }, + "response": [] + } + ] + }, + { + "name": "Speaking", + "item": [ + { + "name": "Gen Question Speaking Part 1", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/speaking_task_1", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "speaking_task_1" + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Speaking Part 2", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/speaking_task_2", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "speaking_task_2" + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Speaking Part 3", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + 
"language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/speaking_task_3", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "speaking_task_3" + ] + } + }, + "response": [] + }, + { + "name": "Grade Answer Speaking Task", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": " {\r\n \"answers\": [\r\n {\r\n \"question\": \"How do you think technology has affected the way people communicate with each other in today's society?\",\r\n \"answer\": \"speaking_recordings/speaking_tech_1.m4a\"\r\n },\r\n {\r\n \"question\": \"In what ways has the use of smartphones and social media platforms changed the dynamics of personal relationships?\",\r\n \"answer\": \"speaking_recordings/speaking_tech_2.m4a\"\r\n },\r\n {\r\n \"question\": \"Some argue that technology has made communication more convenient, while others worry that it has led to a decline in face-to-face interactions. What's your perspective on this matter, and how do you think it might impact future generations?\",\r\n \"answer\": \"speaking_recordings/speaking_tech_3.m4a\"\r\n }\r\n ]\r\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/speaking_task_3", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "speaking_task_3" + ] + } + }, + "response": [] + }, + { + "name": "Save Speaking", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"exercises\": [\r\n {\r\n \"question\": \"What is the most impactful book you have ever read and how has it influenced your perspective on life? 
Please share specific examples from the book that resonated with you on a personal level.\",\r\n \"topic\": \"Books\"\r\n },\r\n {\r\n \"prompts\": [\r\n \"Where did you go?\",\r\n \"What did you do there?\",\r\n \"Why was it a memorable experience for you?\"\r\n ],\r\n \"question\": \"Tell me about a memorable travel experience you have had.\",\r\n \"topic\": \"Travel\"\r\n },\r\n {\r\n \"questions\": [\r\n \"In what ways has technology improved our lives?\",\r\n \"What are some potential negative effects of technology on society?\",\r\n \"How can we strike a balance between the use of technology and maintaining healthy relationships?\"\r\n ],\r\n \"topic\": \"Technology and Society\"\r\n }\r\n ]\r\n }", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/speaking", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "speaking" + ] + } + }, + "response": [] + } + ] + }, + { + "name": "Reading", + "item": [ + { + "name": "Gen Question Reading Passage 1", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/reading_passage_1?topic=football manager video game&exercises=multipleChoice&exercises=trueFalse&exercises=fillBlanks&exercises=writeBlanks", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "reading_passage_1" + ], + "query": [ + { + "key": "topic", + "value": "football manager video game" + }, + { + "key": "exercises", + "value": "multipleChoice" + }, + { + "key": "exercises", + "value": "trueFalse" + }, + { + "key": "exercises", + "value": "fillBlanks" + }, + { + "key": "exercises", + 
"value": "writeBlanks" + } + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Reading Passage 2", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/reading_passage_2?topic=football manager video game&exercises=multipleChoice&exercises=trueFalse&exercises=fillBlanks&exercises=writeBlanks", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "reading_passage_2" + ], + "query": [ + { + "key": "topic", + "value": "football manager video game" + }, + { + "key": "exercises", + "value": "multipleChoice" + }, + { + "key": "exercises", + "value": "trueFalse" + }, + { + "key": "exercises", + "value": "fillBlanks" + }, + { + "key": "exercises", + "value": "writeBlanks" + } + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Reading Passage 3", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/reading_passage_3?topic=football manager video game&exercises=multipleChoice&exercises=trueFalse&exercises=fillBlanks&exercises=writeBlanks", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "reading_passage_3" + ], + "query": [ + { + "key": "topic", + "value": "football manager video game" + }, + { + "key": "exercises", + "value": "multipleChoice" + }, + { + "key": "exercises", + "value": "trueFalse" + 
}, + { + "key": "exercises", + "value": "fillBlanks" + }, + { + "key": "exercises", + "value": "writeBlanks" + } + ] + } + }, + "response": [] + }, + { + "name": "Save Reading", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"parts\": [\r\n {\r\n \"exercises\": [\r\n {\r\n \"id\": \"cbd08cdd-5850-40a8-b6e2-6021c04474ad\",\r\n \"prompt\": \"Do the following statements agree with the information given in the Reading Passage?\",\r\n \"questions\": [\r\n {\r\n \"id\": \"1\",\r\n \"prompt\": \"Technology is constantly evolving and shaping our world.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"prompt\": \"The use of artificial intelligence (AI) has only recently become popular.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"3\",\r\n \"prompt\": \"5G technology offers slower speeds and higher latency than its predecessors.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"4\",\r\n \"prompt\": \"Social media has had a minimal impact on our society.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"5\",\r\n \"prompt\": \"Cybersecurity is not a growing concern as technology becomes more integrated into our lives.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"6\",\r\n \"prompt\": \"Technology has not had a significant impact on the education sector.\",\r\n \"solution\": \"false\"\r\n },\r\n {\r\n \"id\": \"7\",\r\n \"prompt\": \"Automation and AI are not causing shifts in the job market.\",\r\n \"solution\": \"false\"\r\n }\r\n ],\r\n \"type\": \"trueFalse\"\r\n },\r\n {\r\n \"allowRepetition\": true,\r\n \"id\": \"b88f3eb5-11b7-4a8e-bb1a-4e96215b34bf\",\r\n \"prompt\": \"Complete the summary below. Click a blank to select the corresponding word(s) for it.\\\\nThere are more words than spaces so you will not use them all. 
You may use any of the words more than once.\",\r\n \"solutions\": [\r\n {\r\n \"id\": \"8\",\r\n \"solution\": \"smartphones\"\r\n },\r\n {\r\n \"id\": \"9\",\r\n \"solution\": \"artificial intelligence\"\r\n },\r\n {\r\n \"id\": \"10\",\r\n \"solution\": \"5G technology\"\r\n },\r\n {\r\n \"id\": \"11\",\r\n \"solution\": \"virtual reality\"\r\n },\r\n {\r\n \"id\": \"12\",\r\n \"solution\": \"cybersecurity\"\r\n },\r\n {\r\n \"id\": \"13\",\r\n \"solution\": \"telemedicine\"\r\n }\r\n ],\r\n \"text\": \"\\n\\nTechnology has become an integral part of our daily lives, from {{8}} to smart homes. The rise of {{9}} (AI) and the Internet of Things (IoT) are two major trends that are revolutionizing the way we live and work. {{10}} is also gaining popularity, enabling advancements in areas like {{11}} and self-driving cars. Cloud computing, virtual and augmented reality (VR and AR), and blockchain technology are also on the rise, impacting industries such as finance, education, and healthcare. Social media has changed the way we communicate and raised concerns about privacy and mental health. With the increase in data breaches and cyber attacks, {{12}} has become a growing concern. {{13}} and online learning have made healthcare and education more accessible and efficient. However, there are concerns about the impact of technology on the job market, leading to discussions about the need for reskilling and upskilling. As technology continues to advance, it is crucial to understand its impact and consequences on our society.\",\r\n \"type\": \"fillBlanks\",\r\n \"words\": [\r\n \"speaking\",\r\n \"5G technology\",\r\n \"telemedicine\",\r\n \"virtual reality\",\r\n \"antechamber\",\r\n \"smartphones\",\r\n \"kitsch\",\r\n \"devilish\",\r\n \"parent\",\r\n \"artificial intelligence\",\r\n \"cybersecurity\"\r\n ]\r\n }\r\n ],\r\n \"text\": {\r\n \"content\": \"In today's society, technology has become an integral part of our daily lives. 
From smartphones to smart homes, we are constantly surrounded by the latest and most advanced technological devices. As technology continues to evolve and improve, it is important to understand the current trends and how they are shaping our world. One of the biggest technology trends in recent years is the rise of artificial intelligence (AI). AI is the simulation of human intelligence processes by machines, particularly computer systems. This technology has been around for decades, but with the advancement of computing power and big data, AI has become more sophisticated and prevalent. From virtual personal assistants like Siri and Alexa to self-driving cars, AI is revolutionizing the way we live and work. Another trend that has gained widespread popularity is the Internet of Things (IoT). This refers to the interconnection of everyday objects via the internet, allowing them to send and receive data. Smart homes, wearable devices, and even smart cities are all examples of IoT. With IoT, our devices and appliances can communicate with each other, making our lives more convenient and efficient. The use of 5G technology is also on the rise. 5G is the fifth generation of wireless technology, offering faster speeds and lower latency than its predecessors. With 5G, we can expect to see advancements in areas like virtual reality, self-driving cars, and remote surgery. It will also enable the development of smart cities and the Internet of Things to reach its full potential. Cloud computing is another trend that has been steadily growing. Cloud computing involves the delivery of computing services over the internet, such as storage, servers, and databases. This allows for easy access to data and applications from anywhere, at any time. Many businesses and individuals are utilizing cloud computing to streamline their operations and increase efficiency. 
Virtual and augmented reality (VR and AR) are becoming more prevalent in various industries, from gaming and entertainment to healthcare and education. VR immerses the user in a simulated environment, while AR overlays digital information onto the real world. These technologies have the potential to change the way we learn, work, and entertain ourselves. Blockchain technology is also gaining traction, particularly in the financial sector. Blockchain is a decentralized digital ledger that records transactions across a network of computers. It allows for secure and transparent transactions without the need for intermediaries. This technology has the potential to disrupt traditional banking and financial systems. Social media has been a dominant force in the technology world for some time now, and it continues to evolve and shape our society. With the rise of platforms like Facebook, Twitter, and Instagram, we are more connected than ever before. Social media has changed the way we communicate, share information, and even do business. It has also raised concerns about privacy and the impact of social media on mental health. Cybersecurity is a growing concern as technology becomes more integrated into our lives. With the increase in data breaches and cyber attacks, the need for strong cybersecurity measures is greater than ever. Companies and individuals are investing in better security protocols to protect their sensitive information. The healthcare industry is also experiencing technological advancements with the introduction of telemedicine. This allows patients to receive medical care remotely, without having to visit a physical doctor's office. Telemedicine has become increasingly popular, especially during the COVID-19 pandemic, as it allows for safe and convenient access to healthcare. In the education sector, technology has brought about significant changes as well. Online learning platforms and digital tools have made education more accessible and efficient. 
With the rise of e-learning, students can access education from anywhere in the world and at their own pace. As technology continues to advance, concerns about its impact on the job market have also arisen. Automation and AI are replacing human workers in many industries, leading to job loss and shifts in the workforce. This trend has sparked discussions about the need for reskilling and upskilling to adapt to the changing job market. In conclusion, the world is constantly evolving and adapting to the latest technology trends. From AI and IoT to 5G and blockchain, these advancements are shaping the way we live, work, and interact with each other. As society continues to embrace and integrate technology into our daily lives, it is crucial to understand its impact and potential consequences. Whether it be in the fields of healthcare, education, or finance, technology is undoubtedly transforming the world as we know it.\",\r\n \"title\": \"Modern Technology Trends\"\r\n }\r\n },\r\n {\r\n \"exercises\": [\r\n {\r\n \"id\": \"f2daa91a-3e92-4c07-aefd-719bcf22bac7\",\r\n \"prompt\": \"Do the following statements agree with the information given in the Reading Passage?\",\r\n \"questions\": [\r\n {\r\n \"id\": \"14\",\r\n \"prompt\": \"Yoga and meditation have been gaining popularity in recent years.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"15\",\r\n \"prompt\": \"Yoga and meditation originated in ancient India.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"16\",\r\n \"prompt\": \"Yoga is a system that combines physical postures, breathing techniques, and meditation.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"17\",\r\n \"prompt\": \"Meditation involves training the mind to achieve a state of inner peace and relaxation.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"18\",\r\n \"prompt\": \"Yoga and meditation can reduce stress and improve mental health.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"19\",\r\n 
\"prompt\": \"Yoga and meditation can improve physical health.\",\r\n \"solution\": \"true\"\r\n },\r\n {\r\n \"id\": \"20\",\r\n \"prompt\": \"Yoga and meditation are only suitable for people who are physically fit.\",\r\n \"solution\": \"false\"\r\n }\r\n ],\r\n \"type\": \"trueFalse\"\r\n },\r\n {\r\n \"id\": \"b500cb69-843d-4430-a544-924c514ea12a\",\r\n \"maxWords\": 3,\r\n \"prompt\": \"Choose no more than three words and/or a number from the passage for each answer.\",\r\n \"solutions\": [\r\n {\r\n \"id\": \"21\",\r\n \"solution\": [\r\n \"physical, mental, emotional\"\r\n ]\r\n },\r\n {\r\n \"id\": \"22\",\r\n \"solution\": [\r\n \"ancient India\"\r\n ]\r\n },\r\n {\r\n \"id\": \"23\",\r\n \"solution\": [\r\n \"physical postures, breathing techniques\"\r\n ]\r\n },\r\n {\r\n \"id\": \"24\",\r\n \"solution\": [\r\n \"reduce stress, improve mindfulness\"\r\n ]\r\n },\r\n {\r\n \"id\": \"25\",\r\n \"solution\": [\r\n \"improve, promote relaxation\"\r\n ]\r\n },\r\n {\r\n \"id\": \"26\",\r\n \"solution\": [\r\n \"anyone, all ages and backgrounds\"\r\n ]\r\n }\r\n ],\r\n \"text\": \"What are the three main benefits of yoga and meditation?{{21}}\\\\nWhere did yoga originate?{{22}}\\\\nWhat are the two components of yoga?{{23}}\\\\nHow do yoga and meditation improve mental health?{{24}}\\\\nWhat is the impact of yoga and meditation on sleep quality?{{25}}\\\\nWho can practice yoga and meditation?{{26}}\\\\n\",\r\n \"type\": \"writeBlanks\"\r\n }\r\n ],\r\n \"text\": {\r\n \"content\": \"Yoga and meditation have been gaining popularity in recent years as more and more people recognize the physical, mental, and emotional benefits of these ancient practices. Originating in ancient India, yoga is a holistic system that combines physical postures, breathing techniques, and meditation to promote overall well-being. Similarly, meditation is a mental practice that involves training the mind to achieve a state of inner peace and relaxation. 
One of the main benefits of yoga and meditation is their ability to reduce stress and improve mental health. In today's fast-paced world, stress has become a common problem for many people, leading to various physical and mental health issues. However, studies have shown that practicing yoga and meditation can significantly reduce stress levels and improve overall mental health. This is because these practices focus on deep breathing and mindfulness, which can help individuals to calm their minds and relax their bodies. As a result, many people who regularly practice yoga and meditation report feeling more peaceful, centered, and less stressed in their daily lives. Furthermore, yoga and meditation have been shown to have a positive impact on physical health. The physical postures and movements in yoga help to improve flexibility, strength, and balance. These postures also work to stretch and strengthen different muscles in the body, which can alleviate tension and prevent injuries. Additionally, the controlled breathing in yoga helps to increase oxygen flow throughout the body, which can improve cardiovascular health. As for meditation, studies have shown that it can lower blood pressure and reduce the risk of heart disease. These physical benefits make yoga and meditation an excellent form of exercise for people of all ages and fitness levels. Apart from the physical and mental benefits, yoga and meditation also have a positive impact on emotional well-being. The practice of mindfulness in these practices helps individuals to become more aware of their thoughts and emotions, allowing them to better manage and process them. This can result in improved emotional regulation and a greater sense of self-awareness. As a result, individuals who practice yoga and meditation often report feeling more positive, content, and emotionally stable. Another significant benefit of yoga and meditation is their ability to improve overall concentration and focus. 
In today's digital age, our minds are constantly bombarded with information and distractions, making it challenging to stay focused on tasks. However, regular practice of yoga and meditation can improve concentration and enhance cognitive function. This is because these practices require individuals to focus their minds on their breath, movements, or a specific mantra, helping to train the brain to stay focused for longer periods. Moreover, yoga and meditation have been shown to have a positive impact on sleep quality. Many people struggle with insomnia or other sleep-related issues, which can have a significant impact on their overall health and well-being. However, studies have shown that yoga and meditation can improve sleep quality and help individuals fall asleep faster. This is because these practices promote relaxation and reduce stress, which are common causes of sleep issues. As a result, individuals who practice yoga and meditation often report feeling more rested and rejuvenated after a good night's sleep. In addition to the physical, mental, emotional, and cognitive benefits, yoga and meditation also have a spiritual component. These practices are deeply rooted in ancient Indian spirituality and have been used for centuries to connect individuals with their inner selves and the universe. While the spiritual aspect may not be for everyone, many people find that it adds a deeper level of meaning and purpose to their practice. Furthermore, yoga and meditation are accessible to people of all ages and backgrounds. Whether you are young or old, fit or not, religious or not, yoga and meditation can be practiced by anyone. There are many different styles and forms of yoga and meditation, allowing individuals to choose the practice that best suits their needs and preferences. This inclusivity is what makes yoga and meditation such powerful and universal practices. In conclusion, the benefits of yoga and meditation are numerous and far-reaching. 
From reducing stress and improving mental health to promoting physical strength and emotional well-being, these ancient practices offer a holistic approach to overall health and wellness. Whether you are looking to improve your physical fitness, manage stress, or connect with your inner self, yoga and meditation are powerful tools that can help you achieve these goals. So why not give it a try and experience the transformative power of yoga and meditation for yourself?\",\r\n \"title\": \"The Benefits of Yoga and Meditation\"\r\n }\r\n },\r\n {\r\n \"exercises\": [\r\n {\r\n \"allowRepetition\": true,\r\n \"id\": \"1035c153-d38a-4f27-a14e-0ce63184ff82\",\r\n \"prompt\": \"Complete the summary below. Click a blank to select the corresponding word(s) for it.\\\\nThere are more words than spaces so you will not use them all. You may use any of the words more than once.\",\r\n \"solutions\": [\r\n {\r\n \"id\": \"27\",\r\n \"solution\": \"Cultural diversity\"\r\n },\r\n {\r\n \"id\": \"28\",\r\n \"solution\": \"Variety\"\r\n },\r\n {\r\n \"id\": \"29\",\r\n \"solution\": \"Multicultural\"\r\n },\r\n {\r\n \"id\": \"30\",\r\n \"solution\": \"Tolerance\"\r\n },\r\n {\r\n \"id\": \"31\",\r\n \"solution\": \"Unity\"\r\n },\r\n {\r\n \"id\": \"32\",\r\n \"solution\": \"Challenges\"\r\n },\r\n {\r\n \"id\": \"33\",\r\n \"solution\": \"Celebrated\"\r\n }\r\n ],\r\n \"text\": \"\\n\\n{{27}} refers to the {{28}} of cultures, traditions, beliefs, and lifestyles that exist within a society. In today's interconnected world, the movement of people, goods, and ideas has led to a more diverse and {{29}} society. The exchange of ideas and knowledge, {{30}} and understanding, and promoting peace and {{31}} are some of the benefits of cultural diversity. However, it also poses {{32}} such as potential clashes and the marginalization of certain groups. 
To address these challenges, it is important for societies to promote cultural competency and sensitivity, as well as for individuals to embrace diversity and participate in cultural events. Overall, cultural diversity is a crucial aspect of our global society that needs to be preserved and {{33}}.\",\r\n \"type\": \"fillBlanks\",\r\n \"words\": [\r\n \"Tolerance\",\r\n \"Cultural diversity\",\r\n \"penny\",\r\n \"Multicultural\",\r\n \"shrill\",\r\n \"Celebrated\",\r\n \"Variety\",\r\n \"query\",\r\n \"Challenges\",\r\n \"wont\",\r\n \"Unity\",\r\n \"chemical\"\r\n ]\r\n },\r\n {\r\n \"questions\": [\r\n {\r\n \"id\": \"34\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"The variety of cultures, traditions, beliefs, and lifestyles within a society\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"The number of countries in the world\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"The different types of technology used in different cultures\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"The number of languages spoken in a society\"\r\n }\r\n ],\r\n \"prompt\": \"What is the main definition of cultural diversity?\",\r\n \"solution\": \"A\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"35\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"By making it easier for people to travel\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"By increasing the number of countries in the world\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"By creating more jobs for people from different cultures\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"By promoting a single global culture\"\r\n }\r\n ],\r\n \"prompt\": \"How has technology contributed to an increase in cultural diversity?\",\r\n \"solution\": \"A\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"36\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"Increased economic opportunities\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"Higher levels of education\"\r\n 
},\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"Improved transportation systems\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"The exchange of ideas and knowledge\"\r\n }\r\n ],\r\n \"prompt\": \"What is one of the key benefits of cultural diversity?\",\r\n \"solution\": \"D\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"37\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"By forcing people to conform to a single culture\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"By exposing people to different perspectives and experiences\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"By creating a homogenous society\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"By limiting the movement of people between countries\"\r\n }\r\n ],\r\n \"prompt\": \"How does cultural diversity promote tolerance and understanding?\",\r\n \"solution\": \"B\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"38\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"Increased discrimination\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"A decline in technological advancements\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"A decrease in the number of countries\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"A lack of cultural exchange\"\r\n }\r\n ],\r\n \"prompt\": \"What is one challenge posed by cultural diversity?\",\r\n \"solution\": \"A\",\r\n \"variant\": \"text\"\r\n },\r\n {\r\n \"id\": \"39\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"By promoting a single global culture\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"By creating barriers between different groups\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"By promoting cultural competency and sensitivity\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"By limiting the number of countries in the world\"\r\n }\r\n ],\r\n \"prompt\": \"What is one way that societies can address the challenges of cultural diversity?\",\r\n \"solution\": \"C\",\r\n \"variant\": 
\"text\"\r\n },\r\n {\r\n \"id\": \"40\",\r\n \"options\": [\r\n {\r\n \"id\": \"A\",\r\n \"text\": \"To ignore cultural differences\"\r\n },\r\n {\r\n \"id\": \"B\",\r\n \"text\": \"To actively participate in cultural events and activities\"\r\n },\r\n {\r\n \"id\": \"C\",\r\n \"text\": \"To only embrace their own culture\"\r\n },\r\n {\r\n \"id\": \"D\",\r\n \"text\": \"To avoid learning about other cultures\"\r\n }\r\n ],\r\n \"prompt\": \"What is the responsibility of individuals in promoting and preserving cultural diversity?\",\r\n \"solution\": \"B\",\r\n \"variant\": \"text\"\r\n }\r\n ]\r\n }\r\n ],\r\n \"text\": {\r\n \"content\": \"Cultural diversity is a term that is often used in today's world, but what does it really mean? Simply put, cultural diversity refers to the variety of cultures, traditions, beliefs, and lifestyles that exist within a society. It is a reflection of the different backgrounds, experiences, and identities of individuals and groups. In this IELTS Reading Passage, we will explore the concept of cultural diversity and its significance in our global society. The world we live in today is more interconnected and interdependent than ever before. With the advancements in technology, transportation, and communication, people from different parts of the world can easily connect and interact with one another. This has led to an increase in the movement of people, goods, and ideas, resulting in a more diverse and multicultural society. In fact, it is estimated that over 190 countries exist in the world, each with its unique culture and traditions. One of the key benefits of cultural diversity is the exchange of ideas and knowledge. When people from different backgrounds come together, they bring with them their unique perspectives and experiences. This leads to a rich exchange of ideas, which can result in the development of new innovations and solutions to various problems. 
For example, the fusion of different cuisines has led to the creation of new and delicious dishes, and the blending of different musical styles has given birth to new genres of music. Moreover, cultural diversity also promotes tolerance and understanding among individuals and groups. When people are exposed to different cultures, they learn to appreciate and respect the differences that exist. This, in turn, leads to a more inclusive and harmonious society, where people from diverse backgrounds can coexist peacefully. In a world that is plagued by conflicts and discrimination, cultural diversity plays a crucial role in promoting peace and unity. However, despite its numerous benefits, cultural diversity also poses some challenges. One of the main challenges is the potential for cultural clashes. As individuals from different cultures interact, conflicts can arise due to differences in values, beliefs, and customs. This can lead to misunderstandings and even discrimination. For instance, a person from a collectivist culture may struggle to understand the individualistic values of someone from a Western culture. Furthermore, cultural diversity can also lead to the marginalization of certain groups within a society. In some cases, minority cultures may face discrimination and exclusion, which can result in social and economic disadvantages. This is often seen in the case of migrant communities, where they may struggle to fully integrate into the host society due to cultural barriers. To address these challenges, it is important for societies to promote cultural competency and sensitivity. This means educating individuals about different cultures and encouraging them to embrace diversity. It also involves creating policies and programs that promote inclusivity and equality for all groups within a society. For example, many countries have implemented diversity training programs in schools and workplaces to promote understanding and respect for different cultures. 
In addition, governments play a crucial role in promoting and preserving cultural diversity. They can do this by protecting the cultural heritage of different groups and promoting cultural events and festivals. This not only helps in preserving the unique identities of different cultures but also promotes cultural exchange and understanding. On an individual level, there are also steps that we can take to embrace cultural diversity. This includes being open-minded and respectful towards different cultures, being willing to learn about other cultures, and actively participating in cultural events and activities. By doing so, we can break down barriers and promote a more inclusive and harmonious society. In conclusion, cultural diversity is a key aspect of our global society. It brings numerous benefits such as the exchange of ideas and promoting tolerance, but it also poses challenges that need to be addressed. As individuals and societies, it is our responsibility to promote and preserve cultural diversity and create a world where everyone is embraced and valued for their unique identities and backgrounds. 
By doing so, we can create a more peaceful and prosperous world for all.\",\r\n \"title\": \"Cultural Diversity: A Key Aspect of Our Global Society\"\r\n }\r\n }\r\n ]\r\n }", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/reading", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "reading" + ] + } + }, + "response": [] + } + ] + }, + { + "name": "Listening", + "item": [ + { + "name": "Gen Question Listening Section 1", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/listening_section_1?topic=book hotel room for convention&exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksForm&exercises=writeBlanksFill", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "listening_section_1" + ], + "query": [ + { + "key": "topic", + "value": "book hotel room for convention" + }, + { + "key": "exercises", + "value": "multipleChoice" + }, + { + "key": "exercises", + "value": "writeBlanksQuestions" + }, + { + "key": "exercises", + "value": "writeBlanksForm" + }, + { + "key": "exercises", + "value": "writeBlanksFill" + } + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Listening Section 2", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "\r", + "pm.test(\"Response status code is 200\", function () {\r", + " pm.expect(pm.response.code).to.equal(200);\r", + "});\r", + "\r", + "\r", + "pm.test(\"Validate the 'exercises' array is present and contains the expected number of elements\", function () {\r", + " const responseData = 
pm.response.json();\r", + " \r", + " pm.expect(responseData).to.be.an('object');\r", + " pm.expect(responseData.exercises).to.exist.and.to.be.an('array');\r", + " pm.expect(responseData.exercises).to.have.lengthOf(3, \"Expected 'exercises' array to have 3 elements\");\r", + "});\r", + "\r", + "" + ], + "type": "text/javascript" + } + } + ], + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/listening_section_2?topic=football manager video game&exercises=writeBlanksForm&exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksFill", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "listening_section_2" + ], + "query": [ + { + "key": "topic", + "value": "football manager video game" + }, + { + "key": "exercises", + "value": "writeBlanksForm" + }, + { + "key": "exercises", + "value": "multipleChoice" + }, + { + "key": "exercises", + "value": "writeBlanksQuestions" + }, + { + "key": "exercises", + "value": "writeBlanksFill" + } + ] + }, + "description": "\nThis API endpoint allows you to retrieve a listening section for a specific exercise. The endpoint uses an HTTP GET request to the URL `http://127.0.0.1:5000/listening_section_2`. \n\nTo specify the exercises you want to retrieve, you can include the `exercises` query parameter multiple times with different values. In this example, the query parameters are set to `multipleChoice`, `writeBlanksQuestions`, and `writeBlanksFill`.\n\nThe response from the last execution of this request had a status code of 200, indicating a successful response. 
The response body contained a JSON object with the following structure:\n\n```json\n{\n \"exercises\": [\n {\n \"questions\": [\n {\n \"id\": \"\",\n \"options\": [\n {\n \"id\": \"\",\n \"text\": \"\"\n }\n ],\n \"prompt\": \"\",\n \"solution\": \"\",\n \"variant\": \"\"\n }\n ]\n }\n ],\n \"text\": \"\"\n}\n```\n\nThe `exercises` field in the response contains an array of exercise objects. Each exercise object has a `questions` field, which is an array of question objects. Each question object has an `id`, `options`, `prompt`, `solution`, and `variant` field.\n\nThe `text` field in the response is an empty string.\n\nPlease note that the specific values for `id`, `text`, `prompt`, `solution`, and `variant` were not provided in the response and have been omitted for privacy reasons.\n\nMake sure to include the desired exercises in the `exercises` query parameter to retrieve the listening section for those exercises.\n" + }, + "response": [] + }, + { + "name": "Gen Question Listening Section 3", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/listening_section_3?topic=discuss new assignment with the teacher &exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksFill&exercises=writeBlanksForm", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "listening_section_3" + ], + "query": [ + { + "key": "topic", + "value": "discuss new assignment with the teacher " + }, + { + "key": "exercises", + "value": "multipleChoice" + }, + { + "key": "exercises", + "value": "writeBlanksQuestions" + }, + { + "key": "exercises", + "value": "writeBlanksFill" + }, + { + "key": 
"exercises", + "value": "writeBlanksForm" + } + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Listening Section 4", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/listening_section_4?topic=football manager video game&exercises=multipleChoice&exercises=writeBlanksQuestions&exercises=writeBlanksFill&exercises=writeBlanksForm", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "listening_section_4" + ], + "query": [ + { + "key": "topic", + "value": "football manager video game" + }, + { + "key": "exercises", + "value": "multipleChoice" + }, + { + "key": "exercises", + "value": "writeBlanksQuestions" + }, + { + "key": "exercises", + "value": "writeBlanksFill" + }, + { + "key": "exercises", + "value": "writeBlanksForm" + } + ] + } + }, + "response": [] + }, + { + "name": "Save Listening Section", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"parts\": [\r\n {\r\n \"text\": {\r\n \"conversation\": {\r\n \"conversation\": \"dummy\"\r\n }\r\n },\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n },\r\n {\r\n \"text\": \"monologue_listening_2\",\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n },\r\n {\r\n \"text\": {\r\n \"conversation\": {\r\n \"conversation\": \"dummy\"\r\n }\r\n },\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n },\r\n {\r\n \"text\": \"monologue_listening_4\",\r\n \"exercises\": [\r\n {\r\n\r\n }\r\n ] \r\n }\r\n ]\r\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + 
"url": { + "raw": "http://127.0.0.1:5000/listening", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "listening" + ] + } + }, + "response": [] + } + ] + }, + { + "name": "Level", + "item": [ + { + "name": "Gen Level", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "http://127.0.0.1:5000/level", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "level" + ] + } + }, + "response": [] + } + ] + }, + { + "name": "Fetch Answer Tips", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"question\": \"When did Kendrick Lamar sign for TDE?\",\n \"answer\": \"Hello GPT.\",\n\t\t\"correct_answer\": \"2005\",\n \"context\": \"Kendrick Lamar Duckworth (born June 17, 1987) is an American rapper and songwriter. Known for his progressive musical styles and socially conscious songwriting, he is often considered one of the most influential hip hop artists of his generation. Born and raised in Compton, California, Lamar began his career as a teenager performing under the stage name K.Dot. 
He quickly garnered local attention which led to him signing a recording contract with Top Dawg Entertainment (TDE) in 2005.\"\n}\n", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/fetch_tips", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "fetch_tips" + ] + } + }, + "response": [] + }, + { + "name": "Get Grading Summary", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json", + "type": "text" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"question\": \"When did Kendrick Lamar sign for TDE?\",\n \"answer\": \"Hello GPT.\",\n\t\t\"correct_answer\": \"2005\",\n \"context\": \"Kendrick Lamar Duckworth (born June 17, 1987) is an American rapper and songwriter. Known for his progressive musical styles and socially conscious songwriting, he is often considered one of the most influential hip hop artists of his generation. Born and raised in Compton, California, Lamar began his career as a teenager performing under the stage name K.Dot. 
He quickly garnered local attention which led to him signing a recording contract with Top Dawg Entertainment (TDE) in 2005.\"\n}\n", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/fetch_tips", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "fetch_tips" + ] + } + }, + "response": [] + } + ] } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index bc5026a..1668c29 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,32 +1,35 @@ -[tool.poetry] -name = "ielts-be" -version = "0.1.0" -description = "" -authors = ["Ecrop Devteam "] -readme = "README.md" - -[tool.poetry.dependencies] -python = "^3.11" -uvicorn = "^0.30.3" -python-dotenv = "^1.0.1" -aioboto3 = "^13.1.1" -httpx = "^0.27.0" -fastapi = "^0.111.1" -nltk = "^3.8.1" -firebase-admin = "^6.5.0" -wonderwords = "^2.2.0" -dependency-injector = "^4.41.0" -openai = "^1.37.0" -python-multipart = "0.0.9" -faiss-cpu = "1.8.0.post1" -pypandoc = "1.13" -pdfplumber = "0.11.3" -numpy = "1.26.4" -pillow = "10.4.0" -sentence-transformers = "3.0.1" -openai-whisper = "20231117" - - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +[tool.poetry] +name = "ielts-be" +version = "0.1.0" +description = "" +authors = ["Ecrop Devteam "] +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.11" +uvicorn = "^0.30.3" +python-dotenv = "^1.0.1" +aioboto3 = "^13.1.1" +httpx = "^0.27.0" +fastapi = "^0.111.1" +nltk = "^3.8.1" +firebase-admin = "^6.5.0" +wonderwords = "^2.2.0" +dependency-injector = "^4.41.0" +openai = "^1.37.0" +python-multipart = "0.0.9" +faiss-cpu = "1.8.0.post1" +pypandoc = "1.13" +pdfplumber = "0.11.3" +numpy = "1.26.4" +pillow = "10.4.0" +sentence-transformers = "3.0.1" +openai-whisper = "20231117" +motor = "3.6.0" +shortuuid = "1.0.13" +pandas = "2.2.3" + + +[build-system] +requires = ["poetry-core"] +build-backend = 
"poetry.core.masonry.api" diff --git a/scripts/tips/instructions.MD b/scripts/tips/instructions.MD new file mode 100644 index 0000000..9b2883a --- /dev/null +++ b/scripts/tips/instructions.MD @@ -0,0 +1,67 @@ +# Adding new training content + +If you're ever tasked with the grueling task of adding more tips from manuals, my condolences. + +There are 4 components of a training content tip: the tip itself, the question, the additional and the segment. + +The tip is the actual tip, if the manual doesn't have an exercise that relates to that tip fill this out: + +```json +{ + "category": "", + "embedding": "", + "text": "", + "html": "", + "id": "", + "verified": , + "standalone": +} +``` + +If the manual does have an exercise that relates to the tip: + +```json +{ + // ... + "question": "", + "additional": "", + "segments": [ + { + "html": " >", + "wordDelay": , + "holdDelay": , + "highlight": [ + { + "targets": [""], + "phrases": [""] + } + ], + "insertHTML": [ + { + "target": "", + "targetId": "", + "position": "", + "html": "" + }, + ] + } + ] +} +``` + +In order to create these structures you will have to mannually screenshot the tips, exercises, context and send them to an llm (gpt-4o or claude) +with a prompt like "get me the html for this", you will have to check whether the html is properly structured and then +paste them in the prompt.txt file of this directory and send it +back to an llm. + +Afterwards you will have to check whether the default styles in /src/components/TrainingContent/FormatTip.ts are adequate, divs +(except for the wrapper div of a segment) and span styles are not overriden but you should aim to use the least ammount of +styles in the tip itself and create custom reusable html elements +in FormatTip.ts. + +After checking all of the tips render you will have to create new embeddings in the backend, you CAN'T change ids of existing tips since there +might be training tips that are already stored in firebase. 
+ +This is a very tedious task; here's a recommendation for [background noise](https://www.youtube.com/watch?v=lDnva_3fcTc). + +GL HF diff --git a/scripts/tips/pathways_2_rw.json new file mode 100644 index 0000000..f2502c7 --- /dev/null +++ b/scripts/tips/pathways_2_rw.json @@ -0,0 +1,7579 @@ +{ + "title": "Pathways Reading, Writing and Critical Thinking 2", + "pdf_page_offset": 18, + "units": [ + { + "unit": 1, + "title": "Happiness", + "pages": [ + { + "page": 4, + "tips": [ + { + "category": "Strategy", + "embedding": "Read titles and subheads to predict what a passage is about. This will help you know what to expect as you read.", + "text": "Read titles and subheads to predict what a passage is about. This will help you know what to expect as you read.", + "html": "

Read titles and subheads to predict what a passage is about. This will help you know what to expect as you read.

", + "id": "658b24d4-9c9c-4c2b-8451-f00addaaae3e", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Read the title and the subheads of the reading passage. What do you think the reading passage is about?

  • Different things make different people happy.
  • Security is the most important thing for happiness.
  • Everyone needs the same basic things to be happy.
", + "additional": "

Is there a Recipe for Happiness?

What makes us happy? Money? Friends? A good job? Are the answers the same for everyone? According to world surveys, Mexico and Singapore are two happy countries—but their people may be happy for different reasons.

Safety and Security

There are more than 19,000 people per square mile1 in the small nation of Singapore. People on the island work an average of 70 hours per week. The country has strict laws, for example, against littering,2 graffiti,3 and even for not flushing a toilet. But according to the World Database of Happiness, Singapore is the happiest country in Asia. Why?

One reason for Singapore's happiness is that the government provides the basic necessities, such as jobs and housing. There is almost no poverty, and Singapore has one of the lowest levels of unemployment in the world. The government creates jobs for people who are unemployed. It 'tops up'4 poorer people's income so everyone can have a minimum standard of living. The government also offers tax breaks5 to people who look after their aging parents. This may be why 84 percent of older people live with their children. The result is a lot of closely connected families with roughly equal standards of living.

People may not all be happy about the laws, but they are generally happy with the results — they don't step in litter, the public toilets work, and the streets are safe and clean. So for Singaporeans, it seems that living in a secure, clean, and safe place may be more important than having a lot of personal freedom. As Dr. Tan Ern Ser of Singapore's Institute of Policy Studies explains, 'If you are hopeful and confident of getting what you want in life, then you are happy.'

Friends and Neighbors

In many ways, Mexico is the opposite of Singapore. There are some parts of Mexico where people do not have a safe or secure life. Many people do not have jobs, enough food, or access to education. But, as in Singapore, most people in Mexico feel that they are happy. Why?

One reason is the importance of socializing. According to psychologists, much of our happiness comes from remembering the small joys that happen throughout the day. Simple acts of socializing, such as talking with a neighbor or having dinner with friends, can greatly increase our overall happiness. People in Mexico socialize with family and friends a lot, and this adds to their happiness.

But what about poverty? In Mexico, about half of the population is poor. However, most people in Mexico live near people in a similar financial situation. If your neighbor doesn't have expensive items, such as a big house or an expensive car, you don't feel the need to have those things. So money, by itself, may not be so important for happiness. What matters more is how much money you have compared to the people around you.

A Mixed Recipe?

So the question 'What makes people happy?' does not seem to have a simple answer. Work, security, safety, freedom, and socializing with friends and family can all play important roles. As the examples of Singapore and Mexico suggest, there may be no single recipe for happiness. The good news is that we can each find our own.

Adapted from Thrive: Finding Happiness the Blue Zones Way by Dan Buettner, 2010

1 A square mile = 2.59 square kilometers2 Littering is leaving garbage or trash lying around outside.3 Graffiti is words or pictures that are written or drawn on walls or other public places.4 If you top something up, you add to it to make it full.5 If the government gives someone a tax break, it lowers the amount of tax they have to pay.
", + "segments": [ + { + "html": "

Understanding the Tip

The tip suggests to read titles and subheads to predict what a passage is about. This strategy helps you form expectations and prepare your mind for the content you're about to read.

", + "wordDelay": 200, + "holdDelay": 5000 + }, + { + "html": "

Applying the Tip

Let's look at the title and subheads of our passage:

  • Title: Is there a Recipe for Happiness?
  • Subhead 1: Safety and Security
  • Subhead 2: Friends and Neighbors
  • Subhead 3: A Mixed Recipe?
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Is there a Recipe for Happiness?", + "Safety and Security", + "Friends and Neighbors", + "A Mixed Recipe?" + ] + } + ] + }, + { + "html": "

Based on these, we can predict that the passage will discuss:

  • Various factors that contribute to happiness
  • The role of safety and security in happiness
  • How social relationships affect happiness
  • The possibility that happiness might have multiple sources
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "factors that contribute to happiness", + "safety and security", + "social relationships", + "multiple sources" + ] + } + ] + }, + { + "html": "

Answering the Question

Now, let's look at the options provided:

  • Different things make different people happy.
  • Security is the most important thing for happiness.
  • Everyone needs the same basic things to be happy.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Different things make different people happy.", + "Security is the most important thing for happiness.", + "Everyone needs the same basic things to be happy." + ] + } + ] + }, + { + "html": "

Based on our prediction from the title and subheads, the best answer is:

a. Different things make different people happy.

This answer aligns with the idea of exploring various factors and the possibility of a 'mixed recipe' for happiness.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Different things make different people happy." + ] + } + ] + }, + { + "html": "

Benefits of the Tip

By reading titles and subheads:

  • You get a quick overview of the main topics
  • It helps you anticipate the content, making reading easier
  • You can better focus on key information while reading
  • It improves comprehension and retention of the material
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "quick overview", + "anticipate the content", + "focus on key information", + "improves comprehension" + ] + } + ] + } + ] + } + }, + { + "category": "Word Link", + "embedding": "To increase your vocabulary use a dictionary to find other forms of a word.", + "text": "To increase your vocabulary use a dictionary to find other forms of a word, e.g., (adj.) confident, (n.) confidence; (adj.) secure, (n.) security; (n.) freedom, (adj.) free; (v.) socialize, (adj.) social; (adj.) financial, (n.) finance.", + "standalone": true, + "html": "

To increase your vocabulary use a dictionary to find other forms of a word, e.g. :

  • (adj.) confident, (n.) confidence;
  • (adj.) secure, (n.) security;
  • (n.) freedom, (adj.) free;
  • (v.) socialize, (adj.) social;
  • (adj.) financial, (n.) finance.
", + "id": "c7e322ad-71e5-4aef-bcff-433fbe33e8c7", + "verified": true + } + ] + }, + { + "page": 7, + "tips": [ + { + "category": "CT Focus", + "embedding": "Use the context - the words around a word - to guess the meaning of a word you don't know. The context can also help you decide the word's part of speech.", + "text": "Use the context - the words around a word - to guess the meaning of a word you don't know. The context can also help you decide the word's part of speech, e.g., noun, verb, adjective, etc.", + "html": "

Use the context - the words around a word - to guess the meaning of a word you don't know. The context can also help you decide the word's part of speech, e.g., noun, verb, adjective, etc.

", + "id": "dbb2d0a9-2c68-4933-8cd6-8a51d6e0ad9c", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Guessing Meaning from Context

Use context to identify each bold word's meaning. Then match the sentence halves to make definitions.

", + "additional": "
    If you are strict, If you are flushing something, If you are unemployed, If you look after people, If you make something public,
    you provide it to everyone. you take care of them and make sure they are well. you don't allow people to behave badly. you do not have a job. you are cleaning or emptying it with a fast flow of water.
", + "segments": [ + { + "html": "

Guessing Meaning from Context

Let's analyze each sentence and use the context to determine the meaning of the bold words. We'll then match them with their corresponding definitions.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

1. Strict

Sentence: 'If you are strict,'

Context clue: The sentence implies a characteristic or behavior.

Matching definition: c.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "If you are strict,", + "you don't allow people to behave badly." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "question-1", + "position": "replace", + "html": "c." + } + ] + }, + { + "html": "

2. Flushing

Sentence: 'If you are flushing something,'

Context clue: The word 'something' suggests an action being done to an object.

Matching definition: e.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "If you are flushing something,", + "you are cleaning or emptying it with a fast flow of water." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "question-2", + "position": "replace", + "html": "e." + } + ] + }, + { + "html": "

3. Unemployed

Sentence: 'If you are unemployed,'

Context clue: The prefix 'un-' often means 'not' or 'without'.

Matching definition: d.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "If you are unemployed,", + "you do not have a job." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "question-3", + "position": "replace", + "html": "d." + } + ] + }, + { + "html": "

4. Look after

Sentence: 'If you look after people,'

Context clue: The phrase suggests an action done for others.

Matching definition: b.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "If you look after people,", + "you take care of them and make sure they are well." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "question-4", + "position": "replace", + "html": "b." + } + ] + }, + { + "html": "

5. Public

Sentence: 'If you make something public,'

Context clue: 'Make something' suggests changing the state or availability of information.

Matching definition: a.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "If you make something public,", + "you provide it to everyone." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "question-5", + "position": "replace", + "html": "a." + } + ] + }, + { + "html": "

Key Strategy

When guessing word meanings from context:

  • Look at the words surrounding the unknown word
  • Consider the overall topic or theme of the sentence
  • Analyze prefixes, suffixes, or word parts for clues
  • Think about the part of speech (noun, verb, adjective, etc.)
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + }, + { + "html": "

Helpful Tip

Remember to use the context - the words around a word - to guess the meaning of unfamiliar words. The context can also help you determine the word's part of speech. This approach not only aids in understanding specific words but also improves overall reading comprehension and vocabulary development.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + } + ] + } + } + ] + }, + { + "page": 8, + "tips": [ + { + "category": "Reading Skill", + "embedding": "Identifying the Main\n\nIdea The main idea of a paragraph is the most important idea, or the idea that the paragraph is about. A good paragraph has one main idea and one or more supporting ideas.", + "text": "Identifying the Main Idea\n\nThe main idea of a paragraph is the most important idea, or the idea that the paragraph is about. A good paragraph has one main idea and one or more supporting ideas. Read the paragraph below and think about the main idea.\n\nResearchers have found that the sunny weather in Mexico is one of the reasons that people there are happy. Mexico has many hours of sunlight, so people in Mexico get a lot of vitamin D. Vitamin D is important for overall health and well-being. Also, studies show that when people tan, they make more endorphins - chemicals in our bodies that make us feel happy.\n\nWhich of these statements is the main idea of the paragraph?\n\na. People in Mexico are happy because they get a lot of vitamin D.\nb. Tanning makes us create more endorphins, which make us feel happy.\nc. Mexico gets a lot of sun, which may make people there happier.\n\nThe last sentence is the main idea. The other two sentences are supporting ideas that explain the main idea.", + "html": "

Identifying the Main Idea

The main idea of a paragraph is the most important idea, or the idea that the paragraph is about. A good paragraph has one main idea and one or more supporting ideas.

", + "id": "ec975445-8bce-4c7e-8ff4-9c870252a39d", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Which of these statements is the main idea of the paragraph?

  • People in Mexico are happy because they get a lot of vitamin D.
  • Tanning makes us create more endorphins, which make us feel happy.
  • Mexico gets a lot of sun, which may make people there happier.
", + "additional": "

Researchers have found that the sunny weather in Mexico is one of the reasons that people there are happy. Mexico has many hours of sunlight, so people in Mexico get a lot of vitamin D. Vitamin D is important for overall health and well-being. Also, studies show that when people tan, they make more endorphins - chemicals in our bodies that make us feel happy.

", + "segments": [ + { + "html": "

Analyzing the Paragraph

Let's break down the key points in the given paragraph:

  • Sunny weather in Mexico contributes to happiness
  • Mexico has many hours of sunlight
  • People in Mexico get a lot of vitamin D
  • Vitamin D is important for health and well-being
  • Tanning produces endorphins, which make people feel happy
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "sunny weather", + "Sunny weather", + "happiness", + "Tanning", + "people tan", + "Mexico", + "happy", + "vitamin D", + "health and well-being", + "endorphins", + "many hours of sunlight", + "Vitamin D" + ] + } + ] + }, + { + "html": "

Identifying Key Relationships

The paragraph establishes connections between:

  • Sunny weather and happiness in Mexico
  • Sunlight and vitamin D production
  • Vitamin D and overall well-being
  • Tanning and endorphin production
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional", + "segment" + ], + "phrases": [ + "Sunny weather", + "sunny weather", + "Mexico", + "happy", + "happiness", + "sunlight", + "Sunlight", + "vitamin D", + "Vitamin D", + "overall health and well-being", + "overall well-being", + "people tan", + "endorphins", + "Tanning", + "endorphin production" + ] + } + ] + }, + { + "html": "

Evaluating the Options

Now, let's consider each option:

  • People in Mexico are happy because they get a lot of vitamin D.
  • Tanning makes us create more endorphins, which make us feel happy.
  • Mexico gets a lot of sun, which may make people there happier.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "People in Mexico are happy because they get a lot of vitamin D.", + "Tanning makes us create more endorphins, which make us feel happy.", + "Mexico gets a lot of sun, which may make people there happier." + ] + } + ] + }, + { + "html": "

Selecting the Main Idea

The correct answer is:

c. Mexico gets a lot of sun, which may make people there happier.

This option best captures the overall message of the paragraph, linking Mexico's sunny weather to happiness. Options a and b are supporting details rather than the main idea.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Mexico gets a lot of sun, which may make people there happier." + ] + } + ] + }, + { + "html": "

Understanding Main Ideas

The main idea of a paragraph is the most important concept that the paragraph is conveying. It's usually supported by several details or examples. In this case, the main idea is about the connection between Mexico's sunny weather and the happiness of its people. The other points about vitamin D and endorphins are supporting details that explain why sunlight might contribute to happiness.

", + "wordDelay": 200, + "holdDelay": 8000 + }, + { + "html": "

Applying This Approach

By focusing on identifying the main idea:

  • We can quickly grasp the central message of a text
  • We can distinguish between primary concepts and supporting details
  • We improve our reading comprehension and retention
  • We can more effectively summarize and discuss the content we read

This skill is crucial for efficient reading and understanding of various texts, from academic materials to everyday articles.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "main idea", + "central message", + "primary concepts", + "supporting details", + "reading comprehension", + "retention", + "summarize", + "discuss", + "efficient reading", + "understanding" + ] + } + ] + } + ] + } + }, + { + "category": "CT Focus", + "embedding": "Use the context to guess the meaning of new words.", + "text": "Use the context to guess the meaning of new words.", + "html": "

Use the context to guess the meaning of new words. What do fit, obesity, and recreation mean?

", + "id": "49e4ac6c-e927-407d-b05a-fbec974ea20e", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Identifying the Main Idea

Read the information about Denmark. Then write the main idea of the paragraph.

", + "additional": "It's hard to be happy when you're unhealthy, According to the World Database of Happiness, Denmark is the second happiest country in the world, and most Danes are fit. They have a lower rate of obesity than many of their European neighbors. Danish cities are designed so it's easy to walk or bike from one place to another. With a 30-minute walk, you can go from the city of Copenhagen to the ocean, where you can sail or swim, or to the woods, where you can hike. Everyone has easy access to recreation.", + "segments": [ + { + "html": "

Identifying the Main Idea

To identify the main idea of a paragraph, focus on what the author is trying to convey overall. Look for recurring themes or the most emphasized points.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "main idea", + "recurring themes", + "emphasized points" + ] + } + ] + }, + { + "html": "

In the given passage about Denmark, the main idea revolves around the connection between health and happiness. The author emphasizes how Denmark's lifestyle promotes both, making it one of the happiest countries in the world.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Denmark", + "health", + "happiness", + "unhealthy", + "happy", + "most Danes are fit" + ] + } + ] + }, + { + "html": "

Key Points:

  • Denmark is the second happiest country in the world.
  • Most Danes are fit and have a lower rate of obesity.
  • Danish cities encourage walking and biking.
  • Easy access to recreation activities.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "second happiest country", + "fit", + "obesity", + "recreation" + ] + } + ] + }, + { + "html": "

The main idea is that Denmark's urban design and lifestyle choices promote both physical health and mental well-being, contributing to the country's high happiness ranking.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "main idea", + "urban design", + "lifestyle choices", + "physical health", + "mental well-being" + ] + } + ] + }, + { + "html": "

Applying the Tip:

Now, let's use the provided tip:
Use the context to guess the meaning of new words. Let's examine fit, obesity, and recreation.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Use the context", + "fit", + "obesity", + "recreation" + ] + } + ] + }, + { + "html": "
  • Fit: Likely means being in good physical health, as it's contrasted with obesity.
  • Obesity: Refers to being overweight or having excess body fat, which is less common in Denmark.
  • Recreation: Activities done for enjoyment and relaxation, such as walking, biking, sailing, or hiking.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Fit", + "good physical health", + "Obesity", + "excess body fat", + "Recreation", + "enjoyment and relaxation" + ] + } + ] + }, + { + "html": "

By using the context, we can understand these terms and how they contribute to the main idea of health and happiness in Denmark. This skill helps in comprehending new vocabulary and reinforces the overall message of the passage.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "context", + "health and happiness", + "comprehending new vocabulary" + ] + } + ] + } + ] + } + } + ] + }, + { + "page": 10, + "tips": [ + { + "category": "CT Focus", + "embedding": "Look for key words to help you guess meaning from context.", + "text": "Look for key words to help you guess meaning from context.", + "html": "

Look for key words to help you guess meaning from context.

", + "id": "d1845a62-cc3b-4090-906a-6e16ad9c1496", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Building Vocabulary

Look at the words around the bold words to guess their meanings. Then determine the best definition (a or b) of each word.

", + "additional": "
  1. A researcher who studies happiness might ask people what kinds of things make them happy.

    • someone who studies something and tries to discover facts about it
    • someone who teaches subjects such as science and math in school
  2. A person's long-term goals can include going to college and then medical school.

    • happening over a long time
    • traveling for a long distance
  3. It's important to live in a community that you like. Do you like the people who live near you? Does the area have good places to shop, eat, and socialize?

    • the place where you live
    • a place where people meet
  4. Most happy people have a hobby, such as writing, surfing, or painting.

    • something that you do for money, such as a job
    • an activity that you enjoy doing in your free time
  5. Some people volunteer to help others who are in need. Although you may get no money for volunteering, the work can make you feel good about yourself.

    • do something without being paid
    • go to school with a group of people
  6. People feel happier when they are grateful for the things that they have. They spend less time wanting things that they don't have.

    • thankful
    • excited
  7. A person's mood can depend on many things. For example, if someone says something nice about you, it can make you feel good.

    • the place where you spend most of your time
    • the way you feel at a particular time
  8. Healthy food, exercise, and friends are important for a person's well-being.

    • health and happiness
    • the way you spend your time
  9. In many countries, adult children support their elderly parents. The children pay their parents' bills and provide them with food and a place to live.

    • help
    • teach
  10. Good health is one factor that can make you a happy person. A close group of friends is another factor.

    • one of the things that causes a situation
    • something that is difficult or causes problems
", + "segments": [ + { + "html": "

Understanding Context Clues

When encountering unfamiliar words, the surrounding text often provides valuable hints about their meanings. Let's explore this concept using the given examples.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

1. Researcher

Key context: 'studies happiness' and 'ask people what kinds of things make them happy'

These phrases suggest that a researcher is someone who investigates or studies a topic.

Answer: a. someone who studies something and tries to discover facts about it

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "studies happiness", + "ask people what kinds of things make them happy", + " someone who studies something and tries to discover facts about it", + "a. someone who studies something and tries to discover facts about it" + ] + } + ] + }, + { + "html": "

2. Long-term

Key context: 'goals can include going to college and then medical school'

This implies a sequence of events over an extended period, not a physical distance.

Answer: a. happening over a long time

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "goals can include going to college and then medical school", + "a. happening over a long time", + " happening over a long time" + ] + } + ] + }, + { + "html": "

3. Community

Key context: 'people who live near you' and 'area has good places to shop, eat, and socialize'

These phrases describe a living environment, not just a meeting place.

Answer: a. the place where you live

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "people who live near you", + "area have good places to shop, eat, and socialize", + "area has good places to shop, eat, and socialize", + "a. the place where you live", + " the place where you live" + ] + } + ] + }, + { + "html": "

4. Hobby

Key context: 'Most happy people' and 'writing, surfing, or painting'

These examples suggest activities done for enjoyment, not for money.

Answer: b. an activity that you enjoy doing in your free time

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Most happy people", + "writing, surfing, or painting", + "b. an activity that you enjoy doing in your free time", + " an activity that you enjoy doing in your free time" + ] + } + ] + }, + { + "html": "

5. Volunteer

Key context: 'help others who are in need' and 'get no money for volunteering'

This implies doing work without payment.

Answer: a. do something without being paid

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "help others who are in need", + "get no money for volunteering", + "a. do something without being paid", + " do something without being paid" + ] + } + ] + }, + { + "html": "

6. Grateful

Key context: 'for the things that they have' and 'spend less time wanting things that they don't have'

This suggests being content with what one has, implying thankfulness.

Answer: a. thankful

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "for the things that they have", + "spend less time wanting things that they don't have", + "a. thankful", + " thankful" + ] + } + ] + }, + { + "html": "

7. Mood

Key context: 'can depend on many things' and 'if someone says something nice about you, it can make you feel good'

This describes changing emotional states, not a physical place.

Answer: b. the way you feel at a particular time

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "can depend on many things", + "if someone says something nice about you, it can make you feel good", + "b. the way you feel at a particular time", + " the way you feel at a particular time" + ] + } + ] + }, + { + "html": "

8. Well-being

Key context: 'Healthy food, exercise, and friends are important for'

These factors contribute to both physical health and mental happiness.

Answer: a. health and happiness

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Healthy food, exercise, and friends are important for", + "a. health and happiness", + " health and happiness" + ] + } + ] + }, + { + "html": "

9. Support

Key context: 'pay their parents' bills and provide them with food and a place to live'

These actions describe helping or assisting, not teaching.

Answer: a. help

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "pay their parents' bills and provide them with food and a place to live", + "a. help", + " help" + ] + } + ] + }, + { + "html": "

10. Factor

Key context: 'Good health is one factor' and 'A close group of friends is another factor'

These examples suggest things that contribute to a situation (happiness), not problems.

Answer: a. one of the things that causes a situation

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Good health is one factor", + "A close group of friends is another factor", + "a. one of the things that causes a situation", + " one of the things that causes a situation" + ] + } + ] + }, + { + "html": "

Key Strategy

For each word, we identified key words or phrases in the surrounding text. These context clues helped us deduce the most appropriate meaning for each term.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

Tip Reminder

Remember to look for key words to help you guess meaning from context. This approach enhances vocabulary, improves reading comprehension, and develops critical thinking skills.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + } + ] + } + }, + { + "category": "Word Partners", + "embedding": "Use factor.", + "text": "Use factor with: (adj.) contributing factor, deciding factor, important factor, key factor; (n.) risk factor.", + "html": "

Use factor with: (adj.) contributing factor, deciding factor, important factor, key factor; (n.) risk factor.

", + "id": "fb055481-9d53-4bf4-977a-5dfedccf9d57", + "standalone": true, + "verified": true + } + ] + }, + { + "page": 14, + "tips": [ + { + "category": "Strategy", + "embedding": "Look for clues to the main idea in the first (and sometimes second) sentence of a paragraph.", + "text": "Look for clues to the main idea in the first (and sometimes second) sentence of a paragraph.", + "html": "

Look for clues to the main idea in the first (and sometimes second) sentence of a paragraph.

", + "id": "999b1968-f3d0-488e-8957-56f97517dff5", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Identifying Main Ideas

Read the statements below. Determine the main idea in each pair of statements (a or b).

", + "additional": "
CategoryOption AOption B
SelfYou need to take care of yourself and connect with the people around you.Focus on your interests and talents and meet people who are like you.
HomeIt's a good idea to paint your living room yellow.You should arrange your home so that it makes you feel happy.
Financial LifeYou can be happy if you have enough money, but don't want money too much.If you waste money on things you don't need, you won't have enough money for things that you do need.
Social LifeA good group of friends can increase your happiness.Researchers say that a happy friend can increase our mood by nine percent.
WorkplaceYou spend a lot of time at work, so you should like your workplace.Your boss needs to be someone you enjoy working for.
CommunityThe place where you live is more important for happiness than anything else.Live around people who have the same amount of money as you do.
", + "segments": [ + { + "html": "

Identifying Main Ideas

Let's analyze each pair of statements to determine which one represents the main idea. We'll focus on which statement is more general and encompasses the overall concept.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

1. Self

Main idea: A. You need to take care of yourself and connect with the people around you.

This statement is more comprehensive, covering both self-care and social connections. Option B is more specific and could be considered a subset of A.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "You need to take care of yourself and connect with the people around you." + ] + } + ] + }, + { + "html": "

2. Home

Main idea: B. You should arrange your home so that it makes you feel happy.

This statement is more general and applies to the entire home. Option A is a specific example that could fall under this broader concept.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "You should arrange your home so that it makes you feel happy." + ] + } + ] + }, + { + "html": "

3. Financial Life

Main idea: A. You can be happy if you have enough money, but don't want money too much.

This statement provides a balanced view of money's role in happiness. Option B is more specific and could be seen as a consequence of wanting money too much.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "You can be happy if you have enough money, but don't want money too much." + ] + } + ] + }, + { + "html": "

4. Social Life

Main idea: A. A good group of friends can increase your happiness.

This statement is more general about the impact of friendships. Option B provides a specific statistic that supports this main idea.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "A good group of friends can increase your happiness." + ] + } + ] + }, + { + "html": "

5. Workplace

Main idea: A. You spend a lot of time at work, so you should like your workplace.

This statement covers the overall importance of workplace satisfaction. Option B focuses on one specific aspect (the boss) and is less comprehensive.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "You spend a lot of time at work, so you should like your workplace." + ] + } + ] + }, + { + "html": "

6. Community

Main idea: A. The place where you live is more important for happiness than anything else.

While this statement might be debatable, it's more general and encompasses the overall importance of community. Option B is a specific suggestion about community demographics.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "The place where you live is more important for happiness than anything else." + ] + } + ] + }, + { + "html": "

Key Strategy

When identifying main ideas:

  • Look for broader, more encompassing statements
  • Consider which statement other ideas could fall under
  • Identify which statement provides a general principle rather than a specific example
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + }, + { + "html": "

Helpful Tip

Remember to look for clues to the main idea in the first (and sometimes second) sentence of a paragraph. In this exercise, we applied this concept to pairs of statements. This approach can help you quickly identify the central theme or main point in various types of text.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + } + ] + } + } + ] + }, + { + "page": 15, + "tips": [ + { + "category": "Strategy", + "embedding": "When you brainstorm think of as many ideas as possible related to your topic. Don't worry about whether the ideas are good or bad - write down all the ideas you can think of.", + "text": "When you brainstorm think of as many ideas as possible related to your topic. Don't worry about whether the ideas are good or bad - write down all the ideas you can think of.", + "html": "

When you brainstorm, think of as many ideas as possible related to your topic. Don't worry about whether the ideas are good or bad - write down all the ideas you can think of.

", + "id": "a5913388-e4bd-4c4c-a9fb-bab8642c1360", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 16, + "tips": [ + { + "category": "Writing Skill", + "embedding": "Writing a Topic Sentence\n\nA paragraph is a group of sentences about one topic. Most paragraphs include a sentence that states the main idea of the paragraph. This sentence is called the topic sentence. Paragraphs often begin with topic sentences, but topic sentences can occur anywhere in a paragraph.\n\nA topic sentence should introduce the main idea that the paragraph will discuss or examine.", + "text": "Writing a Topic Sentence\n\nA paragraph is a group of sentences about one topic. Most paragraphs include a sentence that states the main idea of the paragraph. This sentence is called the topic sentence. Paragraphs often begin with topic sentences, but topic sentences can occur anywhere in a paragraph.\n\nA topic sentence should introduce the main idea that the paragraph will discuss or examine.\n\nBelow are some examples of strong and weak topic sentences.\n\nStrong Topic Sentences\n\nOne reason that Singaporeans are happy is that the government provides the basic necessities, such as jobs and housing.\n\nPeople in Mexico socialize a lot, and this may contribute to their happiness.\n\nWeak Topic Sentences\n\nSingaporeans are happy.\n\n(If the paragraph is about the ways that the government improves people's happiness, this idea should be included in the topic sentence.)\n\nPeople in Mexico socialize a lot.\n\n(if the paragraph is about how socializing contributes to people's happiness in Mexico, this idea should be included in the topic sentence.)", + "html": "

Writing a Topic Sentence

A paragraph is a group of sentences about one topic. Most paragraphs include a sentence that states the main idea of the paragraph. This sentence is called the topic sentence. Paragraphs often begin with topic sentences, but topic sentences can occur anywhere in a paragraph.

A topic sentence should introduce the main idea that the paragraph will discuss or examine.

Below are some examples of strong and weak topic sentences.

Strong Topic Sentences

One reason that Singaporeans are happy is that the government provides the basic necessities, such as jobs and housing.
People in Mexico socialize a lot, and this may contribute to their happiness.

Weak Topic Sentences

Singaporeans are happy.

(If the paragraph is about the ways that the government improves people's happiness, this idea should be included in the topic sentence.)

People in Mexico socialize a lot.

(If the paragraph is about how socializing contributes to people's happiness in Mexico, this idea should be included in the topic sentence.)

", + "id": "9867613b-6575-432c-8246-3717bafa6860", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Identify the topic sentence in each paragraph. One of the topic sentences is stronger than the others.

  1. In Mexico, family is important. Family members provide support to each other during difficult times. Grandmothers take care of grandchildren so the children's mothers can go to work and earn money. When they grow up, children take care of their parents. People in Mexico are generally happy as long as family members are close.
  2. Studies have shown that laughter may be an important factor for our happiness, and people who laugh a lot are happier. People who laugh more tend to have higher levels of self-esteem. They also tend to be healthier. Laughter is so important for our general well-being that some people go to “laugh therapy” where they laugh with groups of people.
  3. We spend most of our daily lives at work. Our work can increase our happiness. In many countries, a lot of people choose their job based on how much it pays, or on what other people think about that job. But in Denmark, one of the world's happiest countries, most people take jobs that interest them. That gives them a better chance to feel good about the work that they do.

Rewrite the two topic sentences that are weak.

", + "segments": [ + { + "html": "

Analyzing Topic Sentences in Paragraphs

Let's examine each paragraph to identify the topic sentences and evaluate their strength. We'll then work on improving the weaker ones.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["Identify the topic sentence in each paragraph"] + } + ], + "insertHTML": [] + }, + { + "html": "

Paragraph 1

Topic sentence: 'In Mexico, family is important.'

  • This is a weak topic sentence because:
  • It doesn't fully introduce the main idea of the paragraph
  • It doesn't connect family importance to happiness, which is the paragraph's focus

A stronger version could be: 'In Mexico, the importance of family contributes significantly to people's happiness.'

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["In Mexico, family is important.", "People in Mexico are generally happy as long as family members are close."] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "In Mexico, the importance of family contributes significantly to people's happiness." + } + ] + }, + { + "html": "

Paragraph 2

Topic sentence: 'Studies have shown that laughter may be an important factor for our happiness, and people who laugh a lot are happier.'

  • This is a strong topic sentence because:
  • It clearly states the main idea of the paragraph
  • It introduces the connection between laughter and happiness
  • It sets up the supporting details that follow
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["Studies have shown that laughter may be an important factor for our happiness, and people who laugh a lot are happier."] + } + ], + "insertHTML": [] + }, + { + "html": "

Paragraph 3

Topic sentence: 'We spend most of our daily lives at work.'

  • This is a weak topic sentence because:
  • It doesn't introduce the main idea of job satisfaction and happiness
  • It doesn't mention Denmark or contrast it with other countries

A stronger version could be: 'In Denmark, one of the world's happiest countries, people prioritize job satisfaction over salary, contributing to their overall happiness.'

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["We spend most of our daily lives at work.", "In Denmark, one of the world's happiest countries, most people take jobs that interest them."] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "In Denmark, one of the world's happiest countries, people prioritize job satisfaction over salary, contributing to their overall happiness." + } + ] + }, + { + "html": "

The Importance of Strong Topic Sentences

Crafting effective topic sentences is crucial for clear and organized writing:

  • They provide a roadmap for the reader, introducing the main idea of the paragraph
  • They help maintain focus and coherence within the paragraph
  • They make the writer's argument or point clearer and more persuasive
  • They improve the overall flow and readability of the text

By mastering the art of writing strong topic sentences, you can significantly enhance the quality and effectiveness of your writing.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["A topic sentence should introduce the main idea that the paragraph will discuss or examine."] + } + ], + "insertHTML": [] + }, + { + "html": "

Applying the Topic Sentence Tip

The provided tip offers valuable guidance for writing effective topic sentences:

  • It emphasizes that a topic sentence should state the main idea of the paragraph
  • It notes that topic sentences often (but not always) appear at the beginning of paragraphs
  • It provides examples of strong and weak topic sentences, illustrating how to improve weak ones
  • It demonstrates how to connect the topic sentence to the paragraph's focus (e.g., relating socialization to happiness in the Mexico example)

By applying these principles, you can create more coherent and focused paragraphs, enhancing the overall quality of your writing.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["A paragraph is a group of sentences about one topic", "Most paragraphs include a sentence that states the main idea of the paragraph", "This sentence is called the topic sentence"] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 18, + "tips": [ + { + "category": "CT Focus", + "embedding": "Use context to help you guess meaning.", + "text": "Use context to help you guess meaning. For example, does passionate mean a good feeling or a bad feeling?", + "html": "

Use context to help you guess meaning. For example, does passionate mean a good feeling or a bad feeling?

", + "id": "a680672e-4a62-4989-a130-d0611e53cbc1", + "standalone": false, + "verified": true, + "exercise": { + "question": "

The paragraphs below are on the topic of a happy life.

Which is the first draft? Which is the revision?", + "additional": "

A

I think I'm generally happy because I like most things about my life. I have a great job. I do work that I feel passionate about, and I like my coworkers. My family and friends are very supportive. Whenever I have problems, I know that my family and friends will help me. Also, my friends make me laugh a lot. In addition, I'm healthy. I don't have any illnesses, and I play fun sports such as soccer and basketball.

B

I think I'm generally happy. I have a great job. I do work that I feel passionate about, and I like my coworkers. I don't make a lot of money, so sometimes I have to do extra work on the weekends. I want to ask for a raise at work. My family and friends are very supportive. Whenever I have problems, I know that my family and friends will help me. Also, my friends make me laugh a lot. In addition, I'm healthy.

", + "segments": [ + { + "html": "

Analyzing Writing Drafts

Let's examine two paragraphs about a happy life and determine which is the first draft and which is the revision.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

Paragraph A

Key points:

  • Generally happy
  • Great job, passionate work
  • Supportive family and friends
  • Good health
  • Plays sports

This paragraph is well-structured and focuses solely on positive aspects.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + }, + { + "html": "

Paragraph B

Key points:

  • Generally happy
  • Great job, passionate work
  • Mentions financial struggles
  • Supportive family and friends
  • Good health

This paragraph includes both positive and negative aspects, and seems less polished.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + }, + { + "html": "

Comparing the Paragraphs

Differences:

  • Paragraph B mentions financial struggles, while A does not
  • Paragraph A includes sports as a source of happiness
  • Paragraph A is more focused and cohesive
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "I don't make a lot of money", + "I play fun sports such as soccer and basketball." + ] + } + ] + }, + { + "html": "

Identifying First Draft and Revision

First Draft: Paragraph B

Reasons:

  • Includes both positive and negative aspects
  • Less focused structure
  • Contains extraneous information (weekend work, wanting a raise)

Revision: Paragraph A

Reasons:

  • More focused on the main topic (happiness)
  • Better structured and cohesive
  • Removed negative aspects for a more positive tone
  • Added specific details (sports) to support the claim of being healthy
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "I think I'm generally happy because I like most things about my life.", + "I think I'm generally happy." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "draft", + "html": "B", + "position": "replace" + }, + { + "target": "question", + "targetId": "revision", + "html": "A", + "position": "replace" + } + ] + }, + { + "html": "

Understanding 'Passionate'

In both paragraphs, the word 'passionate' is used in the context of work. Based on the overall positive tone, we can deduce that 'passionate' here means a good feeling - enthusiasm or a positive emotion towards one's work.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "I do work that I feel passionate about" + ] + } + ] + }, + { + "html": "

Key Takeaway

When analyzing drafts:

  • Look for improvements in focus and structure
  • Notice changes in tone and content
  • Observe how details are added or removed to strengthen the main idea

Remember to use context to help you guess the meaning of unfamiliar words. This skill is crucial for understanding nuances in writing and for improving your own revision process.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + } + ] + } + } + ] + } + ] + }, + { + "unit": 2, + "title": "Big Ideas", + "pages": [ + { + "page": 24, + "tips": [ + { + "category": "Word Link", + "embedding": "The suffix -tion can turn some verbs into nouns.", + "text": "The suffix -tion can turn some verbs into nouns, e.g., prevent / prevention, define / definition, act / action, create / creation, contribute / contribution.", + "html": "

The suffix -tion can turn some verbs into nouns, e.g. :

  • prevent / prevention
  • define / definition
  • act / action
  • create / creation
  • contribute / contribution
", + "id": "4445562b-5ab9-4284-b32e-4b41407401aa", + "standalone": true, + "verified": true + } + ] + }, + { + "page": 28, + "tips": [ + { + "category": "Reading Skill", + "embedding": "Identifying Supporting Ideas\n\nSupporting ideas tell more about the main idea. They can do the following:\n\ndescribe give reasons give examples", + "text": "Identifying Supporting Ideas\n\nSupporting ideas tell more about the main idea. They can do the following:\n\ndescribe give reasons give examples\n\nLook at the paragraph from the reading. What does each colored sentence do?\n\nWhen William went home and started building his windmill, a lot of people laughed at him, including his mother. They didn't think he could do it, but William was confident.He saw the Photo of the windmill in the book. That meant someone else was able to build it, so he knew he could build it, too. William was also creative. He didn't have the parts and equipment that he saw in the book's illustrations, and he couldn't buy them. So he looked for parts in junkyards. He explains, 'I found a tractor fan, shock absorber, [and] PVC pipes. Using a bicycle frame ... , I built my machine.'\n\nThe main idea of the paragraph is that William was confident and creative in building his windmill. The green sentences give reasons why William was confident. The blue sentences give examples of how William was creative. And the purple sentences describe how he did it.", + "html": "

Identifying Supporting Ideas

Supporting ideas tell more about the main idea. They can do the following:

  • describe
  • give reasons
  • give examples
", + "id": "693c8547-d2fa-45e1-803a-ff979c893b04", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Look at the paragraph from the reading. What does each colored sentence do?

When William went home and started building his windmill, a lot of people laughed at him, including his mother. They didn't think he could do it, but William was confident. He saw the Photo of the windmill in the book. That meant someone else was able to build it, so he knew he could build it, too. William was also creative. He didn't have the parts and equipment that he saw in the book's illustrations, and he couldn't buy them. So he looked for parts in junkyards. He explains, 'I found a tractor fan, shock absorber, [and] PVC pipes. Using a bicycle frame ... , I built my machine.'

", + "segments": [ + { + "html": "

Analyzing the Paragraph

Let's examine William's windmill project, identifying the main idea and its supporting elements.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

Identifying the Main Idea

To find the main idea, let's look at key points in the paragraph:

  • William started building a windmill
  • People laughed and didn't think he could do it
  • William was confident
  • William was creative
  • He found ways to build it despite challenges

Which of these seems to be the central focus? The paragraph emphasizes William's attitude and approach. Therefore, we can conclude that the main idea is: William was confident and creative in building his windmill.

", + "wordDelay": 200, + "holdDelay": 12000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "When William went home and started building his windmill", + "a lot of people laughed at him", + "William was confident", + "William was also creative", + "He didn't have the parts and equipment", + "So he looked for parts in junkyards" + ] + } + ] + }, + { + "html": "

Supporting Ideas: Confidence

The green sentences give reasons why William was confident:

'He saw the Photo of the windmill in the book. That meant someone else was able to build it, so he knew he could build it, too.'

This shows William's confidence came from seeing proof that building a windmill was possible.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "He saw the Photo of the windmill in the book. That meant someone else was able to build it, so he knew he could build it, too." + ] + } + ] + }, + { + "html": "

Supporting Ideas: Creativity

The blue sentences give examples of how William was creative:

'He didn't have the parts and equipment that he saw in the book's illustrations, and he couldn't buy them. So he looked for parts in junkyards.'

This demonstrates William's creative problem-solving in finding alternative ways to obtain parts.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "He didn't have the parts and equipment that he saw in the book's illustrations, and he couldn't buy them. So he looked for parts in junkyards." + ] + } + ] + }, + { + "html": "

Supporting Ideas: Description

The purple sentences describe how William built his windmill:

'He explains, \"I found a tractor fan, shock absorber, [and] PVC pipes.Using a bicycle frame ... , I built my machine.\"'

This provides specific details about the materials William used, further illustrating his creativity and resourcefulness.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "He explains, 'I found a tractor fan, shock absorber, [and] PVC pipes. Using a bicycle frame ... , I built my machine.'" + ] + } + ] + }, + { + "html": "

Conclusion

Understanding how supporting ideas relate to the main idea helps improve reading comprehension. In this paragraph, we see how reasons, examples, and descriptions all work together to support the main idea of William's confidence and creativity.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + } + ] + } + } + ] + }, + { + "page": 30, + "tips": [ + { + "category": "Word Link", + "embedding": "The suffix -able can turn some verbs into adjectives.", + "text": "The suffix -able can turn some verbs into adjectives, e.g., renew / renewable, detect / detectable, afford / affordable, prevent / preventable.", + "html": "

The suffix -able can turn some verbs into adjectives, e.g. :

  • renew / renewable
  • detect / detectable
  • afford / affordable
  • prevent / preventable
", + "id": "1a15adf5-50cf-44e6-8a42-80144bfb70eb", + "standalone": true, + "verified": true + } + ] + }, + { + "page": 31, + "tips": [ + { + "category": "Strategy", + "embedding": "Use clues in titles, headings, pictures, and captions to get a quick sense of what you will read. As you read in more detail, check whether your predictions were correct.", + "text": "Use clues in titles, headings, pictures, and captions to get a quick sense of what you will read. As you read in more detail, check whether your predictions were correct.", + "html": "

Use clues in titles, headings, pictures, and captions to get a quick sense of what you will read. As you read in more detail, check whether your predictions were correct.

", + "id": "7587f165-a179-490c-8866-483b0cbf3387", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Predicting

Read the passages below. What do you think is the purpose of each of the items described?

", + "additional": "

Infant Warmer

Around 19 million low-birthweight babies are born every year in developing countries. These babies weigh less than 5.5 pounds (2.5 kilograms) when they're born. Low-birthweight babies are often unable to keep their body temperatures1 warm enough. Many get too cold and die. The Embrace Infant Warmer helps keep these babies warm. Developer Jane Chen says, 'Over the next five years, we hope to save the lives of almost a million babies.'

Water Container

In poor areas, people often have to walk several miles to get clean water. Usually, women and children have to carry heavy containers of water home every day, and it is difficult work. The Q Drum holds 13 gallons (about 50 liters) in a rolling container. With this innovation, people can easily roll the water on the ground.

Portable Clay Cooler

The pot-in-pot system is a good way to store food without using electricity. The user puts wet sand between two pots, one fitting inside the other. The water evaporates2 and keeps food cool. That helps food stay fresh longer. For example, tomatoes can last for weeks instead of just days. That way, people can buy more fresh fruits and vegetables at the market, and farmers can make more money.

Health Detector

Scientist Hayat Sindi's device is the size of a postage stamp, and it costs just a penny. But it could save millions of lives. In many parts of the world, doctors and nurses work with no electricity or clean water. They have to send health tests to labs3 and wait weeks for results. But this little piece of paper could change that. It contains tiny holes that are filled with chemicals. These chemicals can detect health problems. A person places a single drop of blood on the paper. The chemicals in the paper change because of the blood and indicate* whether or not the person has an illness.

Solar Wi-Fi Light

The StarSight system is an innovation that can benefit millions of people around the world. It absorbs solar energy during the day to power streetlamps at night. The solar panels also provide wireless Internet access. The result: renewable electricity for better street lighting and faster communication. This can be extremely valuable in places where it is difficult to get electricity.

1 Your body temperature is how hot or how cold your body is.2 When a liquid evaporates, it changes to a gas as its temperature increases.3 Labs are laboratories, places where scientific research is done.
", + "segments": [ + { + "html": "

Predicting Item Purposes

Let's analyze each item based on the information provided in the passage. We'll use the titles, subheads, and opening paragraphs to make educated guesses about their purposes.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

1. Infant Warmer

Purpose: To keep low-birthweight babies warm in developing countries.

Key information:

  • Designed for babies weighing less than 5.5 pounds
  • Helps prevent deaths due to low body temperature
  • Aims to save nearly a million babies over five years
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Low-birthweight babies are often unable to keep their body temperatures* warm enough.", + "The Embrace Infant Warmer helps keep these babies warm." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "purpose-1", + "position": "replace", + "html": "To keep low-birthweight babies warm in developing countries." + } + ] + }, + { + "html": "

2. Water Container

Purpose: To make transporting water easier in areas with limited access to clean water.

Key information:

  • Holds 13 gallons (50 liters) of water
  • Can be rolled instead of carried
  • Reduces the physical burden on women and children
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "people often have to walk several miles to get clean water", + "The Q Drum holds 13 gallons (about 50 liters) in a rolling container." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "purpose-2", + "position": "replace", + "html": "To make transporting water easier in areas with limited access to clean water." + } + ] + }, + { + "html": "

3. Portable Clay Cooler

Purpose: To preserve food without electricity in areas with limited resources.

Key information:

  • Uses evaporation to keep food cool
  • Extends the shelf life of fresh produce
  • Benefits both consumers and farmers
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "The pot-in-pot system is a good way to store food without using electricity.", + "That helps food stay fresh longer." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "purpose-3", + "position": "replace", + "html": "To preserve food without electricity in areas with limited resources." + } + ] + }, + { + "html": "

4. Health Detector

Purpose: To provide quick, affordable health diagnostics in resource-limited areas.

Key information:

  • Size of a postage stamp and costs only a penny
  • Uses chemical reactions to detect health problems
  • Provides rapid results without need for lab facilities
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "Scientist Hayat Sindi's device is the size of a postage stamp, and it costs just a penny.", + "These chemicals can detect health problems." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "purpose-4", + "position": "replace", + "html": "To provide quick, affordable health diagnostics in resource-limited areas." + } + ] + }, + { + "html": "

5. Solar Wi-Fi Light

Purpose: To provide sustainable lighting and internet access in areas with limited electricity.

Key information:

  • Uses solar energy to power streetlamps
  • Provides wireless internet access
  • Offers renewable electricity solution
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "It absorbs solar energy during the day to power streetlamps at night.", + "The solar panels also provide wireless Internet access." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "purpose-5", + "position": "replace", + "html": "To provide sustainable lighting and internet access in areas with limited electricity." + } + ] + }, + { + "html": "

Key Takeaway

By using clues from titles, headings, and opening paragraphs, we can quickly grasp the purpose of each innovation. This approach helps us predict and understand the main ideas before diving into detailed reading. Remember, using these contextual clues not only improves comprehension but also makes reading more efficient and engaging.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + } + ] + } + } + ] + }, + { + "page": 34, + "tips": [ + { + "category": "CT Focus", + "embedding": "To rank items in order, first decide on your criteria for ranking.", + "text": "To rank items in order, first decide on your criteria for ranking, e.g., how many people you think will be able to afford the item, or how many lives might be saved or improved.", + "html": "

To rank items in order, first decide on your criteria for ranking, e.g., how many people you think will be able to afford the item, or how many lives might be saved or improved.

", + "id": "999dc9ac-13ba-41e8-9a75-bff1c124710e", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Critical Thinking: Ranking and Justifying

Which of the innovations do you think is the most important? Which is the least important? Rank them 1-5, with 1 as the most important.

", + "additional": "

Infant Warmer

Around 19 million low-birthweight babies are born every year in developing countries. These babies weigh less than 5.5 pounds (2.5 kilograms) when they're born. Low-birthweight babies are often unable to keep their body temperatures1 warm enough. Many get too cold and die. The Embrace Infant Warmer helps keep these babies warm. Developer Jane Chen says, 'Over the next five years, we hope to save the lives of almost a million babies.'

Water Container

In poor areas, people often have to walk several miles to get clean water. Usually, women and children have to carry heavy containers of water home every day, and it is difficult work. The Q Drum holds 13 gallons (about 50 liters) in a rolling container. With this innovation, people can easily roll the water on the ground.

Portable Clay Cooler

The pot-in-pot system is a good way to store food without using electricity. The user puts wet sand between two pots, one fitting inside the other. The water evaporates2 and keeps food cool. That helps food stay fresh longer. For example, tomatoes can last for weeks instead of just days. That way, people can buy more fresh fruits and vegetables at the market, and farmers can make more money.

Health Detector

Scientist Hayat Sindi's device is the size of a postage stamp, and it costs just a penny. But it could save millions of lives. In many parts of the world, doctors and nurses work with no electricity or clean water. They have to send health tests to labs3 and wait weeks for results. But this little piece of paper could change that. It contains tiny holes that are filled with chemicals. These chemicals can detect health problems. A person places a single drop of blood on the paper. The chemicals in the paper change because of the blood and indicate* whether or not the person has an illness.

Solar Wi-Fi Light

The StarSight system is an innovation that can benefit millions of people around the world. It absorbs solar energy during the day to power streetlamps at night. The solar panels also provide wireless Internet access. The result: renewable electricity for better street lighting and faster communication. This can be extremely valuable in places where it is difficult to get electricity.

1 Your body temperature is how hot or how cold your body is.2 When a liquid evaporates, it changes to a gas as its temperature increases.3 Labs are laboratories, places where scientific research is done.
", + "segments": [ + { + "html": "

Ranking Innovations

Let's analyze each innovation and consider potential criteria for ranking their importance. We'll focus on the impact on human lives and the scale of the problem each innovation addresses.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

1. Infant Warmer

Impact: Potentially saves lives of newborns

Scale: 19 million low-birthweight babies born annually

Goal: Save almost a million babies over five years

Potential Rank: High importance due to direct life-saving impact on vulnerable population
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "19 million low-birthweight babies are born every year", + "we hope to save the lives of almost a million babies" + ] + } + ] + }, + { + "html": "

2. Water Container

Impact: Eases the burden of water transportation

Scale: Affects daily life in poor areas

Benefit: Improves quality of life, especially for women and children

Potential Rank: Medium to high importance due to widespread daily impact
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "people often have to walk several miles to get clean water", + "people can easily roll the water on the ground" + ] + } + ] + }, + { + "html": "

3. Portable Clay Cooler

Impact: Preserves food without electricity

Scale: Benefits both consumers and farmers

Advantage: Extends food freshness, potentially improving nutrition and income

Potential Rank: Medium importance due to economic and health benefits
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "store food without using electricity", + "That helps food stay fresh longer", + "farmers can make more money" + ] + } + ] + }, + { + "html": "

4. Health Detector

Impact: Provides quick, affordable health diagnostics

Scale: Could save millions of lives

Advantage: Works in areas without electricity or clean water

Potential Rank: High importance due to low cost and potential to save many lives
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "it could save millions of lives", + "doctors and nurses work with no electricity or clean water", + "These chemicals can detect health problems" + ] + } + ] + }, + { + "html": "

5. Solar Wi-Fi Light

Impact: Provides lighting and internet access

Scale: Can benefit millions of people

Advantage: Uses renewable energy, improves safety and communication

Potential Rank: Medium to high importance due to broad impact on quality of life
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "all" + ], + "phrases": [ + "can benefit millions of people around the world", + "renewable electricity for better street lighting and faster communication" + ] + } + ] + }, + { + "html": "

Ranking Considerations

When ranking these innovations, consider:

  • Number of lives potentially saved or improved
  • Immediacy of the impact
  • Cost-effectiveness of the solution
  • Scope of the problem addressed
  • Long-term sustainability of the innovation
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + }, + { + "html": "

Ranking the Innovations

Based on the analysis and criteria discussed, here is a suggested ranking of the innovations from most important to least important:

  1. Infant Warmer: Due to its potential to save the lives of nearly a million newborns in developing countries.

  2. Health Detector: For providing quick and affordable health diagnostics in areas lacking basic medical infrastructure.

  3. Water Container: As it greatly eases the daily burden of water transportation, especially for women and children.

  4. Solar Wi-Fi Light: For its broad impact on providing renewable energy, lighting, and internet access.

  5. Portable Clay Cooler: Due to its economic and health benefits, though its impact is less immediate compared to other innovations.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "rank-1", + "position": "replace", + "html": "
1
" + }, + { + "target": "question", + "targetId": "rank-5", + "position": "replace", + "html": "
2
" + }, + { + "target": "question", + "targetId": "rank-4", + "position": "replace", + "html": "
3
" + }, + { + "target": "question", + "targetId": "rank-3", + "position": "replace", + "html": "
4
" + }, + { + "target": "question", + "targetId": "rank-2", + "position": "replace", + "html": "
5
" + } + ] + }, + { + "html": "

Final Thoughts

Remember, ranking these innovations involves personal judgment. The criteria you choose will significantly influence your ranking. Consider the immediate impact on saving lives, the number of people affected, and the long-term benefits to communities. Discuss your reasoning with others to gain different perspectives and potentially refine your ranking.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + } + ] + } + } + ] + }, + { + "page": 35, + "tips": [ + { + "category": "Language for Writing", + "embedding": "Review of the Simple Past\n\nWe use the simple past tense to talk about events that began and ended in the past.\n\nTo form the simple past tense of be:\n\n* use was or were to form affirmative statements.\n* use was not / wasn't or were not / weren't to form negative statements.\n\nTo form the simple past tense with other verbs:\n* add -ed to the end of most verbs to form affirmative statements.\n* use did not / didn't with the base form of a main verb to form negative statements.\n", + "text": "Review of the Simple Past\n\nWe use the simple past tense to talk about events that began and ended in the past.\n\nAccording to historians, a man named Ts'ai Lun invented paper in China around AD 105.\nBefore that time, people didn't have inexpensive material to write on.\nPeople wrote on things such as silk and clay, which were expensive and inconvenient.\n\nTo form the simple past tense of be:\n\n* use was or were to form affirmative statements.\n* use was not / wasn't or were not / weren't to form negative statements.\n\nTo form the simple past tense with other verbs:\n* add -ed to the end of most verbs to form affirmative statements.\n* use did not / didn't with the base form of a main verb to form negative statements.\n\nSome verbs have irregular past tense forms in affirmative statements:\ngo-went have-had make-made take-took do-did build-built", + "html": "

Review of the Simple Past

We use the simple past tense to talk about events that began and ended in the past.

According to historians, a man named Ts'ai Lun invented paper in China around AD 105.
Before that time, people didn't have inexpensive material to write on.
People wrote on things such as silk and clay, which were expensive and inconvenient.

To form the simple past tense of be:

  • use was or were to form affirmative statements.
  • use was not / wasn't or were not / weren't to form negative statements.

To form the simple past tense with other verbs:

  • add -ed to the end of most verbs to form affirmative statements.
  • use did not / didn't with the base form of a main verb to form negative statements.

Some verbs have irregular past tense forms in affirmative statements:

go-went have-had make-made take-took do-did build-built", + "id": "4b6d0023-010b-47eb-9f00-3beb70092918", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Use the simple past tense of the verbs in parentheses to complete the sentences.

  1. Most people in William Kamkwamba’s village __________ (not / have) electricity.
  2. William __________ (go) to the library.
  3. He __________ (find) a book there called Using Energy.
  4. William __________ (use) the information in the book and he __________ (build) a windmill.
  5. When he __________ (start), people __________ (not / believe) that he could do it.
  6. William __________ (not / be) worried. He __________ (be) confident.
  7. After a while, he __________ (be) successful. His windmill __________ (make) electricity.
", + "segments": [ + { + "html": "

Let's approach this exercise step by step, focusing on the use of simple past tense:

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Use the simple past tense" + ] + } + ], + "insertHTML": [] + }, + { + "html": "
  1. Most people in William Kamkwamba's village did not have electricity.
    • This is a negative statement in the simple past, using 'did not' with the base form of 'have'.
", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(not / have)", + "did not have" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-1", + "position": "replace", + "html": "did not have" + } + ] + }, + { + "html": "
  1. William went to the library.
    • 'Go' is an irregular verb. Its simple past form is 'went'.
", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(go)", + "went" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-2", + "position": "replace", + "html": "went" + } + ] + }, + { + "html": "
  1. He found a book there called Using Energy.
    • 'Find' is another irregular verb. Its simple past form is 'found'.
", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(find)", + "found" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-3", + "position": "replace", + "html": "found" + } + ] + }, + { + "html": "
  1. William used the information in the book and he built a windmill.
    • 'Use' is a regular verb, so we add '-ed' for the simple past.
    • 'Build' is irregular; its simple past form is 'built'.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(use)", + "(build)", + "used", + "built" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-4", + "position": "replace", + "html": "used" + }, + { + "target": "question", + "targetId": "answer-5", + "position": "replace", + "html": "built" + } + ] + }, + { + "html": "
  1. When he started, people did not believe that he could do it.
    • 'Start' is a regular verb, so we add '-ed'.
    • For 'believe', we form a negative with 'did not' and the base form of the verb.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(start)", + "(not / believe)", + "started", + "did not believe" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-6", + "position": "replace", + "html": "started" + }, + { + "target": "question", + "targetId": "answer-7", + "position": "replace", + "html": "did not believe" + } + ] + }, + { + "html": "
  1. William was not worried. He was confident.
    • Both of these use the past tense of 'be'. For negative, we use 'was not', and for affirmative, we use 'was'.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(not / be)", + "(be)", + "was not", + "was" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-8", + "position": "replace", + "html": "was not" + }, + { + "target": "question", + "targetId": "answer-9", + "position": "replace", + "html": "was" + } + ] + }, + { + "html": "
  1. After a while, he was successful. His windmill made electricity.
    • Again, we use 'was' for the past tense of 'be'.
    • 'Make' is an irregular verb; its simple past form is 'made'.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(be)", + "(make)", + "was", + "made" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-10", + "position": "replace", + "html": "was" + }, + { + "target": "question", + "targetId": "answer-11", + "position": "replace", + "html": "made" + } + ] + }, + { + "html": "

Now, let's consider how the tip about the simple past tense helps us solve this exercise:

  • The tip reminds us that the simple past is used for events that began and ended in the past, which applies to all the sentences in our exercise.
  • It provides rules for forming the past tense of 'be' (was/were), which we used in sentences 6 and 7.
  • The tip explains how to form negatives with 'did not / didn't', which we applied in sentences 1 and 5.
  • It mentions adding '-ed' to regular verbs, which we did for 'use' and 'start'.
  • The tip also notes that some verbs have irregular past forms, and provides examples like 'go-went' and 'make-made', which we used in our answers.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "simple past tense", + "began and ended in the past", + "was/were", + "did not / didn't", + "-ed", + "irregular past forms", + "go-went", + "make-made" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

By remembering these rules and examples from the tip, we can confidently complete exercises involving the simple past tense, whether dealing with regular verbs, irregular verbs, or the verb 'to be'.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 36, + "tips": [ + { + "category": "Writing Skill", + "embedding": "Supporting the Main Idea and Giving Details\n\nGood paragraphs include supporting ideas that give information and details about the main idea. These sentences can give descriptions, reasons, or examples to help the reader clearly understand the main idea.", + "text": "Supporting the Main Idea and Giving Details\n\nGood paragraphs include supporting ideas that give information and details about the main idea. These sentences can give descriptions, reasons, or examples to help the reader clearly understand the main idea.", + "html": "

Supporting the Main Idea and Giving Details

Good paragraphs include supporting ideas that give information and details about the main idea. These sentences can give descriptions, reasons, or examples to help the reader clearly understand the main idea.

", + "id": "a2302e9a-ea46-485a-aba5-83d7efec436e", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Identifying Supporting Ideas

Match each topic sentence with three supporting sentences. Write A or B for each one. Two sentences are extra.

Topic Sentence A: About 900 million people need access to safe drinking water, and a simple invention may be the answer to this problem.

Topic Sentence B: The solar-powered MightyLight is a safe and clean source of lighting that can provide light to millions of people around the world.

", + "additional": "
  • The LifeStraw provides instant clean water, saving lives during disasters.
  • You should drink about eight glasses of water a day.
  • The MightyLight is safer and cleaner than traditional kerosene lamps.
  • Each straw purifies about 160 gallons of water.
  • It's easy to carry, and you can hang it on a wall or place it on a tabletop.
  • Candles don't provide much light.
  • It also lasts longer — its LED technology is good for up to 30 years.
  • Thousands of LifeStraws were donated to Haiti after the 2010 earthquake.
", + "segments": [ + { + "html": "

Let's approach this exercise by analyzing each supporting sentence and determining which topic sentence it best supports:

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Identifying Supporting Ideas", + "Match each topic sentence with three supporting sentences" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

First, let's review the two topic sentences:

  • A: Focuses on the LifeStraw as a solution for safe drinking water
  • B: Discusses the MightyLight as a clean lighting source
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Topic Sentence A", + "Topic Sentence B" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Now, let's analyze each supporting sentence:

  1. 'The LifeStraw provides instant clean water, saving lives during disasters.'
    This clearly supports Topic A about safe drinking water.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "The LifeStraw provides instant clean water, saving lives during disasters." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-1", + "position": "replace", + "html": "A" + } + ] + }, + { + "html": "
  1. 'You should drink about eight glasses of water a day.'
    This is general advice and doesn't directly support either topic sentence. It's an extra sentence.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "You should drink about eight glasses of water a day." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-2", + "position": "replace", + "html": "-" + } + ] + }, + { + "html": "
  1. 'The MightyLight is safer and cleaner than traditional kerosene lamps.'
    This directly supports Topic B about the MightyLight.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "The MightyLight is safer and cleaner than traditional kerosene lamps." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-3", + "position": "replace", + "html": "B" + } + ] + }, + { + "html": "
  1. 'Each straw purifies about 160 gallons of water.'
    This provides details about the LifeStraw, supporting Topic A.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Each straw purifies about 160 gallons of water." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-4", + "position": "replace", + "html": "A" + } + ] + }, + { + "html": "
  1. 'It's easy to carry, and you can hang it on a wall or place it on a tabletop.'
    This describes the MightyLight's portability, supporting Topic B.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "It's easy to carry, and you can hang it on a wall or place it on a tabletop." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-5", + "position": "replace", + "html": "B" + } + ] + }, + { + "html": "
  1. 'Candles don't provide much light.'
    While related to lighting, this doesn't directly support either topic sentence. It's the second extra sentence.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Candles don't provide much light." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-6", + "position": "replace", + "html": "-" + } + ] + }, + { + "html": "
  1. 'It also lasts longer — its LED technology is good for up to 30 years.'
    This provides information about the MightyLight's longevity, supporting Topic B.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "It also lasts longer — its LED technology is good for up to 30 years." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-7", + "position": "replace", + "html": "B" + } + ] + }, + { + "html": "
  1. 'Thousands of LifeStraws were donated to Haiti after the 2010 earthquake.'
    This provides an example of the LifeStraw's use in a disaster situation, supporting Topic A.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Thousands of LifeStraws were donated to Haiti after the 2010 earthquake." + ] + } + ], + "insertHTML": [ + { + "target": "additional", + "targetId": "blank-8", + "position": "replace", + "html": "A" + } + ] + }, + { + "html": "

Now, let's consider how the tip about supporting ideas and giving details helps us solve this exercise:

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "
  • The tip reminds us that good paragraphs include supporting ideas that provide information and details about the main idea.
  • It mentions that supporting sentences can give descriptions, reasons, or examples to help readers understand the main idea clearly.
  • In this exercise, we identified how each supporting sentence provides specific details, descriptions, or examples that relate to one of the two main ideas (topic sentences).
  • By understanding the relationship between main ideas and supporting details, we were able to match each supporting sentence to its corresponding topic sentence.
  • This process demonstrates how well-structured paragraphs are organized, with each supporting sentence contributing to the development of the main idea.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "supporting ideas", + "information and details", + "descriptions, reasons, or examples", + "understand the main idea" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

By applying the concepts from the tip, we can better understand how to construct coherent paragraphs and identify the relationship between main ideas and supporting details in various texts.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + } + ] + }, + { + "unit": 3, + "title": "Connected Lives", + "pages": [ + { + "page": 44, + "tips": [ + { + "category": "Word Link", + "embedding": "The prefix -inter means between or connected.", + "text": "The prefix -inter means between or connected, e.g., interactive, interchangeable, intercontinental, international, internet, intersection, interview", + "html": "

The prefix inter- means between or connected, e.g., interactive, interchangeable, intercontinental, international, internet, intersection, interview

", + "id": "368442e8-fe58-4ba8-9964-3e6a49cbe5bd", + "standalone": true, + "verified": true + } + ] + }, + { + "page": 47, + "tips": [ + { + "category": "CT Focus", + "embedding": "You make inferences when you make logical guesses about things a writer does not say directly.", + "text": "You make inferences when you make logical guesses about things a writer does not say directly. This is also called 'reading between the lines.'", + "html": "

You make inferences when you make logical guesses about things a writer does not say directly. This is also called 'reading between the lines.'

", + "id": "27d15859-4260-4750-90a6-6302f71fc845", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Making Inferences

What can you infer from each statement from the reading passage? Determine the best inference.

", + "additional": "
  1. \"When television became the dominant medium in the 1950s, it changed the way families interacted.\"

    • Before the 1950s, a different medium was probably dominant.
    • There were a lot of good television programs in the 1950s.
  2. \"This kind of sharing changes the way we communicate. With the Internet, everyone can have a voice.\"

    • People probably should not share certain things on the Internet.
    • The Internet is a better medium of communication than TV.
  3. \"It's pretty amazing that I have this little box sitting on my desk through which I can talk to any one of a billion people. And yet do any of us really use it for all the potential that's there?\"

    • There are a lot of possible uses of the Internet that most people don't really think about.
    • The Internet is an amazing tool, but most people in the world don't use it very much.
", + "segments": [ + { + "html": "

Making Inferences from Text

Let's analyze each statement and choose the most logical inference based on the information provided.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + }, + { + "html": "

Statement 1

\"When television became the dominant medium in the 1950s, it changed the way families interacted.\"

Let's analyze this statement:

  • TV became dominant in the 1950s
  • It changed family interactions
  • The word 'became' implies a change in status

Best inference: a. Before the 1950s, a different medium was probably dominant.

This inference logically follows from the idea that TV 'became' dominant, suggesting something else was dominant before.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Before the 1950s, a different medium was probably dominant." + ] + } + ] + }, + { + "html": "

Statement 2

\"This kind of sharing changes the way we communicate. With the Internet, everyone can have a voice.\"

Key points to consider:

  • Sharing changes communication
  • The Internet gives everyone a voice
  • This implies a change in who can communicate

Best inference: b. The Internet is a better medium of communication than TV.

While not explicitly stated, the emphasis on everyone having a voice suggests the Internet offers more communication opportunities than previous media like TV.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "The Internet is a better medium of communication than TV." + ] + } + ] + }, + { + "html": "

Statement 3

\"It's pretty amazing that I have this little box sitting on my desk through which I can talk to any one of a billion people. And yet do any of us really use it for all the potential that's there?\"

Important elements:

  • The 'little box' likely refers to a computer or smartphone
  • It can connect to a billion people
  • The speaker questions if we use its full potential

Best inference: a. There are a lot of possible uses of the Internet that most people don't really think about.

This inference aligns with the speaker's wonder at the device's capabilities and the question about utilizing its full potential.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "There are a lot of possible uses of the Internet that most people don't really think about." + ] + } + ] + }, + { + "html": "

Making Inferences: Key Strategies

When making inferences:

  • Look for implied information
  • Consider the context and tone
  • Use your background knowledge
  • Think about cause and effect relationships
  • Look for words that suggest change or comparison
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [] + }, + { + "html": "

The Value of Inference

Making inferences, or 'reading between the lines,' is a crucial skill in comprehension. It allows you to understand more than what's explicitly stated, enriching your understanding of the text. By making logical guesses based on given information, you engage more deeply with the material and often uncover the author's underlying message or intent.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [] + } + ] + } + } + ] + }, + { + "page": 48, + "tips": [ + { + "category": "Reading Skill", + "embedding": "Skimming for Gist\n\nSkimming is quickly looking over a passage to get the general idea of what it is about. When we skim, we don't read every word. Instead, we look for important words or chunks (pieces) of information. For example, we look for things such as names, dates, and repeated words.", + "text": "Skimming for Gist\n\nSkimming is quickly looking over a passage to get the general idea of what it is about. When we skim, we don't read every word. Instead, we look for important words or chunks (pieces) of information. For example, we look for things such as names, dates, and repeated words.\n\nWe often skim online news sites to find out the most important news of the day, blogs to choose which posts we want to read, and magazines to decide what we want to read about. But skimming can also help with academic reading. If you skim a passage before you read it carefully, you can get an idea of what the passage is about and how it is organized. This can help you understand the passage more easily when you do read it carefully, because you know what to expect.", + "html": "

Skimming for Gist

Skimming is quickly looking over a passage to get the general idea of what it is about. When we skim, we don't read every word. Instead, we look for important words or chunks (pieces) of information. For example, we look for things such as names, dates, and repeated words.

We often skim online news sites to find out the most important news of the day, blogs to choose which posts we want to read, and magazines to decide what we want to read about. But skimming can also help with academic reading. If you skim a passage before you read it carefully, you can get an idea of what the passage is about and how it is organized. This can help you understand the passage more easily when you do read it carefully, because you know what to expect.

", + "id": "9b084703-96ad-4728-8f04-5c1f13493ffd", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Skim the paragraph below. Read only the darker words. What do you think is the main idea of the paragraph?

For many of us, visiting Facebook, Twitter, or other online social networks has become a regular part of our daily activities. However, we may not have noticed the significant ways that social networks have changed our lives. First of all, they have changed the way we get our news. These days, we often only read the news stories that our friends post online. Second, our relationships have changed. Now, it's easier to keep in touch with new friends and find old friends that we haven't seen for a long time. Third, many of us share thoughts with our online friends that we used to keep private. For example, in an instant, we can tell all our online friends that we think we just failed an exam. Are these changes good or bad? That's for each person to decide. But one thing is certain — as more people join social networks and as new networks continue to appear, we can expect more changes in the future.", + "additional": "

Now read the whole paragraph carefully. Were you correct about the main idea?

For many of us, visiting Facebook, Twitter, or other online social networks has become a regular part of our daily activities. However, we may not have noticed the significant ways that social networks have changed our lives. First of all, they have changed the way we get our news. These days, we often only read the news stories that our friends post online. Second, our relationships have changed. Now, it's easier to keep in touch with new friends and find old friends that we haven't seen for a long time. Third, many of us share thoughts with our online friends that we used to keep private. For example, in an instant, we can tell all our online friends that we think we just failed an exam. Are these changes good or bad? That's for each person to decide. But one thing is certain — as more people join social networks and as new networks continue to appear, we can expect more changes in the future.

", + "segments": [ + { + "html": "

Skimming for the Main Idea

Let's apply the skimming technique to identify the main idea of the given paragraph.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Key Points from Skimming

From the highlighted words, we can gather:

  • Social networks like Facebook and Twitter are mentioned
  • They have become part of daily activities
  • There are significant changes mentioned
  • News, relationships, and sharing of thoughts are discussed
  • Future changes are expected
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "changed our lives", + "changed the way", + "get our news.", + "relationships", + "changed", + "keep in touch", + "new friends", + "find old friends", + "share thoughts", + "online friends", + "used to keep private" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Identifying the Main Idea

Based on our skimming, the main idea appears to be:

Social networks have significantly changed various aspects of our lives, including how we get news, maintain relationships, and share information.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "significant ways", + "social networks", + "changed our lives" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Verifying the Main Idea

Now, let's read the entire paragraph carefully to confirm our initial understanding.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Confirming Our Analysis

After reading the full paragraph, we can confirm that our initial understanding was correct. The main idea indeed revolves around how social networks have changed various aspects of our lives, including:

  • How we get and consume news
  • How we maintain and form relationships
  • How we share personal information

The paragraph also touches on the ongoing nature of these changes, suggesting more to come in the future.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "changed the way we get our news", + "relationships have changed", + "share thoughts with our online friends that we used to keep private", + "expect more changes in the future" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

The Value of Skimming

As we've seen, skimming allowed us to quickly grasp the main idea of the paragraph before reading it in detail. This technique is particularly useful when:

  • You need to quickly understand the gist of a text
  • You're deciding whether a text is relevant to your needs
  • You want to prepare your mind for a more detailed reading

By focusing on key words, sentence beginnings, and repeated phrases, we can efficiently extract the core message of a text, saving time and improving overall comprehension.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 50, + "tips": [ + { + "category": "Word Link", + "embedding": "The suffix -al often indicates that a word is an adjective.", + "text": "The suffix -al often indicates that a word is an adjective, e.g., virtual, tribal, environmental, cultural, structural, traditional, influential, economical.", + "html": "

The suffix -al often indicates that a word is an adjective, e.g., virtual, tribal, environmental, cultural, structural, traditional, influential, economical.

", + "id": "95663dff-6d04-4ba0-b688-54431d3f385d", + "standalone": true, + "verified": true + }, + { + "category": "Word Partners", + "embedding": "Use environmentally.", + "text": "Use environmentally with adjectives, for example, environmentally responsible, environmentally sound, environmentally friendly, environmentally aware, and environmentally sensitive.", + "html": "

Use environmentally with adjectives, for example, environmentally responsible, environmentally sound, environmentally friendly, environmentally aware, and environmentally sensitive.

", + "id": "412ccda7-fdc7-41be-a300-cffc2475d6a7", + "standalone": true, + "verified": true + } + ] + }, + { + "page": 54, + "tips": [ + { + "category": "Strategy", + "embedding": "When you scan for key details, first consider what kind of information you need to scan for.", + "text": "When you scan for key details, first consider what kind of information you need to scan for.", + "html": "

When you scan for key details, first consider what kind of information you need to scan for.

", + "id": "74425d2b-d63c-4a86-a77f-38190611dd5d", + "standalone": false, + "verified": true, + "exercise": { + "question": "

Identifying Key Details

Read the sentences below. What kind of information is missing in each one? Match the kinds of information (a-h) with the sentences (1-8). Then read the passage to complete each sentence.

  • a country name
  • a person's name
  • a type of food
  • a website name
  • a year
  • an adjective
  • an amount of money
  • an island name
", + "additional": "
  • __________ sent a message to his friend Ben Keene about starting a tribe.
  • James and Ben named their online site __________.
  • They found a small island for their tribe. It's called __________.
  • They paid __________ to lease the island for three years.
  • In September __________, Keene went to the island with 13 other people.
  • The members of the new tribe ate __________ while they lived on the island.
  • Keene and the tribal leader hope the island will become more __________, but still keep its traditions.
  • James and Keene started another tribe in __________ in West Africa.
", + "segments": [ + { + "html": "

Scanning for Key Details

Let's approach this exercise by identifying the type of information needed for each sentence, then scanning the passage for specific details.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Step 1: Identify Information Types

Let's determine what kind of information is missing for each sentence:

  1. The first sentence is missing a person's name because it refers to someone who sent a message.
  2. The second sentence needs a website name as it mentions an online site that was created.
  3. An island name is required in the third sentence because it talks about a specific small island.
  4. The fourth sentence is missing an amount of money, as it refers to a payment for leasing the island.
  5. A year is needed in the fifth sentence to specify when an event took place.
  6. The sixth sentence requires a type of food to describe what the tribe members ate.
  7. An adjective is missing in the seventh sentence to describe how they want the island to become.
  8. The last sentence needs a country name to specify where another tribe was started.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "additional", + "targetId": "letter-1", + "position": "replace", + "html": "b" + }, + { + "target": "additional", + "targetId": "letter-2", + "position": "replace", + "html": "d" + }, + { + "target": "additional", + "targetId": "letter-3", + "position": "replace", + "html": "h" + }, + { + "target": "additional", + "targetId": "letter-4", + "position": "replace", + "html": "g" + }, + { + "target": "additional", + "targetId": "letter-5", + "position": "replace", + "html": "e" + }, + { + "target": "additional", + "targetId": "letter-6", + "position": "replace", + "html": "c" + }, + { + "target": "additional", + "targetId": "letter-7", + "position": "replace", + "html": "f" + }, + { + "target": "additional", + "targetId": "letter-8", + "position": "replace", + "html": "a" + } + ] + }, + { + "html": "

Step 2: Scan for Specific Details

Now, let's scan the passage for each type of information we've identified:

  • Look for names of people, websites, islands, and countries
  • Search for monetary amounts and years
  • Identify food types and descriptive adjectives
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Step 3: Fill in the Blanks

Let's fill in all the blanks with the information we've found:

  1. A person's name: James
  2. A website name: Tribewanted
  3. An island name: Vorovoro
  4. An amount of money: $300,000
  5. A year: 2006
  6. A type of food: fish
  7. An adjective: modern
  8. A country name: Sierra Leone
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [ + { + "target": "additional", + "targetId": "underline-1", + "position": "replace", + "html": "James" + }, + { + "target": "additional", + "targetId": "underline-2", + "position": "replace", + "html": "Tribewanted" + }, + { + "target": "additional", + "targetId": "underline-3", + "position": "replace", + "html": "Vorovoro" + }, + { + "target": "additional", + "targetId": "underline-4", + "position": "replace", + "html": "$300,000" + }, + { + "target": "additional", + "targetId": "underline-5", + "position": "replace", + "html": "2006" + }, + { + "target": "additional", + "targetId": "underline-6", + "position": "replace", + "html": "fish" + }, + { + "target": "additional", + "targetId": "underline-7", + "position": "replace", + "html": "modern" + }, + { + "target": "additional", + "targetId": "underline-8", + "position": "replace", + "html": "Sierra Leone" + } + ] + }, + { + "html": "

The Value of Scanning for Key Details

Scanning for key details is an essential reading skill that allows you to quickly find specific information in a text. This technique is particularly useful when:

  • You need to answer specific questions about a text
  • You're looking for particular facts or figures
  • You want to locate relevant information quickly without reading every word

By first considering what kind of information you need to scan for, you can focus your attention on specific types of words or phrases, making your reading more efficient and effective.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 55, + "tips": [ + { + "category": "Language for Writing", + "embedding": "The Present Perfect Tense\n\nUse the present perfect tense to talk about something that happened several times in the past, something that happened at an unspecified time in the past, something that began in the past and continues to the present, or when the time in the past is not important. To form the present perfect, use have or has and the past participle of a main verb.", + "text": "The Present Perfect Tense\n\nUse the present perfect tense to talk about something that happened several times in the past, something that happened at an unspecified time in the past, something that began in the past and continues to the present, or when the time in the past is not important. To form the present perfect, use have or has and the past participle of a main verb.\n\nI think online media have improved our lives.\nThousands of people have created blogs in the past few years.\nWe have used several different kinds of social networks recently.\nShe has posted videos on YouTube three times.\n\nRemember to use the simple past to talk about something that happened at a specific time in the past.", + "html": "

Language for Writing: The Present Perfect Tense

Use the present perfect tense to talk about something that happened several times in the past, something that happened at an unspecified time in the past, something that began in the past and continues to the present, or when the time in the past is not important. To form the present perfect, use have or has and the past participle of a main verb.

  • I think online media have improved our lives.
  • Thousands of people have created blogs in the past few years.
  • We have used several different kinds of social networks recently.
  • She has posted videos on YouTube three times.

Remember to use the simple past to talk about something that happened at a specific time in the past.

", + "id": "37f57b56-a21b-4082-87fe-9139a8a692f7", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Use the present perfect tense of the verbs in parentheses to complete the sentences (1-4).

  1. Social media __________ (change) our lives in many ways.
  2. Michael Wesch __________ (use) social media in several of his classes.
  3. My friend __________ (meet) a lot of great people on social networking sites.
  4. A lot of old friends __________ (find) me online.
", + "segments": [ + { + "html": "

Understanding the Present Perfect Tense

Let's explore how to use the present perfect tense to complete these sentences. We'll focus on the structure and usage of this tense.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Structure of Present Perfect

The present perfect tense is formed using:

  • have/has + past participle of the main verb
  • Use 'has' for he/she/it, and 'have' for I/you/we/they
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Use the present perfect tense" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Sentence 1

'Social media (change) our lives in many ways.'

Let's apply the present perfect:

  • Subject: Social media (plural)
  • Verb: have + past participle of 'change'
  • Past participle of 'change' is 'changed'

Answer: Social media have changed our lives in many ways.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Social media", + "have changed", + "(change) our lives in many ways." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "underline-1", + "position": "replace", + "html": "have changed" + } + ] + }, + { + "html": "

Sentence 2

'Michael Wesch (use) social media in several of his classes.'

Applying the present perfect:

  • Subject: Michael Wesch (singular)
  • Verb: has + past participle of 'use'
  • Past participle of 'use' is 'used'

Answer: Michael Wesch has used social media in several of his classes.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Michael Wesch", + "has used", + "(use) social media in several of his classes." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "underline-2", + "position": "replace", + "html": "has used" + } + ] + }, + { + "html": "

Sentence 3

'My friend (meet) a lot of great people on social networking sites.'

Applying the present perfect:

  • Subject: My friend (singular)
  • Verb: has + past participle of 'meet'
  • Past participle of 'meet' is 'met'

Answer: My friend has met a lot of great people on social networking sites.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "My friend", + "has met", + "(meet) a lot of great people on social networking sites." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "underline-3", + "position": "replace", + "html": "has met" + } + ] + }, + { + "html": "

Sentence 4

'A lot of old friends (find) me online.'

Applying the present perfect:

  • Subject: A lot of old friends (plural)
  • Verb: have + past participle of 'find'
  • Past participle of 'find' is 'found'

Answer: A lot of old friends have found me online.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "A lot of old friends", + "have found", + "(find) me online." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "underline-4", + "position": "replace", + "html": "have found" + } + ] + }, + { + "html": "

The Value of Using Present Perfect

The present perfect tense is particularly useful for discussing:

  • Actions that started in the past and continue to the present
  • Recent past actions with present results
  • Life experiences without specifying when they occurred

By mastering this tense, you can effectively communicate about ongoing situations and past experiences that are relevant to the present, adding depth and context to your writing and speech.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 56, + "tips": [ + { + "category": "Writing Skill", + "embedding": "Writing a Concluding Sentence\n\nFormal paragraphs often have concluding sentences. A concluding sentence is the last sentence of a paragraph. It ties the paragraph together.\n\nConcluding sentences can state an opinion (either the author's, or a person mentioned in the paragraph), make a prediction, or ask a question for the reader to think about. They can also restate, or summarize, the main idea of a long or complex paragraph.", + "text": "Writing a Concluding Sentence\n\nFormal paragraphs often have concluding sentences. A concluding sentence is the last sentence of a paragraph. It ties the paragraph together.\n\nConcluding sentences can state an opinion (either the author's, or a person mentioned in the paragraph), make a prediction, or ask a question for the reader to think about. They can also restate, or summarize, the main idea of a long or complex paragraph.", + "html": "

Writing a Concluding Sentence

Formal paragraphs often have concluding sentences. A concluding sentence is the last sentence of a paragraph. It ties the paragraph together. Concluding sentences can state an opinion (either the author's, or a person mentioned in the paragraph), make a prediction, or ask a question for the reader to think about. They can also restate, or summarize, the main idea of a long or complex paragraph.

", + "id": "d8cb5ef1-97fa-4249-8d7d-e4fc6c923724", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Write a concluding sentence for each paragraph below.

  • Everywhere you look these days, people are on their phones, tablets, or computers. Some are talking, some are texting, and some are surfing the Web. It seems like people communicate with each other on social networks and by email more than they do in person. According to Dan Buettner, in his book Thrive, people should spend six to seven hours a day socializing with friends and family in order to increase their happiness. Socializing online probably doesn't have the same effect as socializing in person does.

    (Write a prediction.)
  • In my opinion, reading the news online is better than reading a newspaper or watching the news on TV. One way that it is better is that readers can comment on articles that they read online. They can have conversations with other readers, and sometimes even with the writer. Also, online articles provide links to additional information. For example, if an article mentions a name, the name is often linked to another article with more information about that person. Finally, online news articles can be updated if something changes during the day. For example, an online news site might post an article about a dangerous storm in the morning. If more information about the storm becomes available later that day, it can be added to the article.

    (Restate the main idea.)
", + "segments": [ + { + "html": "

Writing Concluding Sentences

Let's explore how to write effective concluding sentences for the given paragraphs. We'll focus on creating predictions and restating main ideas.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Paragraph 1: Writing a Prediction

The first paragraph discusses the prevalence of digital communication and its potential impact on happiness. To write a prediction, we need to consider the future implications of the information provided.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(Write a prediction.)" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

A good prediction for this paragraph might be:

'If this trend continues, we may see a decrease in overall happiness as people spend less time interacting face-to-face.'

This prediction:

  • Relates to the information provided about digital communication
  • Incorporates the idea of happiness mentioned in the paragraph
  • Speculates on a possible future outcome
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "text-1", + "position": "replace", + "html": "

If this trend continues, we may see a decrease in overall happiness as people spend less time interacting face-to-face.

" + } + ] + }, + { + "html": "

Paragraph 2: Restating the Main Idea

The second paragraph compares online news to traditional news sources. To restate the main idea, we need to summarize the key points discussed in the paragraph.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "(Restate the main idea.)" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

A good restatement of the main idea could be:

'In conclusion, online news offers distinct advantages over traditional media, including interactive features, additional resources, and real-time updates.'

This concluding sentence:

  • Summarizes the three main points discussed in the paragraph
  • Reinforces the author's opinion stated at the beginning
  • Provides a concise overview of the paragraph's content
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "text-2", + "position": "replace", + "html": "

In conclusion, online news offers distinct advantages over traditional media, including interactive features, additional resources, and real-time updates.

" + } + ] + }, + { + "html": "

The Importance of Concluding Sentences

Concluding sentences play a crucial role in formal writing. They serve to:

  • Tie the paragraph together
  • Reinforce the main idea
  • Provide closure to the reader
  • Encourage further thought or action

By mastering the art of writing effective concluding sentences, you can significantly improve the coherence and impact of your paragraphs.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 57, + "tips": [ + { + "category": "Strategy", + "embedding": "For an opinion paragraph, you can use these phrases in your topic sentence:\n\nI think... I believe... In my opinion...\n\nYou can also use one of them in your concluding sentence if you end the paragraph with a statement of your opinion.", + "text": "For an opinion paragraph, you can use these phrases in your topic sentence:\n\nI think... I believe... In my opinion...\n\nYou can also use one of them in your concluding sentence if you end the paragraph with a statement of your opinion.", + "html": "

For an opinion paragraph, you can use these phrases in your topic sentence:

I think... I believe... In my opinion...

You can also use one of them in your concluding sentence if you end the paragraph with a statement of your opinion.

", + "id": "f9e30057-0887-4d0a-926b-54fadd958a8e", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Fill in the blanks then write a draft of the paragraph.

Topic: Has online social networking helped us or harmed us?

Topic sentence

Supporting Idea (one way social networking has helped or harmed us)

Details:

Supporting Idea (another way social networking has helped or harmed us)

Details:

Concluding sentence

Draft

", + "additional": "
", + "segments": [ + { + "html": "

Writing an Opinion Paragraph on Social Networking

Let's work through creating an opinion paragraph about whether online social networking has helped or harmed us. We'll break down the process step-by-step.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Step 1: Topic Sentence

Start with a clear statement of your opinion. For this example, let's argue that social networking has helped us:

'I believe that online social networking has significantly improved our lives in several ways.'

This topic sentence:

  • Clearly states the writer's opinion
  • Uses the phrase 'I believe' to introduce the opinion
  • Provides a preview of what the paragraph will discuss
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "topic", + "position": "replace", + "html": "

I believe that online social networking has significantly improved our lives in several ways.

" + } + ] + }, + { + "html": "

Step 2: First Supporting Idea

Provide a main way that social networking has helped us:

'One way social networking has helped us is by facilitating long-distance connections.'

Details to support this idea:

  • People can easily stay in touch with friends and family across the globe
  • It allows for sharing of life updates, photos, and experiences in real-time
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "sup-idea-1", + "position": "replace", + "html": "

One way social networking has helped us is by facilitating long-distance connections.

" + }, + { + "target": "question", + "targetId": "details-1", + "position": "replace", + "html": "
  • People can easily stay in touch with friends and family across the globe
  • It allows for sharing of life updates, photos, and experiences in real-time
" + } + ] + }, + { + "html": "

Step 3: Second Supporting Idea

Provide another way that social networking has helped us:

'Another benefit of social networking is its ability to spread information quickly.'

Details to support this idea:

  • Important news and updates can reach a large audience rapidly
  • It enables the organization of events and movements more efficiently
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "sup-idea-2", + "position": "replace", + "html": "

Another benefit of social networking is its ability to spread information quickly.

" + }, + { + "target": "question", + "targetId": "details-2", + "position": "replace", + "html": "
  • Important news and updates can reach a large audience rapidly
  • It enables the organization of events and movements more efficiently
" + } + ] + }, + { + "html": "

Step 4: Concluding Sentence

Wrap up your paragraph by restating your opinion:

'In my opinion, these benefits of online social networking have greatly enhanced our ability to connect and stay informed, making it a positive force in our lives.'

This concluding sentence:

  • Restates the main opinion
  • Summarizes the key points discussed
  • Provides a final thought on the topic
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "concluding", + "position": "replace", + "html": "

In my opinion, these benefits of online social networking have greatly enhanced our ability to connect and stay informed, making it a positive force in our lives.

" + } + ] + }, + { + "html": "

Step 5: Draft the Complete Paragraph

Now, let's combine all the elements into a cohesive paragraph:

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "draft", + "position": "replace", + "html": "

I believe that online social networking has significantly improved our lives in several ways. One way social networking has helped us is by facilitating long-distance connections. People can easily stay in touch with friends and family across the globe, and it allows for sharing of life updates, photos, and experiences in real-time. Another benefit of social networking is its ability to spread information quickly. Important news and updates can reach a large audience rapidly, and it enables the organization of events and movements more efficiently. In my opinion, these benefits of online social networking have greatly enhanced our ability to connect and stay informed, making it a positive force in our lives.

" + } + ] + }, + { + "html": "

The Value of Opinion Phrases

Using phrases like 'I think...', 'I believe...', and 'In my opinion...' in your topic and concluding sentences is beneficial because:

  • They clearly signal that you're expressing a personal viewpoint
  • They set the tone for the entire paragraph
  • They remind the reader that this is one perspective among many possible views
  • They create a consistent structure when used in both the opening and closing of the paragraph

By incorporating these phrases, you make your writing more engaging and your opinions more explicit, which is crucial in argumentative or persuasive writing.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 58, + "tips": [ + { + "category": "CT Focus", + "embedding": "Make inferences about the paragraph.", + "text": "Make inferences about the paragraph. What can you tell about the writer?", + "html": "

Make inferences about the paragraph. What can you tell about the writer? Does he or she use the Internet a lot? Do you think this person is generally honest or dishonest?

", + "id": "b66691d1-7cd0-4e12-bc7f-2bd866190d34", + "verified": true, + "standalone": false, + "exercise": { + "question": "

The paragraphs below are on the topic of online music sharing.

Which is the first draft?

Which is the revision?

", + "additional": "
a

There are many views about online music sharing, but in my opinion, people should pay for music instead of getting it free online. I have gotten free music online in the past, and I didn't really think about whether or not it was fair to the musician. Then I thought about how musicians make money. They earn money by giving concerts and selling CDs. I realized that when I get music free online, I'm stealing from the people who made the music. Musicians work hard to write and perform songs. If people want to enjoy those songs, they should pay for them. We don't expect other kinds of professionals to work for free. For example, we don't expect doctors to treat us for free or teachers to teach for free. If musicians don't get paid for their work, they might not be able to continue making music. They might have to find other work in order to make money. Without musicians, where would we get our music?

b

There have been a lot of disagreements about online music sharing. I have gotten free music online in the past, and I didn't really think about whether or not it was fair to the musician. Then I thought about how musicians make money. They earn money by giving concerts and selling CDs. I realized that when I get music free online, I'm stealing from the people who made the music. That's when I stopped sharing music online. Now I always pay for music. I feel the same way about sharing movies online. Even though movie studios make millions of dollars, I still don't think it's right to get movies for free. Musicians work hard to write and perform songs. If musicians don't get paid for their work, they might not be able to continue making music. They might have to find other work in order to make money.

", + "segments": [ + { + "html": "

Analyzing First Draft and Revision

Let's examine two paragraphs about online music sharing to determine which is the first draft and which is the revision. We'll look at the structure, content, and overall coherence of each paragraph.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Paragraph A Analysis

Let's start by examining paragraph A:

  • Clear topic sentence stating the writer's opinion
  • Logical flow of ideas
  • Use of examples (doctors, teachers) to support the argument
  • Strong concluding sentence that poses a thought-provoking question

These characteristics suggest that paragraph A is likely the revision.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "There are many views about online music sharing, but in my opinion, people should pay for music instead of getting it free online.", + "Without musicians, where would we get our music?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Paragraph B Analysis

Now, let's look at paragraph B:

  • Less clear opening sentence
  • Similar content to paragraph A, but less organized
  • Introduces a new topic (movie sharing) that isn't fully developed
  • Lacks a strong concluding sentence

These characteristics suggest that paragraph B is likely the first draft.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "There have been a lot of disagreements about online music sharing.", + "I feel the same way about sharing movies online. Even though movie studios make millions of dollars, I still don't think it's right to get movies for free." + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Conclusion

Based on our analysis:

  • Paragraph A (blue) is the revision
  • Paragraph B (red) is the first draft
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "underline-1", + "position": "replace", + "html": "b" + }, + { + "target": "question", + "targetId": "underline-2", + "position": "replace", + "html": "a" + } + ] + }, + { + "html": "

Making Inferences

Now, let's make some inferences about the writer based on these paragraphs:

  • The writer likely uses the Internet frequently, as they mention past experience with online music sharing.
  • The writer seems to be honest, admitting to past behavior they now consider wrong.
  • The writer appears to value fairness and ethical behavior, as they've changed their stance on free music downloads.
  • The writer shows empathy towards musicians and their need to earn a living.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "I have gotten free music online in the past", + "Now I always pay for music" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

The Value of Making Inferences

Making inferences about a writer based on their text is beneficial because:

  • It helps develop critical reading skills
  • It encourages deeper engagement with the text
  • It aids in understanding the writer's perspective and motivations
  • It can reveal biases or assumptions in the writing
  • It helps in evaluating the credibility and reliability of the information presented

By practicing this skill, readers become more discerning and can better understand the context and subtext of what they read.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + } + ] + }, + { + "unit": 4, + "title": "Deep Trouble", + "pages": [ + { + "page": 64, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use reduce.", + "text": "Use reduce with nouns: reduce costs, reduce crime, reduce spending, reduce the number of (something), reduce waste; you can also use reduce with adverbs: dramatically reduce, greatly reduce, significantly reduce.", + "html": "

Use reduce with nouns: reduce costs, reduce crime, reduce spending, reduce the number of (something), reduce waste; you can also use reduce with adverbs: dramatically reduce, greatly reduce, significantly reduce.

", + "id": "5a218540-cf72-4f17-adb6-380a4bdcdd8a", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 67, + "tips": [ + { + "category": "CT Focus", + "embedding": "In a problem-solution passage, a writer usually describes a problem first and then provides possible solutions.", + "text": "In a problem-solution passage, a writer usually describes a problem first and then provides possible solutions. As you read, ask yourself: Does the writer provide enough information to show why the problem is real? Is it clear how the solutions match the problem(s)?", + "html": "

In a problem-solution passage, a writer usually describes a problem first and then provides possible solutions. As you read, ask yourself: Does the writer provide enough information to show why the problem is real? Is it clear how the solutions match the problem(s)?

", + "id": "b1befaec-0307-4d59-bd08-de2f64a6182f", + "verified": true, + "standalone": false, + "exercise": { + "question": "

What is the main problem described in the reading passage below? What possible solutions are there to this problem?

Main Problem
Solutions

1.

2.

3.

  1. Does the writer provide enough supporting information to show that the problem of overfishing is real? If so, how does he or she do this?

  2. How well do the solutions help to address the problem? Has the writer given enough information so the reader can see how they might work?

", + "additional": "

Where Have All the Fish Gone?

Throughout history, people have thought of the ocean as a diverse and limitless source of food. Yet today there are clear signs that the oceans have a limit. Most of the big fish in our oceans are now gone. One major factor is overfishing. People are taking so many fish from the sea that species cannot replace themselves. How did this problem start? And what is the future for fish?

Source of the Problem

For centuries, local fishermen caught only enough fish for themselves and their communities. However, in the mid-20th century, people around the world became interested in making protein-rich foods, such as fish, cheaper and more available. In response to this, governments gave money and other help to the fishing industry.

As a result, the fishing industry grew. Large commercial fishing1 companies began catching enormous quantities of fish for profit and selling them to worldwide markets. They started using new fishing technologies that were designed to catch more fish. Modern sonar2 to locate fish, and huge nets to catch them, made this easier. Modern technology allowed these companies to catch more fish than local fishermen.

Rise of the Little Fish

In 2003, a scientific report estimated that only 10 percent remained of the large ocean fish populations that existed before commercial fishing began. Specifically, commercial fishing has greatly reduced the number of large predatory fish3, such as cod and tuna. Today, there are plenty of fish in the sea, but they are mostly just the little ones. Small fish, such as sardines and anchovies, have more than doubled in number — largely because there are not enough big fish to eat them.

This trend is a problem because ecosystems need predators to be stable. Predators are necessary to weed out the sick and weak individuals. Without this weeding out, or survival of the fittest, ecosystems become less stable. As a result, fish are less able to survive difficulties such as pollution, environmental change, or changes in the food supply.

A Future for Fish?

A study published in 2006 in the journal Science made a prediction: If we continue to overfish the oceans, most of the fish that we catch now—from tuna to sardines—will largely disappear by 2050. However, the researchers say we can prevent this situation if we restore the ocean's biodiversity4.

Scientists say there are a few ways we can do this. First, commercial fishing companies can catch fewer fish. This will increase the number of large predatory fish. Another way to improve the biodiversity of the oceans is to develop aquaculture—fish farming. Growing fish on farms may take the pressure off wild-caught fish. This gives species of fish a chance to restore their numbers. Finally, we can make good choices about what we eat. For example, we can avoid eating species of fish that are threatened. If we are careful today, we can still look forward to a diverse and healthy ocean.

1 Commercial fishing is fishing for profit.2 Sonar technology uses sound waves to locate objects, for example, underwater.3 Predatory fish are fish that kill and eat other fish.4 Biodiversity is the existence of a wide variety of plant and animal species in their natural environments.
", + "segments": [ + { + "html": "

Analyzing a Problem-Solution Passage

Let's examine the passage 'Where Have All the Fish Gone?' to identify the main problem and its proposed solutions. We'll also evaluate how well the author presents the problem and solutions.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Main Problem

The main problem described in the passage is:

Overfishing is depleting ocean fish populations, particularly large predatory fish, leading to an imbalance in marine ecosystems.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Most of the big fish in our oceans are now gone. One major factor is overfishing. People are taking so many fish from the sea that species cannot replace themselves." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "main-idea", + "position": "replace", + "html": "

Overfishing is depleting ocean fish populations, particularly large predatory fish, leading to an imbalance in marine ecosystems.

" + } + ] + }, + { + "html": "

Solutions

The passage proposes three main solutions:

  1. Reduce commercial fishing
  2. Develop aquaculture (fish farming)
  3. Make informed consumer choices about fish consumption
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "First, commercial fishing companies can catch fewer fish.", + "Another way to improve the biodiversity of the oceans is to develop aquaculture—fish farming.", + "Finally, we can make good choices about what we eat. For example, we can avoid eating species of fish that are threatened." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "solution-1", + "position": "replace", + "html": "Reduce commercial fishing" + }, + { + "target": "question", + "targetId": "solution-2", + "position": "replace", + "html": "Develop aquaculture (fish farming)" + }, + { + "target": "question", + "targetId": "solution-3", + "position": "replace", + "html": "Make informed consumer choices about fish consumption" + } + ] + }, + { + "html": "

Problem Presentation

The writer provides substantial information to show that overfishing is a real problem:

  • Historical context of fishing practices
  • Scientific data on fish population decline
  • Explanation of ecosystem imbalance
  • Prediction of future fish depletion
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "For centuries, local fishermen caught only enough fish for themselves and their communities. However, in the mid-20th century, people around the world became interested in making protein-rich foods, such as fish, cheaper and more available.", + "In 2003, a scientific report estimated that only 10 percent remained of the large ocean fish populations that existed before commercial fishing began.", + "This trend is a problem because ecosystems need predators to be stable. Predators are necessary to weed out the sick and weak individuals. Without this weeding out, or survival of the fittest, ecosystems become less stable.", + "If we continue to overfish the oceans, most of the fish that we catch now—from tuna to sardines—will largely disappear by 2050." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-1", + "position": "replace", + "html": "

Yes, the writer provides sufficient information. They use historical context, scientific data, ecosystem explanations, and future predictions to illustrate the severity of overfishing.

" + } + ] + }, + { + "html": "

Solution Effectiveness

The solutions are well-matched to the problem:

  • Reducing commercial fishing directly addresses overfishing
  • Aquaculture offers an alternative to wild-caught fish
  • Consumer choices can influence fishing practices

However, the writer could provide more details on implementation and potential challenges of these solutions.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "answer-2", + "position": "replace", + "html": "

The solutions address the problem well, but more information on implementation and potential challenges would strengthen the argument. The writer provides a good overview but could elaborate on how these solutions would work in practice.

" + } + ] + }, + { + "html": "

The Value of Analyzing Problem-Solution Passages

Analyzing problem-solution passages is beneficial because:

  • It develops critical thinking skills
  • It helps evaluate the credibility and thoroughness of arguments
  • It encourages readers to think about real-world issues and potential solutions
  • It improves comprehension of complex topics
  • It aids in identifying gaps in information or logic

By asking questions about the problem's presentation and the effectiveness of proposed solutions, readers can better understand and critically evaluate the information presented in such passages.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 70, + "tips": [ + { + "category": "Word Link", + "embedding": "mini = very small", + "text": "mini = very small; minimal, minimum, minimize, miniature, minibus", + "html": "

mini = very small; minimal, minimum, minimize, miniature, minibus

", + "id": "7a6588a6-e35f-4f4a-9e2d-e168e23188c8", + "verified": true, + "standalone": true + }, + { + "category": "Word Partners", + "embedding": "Use informed and inform.", + "text": "Use the adjective informed with nouns: informed choice, informed decision. Use the verb inform with nouns: inform parents, inform the police, inform readers, inform someone in writing, inform someone of something.", + "html": "

Use the adjective informed with nouns: informed choice, informed decision. Use the verb inform with nouns: inform parents, inform the police, inform readers, inform someone in writing, inform someone of something.

", + "id": "1b011f9a-3a50-43a2-b532-75db7fba001b", + "verified": true, + "standalone": true + } + ] + } + ] + }, + { + "unit": 5, + "title": "Memory and Learning", + "pages": [ + { + "page": 84, + "tips": [ + { + "category": "Word Link", + "embedding": "The suffix -ize forms verbs that mean to cause or become something.", + "text": "The suffix -ize forms verbs that mean to cause or become something, e.g., visualize, memorize, internalize, minimize.", + "html": "

The suffix -ize forms verbs that mean to cause or become something, e.g., visualize, memorize, internalize, minimize.

", + "id": "0d263ead-e16a-434e-a04e-d031aef2dbf1", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 88, + "tips": [ + { + "category": "Reading Skill", + "embedding": "Identifying Cause and Effect\n\nA cause is something that makes another event happen. The resulting event is the effect. Recognizing causes and effects can help you better understand a reading passage. You can sometimes identify cause and effect relationships by finding certain connecting or signal words. These include because, so, if, then, therefore, as a result, and by verb + -ing.", + "text": "Identifying Cause and Effect\n\nA cause is something that makes another event happen. The resulting event is the effect. Recognizing causes and effects can help you better understand a reading passage. Look at the sentence from the reading. Does the underlined portion show a cause or an effect?\nIf you think of a very familiar place, and visualize certain things in that place, you can keep those things in your memory for a long time.\nThe underlined portion shows the effect. Visualizing things within a familiar place is the cause. Keeping memories for a long time is the effect.\n\nYou can sometimes identify cause and effect relationships by finding certain connecting or signal words. These include because, so, if, then, therefore, as a result, and by verb + -ing.\nWe don't have to remember phone numbers now because we can store them on smartphones.\nI enter my email password three times a day, so I remember it easily.\n", + "html": "

Identifying Cause and Effect

A cause is something that makes another event happen. The resulting event is the effect. Recognizing causes and effects can help you better understand a reading passage. Look at the sentence from the reading. Does the underlined portion show a cause or an effect?

If you think of a very familiar place, and visualize certain things in that place, you can keep those things in your memory for a long time.

The underlined portion shows the effect. Visualizing things within a familiar place is the cause. Keeping memories for a long time is the effect.

You can sometimes identify cause and effect relationships by finding certain connecting or signal words. These include because, so, if, then, therefore, as a result, and by verb + -ing.

We don't have to remember phone numbers now because we can store them on smartphones.
I enter my email password three times a day, so I remember it easily.
", + "id": "ab708cf8-449b-4e4a-af4e-a2510f04839d", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the information about memory techniques. How many cause-effect relationships can you find? Determine the causes and their effects.

", + "additional": "

Memory Tricks

Techniques for remembering things like lists, numbers, and facts, are called mnemonic devices. For example, people often use things like poems, pictures, or movements because it is easier to remember stories, images, or actions than plain facts and lists.

Acronyms are one type of mnemonic. For example, it may be hard to remember the colors of the rainbow in the order that they appear. Someone therefore made an acronym for this: ROY G BIV. The first letters in the acronym are the first letters in the names for the colors: red, orange, yellow, green, blue, indigo, and violet. The name Roy G. Biv is meaningless, but it's short, so it is easier to remember than the list.

English spelling rules can also be difficult to learn, so some students use rhymes to help them remember the rules. By learning \"i before e except after c\" (where you hear /ee/), students of English remember the spelling of words like niece and receipt.

", + "segments": [ + { + "html": "

Identifying Cause and Effect Relationships

Let's analyze the given text to identify cause-effect relationships. We'll go through the passage step by step, looking for connections between events or ideas.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "How many cause-effect relationships can you find?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

1. Mnemonic Devices

The first paragraph introduces mnemonic devices:

  • Cause: People use poems, pictures, or movements
  • Effect: It is easier to remember stories, images, or actions than plain facts and lists
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "people often use things like poems, pictures, or movements", + "it is easier to remember stories, images, or actions than plain facts and lists" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

2. Acronyms for Rainbow Colors

The second paragraph discusses using acronyms:

  • Cause: It's hard to remember the colors of the rainbow in order
  • Effect: Someone created the acronym ROY G BIV

There's another cause-effect relationship here:

  • Cause: The name Roy G. Biv is short and meaningless
  • Effect: It's easier to remember than the full list of colors
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "it may be hard to remember the colors of the rainbow in the order that they appear", + "Someone therefore made an acronym for this: ROY G BIV", + "it's short, so it is easier to remember than the list" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

3. Rhymes for Spelling Rules

The final paragraph talks about using rhymes for spelling:

  • Cause: English spelling rules can be difficult to learn
  • Effect: Some students use rhymes to help them remember the rules

And another relationship:

  • Cause: Students learn the rhyme \"i before e except after c\"
  • Effect: Students remember the spelling of words like niece and receipt
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "English spelling rules can also be difficult to learn", + "some students use rhymes to help them remember the rules", + "By learning \"i before e except after c\"", + "students of English remember the spelling of words like niece and receipt" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Summary of Cause-Effect Relationships

We've identified 5 cause-effect relationships in total:

  1. Use of mnemonic devices leading to easier remembering
  2. Difficulty remembering rainbow colors leading to creation of ROY G BIV
  3. Short, meaningless acronym leading to easier memorization
  4. Difficulty with spelling rules leading to use of rhymes
  5. Learning specific rhyme leading to remembering specific spellings
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

The Value of Identifying Cause and Effect

Recognizing cause and effect relationships helps us understand the logic and structure of a text. It allows us to see how different ideas or events are connected, which can improve our comprehension and critical thinking skills.

In this exercise, identifying these relationships helped us understand:

  • Why certain memory techniques are used
  • How these techniques work to improve memory
  • The reasoning behind specific mnemonic devices
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Looking for Signal Words

As mentioned in the tip, certain words can signal cause-effect relationships. In our text, we can find a few examples:

  • 'because' in \"because it is easier to remember stories...\"
  • 'therefore' in \"Someone therefore made an acronym...\"
  • 'so' in \"it's short, so it is easier to remember...\"

Recognizing these signal words can make it easier to identify cause-effect relationships in future readings.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 90, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use stress.", + "text": "Use stress with: (n.) effects of stress, work-related stress; (adj.) emotional stress, mental stress, physical stress; (v.) cause stress, cope with stress, deal with stress, experience stress, reduce stress.", + "html": "

Use stress with: (n.) effects of stress, work-related stress; (adj.) emotional stress, mental stress, physical stress; (v.) cause stress, cope with stress, deal with stress, experience stress, reduce stress.

", + "id": "535453f2-96d6-4845-af23-234a3edba570", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 91, + "tips": [ + { + "category": "Word Link", + "embedding": "The prefix trans- means 'moving across or changing from one thing to another'.", + "text": "The prefix trans- means 'moving across or changing from one thing to another', e.g., transfer, transition, translate, transform.", + "html": "

The prefix trans- means 'moving across or changing from one thing to another', e.g., transfer, transition, translate, transform.

", + "id": "99166b16-a227-4abd-9b3c-a7ca0aaa448d", + "verified": true, + "standalone": true + }, + { + "category": "Strategy", + "embedding": "Use key words in titles and subheads to help you predict what a passage is about.", + "text": "Use key words in titles and subheads to help you predict what a passage is about.", + "html": "

Use key words in titles and subheads to help you predict what a passage is about.

", + "id": "bb189a09-f9b9-4f8e-92b2-b77f6cb0f8a9", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Identify the key words in the titles and the subheads of the reading passages. Use the words to help you complete the sentences.

1. I think the reading passage titled \"Train Your Brain!\" is about how ...

2. I think the reading passage titled \"Sleep and Memory\" is about how ...

", + "additional": "

Train Your Brain!

Is there anything you can do to have a better memory? Research shows that mental and physical exercise and lifestyle choices can affect memory. In fact, many experts agree it is possible to improve your memory.

Here are some tips:

Avoid stress

Recent research shows that stress is bad for the brain. In fact, one study connects worrying with memory loss. Therefore, if you can avoid stress in your life, you may also improve your memory. Relaxation techniques like yoga are one way to reduce stress.

Play games

Can brainteasers1 like sudoku puzzles improve memory? Some scientists say that mental activity might help memory. Puzzles, math problems, even reading and writing, can probably all benefit the brain.

Get some rest

\"Poor sleep before or after learning makes it hard to encode2 new memories\", says Harvard University scientist Robert Stickgold. One study shows that by getting a good night's sleep, people remember a motor skill (such as piano playing) 30 percent better.

Eat right

Your brain can benefit from a healthy diet, just like the rest of your body. Foods that have antioxidants,3 such as blueberries, are good for brain cells. This helps memory.

1 Brainteasers are activities that exercise the mind, such as puzzles.2 If you encode information, you put it into a different form or system of language.3 Antioxidants are chemicals that reduce the effect of harmful substances in your body.

Sleep and Memory

Many people think that sleep must be important for learning and memory, but until recently there was no proof. Scientists also believe the hippocampus plays a role in making long-term memories, but they weren't sure how. Now they understand how the process happens—and why sleep is so important.

Memories in Motion

A research team at Rutgers University recently discovered a type of brain activity that happens during sleep. The activity transfers new information from the hippocampus to the neocortex. The neocortex stores long-term memories. The researchers call the transferring activity \"sharp wave ripples\", because the transferring activity looks like powerful, short waves. The brain creates these waves in the hippocampus during the deepest levels of sleep.

The Rutgers scientists discovered the wave activity in a 2009 study using rats. They trained the rats to learn a route in a maze. Then they let the rats sleep after the training session. They gave one group of sleeping rats a drug. The drug stopped the rats' wave activity. As a result, this group of rats had trouble remembering the route. The reason? The new information didn't have a chance to leave the hippocampus and go to the neocortex.

Lifelong Memories

The experiment explains how we create long-term memories. The wave activity transfers short-term memories from the hippocampus to the neocortex. Then the neocortex turns the sharp wave ripples into long-term memories. Researcher György Buzsáki says this is \"why certain events may only take place once in the waking state and yet can be remembered for a lifetime.\"

The Rutgers study is important because it proves the importance of sleep for learning and memory. It also finally explains how the brain makes long-term memories.

", + "segments": [ + { + "html": "

Predicting Content from Titles and Subheads

Let's examine the titles and subheads of the given passages to predict their content. We'll focus on identifying key words that give us clues about the main ideas.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Identify the key words in the titles and the subheads" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Passage 1: \"Train Your Brain!\"

Key words in the title and subheads:

  • Train Your Brain
  • Avoid stress
  • Play games
  • Get some rest
  • Eat right

These key words suggest that the passage is about different ways to improve brain function and memory.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Train Your Brain!", + "Avoid stress", + "Play games", + "Get some rest", + "Eat right" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Based on these key words, we can complete the first sentence:

1. I think the reading passage titled \"Train Your Brain!\" is about how to improve memory and brain function through various lifestyle choices and activities.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "text-1", + "position": "replace", + "html": "to improve memory and brain function through various lifestyle choices and activities." + } + ] + }, + { + "html": "

Passage 2: \"Sleep and Memory\"

Key words in the title and subheads:

  • Sleep and Memory
  • Memories in Motion
  • Lifelong Memories

These key words indicate that the passage explores the relationship between sleep and memory formation, particularly how sleep affects long-term memory.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Sleep and Memory", + "Memories in Motion", + "Lifelong Memories" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Based on these key words, we can complete the second sentence:

2. I think the reading passage titled \"Sleep and Memory\" is about how sleep plays a crucial role in forming and consolidating long-term memories.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "text-2", + "position": "replace", + "html": "sleep plays a crucial role in forming and consolidating long-term memories." + } + ] + }, + { + "html": "

The Value of Using Key Words in Titles and Subheads

Identifying key words in titles and subheads is a powerful strategy for predicting and understanding the content of a passage. This approach offers several benefits:

  • It provides a quick overview of the main topics
  • It helps focus your attention on important concepts
  • It allows you to make predictions about the content, engaging your prior knowledge
  • It improves comprehension by creating a mental framework for the information you're about to read
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

By using this technique, we were able to quickly grasp the main ideas of both passages without reading the full text. This skill is particularly useful when you need to quickly assess whether a text is relevant to your needs or when you want to prepare your mind for more detailed reading.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 94, + "tips": [ + { + "category": "Strategy", + "embedding": "Use key words in questions, especially nouns and noun phrases, to help you scan for the most relevant parts of a text.", + "text": "Use key words in questions, especially nouns and noun phrases, to help you scan for the most relevant parts of a text.", + "html": "

Use key words in questions, especially nouns and noun phrases, to help you scan for the most relevant parts of a text.

", + "id": "e2f954f0-1961-4647-aecc-d0ccb28b1e9e", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the passage and then complete the following exercises:

What did you learn from the second reading \"Sleep and Memory\"?

The main idea of \"Sleep and Memory\" is ____________________________

Complete the following sentences about \"Sleep and Memory.\"

  1. A team from Rutgers University found ____________________________.
  2. Sharp wave ripples transfer information from the ____________________________ to the ____________________________.
  3. Some rats had trouble remembering a route because ____________________________.
", + "additional": "

Sleep and Memory

Many people think that sleep must be important for learning and memory, but until recently there was no proof. Scientists also believe the hippocampus plays a role in making long-term memories, but they weren't sure how. Now they understand how the process happens—and why sleep is so important.

Memories in Motion

A research team at Rutgers University recently discovered a type of brain activity that happens during sleep. The activity transfers new information from the hippocampus to the neocortex. The neocortex stores long-term memories. The researchers call the transferring activity \"sharp wave ripples\", because the transferring activity looks like powerful, short waves. The brain creates these waves in the hippocampus during the deepest levels of sleep.

The Rutgers scientists discovered the wave activity in a 2009 study using rats. They trained the rats to learn a route in a maze. Then they let the rats sleep after the training session. They gave one group of sleeping rats a drug. The drug stopped the rats' wave activity. As a result, this group of rats had trouble remembering the route. The reason? The new information didn't have a chance to leave the hippocampus and go to the neocortex.

Lifelong Memories

The experiment explains how we create long-term memories. The wave activity transfers short-term memories from the hippocampus to the neocortex. Then the neocortex turns the sharp wave ripples into long-term memories. Researcher György Buzsáki says this is \"why certain events may only take place once in the waking state and yet can be remembered for a lifetime.\"

The Rutgers study is important because it proves the importance of sleep for learning and memory. It also finally explains how the brain makes long-term memories.

", + "segments": [ + { + "html": "

Understanding 'Sleep and Memory'

Let's analyze the passage 'Sleep and Memory' to answer the questions. We'll focus on key words and phrases to help us locate the relevant information.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "What did you learn from the second reading \"Sleep and Memory\"?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Main Idea

To identify the main idea, let's look for recurring themes throughout the passage:

  • Sleep's importance for memory and learning
  • The process of forming long-term memories during sleep
  • The role of 'sharp wave ripples' in transferring information

Based on these key points, we can conclude that the main idea is:

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "sleep must be important for learning and memory", + "The activity transfers new information", + "The experiment explains how we create long-term memories" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "how sleep plays a crucial role in forming long-term memories through a process called 'sharp wave ripples'." + } + ] + }, + { + "html": "

Rutgers University Research

Let's look for information about the Rutgers University team's discovery:

  • Key words: Rutgers University, discovered, brain activity
  • Relevant information: 'A research team at Rutgers University recently discovered a type of brain activity that happens during sleep.'
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "A research team at Rutgers University recently discovered a type of brain activity that happens during sleep" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "a type of brain activity that occurs during sleep" + } + ] + }, + { + "html": "

Sharp Wave Ripples

To understand the transfer of information, let's focus on 'sharp wave ripples':

  • Key words: sharp wave ripples, transfer, hippocampus, neocortex
  • Relevant information: 'The activity transfers new information from the hippocampus to the neocortex. The neocortex stores long-term memories.'
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "The activity transfers new information from the hippocampus to the neocortex. The neocortex stores long-term memories." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "hippocampus" + }, + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "neocortex" + } + ] + }, + { + "html": "

Rats' Memory Experiment

To understand why some rats had trouble remembering the route:

  • Key words: rats, trouble remembering, drug, wave activity
  • Relevant information: 'They gave one group of sleeping rats a drug. The drug stopped the rats' wave activity. As a result, this group of rats had trouble remembering the route.'
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "They gave one group of sleeping rats a drug. The drug stopped the rats' wave activity. As a result, this group of rats had trouble remembering the route." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "a drug stopped their sharp wave ripple activity during sleep" + } + ] + }, + { + "html": "

The Value of Using Key Words

Using key words, especially nouns and noun phrases, to scan for relevant information is an effective strategy because:

  • It helps you quickly locate specific information in a text
  • It allows you to focus on the most important concepts
  • It saves time when answering questions or completing tasks
  • It improves your comprehension by guiding your attention to crucial details
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

By using this technique, we were able to efficiently extract the necessary information from the 'Sleep and Memory' passage to answer the questions. This skill is particularly useful when dealing with longer texts or when you need to quickly find specific information.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 95, + "tips": [ + { + "category": "Language for Writing", + "embedding": "Using By + Gerund\n\nUse by with a gerund to say how to do something. By + gerund expresses how to reach a result.", + "text": "Using By + Gerund\n\nUse by with a gerund to say how to do something. By + gerund forms can appear at the beginning or at the end of a sentence. Use a comma when they appear at the beginning of a sentence.\nYou can improve your memory by getting enough sleep.\nBy getting enough sleep, you can improve your memory.\n\nBy + gerund expresses how to reach a result:\nBy eating right (cause), you can improve your memory. (effect)", + "html": "

Using By + Gerund

Use by with a gerund to say how to do something. By + gerund forms can appear at the beginning or at the end of a sentence. Use a comma when they appear at the beginning of a sentence.

You can improve your memory by getting enough sleep. By getting enough sleep, you can improve your memory.

By + gerund expresses how to reach a result:

By eating right (cause),
you can improve your memory. (effect)
", + "id": "9e02d489-e457-41b1-933b-3a6abc3f57b4", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Use by + gerund to combine the sentence parts.

write new words on cards / a person can retain them better

give rats drugs / the scientists stopped their brain waves

you can improve your memory / do puzzles

", + "segments": [ + { + "html": "

Combining Sentences with By + Gerund

Let's practice using 'by + gerund' to combine sentence parts. This structure helps us express how to achieve a particular result.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Use by + gerund to combine the sentence parts." + ] + } + ], + "insertHTML": [] + }, + { + "html": "

1. Write new words on cards / a person can retain them better

To combine these parts, we'll use the gerund form of 'write' after 'by'. We can structure this sentence in two ways:

  • A person can retain new words better by writing them on cards.
  • By writing new words on cards, a person can retain them better.

Both forms are correct. The second form uses a comma after the 'by + gerund' phrase.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "A person can retain new words better by writing them on cards." + } + ] + }, + { + "html": "

2. Give rats drugs / the scientists stopped their brain waves

For this sentence, we'll use the gerund form of 'give' after 'by'. Again, we have two possible structures:

  • The scientists stopped the rats' brain waves by giving them drugs.
  • By giving rats drugs, the scientists stopped their brain waves.

Both are correct, with the second form using a comma after the 'by + gerund' phrase.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "The scientists stopped the rats' brain waves by giving them drugs." + } + ] + }, + { + "html": "

3. You can improve your memory / do puzzles

For this sentence, we'll use the gerund form of 'do' after 'by'. Here are our two options:

  • You can improve your memory by doing puzzles.
  • By doing puzzles, you can improve your memory.

Both forms are correct, with the second form using a comma after the 'by + gerund' phrase.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "You can improve your memory by doing puzzles." + } + ] + }, + { + "html": "

The Value of Using By + Gerund

Using 'by + gerund' is beneficial for several reasons:

  • It allows us to express how an action is performed or a result is achieved
  • It helps create more concise and varied sentences
  • It clearly shows the relationship between cause and effect
  • It provides flexibility in sentence structure, allowing emphasis on either the method or the result
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

By mastering this structure, you can enhance your writing and speaking skills, making your English more natural and expressive. Remember, practice is key to becoming comfortable with using 'by + gerund' in various contexts.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 96, + "tips": [ + { + "category": "Writing Skill", + "embedding": "Using an Outline\n\nUsing an outline helps you to organize your main idea, supporting ideas, and examples and/or details.", + "text": "Using an Outline\n\nUsing an outline helps you to organize your main idea, supporting ideas, and examples and/or details. The examples might be a list of reasons, or steps in a process. An outline is like a map because it gives you something to follow. For example, you can use an outline to develop your ideas in a descriptive paragraph.\nDon't write complete sentences in an outline, except for your topic sentence.", + "html": "

Using an Outline

Using an outline helps you to organize your main idea, supporting ideas, and examples and/or details. The examples might be a list of reasons, or steps in a process. An outline is like a map because it gives you something to follow. For example, you can use an outline to develop your ideas in a descriptive paragraph.

Don't write complete sentences in an outline, except for your topic sentence.

", + "id": "4834e060-6c94-4f81-b8db-15a2da92b831", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Look at the outline below and read the paragraph that follows. Match sentences in the paragraph (a-i) to the parts of the outline. (Two sentences are extra.)

How to Memorize a Route

__ (topic sentence)

memorize as steps __ (supporting idea 1)

write names, directions __ (details)

repeat __ (details)

create mental picture __ (supporting idea 2)

study a map __ (details)

imagine following route __ (details)

", + "additional": "

When you have to memorize a route, you should use a technique that works well for you. One way is to memorize the directions as a set of steps. To do this, write the street names and directions in the correct order on a piece of paper. If you repeat the steps several times, you won't have to look at the list anymore. You can also memorize a route by creating a mental picture of it. That is, see the streets and the places on the streets in your mind. To do this, study the route as it appears on a map. Then imagine yourself following the route. See the buildings and other places along the route in your mind. There are other ways to learn routes; use the method that works best for you.

", + "segments": [ + { + "html": "

Completing the 'How to Memorize a Route' Outline

Let's go through the outline and match it with the appropriate sentences from the paragraph, focusing on how each part contributes to the overall structure of memorizing a route.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Topic Sentence

The outline begins with a topic sentence that introduces the main idea. Sentence (a) serves this purpose perfectly, emphasizing the importance of choosing a suitable memorization technique.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "a" + } + ] + }, + { + "html": "

Supporting Idea 1: Memorizing as Steps

The first method suggested is memorizing the route as a series of steps. Sentence (b) directly corresponds to this idea, introducing it as one approach to route memorization.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "b" + } + ] + }, + { + "html": "

Details of Step Method

For the step method, two important details are provided. First, sentence (c) explains how to write down the route information. Then, sentence (d) emphasizes the importance of repetition in this method.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "c" + }, + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "d" + } + ] + }, + { + "html": "

Supporting Idea 2: Creating a Mental Picture

The second method introduced is creating a mental picture of the route. Sentence (e) clearly presents this alternative approach to route memorization.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "e" + } + ] + }, + { + "html": "

Details of Mental Picture Method

For the mental picture method, two key steps are outlined. Sentence (g) suggests studying a map to visualize the route, while sentence (h) encourages imagining yourself following the route, focusing on landmarks and buildings.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-6", + "position": "replace", + "html": "g" + }, + { + "target": "question", + "targetId": "blank-7", + "position": "replace", + "html": "h" + } + ] + }, + { + "html": "

Completing the Outline

By matching these sentences to the outline, we've created a structured guide for memorizing routes. The outline now clearly presents two main methods - step-by-step memorization and mental visualization - along with specific techniques for each method.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + } + ] + }, + { + "unit": 6, + "title": "Dangerous Cures", + "pages": [ + { + "page": 104, + "tips": [ + { + "category": "Word Link", + "embedding": "dis = negative, not", + "text": "dis = negative, not: disease, disagree, disappear, discomfort, discontinue, discourage, disrespect", + "html": "

dis = negative, not: disease, disagree, disappear, discomfort, discontinue, discourage, disrespect

", + "id": "b5fbd194-6148-4e6c-81ea-5b7d59f58966", + "verified": true, + "standalone": true + }, + { + "category": "Strategy", + "embedding": "Look for clues in titles, captions, and opening sentences to get a sense of the general topic of a passage. This will help you predict the kind of information you are going to read about.", + "text": "Look for clues in titles, captions, and opening sentences to get a sense of the general topic of a passage. This will help you predict the kind of information you are going to read about.", + "html": "

Look for clues in titles, captions, and opening sentences to get a sense of the general topic of a passage. This will help you predict the kind of information you are going to read about.

", + "id": "10a5e2ed-d615-4e63-afe5-d0f17bff891e", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Skim the reading passage quickly. What do you think the reading is mainly about?

  • a recent event
  • a person's job
  • an unusual place
  • a serious disease
  • an endangered animal
", + "additional": "

The Snake Chaser

As a boy, Zoltan Takacs caught snakes and kept them in his room. Now he hunts them for a living.1

Zoltan Takacs collects snake venom so that he can study it. He wants to find out if the venom can be used as medicine to cure people. Usually, he travels alone with only a backpack, a camera bag, and equipment for collecting the venom. He often flies small planes to reach faraway places, and has traveled to 134 countries. His trips are often dangerous; he has encountered pirates,2 wars, and angry elephants. He has also survived six venomous snake bites. Takacs's adventures are like action movies, but his goal is pure science: \"Animal venoms\", Takacs explains, \"are the source of over a dozen medications.\"3

Why do toxins make good medications?

Many drugs produce side effects. These side effects happen because the drugs affect more than one target. For example, most cancer drugs can't tell the difference between cancer cells and healthy cells. So the drugs kill cancer cells, but they also kill other healthy cells in the body. Toxins are a good model for medications because they can hit a single target. But finding the right toxin to fight a specific disease can take years of work. That's why Takacs and his colleagues have developed a new technology. It allows the creation of \"toxin libraries.\"

How does the technology work?

The new toxin libraries help researchers identify which toxin might cure a specific disease. With the new technology, testing can happen much more quickly and efficiently than before. A researcher can test many different toxins at once to see if any of them have an effect on a specific disease. Takacs thinks the technology will help researchers develop new toxin-based drugs for a lot of different diseases. But Takacs is also worried that a lot of possible toxin-based drugs are being lost.

Why are we losing potential drugs?

According to Takacs, \"Scientists have studied fewer than a thousand animal toxins... But some 20 million more exist.\" Some of these animal toxins come from endangered species. So every time an animal becomes extinct, it's possible that a new drug is lost, too. For example, the venom of an endangered snake could potentially lead to a medicine that saves human lives.

Takacs explains, \"Once we've allowed something to become extinct … there's no way back … For me, losing biodiversity means losing beauty, … knowledge, and resources, including possibilities for treating diseases.\" Losing species, he says, is \"like peeling4 out pages from a book we've never read, then burning them.\"

Why do snakes not poison themselves?

A snake's venom aims only at a specific part of the body. However, if contact with the target is blocked, the toxin has no effect. For example, when researchers inject5 a cobra with its own venom, nothing happens. This is because cobras have a molecule6 that blocks the toxin from making contact with its target.

1. If you do something for a living, you do it as your main job. 2. Pirates are people who attack ships to rob them. 3. Medications are medicines that are used to treat and cure illnesses. 4. If you peel something, you remove layers from it one at a time. 5. If you inject something, such as medicine, you put it into a person or animal using a needle. 6. A molecule is the smallest amount of a chemical that can exist by itself.
", + "segments": [ + { + "html": "

Skimming the Passage: Identifying the Main Topic

Let's approach this exercise by quickly examining key elements of the passage to determine its main focus.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Skim the reading passage quickly" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

1. Examining the Title

The title 'The Snake Chaser' immediately suggests that the passage is about a person with an unusual occupation related to snakes.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "The Snake Chaser" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

2. Opening Sentences

The first paragraph provides more context:

  • 'As a boy, Zoltan Takacs caught snakes and kept them in his room.'
  • 'Now he hunts them for a living.'

These sentences confirm that the passage is about a person's unusual job involving snakes.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "As a boy, Zoltan Takacs caught snakes and kept them in his room. Now he hunts them for a living." + ] + } + ], + "insertHTML": [] + }, + { + "html": "

3. Subheadings

Skimming through the subheadings, we see:

  • 'Why do toxins make good medications?'
  • 'How does the technology work?'
  • 'Why are we losing potential drugs?'
  • 'Why do snakes not poison themselves?'

These subheadings indicate that the passage explores various aspects of snake venom and its potential medical applications, all related to the snake chaser's work.

", + "wordDelay": 200, + "holdDelay": 12000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Why do toxins make good medications?", + "How does the technology work?", + "Why are we losing potential drugs?", + "Why do snakes not poison themselves?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Conclusion

Based on our quick skim of the title, opening sentences, and subheadings, we can conclude that the passage is mainly about:

b. a person's job

Specifically, it's about Zoltan Takacs' unique profession as a 'snake chaser' and how his work with snake venom relates to medical research.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "a person's job" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

The Value of Skimming Techniques

This exercise demonstrates the importance of efficient reading strategies:

  • Titles often provide a clear indication of the main topic
  • Opening sentences frequently introduce the central theme or subject
  • Subheadings offer a quick overview of the key points covered

By focusing on these elements, we can quickly grasp the main idea of a passage without reading every word, saving time and improving comprehension.

", + "wordDelay": 200, + "holdDelay": 12000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 107, + "tips": [ + { + "category": "CT Focus", + "embedding": "Figurative language allows a writer to compare one thing to another. When you read, it's important to understand how the two things being compared are similar.", + "text": "Figurative language allows a writer to compare one thing to another. When you read, it's important to understand how the two things being compared are similar.", + "html": "

Figurative language allows a writer to compare one thing to another. When you read, it's important to understand how the two things being compared are similar.

", + "id": "6f742334-21ee-40e2-a804-891f85e20881", + "verified": true, + "standalone": false, + "exercise": { + "question": "

What is the writer's or speaker's meaning in each sentence? Choose a or b.

  1. Takacs's adventures are like action movies.

    • Takacs's life is similar to the life of a famous movie actor.
    • Takacs's job is sometimes like the events in a movie.

  2. Takacs and his colleagues have developed a new technology. It allows the creation of \"toxin libraries\".

    • In a toxin library, toxins are arranged in order on shelves, like books in a library.
    • In a toxin library, a lot of information is stored in a way that's easy to search.

  3. \"Biodiversity loss is like peeling out pages from a book we've never read, then burning them.\"

    • Biodiversity loss can be very dangerous, as it often results from burning large areas of forest.
    • Biodiversity loss is a problem because we lose species before we understand them.
", + "segments": [ + { + "html": "

Understanding Figurative Language in Context

Let's analyze each sentence to understand the writer's intended meaning through the use of figurative language.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "What is the writer's or speaker's meaning in each sentence?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

1. \"Takacs's adventures are like action movies.\"

This comparison suggests that Takacs's experiences in his job share similarities with exciting events typically seen in action movies. Let's consider the options:

  • Option a suggests Takacs lives like a movie actor, which isn't the point of the comparison.
  • Option b correctly interprets that Takacs's job involves events similar to those in action movies - likely dangerous, exciting, or unusual situations.

The correct answer is b. Takacs's job is sometimes like the events in a movie.

", + "wordDelay": 200, + "holdDelay": 12000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Takacs's adventures are like action movies.", + "Takacs's job is sometimes like the events in a movie." + ] + } + ], + "insertHTML": [] + }, + { + "html": "

2. \"Toxin libraries\"

This phrase uses the concept of a library as a metaphor. Let's examine the options:

  • Option a takes the metaphor too literally, suggesting physical arrangement like books.
  • Option b captures the essence of a library - organized information that's easy to access and search.

The correct answer is b. In a toxin library, a lot of information is stored in a way that's easy to search.

", + "wordDelay": 200, + "holdDelay": 12000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "It allows the creation of \"toxin libraries\".", + "In a toxin library, a lot of information is stored in a way that's easy to search." + ] + } + ], + "insertHTML": [] + }, + { + "html": "

3. \"Biodiversity loss is like peeling out pages from a book we've never read, then burning them.\"

This vivid metaphor compares biodiversity loss to destroying unread book pages. Let's analyze the options:

  • Option a misinterprets the metaphor, focusing on literal burning of forests.
  • Option b correctly captures the metaphor's meaning - losing species before we can study and understand them.

The correct answer is b. Biodiversity loss is a problem because we lose species before we understand them.

", + "wordDelay": 200, + "holdDelay": 12000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Biodiversity loss is like peeling out pages from a book we've never read, then burning them.", + "Biodiversity loss is a problem because we lose species before we understand them." + ] + } + ], + "insertHTML": [] + }, + { + "html": "

The Power of Figurative Language

These examples demonstrate how figurative language enhances writing by:

  • Creating vivid imagery that helps readers visualize complex ideas
  • Drawing parallels between familiar concepts and new information
  • Conveying emotions and emphasizing the importance of certain ideas

By recognizing and interpreting figurative language, readers can gain a deeper understanding of the writer's message and engage more fully with the text.

", + "wordDelay": 200, + "holdDelay": 12000, + "highlight": [ + { + "targets": [ + "tip" + ], + "phrases": [ + "Figurative language allows a writer to compare one thing to another.", + "understand how the two things being compared are similar" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 108, + "tips": [ + { + "category": "Reading Skill", + "embedding": "Identifying Pros and Cons\n\nPros are advantages (positive effects) of something, and cons are disadvantages (negative effects) of something.", + "text": "Identifying Pros and Cons\n\nPros are advantages (positive effects) of something, and cons are disadvantages (negative effects) of something. Writers often provide the pros and cons of an issue in order to make a more balanced argument. Identifying the pros and cons of an issue will help you evaluate the strength of a writer's arguments. It will also help you decide your own opinion on the issue.\n\nLook at the facts below. Is each fact a pro or a con for studying snake venom?\nIt can be very dangerous.\nA snake's venom might be used to cure a serious disease.\nSnake venom is a good model for medications.\n\nThe first fact is a con (a disadvantage of studying snake venom), and the other two are pros.", + "html": "

Identifying Pros and Cons

Pros are advantages (positive effects) of something, and cons are disadvantages (negative effects) of something. Writers often provide the pros and cons of an issue in order to make a more balanced argument. Identifying the pros and cons of an issue will help you evaluate the strength of a writer's arguments. It will also help you decide your own opinion on the issue.

Look at the facts below. Is each fact a pro or a con for studying snake venom?

It can be very dangerous.
A snake's venom might be used to cure a serious disease.
Snake venom is a good model for medications.

The first fact is a con (a disadvantage of studying snake venom), and the other two are pros.

", + "id": "a191ea0f-d097-4510-8c08-345246941aec", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the passage about the study of viruses. Then take notes in the table.

Pros of Studying Extinct VirusesCons of Studying Extinct Viruses
", + "additional": "

Should Dead Viruses Be Given New Life?

Scientists called virologists study viruses1 to discover how they work and how to stop people from getting them. Of course, working with viruses is very dangerous. Some viruses can infect large numbers of people very quickly. Other viruses, such as HIV, still have no widely available vaccine2 or cure. In the past few years, some virologists have begun studying extinct viruses — ones that died out long ago. They discovered that all humans have pieces of very old viruses in their bodies. Some of these viruses are hundreds of thousands of years old. The virologists were able to rebuild some of the viruses and bring them back to life.

Although some people think that rebuilding viruses is potentially very dangerous, the virologists argue that studying these extinct viruses can teach us more about how viruses cause disease. They also believe that these viruses can tell us a lot about how our human species developed in the past. In addition, the scientists can develop vaccines for these diseases in case they reappear one day and begin infecting people again.

1. A virus is a germ that can cause disease, such as smallpox, polio, and HIV. 2. A vaccine is a substance that doctors put in people's bodies so that they won't get particular diseases.
", + "segments": [ + { + "html": "

Analyzing Pros and Cons of Studying Extinct Viruses

Let's examine the passage to identify the advantages (pros) and disadvantages (cons) of studying extinct viruses.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Pros of Studying Extinct Viruses", + "Cons of Studying Extinct Viruses" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Pros of Studying Extinct Viruses

Let's identify the advantages mentioned in the passage:

  • Can teach us more about how viruses cause disease
  • Can provide information about human species development
  • Allows for development of vaccines against potential reappearance
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "studying these extinct viruses can teach us more about how viruses cause disease", + "these viruses can tell us a lot about how our human species developed in the past", + "scientists can develop vaccines for these diseases in case they reappear one day" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "
  • Learn about disease mechanisms
  • Understand human evolution
  • Develop preventive vaccines
" + } + ] + }, + { + "html": "

Cons of Studying Extinct Viruses

Now, let's identify the disadvantages or potential risks:

  • Potentially very dangerous
  • Risk of viruses infecting large numbers of people quickly
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "rebuilding viruses is potentially very dangerous", + "Some viruses can infect large numbers of people very quickly" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "
  • Potentially dangerous
  • Risk of rapid infection spread
" + } + ] + }, + { + "html": "

Balancing Pros and Cons

By identifying both pros and cons, we can see that:

  • The study of extinct viruses offers significant potential benefits for scientific knowledge and public health
  • However, it also carries serious risks that need to be carefully considered
  • This balanced view helps us understand the complexity of the issue and why it might be controversial
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

The Importance of Identifying Pros and Cons

Recognizing pros and cons in a text is valuable because it:

  • Helps us understand complex issues from multiple perspectives
  • Allows us to evaluate the strength of arguments presented
  • Encourages critical thinking about the topic
  • Aids in forming our own informed opinions on the matter
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 110, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use relief.", + "text": "Use relief with: (n.) pain relief, sense of relief; (v.) express relief, feel relief, bring relief, get relief (from), provide relief (for).", + "html": "

Use relief with: (n.) pain relief, sense of relief; (v.) express relief, feel relief, bring relief, get relief (from), provide relief (for).

", + "id": "2fb41a74-409f-47c1-85db-b1cbd0bbf7b5", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 114, + "tips": [ + { + "category": "CT Focus", + "embedding": "Writers often use as if when they make a figurative comparison.", + "text": "Writers often use as if when they make a figurative comparison. For example: He acted as if he was a king.", + "html": "

Writers often use as if when they make a figurative comparison. For example: He acted as if he was a king.

", + "id": "7ca405a9-3d39-4e4d-9021-66f23bf78b6a", + "verified": true, + "standalone": false, + "exercise": { + "question": "

What does the word it refer to in each sentence (1-4) from the reading passage? Use information in the reading to match items a-f with the sentences. Two items are extra.

  • a headache
  • Botox
  • botulinum
  • Fleisher's career
  • Fleisher's hand
  • the feeling
  • 'It was as if my hand had been taken over by aliens.'
  • 'It was not under my control.'
  • 'One gram of it could kill 20 million people.'
  • 'It's used to make skin look younger...'
", + "additional": "

Poison and the Piano Player

In the 1950s and '60s, Leon Fleisher was one of the world's greatest piano players. But one day in 1964, his career suddenly ended. While he was practicing, he started to lose control of the fourth and fifth fingers on his right hand. \"Wow\", he thought, \"I'd better practice harder\". But his problem got worse.

Fleisher saw several different doctors. He had injections and medications and other treatments, but nothing worked. \"It was as if my hand had been taken over by aliens\", he says. \"It was not under my control.\" His career was finished.

Finally, after more than 30 years, Fleisher found out what was wrong. He had focal dystonia, a disease that makes muscles move in strange, and sometimes painful, ways. At last, relief seemed possible. He went to the U.S. National Institutes of Health, where researchers were testing botulinum toxin as a cure for the disease.

Botulinum toxin is one of the most poisonous toxins in the world: One gram of it could kill 20 million people. But scientists have used it to create the drug Botox. This drug is now safely used in small doses to treat many different problems. It's used to make skin look younger, to stop headaches, and even to cure some serious diseases.

The botulinum toxin cured Fleisher, and he got his career back. He began performing again, and he made his first recording in 40 years. Recently, he received a Kennedy Center Award, which is given for important contributions to the arts in America.

", + "segments": [ + { + "html": "

Analyzing the Exercise

This exercise requires us to identify what the word 'it' refers to in four sentences from the reading passage. We need to carefully consider the context and match each sentence with the correct option (a-f). Let's examine each sentence:

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "What does the word it refer to in each sentence (1-4) from the reading passage?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Sentence 1

'It was as if my hand had been taken over by aliens.'

  • Context: Fleisher is describing the loss of control in his hand.
  • Analysis: 'It' refers to the feeling or situation with his hand.
  • Correct answer: f. the feeling
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question", + "additional" + ], + "phrases": [ + "It was as if my hand had been taken over by aliens" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "f" + } + ] + }, + { + "html": "

Sentence 2

'It was not under my control.'

  • Context: This sentence directly follows the previous one, still describing Fleisher's hand.
  • Analysis: Here, 'it' clearly refers to Fleisher's hand itself.
  • Correct answer: e. Fleisher's hand
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question", + "additional" + ], + "phrases": [ + "It was not under my control" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "e" + } + ] + }, + { + "html": "

Sentence 3

'One gram of it could kill 20 million people.'

  • Context: This sentence is discussing the potency of botulinum toxin.
  • Analysis: 'It' refers to the botulinum toxin mentioned in the previous sentence.
  • Correct answer: c. botulinum
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question", + "additional" + ], + "phrases": [ + "One gram of it could kill 20 million people" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "c" + } + ] + }, + { + "html": "

Sentence 4

'It's used to make skin look younger...'

  • Context: This sentence follows the introduction of Botox, the drug created from botulinum toxin.
  • Analysis: 'It' in this case refers to Botox, not the toxin itself.
  • Correct answer: b. Botox
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question", + "additional" + ], + "phrases": [ + "It's used to make skin look younger" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "b" + } + ] + }, + { + "html": "

Understanding the Tip

The tip about using 'as if' in figurative comparisons is particularly relevant to the first sentence:

  • 'It was as if my hand had been taken over by aliens' uses 'as if' to create a vivid comparison.
  • This figurative language helps readers understand the strange feeling Fleisher experienced.
  • The use of 'as if' indicates that 'it' refers to the feeling or situation, not the hand itself.
  • This understanding helps us differentiate between sentences 1 and 2, where 'it' refers to different things despite being about the same topic.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 115, + "tips": [ + { + "category": "Language for Writing", + "embedding": "Making Concessions\n\nMaking a concession is saying that one idea is true, but another idea is stronger or more important, according to the writer. In other words, it is more persuasive. Use although, though, and even though to make concessions.", + "text": "Making Concessions\n\nMaking a concession is saying that one idea is true, but another idea is stronger or more important, according to the writer. In other words, it is more persuasive. Use although, though, and even though to make concessions:\nAlthough botulinum toxin can be deadly, it can also cure several serious diseases.\nEven though botulinum toxin can cure several diseases, it can be deadly.\n\nIn each sentence, the idea in the second clause is emphasized - the writer feels it is stronger and more important.\n\nIn the first sentence, the writer concedes that botulinum toxin is dangerous. However, the writer believes its ability to cure diseases is more important. (In other words, scientists should continue to work with it). In the second sentence, the writer concedes that botulinum toxin can cure diseases. However, the writer believes that the fact that it is dangerous is more important. (Scientists should stop working with it)", + "html": "

Making Concessions

Making a concession is saying that one idea is true, but another idea is stronger or more important, according to the writer. In other words, it is more persuasive. Use although, though, and even though to make concessions:

Although botulinum toxin can be deadly, it can also cure several serious diseases.
Even though botulinum toxin can cure several diseases, it can be deadly.

In each sentence, the idea in the second clause is emphasized - the writer feels it is stronger and more important.

In the first sentence, the writer concedes that botulinum toxin is dangerous. However, the writer believes its ability to cure diseases is more important. (In other words, scientists should continue to work with it). In the second sentence, the writer concedes that botulinum toxin can cure diseases. However, the writer believes that the fact that it is dangerous is more important. (Scientists should stop working with it)

", + "id": "1f1b891c-242d-4bba-9354-af222640b54a", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Use although, even though, and though to connect the ideas below (1-3).

  1. more important: Arsenic is still used to treat leukemia.
    less important: Just a small amount of arsenic can be deadly.
  2. less important: Snake venom is dangerous to humans.
    more important: Snake venom is used in a lot of important medications.
  3. more important: Studying extinct viruses might bring back deadly diseases.
    less important: Studying extinct viruses can tell us about the human species.
", + "segments": [ + { + "html": "

Let's approach this exercise step by step:

We need to create sentences using 'although', 'even though', or 'though' to connect the given ideas. Remember, the structure we're aiming for is:

Although/Even though/Though [less important idea], [more important idea].

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Use although, even though, and though to connect the ideas below" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

1. Arsenic and Leukemia Treatment

  • More important: Arsenic is still used to treat leukemia.
  • Less important: Just a small amount of arsenic can be deadly.

Connected sentence:

Although just a small amount of arsenic can be deadly, it is still used to treat leukemia.
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "more important: Arsenic is still used to treat leukemia.", + "less important: Just a small amount of arsenic can be deadly." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Although just a small amount of arsenic can be deadly, it is still used to treat leukemia." + } + ] + }, + { + "html": "

2. Snake Venom in Medicine

  • Less important: Snake venom is dangerous to humans.
  • More important: Snake venom is used in a lot of important medications.

Connected sentence:

Even though snake venom is dangerous to humans, it is used in a lot of important medications.
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "less important: Snake venom is dangerous to humans.", + "more important: Snake venom is used in a lot of important medications." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Even though snake venom is dangerous to humans, it is used in a lot of important medications." + } + ] + }, + { + "html": "

3. Studying Extinct Viruses

  • More important: Studying extinct viruses might bring back deadly diseases.
  • Less important: Studying extinct viruses can tell us about the human species.

Connected sentence:

Though studying extinct viruses can tell us about the human species, it might bring back deadly diseases.
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "more important: Studying extinct viruses might bring back deadly diseases.", + "less important: Studying extinct viruses can tell us about the human species." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Though studying extinct viruses can tell us about the human species, it might bring back deadly diseases." + } + ] + }, + { + "html": "

Reflecting on the Exercise

In this exercise, we practiced creating concessions using 'although', 'even though', and 'though'. These connectors help us acknowledge one point while emphasizing another that we consider more significant.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

The Importance of Making Concessions

Making concessions in writing is a valuable skill because it:

  • Shows you've considered multiple perspectives
  • Strengthens your argument by addressing potential counterpoints
  • Demonstrates a nuanced understanding of complex issues
  • Makes your writing more persuasive and balanced
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Making a concession is saying that one idea is true, but another idea is stronger or more important" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

The tip provided at the beginning of this exercise is particularly beneficial because it:

  • Clarifies the purpose of concessions in writing
  • Provides clear examples of how to structure concessions
  • Explains how the order of clauses affects the emphasis of ideas
  • Helps writers create more sophisticated and persuasive arguments
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Use although, though, and even though to make concessions" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

By practicing this technique, you'll be able to present more balanced and thoughtful arguments in your writing, acknowledging different viewpoints while still emphasizing your main points effectively.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 116, + "tips": [ + { + "category": "Writing Skill", + "embedding": "Writing a Persuasive Paragraph\n\nIn a persuasive paragraph, you try to convince the reader that something is true. First, you state the issue. Then you state your argument. Finally, you explain the reasons why you think your argument is valid or true.", + "text": "Writing a Persuasive Paragraph\n\nIn a persuasive paragraph, you try to convince the reader that something is true. First, you state the issue. Then you state your argument. Finally, you explain the reasons why you think your argument is valid or true.\n\nMaking concessions in a persuasive paragraph can help strengthen your argument. It shows the reader that you have thought about the different arguments, but you believe that your argument is the strongest and most important.", + "html": "

Writing a Persuasive Paragraph

In a persuasive paragraph, you try to convince the reader that something is true. First, you state the issue. Then you state your argument. Finally, you explain the reasons why you think your argument is valid or true.

Making concessions in a persuasive paragraph can help strengthen your argument. It shows the reader that you have thought about the different arguments, but you believe that your argument is the strongest and most important.

", + "id": "601f2918-dc72-412a-81a2-7167a1e66831", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the paragraph about animal testing. Identify the two sentences that make a concession.

Many cosmetic and drug companies test their products on animals to make sure that they are safe. However, this kind of testing is cruel and unnecessary. Although people who support animal testing say that animals are not harmed during tests, animals usually have to live in small cages in laboratories. In addition, animals are often badly injured during testing, and some are even killed. Even though drug companies need to make their products safe for people, their products don't always have the same effect on animals and humans. So it's possible that these tests don't show how products might affect humans. In fact, according to the Food and Drug Administration, over 90 percent of drugs that are used in testing are safe for animals, but are not safe for humans. Since animal testing harms animals and may not help humans, researchers should stop testing products on animals.

", + "segments": [ + { + "html": "

Step 1: Understand the task

We need to identify two sentences in the given paragraph that make a concession. A concession acknowledges an opposing viewpoint before presenting a counter-argument.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "A concession acknowledges an opposing viewpoint" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 2: Analyze the paragraph

Let's break down the paragraph and look for sentences that acknowledge the opposing view (pro-animal testing) before presenting the main argument (against animal testing).

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "acknowledge the opposing view", + "before presenting the main argument" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 3: Identify the concessions

After careful analysis, we can identify two sentences that make concessions:

  1. \"Although people who support animal testing say that animals are not harmed during tests, animals usually have to live in small cages in laboratories.\"
  2. \"Even though drug companies need to make their products safe for people, their products don't always have the same effect on animals and humans.\"
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Although people who support animal testing say that animals are not harmed during tests", + "Even though drug companies need to make their products safe for people" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 4: Explain the concessions

Both sentences start by acknowledging a point made by those who support animal testing, but then counter with an argument against it:

  • The first concession acknowledges that supporters claim animals aren't harmed, but then argues that they still suffer poor living conditions.
  • The second concession recognizes the need for safety testing, but then points out that animal tests may not accurately predict effects on humans.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "acknowledging a point made by those who support animal testing", + "then counter with an argument against it" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 5: Understand the importance of concessions

Using concessions in a persuasive paragraph is a powerful technique. It shows that the writer has considered multiple perspectives and strengthens their argument by addressing potential counterpoints.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Making concessions in a persuasive paragraph can help strengthen your argument" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Conclusion

By identifying and understanding these concessions, we can see how the author builds a stronger argument against animal testing. This technique demonstrates a balanced approach to the topic while still maintaining a clear stance.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "shows the reader that you have thought about the different arguments" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + } + ] + }, + { + "unit": 7, + "title": "Nature's Fury", + "pages": [ + { + "page": 124, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use occur.", + "text": "Use occur with (n.): accidents occur, changes occur, events occur; (adv.): frequently occur, naturally occur, normally occur, often occur.", + "html": "

Use occur with (n.): accidents occur, changes occur, events occur; (adv.): frequently occur, naturally occur, normally occur, often occur.

", + "id": "60239035-4aca-4092-bfc4-340a1d851604", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 127, + "tips": [ + { + "category": "CT Focus", + "embedding": "Writers often quote or paraphrase (restate) the ideas of experts to support information in an article. They may introduce these sources with According to ... or [the expert] thinks/says ...", + "text": "Writers often quote or paraphrase (restate) the ideas of experts to support information in an article. They may introduce these sources with According to ... or [the expert] thinks/says ...", + "html": "

Writers often quote or paraphrase (restate) the ideas of experts to support information in an article. They may introduce these sources with According to ... or [the expert] thinks/says ...

", + "id": "56f09660-c0e9-4c2d-a90c-8f9239b98aaf", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Find the following quote and paraphrase in \"When Tornadoes Strike\". Note the paragraphs where you find each one. Then answer the questions.

Quote: \"There were no limitations\", said tornado expert Tim Samaras. \"It went absolutely crazy. It had nothing but hundreds of miles to grow and develop\".

Paragraph:

Paraphrase: Other people, such as Russell Schneider, director of the U.S. Storm Prediction Center, think it's because of a weather pattern called \"La Niña\".

Paragraph:

  1. Why did the writer quote Samaras? (What idea does it support?)
    Why did the writer paraphrase Schneider? (What idea does it support?)
  2. How does the writer describe Samaras and Schneider? For which source do you have more specific information?
", + "additional": "

When Tornadoes Strike

The tornado that hit Joplin, Missouri, on April 26, 2011, threw cars into the air as if they were toys. It pulled buildings apart and even broke up pavement1 — something that only the strongest twisters can do. The Joplin tornado was strong, but it was just one of an amazing number of powerful twisters to strike the United States recently.

A huge number of intense tornadoes hit several regions of the southern United States in 2011. In fact, more violent tornadoes struck the United States in April 2011 than in any other month on record.2 In just two days, from April 26 to April 27, there were more than 100 separate twisters. The tornadoes moved through six states and killed at least 283 people.

The \"Perfect Storm\"

From April 26 to April 27, \"perfect storm\" conditions gave birth to a monster twister in Tuscaloosa, Alabama. \"Perfect storm\" conditions occur when warm, wet air rises and collides with cold, dry air at high altitudes.3

The Tuscaloosa tornado was 1.0 mile (1.6 kilometers) wide, with winds over 260 mph (400 kph). It stayed on the ground for an unusually long time. Tornadoes usually touch the ground for only a few miles before they die. But experts think the Tuscaloosa tornado stayed on the ground and traveled 300 miles (480 kilometers) across a region extending from Alabama to Georgia. \"There were no limitations,\" said tornado expert Tim Samaras. \"It went absolutely crazy. It had nothing but hundreds of miles to grow and develop.\"

Strong, But Not Surprising?

What caused the violent tornadoes in 2011? Experts disagree. Some think warmer-than-normal water temperatures in the Gulf of Mexico were the cause. Other people, such as Russell Schneider, director of the U.S. Storm Prediction Center, think it's because of a weather pattern called \"La Niña.\"4 La Niña can affect the climate in the United States. It makes air drier or wetter and causes temperatures to rise and fall. Some experts, such as Samaras, think we simply don't have enough data to decide.

Because their cause is unclear, scientists around the world continue to study tornadoes. One day their research will help us to better understand the conditions that cause tornadoes to form. Eventually, we may even be able to predict how strong they will be and where they will hit.

1 The pavement is the hard surface of a road. 2 If something is on record, it is written down and remembered from the past. 3 If something is at a particular altitude, it is at that height above sea level. 4 La Niña (Spanish for the girl) is a weather pattern that occurs when cold water in the Pacific comes to the surface of the ocean off the coast of South America.
", + "segments": [ + { + "html": "

Step 1: Locating the Quote and Paraphrase

Let's start by finding the requested quote and paraphrase in the 'When Tornadoes Strike' article:

  • The quote by Tim Samaras is in paragraph 4, under 'The \"Perfect Storm\"' section.
  • The paraphrase about Russell Schneider is in paragraph 5, under 'Strong, But Not Surprising?' section.
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "There were no limitations,\" said tornado expert Tim Samaras. \"It went absolutely crazy. It had nothing but hundreds of miles to grow and develop.", + "Other people, such as Russell Schneider, director of the U.S. Storm Prediction Center, think it's because of a weather pattern called \"La Niña.\"" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "4" + }, + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "5" + } + ] + }, + { + "html": "

Step 2: Analyzing the Quote (Tim Samaras)

The writer quoted Samaras to:

  • Support the idea that the Tuscaloosa tornado was exceptionally large and long-lasting
  • Provide a vivid description of the tornado's unusual behavior
  • Add credibility to the information by including an expert's perspective
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "It went absolutely crazy. It had nothing but hundreds of miles to grow and develop." + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 3: Analyzing the Paraphrase (Russell Schneider)

The writer paraphrased Schneider to:

  • Present a possible explanation for the violent tornadoes in 2011
  • Introduce the concept of La Niña and its potential impact on tornado formation
  • Show that experts have different theories about the cause of these tornadoes
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "think it's because of a weather pattern called \"La Niña.\"" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 4: Comparing Source Descriptions

The writer describes the sources differently:

  • Tim Samaras is introduced as a 'tornado expert'
  • Russell Schneider is described as 'director of the U.S. Storm Prediction Center'

We have more specific information about Schneider's role and affiliation, which may lend more authority to his opinion in this context.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "tornado expert Tim Samaras", + "Russell Schneider, director of the U.S. Storm Prediction Center" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 5: Understanding the Use of Quotes and Paraphrases

The writer employs quotes and paraphrases to:

  • Add credibility to the article by including expert opinions
  • Provide different perspectives on the causes of the tornadoes
  • Make the article more engaging by including direct speech and varied viewpoints
  • Support the main ideas presented in the article with authoritative sources
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Writers often quote or paraphrase (restate) the ideas of experts to support information in an article" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Conclusion

By using quotes and paraphrases, the writer effectively supports their points with expert opinions, adding depth and credibility to the article. This technique demonstrates how writers can introduce sources using phrases like 'said tornado expert' or 'think it's because of', which helps to smoothly integrate expert opinions into the text. These methods not only provide valuable information but also make the article more engaging and authoritative.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "They may introduce these sources with According to ... or [the expert] thinks/says ..." + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 128, + "tips": [ + { + "category": "Reading Skill", + "embedding": "Identifying Sequence\n\nWhen writers describe processes - how things happen - they use transition words and phrases to show the order, or sequence, of the steps or events in the process. Transition words that indicate sequence include first, next, then, second, and finally. Time clauses with before, after, when, as soon as, once, and during also show order.", + "text": "Identifying Sequence\n\nWhen writers describe processes - how things happen - they use transition words and phrases to show the order, or sequence, of the steps or events in the process. Look at these sentences:\nFirst, warm air and cold air collide and form a tube of rotating air.\nNext, the rotating air turns to become a vertical column.\n\nThe words first and next tell you that warm and cold air collide and form a tube before the rotating air becomes a vertical column.\n\nOther transition words that indicate sequence include then, second, and finally. Time clauses with before, after, when, as soon as, once, and during also show order.\nBefore you go out, check the weather report.\nAfter the storm passes, it's safe to go outside.\nOnce the storm hits, go inside.\n\nNote: When, as soon as, and once describe an event that happens just before another event. During shows a period of time in which an event occurs.\nKeep windows closed during the storm.\nAs soon as the storm stops, it's safe to go outside.", + "html": "

Identifying Sequence

When writers describe processes - how things happen - they use transition words and phrases to show the order, or sequence, of the steps or events in the process. Look at these sentences:

First, warm air and cold air collide and form a tube of rotating air.
Next, the rotating air turns to become a vertical column.

The words first and next tell you that warm and cold air collide and form a tube before the rotating air becomes a vertical column.

Other transition words that indicate sequence include then, second, and finally. Time clauses with before, after, when, as soon as, once, and during also show order.

Before you go out, check the weather report.
After the storm passes, it's safe to go outside.
Once the storm hits, go inside.

Note: When, as soon as, and once describe an event that happens just before another event. During shows a period of time in which an event occurs.

Keep windows closed during the storm.
As soon as the storm stops, it's safe to go outside.
", + "id": "f5b96fbe-b210-4f97-a135-9cb035f4074d", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Answer these questions:

What should you do before a tornado?

What should you do during a tornado?

What should you do when a tornado is over?

", + "additional": "

What to Do When a Tornado Strikes

If you live in a tornado region, it's important to know what to do when tornadoes strike. Follow these steps for what to do before, during, and after a tornado strikes, and you will have the best chance to stay safe.

First, always pay attention to weather reports during tornado season. In addition, keep your eye on the sky. Watch for dark, greenish-colored clouds, and clouds that are close to the ground. This may mean that a tornado is coming. As soon as you know a tornado is about to hit, find shelter immediately if you are outdoors. If you are indoors, go to the lowest level you can, for example, to a basement. Once the tornado hits, stay inside for the entire time.

During a tornado, stay away from windows, as tornadoes can cause them to break. When the storm is over, make sure family members are safe. Check your home and the area around it for damage. Finally, contact disaster relief organizations such as the American Red Cross for help with cleanup and other assistance, such as food and shelter.

", + "segments": [ + { + "html": "

Understanding the Task

We need to identify the actions to take before, during, and after a tornado based on the given text. This exercise helps us practice recognizing sequence in written instructions.

  • Identify actions before a tornado
  • Determine steps during a tornado
  • List actions after a tornado has passed
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "before a tornado", + "during a tornado", + "when a tornado is over", + "after a tornado has passed" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Before a Tornado

The text mentions several actions to take before a tornado strikes:

  • Pay attention to weather reports during tornado season
  • Keep an eye on the sky for signs of a tornado
  • Watch for dark, greenish-colored clouds and clouds close to the ground
  • Find shelter immediately if you're outdoors
  • Go to the lowest level possible if you're indoors
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "First, always pay attention to weather reports during tornado season", + "Watch for dark, greenish-colored clouds, and clouds that are close to the ground", + "As soon as you know a tornado is about to hit, find shelter immediately if you are outdoors", + "If you are indoors, go to the lowest level you can, for example, to a basement" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Pay attention to weather reports and watch the sky for signs of a tornado" + } + ] + }, + { + "html": "

During a Tornado

The text provides clear instructions for what to do during a tornado:

  • Stay inside for the entire time
  • Stay away from windows to avoid injury from breaking glass
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Once the tornado hits, stay inside for the entire time", + "During a tornado, stay away from windows, as tornadoes can cause them to break" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Stay inside and away from windows" + } + ] + }, + { + "html": "

After a Tornado

The text outlines several steps to take after a tornado has passed:

  • Ensure family members are safe
  • Check your home and surrounding area for damage
  • Contact disaster relief organizations for help with cleanup and assistance
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "When the storm is over, make sure family members are safe", + "Check your home and the area around it for damage", + "Finally, contact disaster relief organizations such as the American Red Cross for help with cleanup and other assistance" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Check for safety, assess damage, and contact disaster relief organizations" + } + ] + }, + { + "html": "

Recognizing Sequence

The text uses several sequence indicators to show the order of actions:

  • 'First' indicates the initial step of paying attention to weather reports
  • 'As soon as' shows immediacy in finding shelter
  • 'Once' marks the beginning of the tornado
  • 'During' specifies actions while the tornado is active
  • 'When' and 'Finally' indicate steps after the tornado has passed
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional", + "tip" + ], + "phrases": [ + "First", + "As soon as", + "Once", + "During", + "When", + "Finally", + "transition words and phrases to show the order, or sequence" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Importance of Sequence in Instructions

Recognizing sequence in instructions is crucial because:

  • It helps readers understand the correct order of actions
  • It ensures safety by following steps in the proper sequence
  • It makes complex processes easier to follow and remember
  • It allows for better preparation and response in emergency situations

By paying attention to sequence indicators, we can better understand and follow important safety instructions like those for tornado preparedness.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Other transition words that indicate sequence include then, second, and finally", + "Time clauses with before, after, when, as soon as, once, and during also show order" + ] + } + ], + "insertHTML": [] + } + ] + } + }, + { + "category": "CT Focus", + "embedding": "One way to evaluate online sources is to look at the suffix in the Web address.", + "text": "One way to evaluate online sources is to look at the suffix in the Web address (e.g., .com = company; .edu = educational institution (school or college); .gov = government). The suffix may help you judge a source's reliability.", + "html": "

One way to evaluate online sources is to look at the suffix in the Web address (e.g., .com = company; .edu = educational institution (school or college); .gov = government). The suffix may help you judge a source's reliability.

", + "id": "e40eae4a-e1a0-4e02-9aaa-9de8804e86ea", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Answer these questions:

What is the source of the paragraph?

Is this a reliable source of information on tornadoes?

Why, or why not?

", + "additional": "

What to Do When a Tornado Strikes

If you live in a tornado region, it's important to know what to do when tornadoes strike. Follow these steps for what to do before, during, and after a tornado strikes, and you will have the best chance to stay safe.

First, always pay attention to weather reports during tornado season. In addition, keep your eye on the sky. Watch for dark, greenish-colored clouds, and clouds that are close to the ground. This may mean that a tornado is coming. As soon as you know a tornado is about to hit, find shelter immediately if you are outdoors. If you are indoors, go to the lowest level you can, for example, to a basement. Once the tornado hits, stay inside for the entire time.

During a tornado, stay away from windows, as tornadoes can cause them to break. When the storm is over, make sure family members are safe. Check your home and the area around it for damage. Finally, contact disaster relief organizations such as the American Red Cross for help with cleanup and other assistance, such as food and shelter.

Source: http://www.fema.gov

", + "segments": [ + { + "html": "

Understanding the Task

We need to evaluate the reliability of the given information about tornado safety. To do this, we'll:

  • Identify the source of the information
  • Determine if it's a reliable source for tornado information
  • Explain our reasoning for the reliability assessment
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "What is the source of the paragraph?", + "Is this a reliable source of information on tornadoes?", + "Why, or why not?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 1: Identifying the Source

At the end of the provided text, we can see the source clearly stated:

  • Source: http://www.fema.gov

FEMA stands for the Federal Emergency Management Agency, which is a part of the U.S. government.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Source: http://www.fema.gov" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "http://www.fema.gov (FEMA - Federal Emergency Management Agency)" + } + ] + }, + { + "html": "

Step 2: Evaluating the Reliability

To determine if this is a reliable source, let's consider:

  • The domain suffix (.gov)
  • The nature of the organization (FEMA)
  • The relevance to the topic (emergency management)

Based on these factors, we can conclude that this is indeed a reliable source for information on tornadoes.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + ".gov = government", + "may help you judge a source's reliability" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Yes, it is a reliable source" + } + ] + }, + { + "html": "

Step 3: Explaining the Reliability

This source is reliable for tornado information because:

  • It's a .gov website, indicating it's an official U.S. government source
  • FEMA is specifically responsible for disaster preparedness and response
  • As a government agency, FEMA has access to expert knowledge and resources
  • The information provided aligns with general safety guidelines for tornado situations
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "to evaluate online sources", + "look at the suffix in the Web address" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "It's a .gov website (U.S. government), and FEMA is responsible for disaster preparedness and response" + } + ] + }, + { + "html": "

The Importance of Source Evaluation

Evaluating online sources is crucial because:

  • It helps ensure the information you're reading is accurate and trustworthy
  • It allows you to distinguish between expert advice and potentially misleading information
  • In emergency situations like tornadoes, reliable information can be life-saving
  • It promotes critical thinking and information literacy skills

By considering factors like website suffixes (.com, .edu, .gov) and the authority of the source, you can make better judgments about the reliability of online information.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + ".com = company", + ".edu = educational institution", + ".gov = government", + "The suffix may help you judge a source's reliability" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 130, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use experience.", + "text": "Use experience with adjectives: professional experience, valuable experience, past experience, shared experience, learning experience. You can also use experience with nouns: work experience, life experience, experience danger.", + "html": "

Use experience with adjectives: professional experience, valuable experience, past experience, shared experience, learning experience. You can also use experience with nouns: work experience, life experience, experience danger.

", + "id": "9afe3753-35be-4bff-be9d-dc5ca7f39d93", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 134, + "tips": [ + { + "category": "CT Focus", + "embedding": "Evaluating sources: When you see a quote from an expert in an article, think about why the writer included it and the ideas it supports.", + "text": "Evaluating sources: When you see a quote from an expert in an article, think about why the writer included it and the ideas it supports.", + "html": "

Evaluating sources: When you see a quote from an expert in an article, think about why the writer included it and the ideas it supports.

", + "id": "e1b2bd7d-4986-4eae-8530-d5fce12b281f", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Why does the writer quote Jack Cohen?

What idea does his quote support?

According to the two reading passages, what are the main factors that firefighters consider when they are fighting a fire? What are examples of each one? Complete the chart.

Factor: Shape of the land
Examples: Dry grass, plants
", + "additional": "

Wildfires!

Wildfires occur all around the world, but they are most frequent in areas that have wet seasons followed by long, hot, dry seasons. These conditions exist in parts of Australia, South Africa, Southern Europe, and the western regions of the United States.

Wildfires can move quickly and destroy large areas of land in just a few minutes. Wildfires need three conditions: fuel, oxygen, and a heat source. Fuel is anything in the path of the fire that can burn: trees, grasses, even homes. Air supplies the oxygen. Heat sources include lightning, cigarettes, or just heat from the sun.

From past experience we know that it is difficult to prevent wildfires, but it is possible to stop them from becoming too big. One strategy is to cut down trees. Another strategy is to start fires on purpose. Both of these strategies limit the amount of fuel available for future fires. In addition, people who live in areas where wildfires occur can build fire-resistant1 homes, according to fire researcher Jack Cohen. Cohen says that in some recent California fires, “there were significant cases of communities that did not burn . . . because they were fire-resistant.”

However, most experts agree that no single action will reduce fires or their damage. The best method is to consider all these strategies and use each of them when and where they are the most appropriate.

Fighting Fire

Fighting fires is similar to a military campaign.2 Attacks come from the air and from the ground. The firefighters must consider three main factors: the shape of the land, the weather, and the type of fuel in the path of the fire. For example, southern sides of mountains are sunnier and drier, so they are more likely to burn than the northern sides. Between two mountains, in the canyons, strong winds can suddenly change the direction of a fire. These places, therefore, experience particularly dangerous fires.

  • To control a wildfire, firefighters on the ground first look for something in the area that can block the fire, such as a river or a road. Then they dig a deep trench.3 This is a “fire line,” a line that fire cannot cross.
  • While firefighters on the ground create a fire line, planes and helicopters drop water or chemical fire retardant4 on the fire. Pilots communicate with firefighters on the ground so they know what areas to hit.
  • As soon as the fire line is created, firefighters cut down any dead trees in the area between the fire line and the fire. This helps keep flames from climbing higher into the treetops.
  • At the same time, other firefighters on the ground begin backburning5 in the area between the fire line and the fire.
1 If something is fire-resistant, it does not catch fire easily. 2 A military campaign is a planned set of activities for fighting a war. 3 A trench is a long, narrow channel. 4 Chemical fire retardant is a type of chemical that slows down the burning of fire. 5 Backburning is removing fuel, such as plants and trees, from a fire's path, usually by burning it in a controlled way.
", + "segments": [ + { + "html": "

Understanding the Task

We need to analyze the given text to answer questions about:

  • The purpose of Jack Cohen's quote
  • The main factors firefighters consider when fighting wildfires
  • Examples of these factors

Let's break this down step by step.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Why does the writer quote Jack Cohen?", + "What idea does his quote support?", + "what are the main factors that firefighters consider when they are fighting a fire?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Analyzing Jack Cohen's Quote

The writer quotes Jack Cohen, a fire researcher, who says: \"there were significant cases of communities that did not burn . . . because they were fire-resistant.\"

This quote is used to:

  • Support the idea that building fire-resistant homes can be an effective strategy against wildfires
  • Provide expert evidence for the effectiveness of this approach
  • Illustrate a practical application of fire prevention strategies
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Cohen says that in some recent California fires, \"there were significant cases of communities that did not burn . . . because they were fire - resistant.\"" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "To provide expert evidence on fire-resistant homes" + }, + { + "target": "question", + "targetId": "blank-7", + "position": "replace", + "html": "Building fire-resistant homes can protect communities from wildfires" + } + ] + }, + { + "html": "

Main Factors Firefighters Consider

According to the passage, firefighters consider three main factors when fighting a fire:

  1. Shape of the land
  2. Weather
  3. Type of fuel in the fire's path

Let's look at examples for each factor.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "The firefighters must consider three main factors: the shape of the land, the weather, and the type of fuel in the path of the fire." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Weather" + }, + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Fuel type" + } + ] + }, + { + "html": "

Examples of Each Factor

1. Shape of the land:

  • Southern sides of mountains (sunnier and drier, more likely to burn)
  • Canyons between mountains (strong winds can change fire direction)

2. Weather:

  • Strong winds (can change fire direction)

3. Fuel type:

  • Dry grass and plants
  • Trees (especially dead trees)
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "southern sides of mountains are sunnier and drier, so they are more likely to burn than the northern sides", + "Between two mountains, in the canyons, strong winds can suddenly change the direction of a fire", + "Dry grass, plants" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "Southern mountain sides, canyons" + }, + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "Strong winds" + } + ] + }, + { + "html": "

The Importance of Expert Quotes

Including expert quotes in an article serves several purposes:

  • Adds credibility to the information presented
  • Provides specific examples or data to support main ideas
  • Offers expert insights that the author may not have
  • Helps readers understand complex topics through an expert's perspective

When reading articles, it's important to consider why certain quotes are included and how they support the main ideas being presented.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "When you see a quote from an expert in an article, think about why the writer included it and the ideas it supports." + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 135, + "tips": [ + { + "category": "Language for Writing", + "embedding": "Verb Forms for Describing a Process\n\nWriters usually use two verb forms when they describe a process - the imperative and the simple present.\n\nIf you are explaining how to do something, use the imperative. The imperative is the base form of a verb. You do not use a subject with the imperative. If you are explaining how something happens, use the simple present. Remember to make subjects and verbs agree when you use the simple present.", + "text": "Verb Forms for Describing a Process\n\nWriters usually use two verb forms when they describe a process - the imperative and the simple present.\n\nIf you are explaining how to do something, use the imperative. The imperative is the base form of a verb. You do not use a subject with the imperative. For example:\nFirst, remove fuel in the fire's path.\n\nThe subject, you, is understood. Remove is the base form of the verb.\n\nIf you are explaining how something happens, use the simple present. For example:\nThen warm air moves upward.\nThen firefighters look for something in the area that can block the fire.\n\nRemember to make subjects and verbs agree when you use the simple present.", + "html": "

Verb Forms for Describing a Process

Writers usually use two verb forms when they describe a process - the imperative and the simple present.

If you are explaining how to do something, use the imperative. The imperative is the base form of a verb. You do not use a subject with the imperative. For example:

First, remove fuel in the fire's path.

The subject, you, is understood. Remove is the base form of the verb.

If you are explaining how something happens, use the simple present. For example:

Then warm air moves upward.
Then firefighters look for something in the area that can block the fire.

Remember to make subjects and verbs agree when you use the simple present.

", + "id": "01638dd5-3d2d-402c-89a9-a35d15098d4a", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the information in the box. Complete the sentences (1-3) with the correct form of the verb in parentheses.

  1. __ (move) indoors during a lightning storm, if possible.
  2. Firefighters __ (dig) a trench to block the fire.
  3. First, warm air __ (collide) with cold air at high altitudes.

Write three imperative sentences and three sentences in the simple present. Use the ideas from exercises A and B above.

Imperative:

Simple Present:

", + "segments": [ + { + "html": "

Understanding the Task

We need to complete two exercises:

  1. Fill in the blanks with the correct verb forms
  2. Write three imperative sentences and three simple present sentences

Let's approach this step-by-step, focusing on the correct use of verb forms in describing processes.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Complete the sentences (1-3) with the correct form of the verb in parentheses", + "Write three imperative sentences and three sentences in the simple present" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Exercise 1: Completing Sentences

Let's analyze each sentence and determine the correct verb form:

  1. \"_______(move) indoors during a lightning storm, if possible.\"
    This is an instruction, so we use the imperative form: \"Move\"
  2. \"Firefighters _______(dig) a trench to block the fire.\"
    This describes what firefighters do, so we use simple present: \"dig\"
  3. \"First, warm air _______(collide) with cold air at high altitudes.\"
    This explains how something happens, so we use simple present: \"collides\"
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "If you are explaining how to do something, use the imperative", + "If you are explaining how something happens, use the simple present" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Move" + }, + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "dig" + }, + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "collides" + } + ] + }, + { + "html": "

Exercise 2: Writing Sentences

Now, let's create three imperative and three simple present sentences:

Imperative Sentences:

  1. Stay away from windows during a tornado.
  2. Check weather reports regularly in tornado season.
  3. Create a fire-resistant zone around your home.

Simple Present Sentences:

  1. Tornadoes form when warm and cold air masses collide.
  2. Firefighters use various strategies to control wildfires.
  3. Strong winds change the direction of fires quickly.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "The imperative is the base form of a verb", + "You do not use a subject with the imperative", + "Remember to make subjects and verbs agree when you use the simple present" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "Stay away from windows during a tornado.
" + }, + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "Check weather reports regularly in tornado season." + }, + { + "target": "question", + "targetId": "blank-6", + "position": "replace", + "html": "Create a fire-resistant zone around your home." + }, + { + "target": "question", + "targetId": "blank-7", + "position": "replace", + "html": "Tornadoes form when warm and cold air masses collide." + }, + { + "target": "question", + "targetId": "blank-8", + "position": "replace", + "html": "Firefighters use various strategies to control wildfires." + }, + { + "target": "question", + "targetId": "blank-9", + "position": "replace", + "html": "Strong winds change the direction of fires quickly." + } + ] + }, + { + "html": "

Understanding Verb Forms in Process Descriptions

Using the correct verb forms is crucial when describing processes because:

  • Imperative verbs give clear, direct instructions
  • Simple present verbs explain how things generally happen
  • Correct usage helps readers distinguish between actions to take and natural occurrences
  • It makes the text more coherent and easier to understand
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Writers usually use two verb forms when they describe a process - the imperative and the simple present" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Applying the Tip to Real-World Writing

This tip is beneficial because:

  • It helps create clear, effective instructions in various fields (e.g., safety guidelines, cooking recipes, user manuals)
  • It improves the ability to explain scientific processes or natural phenomena accurately
  • It enhances overall writing clarity and precision
  • It's a fundamental skill for technical writing and educational content creation

By mastering these verb forms, you can communicate processes more effectively in both academic and professional contexts.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "First, remove fuel in the fire's path", + "Then warm air moves upward", + "Then firefighters look for something in the area that can block the fire" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 136, + "tips": [ + { + "category": "Writing Skill", + "embedding": "Organizing a Process Paragraph\n\nWhen you write a process paragraph, you explain steps or events in a process in chronological order - the first event appears first, then the next event, and so on.\n\nTo plan a process paragraph, first list each step or event in the correct order. When you write your paragraph, use transition words and phrases to help the reader follow the order.", + "text": "Organizing a Process Paragraph\n\nWhen you write a process paragraph, you explain steps or events in a process in chronological order - the first event appears first, then the next event, and so on.\n\nTo plan a process paragraph, first list each step or event in the correct order. When you write your paragraph, use transition words and phrases to help the reader follow the order.\nfirst, second, third; then, next, in addition; finally\nbefore, after, once, when, as soon as, during, while\n\nNote that during and while have similar meanings but are used differently in a sentence.\nDuring the storm, it isn't safe to go outside. (during + noun)\nWhile the storm is happening, stay indoors. (while + noun + be + verb + -ing)\n\nWriters usually use the simple present or the imperative to describe a process. You can also use the present perfect with after and once.\nAfter / Once the storm has passed, it's safe to go outside.\n\nNote: A process paragraph is more than a list of steps. It is also important to include details that help the reader understand the steps or events.", + "html": "

Organizing a Process Paragraph

When you write a process paragraph, you explain steps or events in a process in chronological order - the first event appears first, then the next event, and so on.

To plan a process paragraph, first list each step or event in the correct order. When you write your paragraph, use transition words and phrases to help the reader follow the order.

first, second, third; then, next, in addition; finally
before, after, once, when, as soon as, during, while

Note that during and while have similar meanings but are used differently in a sentence.

During the storm, it isn't safe to go outside. (during + noun)
While the storm is happening, stay indoors. (while + noun + be + verb + -ing)

Writers usually use the simple present or the imperative to describe a process. You can also use the present perfect with after and once.

After / Once the storm has passed, it's safe to go outside.

Note: A process paragraph is more than a list of steps. It is also important to include details that help the reader understand the steps or events.

", + "id": "fb0a015f-c414-4e63-bd87-bdd908e89bd6", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Look at the list of events for a process paragraph. Number them to put them in the best order.

After that, turn off any of your home energy sources that can act as fuel, such as natural gas.

Finally, leave the area as quickly as possible. Do not return home until it is safe.

Then go back inside and close all windows, doors, and other openings. This helps prevent the fire from moving easily through the house.

If a fire is approaching your home, first go outside and move any items that can act as fuel for the fire, such as dead plants.

Then fill large containers such as garbage cans and bathtubs with water. This will slow down the fire.

Now write the paragraph:

Wildfires move quickly and are extremely dangerous, but you can avoid danger if you follow these steps.

If you follow these steps, you will have the best chances for staying safe if a wildfire occurs.

", + "segments": [ + { + "html": "

Understanding the Task

We need to complete two main tasks:

  1. Order the given events in a logical sequence for a process paragraph about wildfire safety.
  2. Write a coherent paragraph using the ordered events, incorporating appropriate transition words.

Let's approach this step-by-step, focusing on creating a clear, chronological process.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Number them to put them in the best order", + "Now write the paragraph" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 1: Ordering the Events

Let's arrange the events in a logical sequence:

  1. If a fire is approaching your home, first go outside and move any items that can act as fuel for the fire, such as dead plants.
  2. Then go back inside and close all windows, doors, and other openings. This helps prevent the fire from moving easily through the house.
  3. After that, turn off any of your home energy sources that can act as fuel, such as natural gas.
  4. Then fill large containers such as garbage cans and bathtubs with water. This will slow down the fire.
  5. Finally, leave the area as quickly as possible. Do not return home until it is safe.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "When you write a process paragraph, you explain steps or events in a process in chronological order" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "3" + }, + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "5" + }, + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "2" + }, + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "1" + }, + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "4" + } + ] + }, + { + "html": "

Step 2: Writing the Paragraph

Now, let's write the paragraph using the ordered events and appropriate transition words:

Wildfires move quickly and are extremely dangerous, but you can avoid danger if you follow these steps. First, if a fire is approaching your home, go outside and move any items that can act as fuel for the fire, such as dead plants. Then, go back inside and close all windows, doors, and other openings to prevent the fire from moving easily through the house. After that, turn off any of your home energy sources that can act as fuel, such as natural gas. Next, fill large containers such as garbage cans and bathtubs with water to slow down the fire. Finally, leave the area as quickly as possible and do not return home until it is safe. If you follow these steps, you will have the best chances for staying safe if a wildfire occurs.

", + "wordDelay": 200, + "holdDelay": 15000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "use transition words and phrases to help the reader follow the order", + "first, second, third; then, next, in addition; finally" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-6", + "position": "replace", + "html": "First, if a fire is approaching your home, go outside and move any items that can act as fuel for the fire, such as dead plants." + }, + { + "target": "question", + "targetId": "blank-7", + "position": "replace", + "html": "Then, go back inside and close all windows, doors, and other openings to prevent the fire from moving easily through the house." + }, + { + "target": "question", + "targetId": "blank-8", + "position": "replace", + "html": "After that, turn off any of your home energy sources that can act as fuel, such as natural gas." + }, + { + "target": "question", + "targetId": "blank-9", + "position": "replace", + "html": "Next, fill large containers such as garbage cans and bathtubs with water to slow down the fire." + }, + { + "target": "question", + "targetId": "blank-10", + "position": "replace", + "html": "Finally, leave the area as quickly as possible and do not return home until it is safe." + } + ] + }, + { + "html": "

Understanding Process Paragraphs

Writing an effective process paragraph involves:

  • Arranging events in chronological order
  • Using transition words to show the sequence clearly
  • Providing sufficient details for each step
  • Using simple present or imperative verb forms
  • Ensuring the paragraph flows logically from start to finish
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Writers usually use the simple present or the imperative to describe a process", + "A process paragraph is more than a list of steps" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Benefits of Organizing Process Paragraphs

Understanding how to organize a process paragraph is beneficial because:

  • It improves clarity in instructional or explanatory writing
  • It helps readers follow complex procedures more easily
  • It's a valuable skill for academic and professional writing
  • It enhances overall communication effectiveness
  • It can be applied to various fields, from technical writing to everyday explanations

By mastering this skill, you can create clear, concise, and easily understandable process descriptions in various contexts.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "To plan a process paragraph, first list each step or event in the correct order", + "It is also important to include details that help the reader understand the steps or events" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + } + ] + }, + { + "unit": 8, + "title": "Buliding Wonders", + "pages": [ + { + "page": 144, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use style.", + "text": "Use style with (n.) leadership style, learning style, style of music, writing style; (adj.) distinctive style, particular style, personal style.", + "html": "

Use style with (n.) leadership style, learning style, style of music, writing style; (adj.) distinctive style, particular style, personal style.

", + "id": "fe2afa25-31c1-48ba-8b7f-0ca29f3cb889", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 150, + "tips": [ + { + "category": "Strategy", + "embedding": "When you scan, look for paraphrases of key words, as well as the key words themselves.", + "text": "When you scan, look for paraphrases of key words, as well as the key words themselves. For example, began building is a paraphrase for started work on.", + "html": "

When you scan, look for paraphrases of key words, as well as the key words themselves. For example, began building is a paraphrase for started work on.

", + "id": "9cf7701a-4bc2-4f75-8e41-2619c1b47399", + "verified": true, + "standalone": false, + "exercise": { + "question": "

The passage below is about the mysterious statues in Rapa Nui (Easter Island) called moai. Scan the paragraph to find the answers to these questions.

How far is Rapa Nui from Chile? ___________________

When did people probably first come to Rapa Nui? ___________________

Where did the people of Rapa Nui come from? ___________________

How tall are the statues? How much do they weigh? ___________________

", + "additional": "

The Moai of Rapa Nui

Rapa Nui (Easter Island) is an island in the Pacific Ocean located 2,300 miles (3,700 kilometers) west of Chile. It’s home to the mysterious moai statues, enormous figures carved from stone. It's not clear when the island was first settled. Experts guess that a few brave sailors somehow sailed west to Rapa Nui from Polynesian islands around AD 800. Experts do know that the Rapa Nui culture was at its height between the 10th and 16th centuries. They think the Rapa Nui people carved and built the moai in this period. There are 900 moai statues across the island. They are about 13 feet (4 meters) tall and weigh as much as 14 tons. Most scholars think that the moai were created to honor ancestors, chiefs, or other important people.

", + "segments": [ + { + "html": "

Understanding the Task

We need to scan the given passage about the moai statues of Rapa Nui (Easter Island) to find specific information. Let's break down the questions we need to answer:

  • Distance of Rapa Nui from Chile
  • Probable time of first settlement
  • Origin of Rapa Nui people
  • Height and weight of the statues

We'll use scanning techniques to quickly locate this information in the text.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Scan the paragraph to find the answers to these questions" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Scanning for Answers

Let's go through each question and find the relevant information:

  1. Distance from Chile: 2,300 miles (3,700 kilometers)
  2. First settlement: around AD 800
  3. Origin: Polynesian islands
  4. Statues' dimensions: about 13 feet (4 meters) tall, weighing up to 14 tons

Notice how we didn't need to read the entire passage in detail. We quickly located the specific information by scanning for key words and numbers.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "2,300 miles (3,700 kilometers) west of Chile", + "around AD 800", + "from Polynesian islands", + "13 feet (4 meters) tall and weigh as much as 14 tons" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "2,300 miles (3,700 kilometers)" + }, + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Around AD 800" + }, + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Polynesian islands" + }, + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "13 feet (4 meters) tall; up to 14 tons" + } + ] + }, + { + "html": "

Effective Scanning Techniques

To scan effectively:

  • Focus on key words related to the questions
  • Look for numbers when searching for dates, measurements, or distances
  • Pay attention to proper nouns for locations or origins
  • Don't read every word; let your eyes quickly move through the text
  • Be aware of paraphrases or synonyms of the key words you're looking for
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "look for paraphrases of key words, as well as the key words themselves" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Importance of Recognizing Paraphrases

Recognizing paraphrases is crucial in scanning because:

  • Authors often use synonyms or rephrase ideas
  • The exact wording from the question may not appear in the text
  • It helps in understanding the context and nuances of the information
  • It improves overall reading comprehension skills
  • It's a valuable skill for academic and professional reading tasks

For example, in our text, 'first settled' could be paraphrased as 'brave sailors somehow sailed west to Rapa Nui', indicating the first settlement.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip", + "additional" + ], + "phrases": [ + "began building is a paraphrase for started work on", + "Experts guess that a few brave sailors somehow sailed west to Rapa Nui" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Benefits of Effective Scanning

Mastering the skill of scanning, including recognizing paraphrases, is beneficial because:

  • It saves time when searching for specific information in long texts
  • It's essential for quickly reviewing documents or research materials
  • It helps in test-taking strategies, especially for reading comprehension questions
  • It improves overall reading efficiency and speed
  • It's a valuable skill in both academic and professional settings

By practicing scanning and being aware of paraphrases, you can become a more efficient and effective reader across various contexts.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "When you scan, look for paraphrases of key words, as well as the key words themselves" + ] + } + ], + "insertHTML": [] + } + ] + } + }, + { + "category": "Reading Skill", + "embedding": "Scanning for Specific Information\n\nScanning helps you find details quickly. When you scan, you move your eyes quickly across and down a page and you only look for particular things. For example, to get information about times and dates, look for numbers, and to get information about people and places, look for capitalized words. Read the words around the numbers or capitalized words to understand the context.", + "text": "Scanning for Specific Information\n\nScanning helps you find details quickly. When you scan, you move your eyes quickly across and down a page and you only look for particular things. For example, to get information about times and dates, look for numbers, and to get information about people and places, look for capitalized words. Read the words around the numbers or capitalized words to understand the context.\n\nFor example, to answer the question 'When did Gaudí start work on La Sagrada Família?', first scan the text to find a year. Then read the words near the year for information about 'starting work'.\nAntoní Gaudi began building his church, La Sagrada Família, in 1881.\n\nFirst, your eyes go to 1887. Then your eyes go to began building. You have found the answer to the question - in 1881.", + "html": "

Scanning for Specific Information

Scanning helps you find details quickly. When you scan, you move your eyes quickly across and down a page and you only look for particular things. For example, to get information about times and dates, look for numbers, and to get information about people and places, look for capitalized words. Read the words around the numbers or capitalized words to understand the context.

For example, to answer the question 'When did Gaudí start work on La Sagrada Família?', first scan the text to find a year. Then read the words near the year for information about 'starting work'.

Antoni Gaudí began building his church, La Sagrada Família, in 1881.

First, your eyes go to 1881. Then your eyes go to began building. You have found the answer to the question - in 1881.

", + "id": "84282316-08c8-4e36-9a66-f892315040dc", + "verified": true, + "standalone": false, + "exercise": { + "question": "

The passage below is about the mysterious statues in Rapa Nui (Easter Island) called moai. Scan the paragraph to find the answers to these questions.

How far is Rapa Nui from Chile? ___________________

When did people probably first come to Rapa Nui? ___________________

Where did the people of Rapa Nui come from? ___________________

How tall are the statues? How much do they weigh? ___________________

", + "additional": "

The Moai of Rapa Nui

Rapa Nui (Easter Island) is an island in the Pacific Ocean located 2,300 miles (3,700 kilometers) west of Chile. It's home to the mysterious moai statues, enormous figures carved from stone. It's not clear when the island was first settled. Experts guess that a few brave sailors somehow sailed west to Rapa Nui from Polynesian islands around AD 800. Experts do know that the Rapa Nui culture was at its height between the 10th and 16th centuries. They think the Rapa Nui people carved and built the moai in this period. There are 900 moai statues across the island. They are about 13 feet (4 meters) tall and weigh as much as 14 tons. Most scholars think that the moai were created to honor ancestors, chiefs, or other important people.

", + "segments": [ + { + "html": "

Understanding the Task

We need to scan the given passage about the moai statues of Rapa Nui (Easter Island) to find specific information. Let's break down the questions we need to answer:

  • Distance of Rapa Nui from Chile
  • Probable time of first settlement
  • Origin of Rapa Nui people
  • Height and weight of the statues

We'll use scanning techniques to quickly locate this information in the text.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Scan the paragraph to find the answers to these questions" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Scanning for Answers

Let's go through each question and find the relevant information:

  1. Distance from Chile: 2,300 miles (3,700 kilometers)
  2. First settlement: around AD 800
  3. Origin: Polynesian islands
  4. Statues' dimensions: about 13 feet (4 meters) tall, weighing up to 14 tons

Notice how we didn't need to read the entire passage in detail. We quickly located the specific information by scanning for key words and numbers.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "2,300 miles (3,700 kilometers) west of Chile", + "around AD 800", + "from Polynesian islands", + "13 feet (4 meters) tall and weigh as much as 14 tons" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "2,300 miles (3,700 kilometers)" + }, + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Around AD 800" + }, + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Polynesian islands" + }, + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "13 feet (4 meters) tall; up to 14 tons" + } + ] + }, + { + "html": "

Effective Scanning Techniques

To scan effectively:

  • Focus on key words related to the questions
  • Look for numbers when searching for dates, measurements, or distances
  • Pay attention to proper nouns for locations or origins
  • Don't read every word; let your eyes quickly move through the text
  • Be aware of paraphrases or synonyms of the key words you're looking for
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "look for paraphrases of key words, as well as the key words themselves" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Importance of Recognizing Paraphrases

Recognizing paraphrases is crucial in scanning because:

  • Authors often use synonyms or rephrase ideas
  • The exact wording from the question may not appear in the text
  • It helps in understanding the context and nuances of the information
  • It improves overall reading comprehension skills
  • It's a valuable skill for academic and professional reading tasks

For example, in our text, 'first settled' could be paraphrased as 'brave sailors somehow sailed west to Rapa Nui', indicating the first settlement.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip", + "additional" + ], + "phrases": [ + "began building is a paraphrase for started work on", + "Experts guess that a few brave sailors somehow sailed west to Rapa Nui" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Benefits of Effective Scanning

Mastering the skill of scanning, including recognizing paraphrases, is beneficial because:

  • It saves time when searching for specific information in long texts
  • It's essential for quickly reviewing documents or research materials
  • It helps in test-taking strategies, especially for reading comprehension questions
  • It improves overall reading efficiency and speed
  • It's a valuable skill in both academic and professional settings

By practicing scanning and being aware of paraphrases, you can become a more efficient and effective reader across various contexts.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "When you scan, look for paraphrases of key words, as well as the key words themselves" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 152, + "tips": [ + { + "category": "Word Link", + "embedding": "trans = across", + "text": "trans = across: transport, transportation, transfer, transit, translate. Note that transport can be both a noun and a verb, but the stress is different: (n.) transport, (v.) transport.", + "html": "

trans = across: transport, transportation, transfer, transit, translate. Note that transport can be both a noun and a verb, but the stress is different: (n.) transport, (v.) transport.

", + "id": "9d657295-2afe-4ce2-a47c-e5a0ab70b782", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 156, + "tips": [ + { + "category": "CT Focus", + "embedding": "To identify comparisons, you need to scan for and select relevant details from different parts of the text.", + "text": "To identify comparisons, you need to scan for and select relevant details from different parts of the text, for example, names of people and places, years, dimensions, and other specific details.", + "html": "

To identify comparisons, you need to scan for and select relevant details from different parts of the text, for example, names of people and places, years, dimensions, and other specific details.

", + "id": "aa7d4e33-86aa-4655-b2f2-e5f08316ae37", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Scan the reading passage for information to complete the table.

NameWhen was it built?How was it built?
Göbekli Tepe11,500 B.C.
Chichén Itzá

According to the writer, what was the purpose of each structure? What evidence does the writer give? Scan the reading again and write your answers.

Göbekli Tepe

___________________________________________

___________________________________________

Chichén Itzá

___________________________________________

___________________________________________

In what ways are the structures you read about similar? In what ways are they different? Use your ideas from the previous exercises. Complete the table.

Göbekli TepeBothChichén Itzá
", + "additional": "

Amazing Structures

People have created monuments for various reasons, inspired by different sources. Two of the greatest architectural achievements are on opposite sides of the world, in Turkey and Mexico.

Göbekli Tepe

Göbekli Tepe is one of the oldest man-made structures on Earth. It was already nearly 8,000 years old when both Stonehenge1 and the pyramids of Egypt were built. The structure consists of dozens of stone pillars arranged in rings. The pillars are shaped like capital T's, and many are covered with carvings of animals running and jumping. They are also very big—the tallest pillars are 18 feet (5.5 m) in height and weigh 16 tons (more than 14,500 kg). In fact, archaeologists think that Göbekli Tepe was probably the largest structure on Earth at the time.

How Was It Built?

At the time that Göbekli Tepe was built, most humans lived in small nomadic2 groups. These people survived by gathering plants and hunting animals. They had no writing system and did not use metal. Even wheels did not exist. Amazingly, the structure's builders were able to cut, shape, and transport 16-ton stones. Archaeologists found Stone Age3 tools such as knives at the site. They think hundreds of workers carved and put the pillars in place.

Why Was It Built?

Archaeologists are still excavating Göbekli Tepe and debating its meaning. Many think it is the world's oldest temple. Klaus Schmidt is the archaeologist who originally excavated the site. He thinks that people living nearby created Göbekli Tepe as a holy meeting place. To Schmidt, the T-shaped pillars represent human beings. The pillars face the center of the circle and perhaps represent a religious ritual.

Chichén Itzá

Chichén Itzá is an ancient city made of stepped pyramids, temples, and other stone structures. The largest building in Chichén Itzá is the Temple of Kukulkan, a pyramid with 365 steps. A kind of calendar, the temple shows the change of seasons. Twice a year on the spring and autumn equinoxes,4 a shadow falls on the pyramid in the shape of a snake. As the sun sets, this shadowy snake goes down the steps to eventually join a carved snake head on the pyramid's side.

How Was It Built?

The Mayans constructed the pyramids with carved stone. To build a pyramid, Mayan workers created a base and added smaller and smaller levels as the structure rose. Building the pyramids required many workers. Some pyramids took hundreds of years to complete. As at Göbekli Tepe, builders worked without wheels or metal tools.

Why Was It Built?

Chichén Itzá was both an advanced city center and a religious site. Spanish records show that the Mayans made human sacrifices5 to a rain god here. Archaeologists have found bones, jewelry, and other objects that people wore when they were sacrificed. Experts also know that the Mayans were knowledgeable astronomers.6 They used the tops of the pyramids to view Venus and other planets.

1 Stonehenge is a prehistoric monument in southern England, built around 2600 B.C.2 If a person or group is nomadic, they travel from place to place rather than living in one place all the time.3 The Stone Age was a very early period in human history when people used tools and weapons made of stone, not metal.4 An equinox is a time in the year when day and night are of equal length.5 A sacrifice is a religious ceremony in which people or animals are killed.6 An astronomer is a person who studies stars, planets, and other objects in space.
", + "segments": [ + { + "html": "

Understanding the Task

We need to complete three main tasks:

  1. Fill in a table with information about Göbekli Tepe and Chichén Itzá
  2. Identify the purpose and evidence for each structure
  3. Compare and contrast the two structures

Let's approach this step-by-step, using scanning techniques to find the relevant information quickly.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Scan the reading passage for information to complete the table", + "According to the writer, what was the purpose of each structure?", + "In what ways are the structures you read about similar? In what ways are they different?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Task 1: Completing the Table

Let's scan for the missing information:

  • Göbekli Tepe: Built in 11,500 B.C. (given)
  • How it was built: Stone Age tools were used, hundreds of workers carved and placed 16-ton stones
  • Chichén Itzá: Built date not specified, but it's much later than Göbekli Tepe
  • How it was built: Constructed with carved stone, workers created a base and added smaller levels, took hundreds of years to complete
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Archaeologists found Stone Age tools such as knives at the site. They think hundreds of workers carved and put the pillars in place.", + "The Mayans constructed the pyramids with carved stone. To build a pyramid, Mayan workers created a base and added smaller and smaller levels as the structure rose." + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Stone Age tools, hundreds of workers carved and placed 16-ton stones" + }, + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Not specified (much later than Göbekli Tepe)" + }, + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Carved stone, base with smaller levels added, took hundreds of years" + } + ] + }, + { + "html": "

Task 2: Purpose and Evidence

Scanning for purpose and evidence:

Göbekli Tepe:

  • Purpose: Likely the world's oldest temple, a holy meeting place
  • Evidence: T-shaped pillars possibly representing humans, arranged in circles facing the center

Chichén Itzá:

  • Purpose: Advanced city center and religious site
  • Evidence: Human sacrifices to rain god, astronomical observations from pyramid tops
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "Many think it is the world's oldest temple", + "Klaus Schmidt is the archaeologist who originally excavated the site. He thinks that people living nearby created Göbekli Tepe as a holy meeting place", + "Chichén Itzá was both an advanced city center and a religious site", + "Spanish records show that the Mayans made human sacrifices to a rain god here", + "They used the tops of the pyramids to view Venus and other planets" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "World's oldest temple, holy meeting place" + }, + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "T-shaped pillars representing humans, arranged in circles" + }, + { + "target": "question", + "targetId": "blank-6", + "position": "replace", + "html": "Advanced city center and religious site" + }, + { + "target": "question", + "targetId": "blank-7", + "position": "replace", + "html": "Human sacrifices, astronomical observations from pyramids" + } + ] + }, + { + "html": "

Task 3: Comparing and Contrasting

Let's identify similarities and differences:

Similarities:

  • Both are ancient structures
  • Both had religious purposes
  • Both were built without metal tools or wheels

Differences:

  • Göbekli Tepe is much older (11,500 B.C. vs. unspecified later date)
  • Göbekli Tepe has stone pillars, Chichén Itzá has pyramids
  • Chichén Itzá was also a city center, while Göbekli Tepe was primarily a religious site
  • Chichén Itzá shows advanced astronomical knowledge
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "To identify comparisons, you need to scan for and select relevant details from different parts of the text" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-8", + "position": "replace", + "html": "Much older (11,500 B.C.)" + }, + { + "target": "question", + "targetId": "blank-9", + "position": "replace", + "html": "Ancient structures" + }, + { + "target": "question", + "targetId": "blank-10", + "position": "replace", + "html": "Built much later" + }, + { + "target": "question", + "targetId": "blank-11", + "position": "replace", + "html": "Stone pillars" + }, + { + "target": "question", + "targetId": "blank-12", + "position": "replace", + "html": "Religious purposes" + }, + { + "target": "question", + "targetId": "blank-13", + "position": "replace", + "html": "Pyramids" + }, + { + "target": "question", + "targetId": "blank-14", + "position": "replace", + "html": "Primarily religious site" + }, + { + "target": "question", + "targetId": "blank-15", + "position": "replace", + "html": "Built without metal tools or wheels" + }, + { + "target": "question", + "targetId": "blank-16", + "position": "replace", + "html": "City center and astronomical site" + } + ] + }, + { + "html": "

The Importance of Scanning for Comparisons

Scanning for comparisons is a crucial skill because:

  • It helps organize information from different parts of a text
  • It allows for quick identification of similarities and differences
  • It enhances critical thinking and analytical skills
  • It's useful for summarizing complex information
  • It's applicable in various academic and professional contexts

By practicing this skill, you can improve your ability to process and understand complex texts efficiently.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "for example, names of people and places, years, dimensions, and other specific details" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 157, + "tips": [ + { + "category": "Language for Writing", + "embedding": "Using Comparative Adjectives\n\nOne way to make comparisons is to use the comparative forms of adjectives.\nadjective + -er + than\nmore / less + adjective + than", + "text": "Using Comparative Adjectives\n\nOne way to make comparisons is to use the comparative forms of adjectives.\nadjective + -er + than\nmore / less + adjective + than (with most adjectives that have two or more syllables)\n\nExamples:\nGöbekli Tepe is older than Stonehenge.\nThe design of La Sagrada Família is more complex than the design of St. Patrick's Cathedral.\n\nUse (not) as + adjective + as to say things are (or are not) the same.\nExample:\nThe Empire State Building is not as tall as the Tokyo Sky Tree.", + "html": "

Using Comparative Adjectives

One way to make comparisons is to use the comparative forms of adjectives.

adjective + -er + than

more / less + adjective + than (with most adjectives that have two or more syllables)

Examples:

Göbekli Tepe is older than Stonehenge.
The design of La Sagrada Família is more complex than the design of St. Patrick's Cathedral.

Use (not) as + adjective + as to say things are (or are not) the same.

Example:

The Empire State Building is not as tall as the Tokyo Sky Tree.
", + "id": "a0510b3c-a54f-4ffd-b2cf-62887ffc9418", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Complete the sentences (1-3) using comparative adjectives.

  1. The Tokyo Sky Tree is 2,080 feet (634 meters) tall. The Canton Tower is 1,969 feet (600 meters) tall.
    The Tokyo Sky Tree is __________ the Canton Tower. (tall)

  2. St. Paul's Cathedral has a traditional design. The design of St. Mary's Cathedral is partly traditional and partly modern.
    The design of St. Mary's Cathedral is __________ the design of St. Paul's Cathedral. (traditional)

  3. The Great Wall of China is 5,500 miles (8,850 kilometers) long. Hadrian's Wall is 73 miles (120 kilometers) long.
    Hadrian's Wall is not __________ the Great Wall of China. (long)

", + "segments": [ + { + "html": "

Understanding the Task

In this exercise, we need to complete three sentences using comparative adjectives. The task requires us to:

  • Analyze the given information about each pair of structures
  • Identify the appropriate comparative form for each adjective
  • Construct grammatically correct comparative sentences

Let's approach each sentence step by step.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Complete the sentences (1-3) using comparative adjectives" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Sentence 1: Tokyo Sky Tree vs. Canton Tower

Given information:

  • Tokyo Sky Tree: 2,080 feet (634 meters) tall
  • Canton Tower: 1,969 feet (600 meters) tall

Analysis:

  • The Tokyo Sky Tree is taller
  • We're comparing heights, so we'll use the adjective 'tall'
  • 'Tall' is a one-syllable adjective, so we add '-er' + 'than'

Correct comparative form: 'taller than'

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "adjective + -er + than" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "taller than" + } + ] + }, + { + "html": "

Sentence 2: St. Mary's Cathedral vs. St. Paul's Cathedral

Given information:

  • St. Paul's Cathedral: traditional design
  • St. Mary's Cathedral: partly traditional and partly modern design

Analysis:

  • St. Mary's Cathedral is less traditional
  • We're comparing traditionality, so we'll use the adjective 'traditional'
  • 'Traditional' has more than two syllables, so we use 'less' + adjective + 'than'

Correct comparative form: 'less traditional than'

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "more / less + adjective + than (with most adjectives that have two or more syllables)" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "less traditional than" + } + ] + }, + { + "html": "

Sentence 3: Hadrian's Wall vs. Great Wall of China

Given information:

  • Great Wall of China: 5,500 miles (8,850 kilometers) long
  • Hadrian's Wall: 73 miles (120 kilometers) long

Analysis:

  • Hadrian's Wall is significantly shorter
  • We're comparing lengths, so we'll use the adjective 'long'
  • The sentence structure uses 'not as... as', which indicates inequality

Correct comparative form: 'as long as'

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Use (not) as + adjective + as to say things are (or are not) the same" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "as long as" + } + ] + }, + { + "html": "

Understanding Comparative Adjectives

Comparative adjectives are used to compare two things. The rules for forming comparatives are:

  • For one-syllable adjectives, add '-er' (e.g., tall → taller)
  • For two-syllable adjectives ending in '-y', change 'y' to 'i' and add '-er' (e.g., happy → happier)
  • For most adjectives with two or more syllables, use 'more' or 'less' before the adjective
  • Some adjectives have irregular comparative forms (e.g., good → better, bad → worse)

The structure 'not as... as' is used to express inequality between two things.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "One way to make comparisons is to use the comparative forms of adjectives" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Benefits of Using Comparative Adjectives

Understanding and using comparative adjectives is beneficial because:

  • It allows for precise comparisons between two objects or concepts
  • It enhances descriptive writing skills
  • It's essential for academic and professional communication
  • It helps in expressing preferences and making decisions
  • It's useful in various fields like architecture, engineering, and design

By mastering comparative adjectives, you can communicate more effectively and make clearer comparisons in both written and spoken English.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Göbekli Tepe is older than Stonehenge", + "The design of La Sagrada Família is more complex than the design of St. Patrick's Cathedral", + "The Empire State Building is not as tall as the Tokyo Sky Tree" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 158, + "tips": [ + { + "category": "Writing Skill", + "embedding": "Writing a Comparison Paragraph\n\nWhen you write a comparison paragraph, first choose a topic - that is, the items you wish to compare. Next, think of two or three points about the items that you want to discuss. Then think of one or two details to include about each point.", + "text": "Writing a Comparison Paragraph\n\nWhen you write a comparison paragraph, first choose a topic - that is, the items you wish to compare. Next, think of two or three points about the items that you want to discuss. Then think of one or two details to include about each point.\n\nTransition words and phrases in your paragraph help the reader understand your ideas:\nSimilarities: similarly, both, also, too Differences: however, on the other hand, but\nBoth Göbekli Tepe and Stonehenge are ancient monuments. However, Göbekli Tepe is much older.\nThe pyramids at Chichén Itzá showed the change in seasons. Similarly, some experts think people used Stonehenge as a kind of calendar.", + "html": "

Writing a Comparison Paragraph

When you write a comparison paragraph, first choose a topic - that is, the items you wish to compare. Next, think of two or three points about the items that you want to discuss. Then think of one or two details to include about each point.

Transition words and phrases in your paragraph help the reader understand your ideas:

Similarities: similarly, both, also, too

Differences: however, on the other hand, but

Both Göbekli Tepe and Stonehenge are ancient monuments. However, Göbekli Tepe is much older.
The pyramids at Chichén Itzá showed the change in seasons. Similarly, some experts think people used Stonehenge as a kind of calendar.
", + "id": "204019c1-3832-433f-911d-76ed743293f1", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Write five comparison sentences about places.

", + "segments": [ + { + "html": "

Understanding the Task

In this exercise, we need to write five comparison sentences about places. To do this effectively, we should:

  • Choose different places to compare
  • Identify specific aspects of these places to compare
  • Use appropriate comparative structures and transition words
  • Ensure our sentences are clear and informative

Let's approach this task step by step, creating diverse comparison sentences.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Write five comparison sentences about places" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Sentence 1: Comparing Size

Let's compare two well-known cities:

'New York City is significantly larger than Paris in terms of population, however, both cities are renowned for their iconic landmarks.'

This sentence:

  • Compares the size of two famous cities
  • Uses 'larger than' for comparison
  • Includes a similarity using 'however' and 'both'
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Similarities: similarly, both, also, too", + "Differences: however, on the other hand, but" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "New York City is significantly larger than Paris in terms of population, however, both cities are renowned for their iconic landmarks." + } + ] + }, + { + "html": "

Sentence 2: Comparing Climate

Let's compare the climate of two countries:

'While Canada experiences extremely cold winters, Australia has much milder temperatures year-round, but both countries have diverse landscapes that attract nature enthusiasts.'

This sentence:

  • Compares the climate of two countries
  • Uses 'extremely' and 'much milder' for contrast
  • Includes a similarity using 'but' and 'both'
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "When you write a comparison paragraph, first choose a topic - that is, the items you wish to compare" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "While Canada experiences extremely cold winters, Australia has much milder temperatures year-round, but both countries have diverse landscapes that attract nature enthusiasts." + } + ] + }, + { + "html": "

Sentence 3: Comparing Cultural Aspects

Let's compare two Asian countries:

'Japan and South Korea share many cultural similarities, such as a strong emphasis on respect for elders, however, their traditional cuisines are distinctly different.'

This sentence:

  • Compares cultural aspects of two countries
  • Uses 'similarities' to show likeness
  • Uses 'however' to introduce a difference
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Next, think of two or three points about the items that you want to discuss" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Japan and South Korea share many cultural similarities, such as a strong emphasis on respect for elders, however, their traditional cuisines are distinctly different." + } + ] + }, + { + "html": "

Sentence 4: Comparing Natural Features

Let's compare two natural wonders:

'The Grand Canyon is significantly deeper than the Great Barrier Reef, but both natural wonders attract millions of visitors annually due to their breathtaking beauty.'

This sentence:

  • Compares two famous natural landmarks
  • Uses 'deeper than' for comparison
  • Uses 'but' to introduce a similarity
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Then think of one or two details to include about each point" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "The Grand Canyon is significantly deeper than the Great Barrier Reef, but both natural wonders attract millions of visitors annually due to their breathtaking beauty." + } + ] + }, + { + "html": "

Sentence 5: Comparing Historical Significance

Let's compare two ancient structures:

'The pyramids of Egypt are older than the Colosseum in Rome; similarly, both structures serve as remarkable examples of ancient engineering and continue to fascinate modern archaeologists.'

This sentence:

  • Compares two ancient structures
  • Uses 'older than' for comparison
  • Uses 'similarly' to introduce a likeness
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Transition words and phrases in your paragraph help the reader understand your ideas" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "The pyramids of Egypt are older than the Colosseum in Rome; similarly, both structures serve as remarkable examples of ancient engineering and continue to fascinate modern archaeologists." + } + ] + }, + { + "html": "

Benefits of Writing Comparison Sentences

Practicing writing comparison sentences is beneficial because it:

  • Enhances critical thinking skills by identifying similarities and differences
  • Improves vocabulary and use of transition words
  • Develops more sophisticated writing structures
  • Helps in organizing thoughts and information effectively
  • Prepares for more complex comparative essays and analyses

By mastering this skill, you can create more engaging and informative writing in various academic and professional contexts.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment" + ], + "phrases": [ + "Both Göbekli Tepe and Stonehenge are ancient monuments. However, Göbekli Tepe is much older", + "The pyramids at Chichén Itzá showed the change in seasons. Similarly, some experts think people used Stonehenge as a kind of calendar" + ] + } + ], + "insertHTML": [] + } + ] + } + }, + { + "category": "CT Focus", + "embedding": "Organizing ideas visually, for example, by using a Venn diagram or other graphic organizer, can help you see similarities and differences more clearly. It can also help you remember key information.", + "text": "Organizing ideas visually, for example, by using a Venn diagram or other graphic organizer, can help you see similarities and differences more clearly. It can also help you remember key information.", + "html": "

Organizing ideas visually, for example, by using a Venn diagram or other graphic organizer, can help you see similarities and differences more clearly. It can also help you remember key information.

", + "id": "9e4d61ca-2488-4f87-b0c7-86746d3ffe70", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 160, + "tips": [ + { + "category": "Strategy", + "embedding": "When you write a comparison paragraph, use pronouns (if, they, etc.) to avoid repeating the same nouns too often.", + "text": "When you write a comparison paragraph, use pronouns (if, they, etc.) to avoid repeating the same nouns too often. Make sure it is clear to the reader what the pronoun is referring to.", + "html": "

When you write a comparison paragraph, use pronouns (it, they, etc.) to avoid repeating the same nouns too often. Make sure it is clear to the reader what the pronoun is referring to.

", + "id": "080b86ef-ca41-4d4b-bcf6-a460ca737777", + "verified": true, + "standalone": true + } + ] + } + ] + }, + { + "unit": 9, + "title": "Form and Function", + "pages": [ + { + "page": 166, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use theory.", + "text": "Use theory with: (n.) evidence for a theory, support for a theory; (adj.) a scientific theory, a convincing theory; (v.) develop a theory, propose a theory, put forward a theory, test a theory.", + "html": "

Use theory with: (n.) evidence for a theory, support for a theory; (adj.) a scientific theory, a convincing theory; (v.) develop a theory, propose a theory, put forward a theory, test a theory.

", + "id": "a14dc341-ed16-46c3-a046-f5efd76e9757", + "verified": true, + "standalone": true + }, + { + "category": "Strategy", + "embedding": "A subhead (or section head) indicates the main theme of that section. Reading subheads can give you an overall idea of the theme of a passage and how it is organized, such as whether or not the information is divided into categories.", + "text": "A subhead (or section head) indicates the main theme of that section. Reading subheads can give you an overall idea of the theme of a passage and how it is organized, such as whether or not the information is divided into categories.", + "html": "

A subhead (or section head) indicates the main theme of that section. Reading subheads can give you an overall idea of the theme of a passage and how it is organized, such as whether or not the information is divided into categories.

", + "id": "2ed0ba59-986b-46ce-bf71-478e2c6e8225", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the title and the subheads of the reading passage. What is the reading passage mainly about?

  1. three ways in which birds attract the opposite sex
  2. three possible purposes of feathers
  3. three methods birds use to fly
", + "additional": "

What are Feathers For?

Paleontologists1 think feathers have existed for millions of years. Fossils of a 125-million-year-old dinosaur called a theropod show that it had a thin layer of hair on its back—evidence of very primitive feathers. Discoveries such as this are helping scientists understand how and why feathers evolved.

Insulation

Some paleontologists speculate that feathers began as a kind of insulation to keep animals or their young warm. Paleontologists have found theropod fossils that have their front limbs2 spread over nests. They think this shows that the dinosaurs were using feathers to keep their young warm. In addition, many young birds are covered in light, soft feathers, which keep the birds' bodies warm. Even when they become adult birds, they keep a layer of warm feathers closest to their bodies.

Attraction

Another theory is that feathers evolved for display—that is, to be seen. Feathers on birds show a huge range of colors and patterns. In many cases, the purpose of these beautiful feathers is to attract the opposite sex. A peacock spreads his iridescent3 tail to attract a peahen. Other birds use crests — feathers on their heads. A recent discovery supports the display idea: In 2009, scientists found very small sacs4 inside theropod feathers, called melanosomes. Melanosomes give feathers their color. The theropod melanosomes look the same as those in the feathers of living birds.

Flight

We know that feathers help birds to fly. Here's how they work: A bird's feathers are not the same shape on each side. They are thin and hard on one side, and long and flexible on the other. To lift themselves into the air, birds turn their wings at an angle. This movement allows air to go above and below the wings. The difference in air pressure allows them to fly.

Paleontologists are now carefully studying the closest theropod relatives of birds. They are looking for clues to when feathers were first used for flight. A 150-million-year-old bird called Anchiornis, for example, had black-and-white arm feathers. These feathers were almost the same as bird feathers. However, unlike modern bird feathers, the feathers were the same shape on both sides. Because of this, Anchiornis probably wasn't able to fly.

Scientists also found a small, moveable bone in Anchiornis fossils. This bone allowed it to fold its arms to its sides. Modern birds use a similar bone to pull their wings toward their bodies as they fly upwards. Scientists speculate that feathered dinosaurs such as Anchiornis evolved flight by moving their feathered arms up and down as they ran, or by jumping from tree to tree.

Recent research therefore shows that feathers probably evolved because they offered several advantages. The evidence suggests that their special design and bright colors helped dinosaurs, and then birds, stay warm, attract mates, and finally fly high into the sky.

1 Paleontologists are scientists who study fossils. 2 Limbs are arms or legs. 3 If something is iridescent, it has many bright colors that seem to be changing. 4 A sac is a small part of an animal's body that is shaped like a little bag.
", + "segments": [ + { + "html": "

Understanding the Task

We need to determine the main topic of the reading passage by analyzing its title and subheads. This exercise tests our ability to:

  • Identify the overall theme from structural elements
  • Recognize how subheads organize information
  • Infer the main topic without reading the full text

Let's examine the title and subheads to find the answer.

", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "Read the title and the subheads of the reading passage", + "What is the reading passage mainly about?" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Analyzing the Title and Subheads

Let's look at the structural elements of the passage:

  • Title: 'What are Feathers For?'
  • Subheads:
    1. Insulation
    2. Attraction
    3. Flight

The title poses a question about the purpose of feathers, and the subheads appear to be answering this question by listing different functions of feathers.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "additional" + ], + "phrases": [ + "What are Feathers For?", + "Insulation", + "Attraction", + "Flight" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Interpreting the Information

From our analysis, we can conclude:

  • The passage is focused on feathers and their purposes
  • It discusses three main functions of feathers: insulation, attraction, and flight
  • These functions are likely explained in detail under each subhead

This structure suggests that the passage is exploring different purposes or functions of feathers, rather than focusing solely on attraction or flight methods.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "Reading subheads can give you an overall idea of the theme of a passage and how it is organized" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Selecting the Correct Answer

Based on our analysis, the correct answer is:

2. three possible purposes of feathers

This answer best reflects the content suggested by the title and subheads. It encompasses all three subheads (insulation, attraction, and flight) as purposes of feathers, whereas the other options focus on only one aspect or are not directly related to the subheads.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "segment", + "question" + ], + "phrases": [ + "three possible purposes of feathers" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

The Importance of Analyzing Subheads

This exercise demonstrates why understanding subheads is crucial:

  • It provides a quick overview of the main points
  • It reveals the organizational structure of the text
  • It helps in predicting the content of each section
  • It aids in efficient reading and comprehension
  • It's particularly useful for academic and professional reading

By mastering this skill, you can quickly grasp the main ideas of a text and improve your reading efficiency across various subjects and contexts.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "segment", + "tip" + ], + "phrases": [ + "A subhead (or section head) indicates the main theme of that section", + "whether or not the information is divided into categories" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 169, + "tips": [ + { + "category": "CT Focus", + "embedding": "When you evaluate evidence, consider whether it is relevant, logical, sufficient, plausible and reliable.", + "text": "When you evaluate evidence, consider whether it is relevant (does it relate to the main idea?), logical (does it make sense?), sufficient (does it give enough support for the idea?), plausible (is it believable and does it match what you already know?), and reliable (does the writer state where the evidence comes from?).", + "html": "

When you evaluate evidence, consider whether it is relevant (does it relate to the main idea?), logical (does it make sense?), sufficient (does it give enough support for the idea?), plausible (is it believable and does it match what you already know?), and reliable (does the writer state where the evidence comes from?).

", + "id": "67c3173f-fd42-459c-bafd-160a0c1bd5da", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Scan the reading passage for information to complete the table.

  1. How does the author support the ideas about the purposes of feathers? Find at least one modern-day example in the reading for each purpose. Write each one under “Examples.”

  2. What fossil evidence have scientists found relating to each purpose? Note the information under “Evidence.”

PurposeExamplesEvidence
1. __________ have that keep their bodies warm.
2.
3. A bird's feathers are __________ on one side and __________ on the other — so they can lift themselves into the air.Feathered dinosaurs such as Anchiornis had a __________ that allowed them to fold their arms to their sides. This may eventually have helped them use their feathers to fly.

Think about the scientific evidence in the previous exercise for each theory about feathers.

In your opinion, does the evidence help support the theories? Does it convince, or persuade, you? Why, or why not?

Do you think one theory is more convincing than the others?

", + "additional": "

What are Feathers For?

Paleontologists1 think feathers have existed for millions of years. Fossils of a 125-million-year-old dinosaur called a theropod show that it had a thin layer of hair on its back—evidence of very primitive feathers. Discoveries such as this are helping scientists understand how and why feathers evolved.

Insulation

Some paleontologists speculate that feathers began as a kind of insulation to keep animals or their young warm. Paleontologists have found theropod fossils that have their front limbs2 spread over nests. They think this shows that the dinosaurs were using feathers to keep their young warm. In addition, many young birds are covered in light, soft feathers, which keep the birds' bodies warm. Even when they become adult birds, they keep a layer of warm feathers closest to their bodies.

Attraction

Another theory is that feathers evolved for display—that is, to be seen. Feathers on birds show a huge range of colors and patterns. In many cases, the purpose of these beautiful feathers is to attract the opposite sex. A peacock spreads his iridescent3 tail to attract a peahen. Other birds use crests — feathers on their heads. A recent discovery supports the display idea: In 2009, scientists found very small sacs4 inside theropod feathers, called melanosomes. Melanosomes give feathers their color. The theropod melanosomes look the same as those in the feathers of living birds.

Flight

We know that feathers help birds to fly. Here's how they work: A bird's feathers are not the same shape on each side. They are thin and hard on one side, and long and flexible on the other. To lift themselves into the air, birds turn their wings at an angle. This movement allows air to go above and below the wings. The difference in air pressure allows them to fly.

Paleontologists are now carefully studying the closest theropod relatives of birds. They are looking for clues to when feathers were first used for flight. A 150-million-year-old bird called Anchiornis, for example, had black-and-white arm feathers. These feathers were almost the same as bird feathers. However, unlike modern bird feathers, the feathers were the same shape on both sides. Because of this, Anchiornis probably wasn't able to fly.

Scientists also found a small, moveable bone in Anchiornis fossils. This bone allowed it to fold its arms to its sides. Modern birds use a similar bone to pull their wings toward their bodies as they fly upwards. Scientists speculate that feathered dinosaurs such as Anchiornis evolved flight by moving their feathered arms up and down as they ran, or by jumping from tree to tree.

Recent research therefore shows that feathers probably evolved because they offered several advantages. The evidence suggests that their special design and bright colors helped dinosaurs, and then birds, stay warm, attract mates, and finally fly high into the sky.

1 Paleontologists are scientists who study fossils. 2 Limbs are arms or legs. 3 If something is iridescent, it has many bright colors that seem to be changing. 4 A sac is a small part of an animal's body that is shaped like a little bag.
", + "segments": [ + { + "html": "

Understanding the Exercise

This exercise requires us to analyze a reading passage about the purposes of feathers and complete a table with examples and evidence for each purpose. Let's break it down step by step:

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Scan the reading passage", + "complete the table" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Step 1: Identify the Purposes of Feathers

  • From the reading, we can identify three main purposes of feathers:
  • 1. Insulation
  • 2. Attraction
  • 3. Flight
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Insulation", + "Attraction", + "Flight" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Insulation" + }, + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "Attraction" + }, + { + "target": "question", + "targetId": "blank-7", + "position": "replace", + "html": "Flight" + } + ] + }, + { + "html": "

Step 2: Find Modern-Day Examples

Now, let's find examples from the reading for each purpose:

  • Insulation: Many young birds are covered in light, soft feathers for warmth.
  • Attraction: Peacocks spread their iridescent tails to attract peahens.
  • Flight: Birds' feathers are thin and hard on one side, long and flexible on the other.
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "young birds are covered in light, soft feathers", + "peacock spreads his iridescent tail", + "thin and hard on one side, and long and flexible on the other" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Young birds" + }, + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "Peacocks spread their iridescent tails to attract peahens." + }, + { + "target": "question", + "targetId": "blank-8", + "position": "replace", + "html": "thin and hard" + }, + { + "target": "question", + "targetId": "blank-9", + "position": "replace", + "html": "long and flexible" + } + ] + }, + { + "html": "

Step 3: Identify Fossil Evidence

Now, let's find the fossil evidence for each purpose:

  • Insulation: Theropod fossils show front limbs spread over nests, suggesting they used feathers to keep young warm.
  • Attraction: Scientists found melanosomes in theropod feathers, which give feathers their color.
  • Flight: Anchiornis fossils show black-and-white arm feathers and a moveable bone for folding arms.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "theropod fossils", + "front limbs spread over nests", + "melanosomes", + "Anchiornis", + "moveable bone" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Theropod fossils show front limbs spread over nests, suggesting they used feathers to keep young warm." + }, + { + "target": "question", + "targetId": "blank-6", + "position": "replace", + "html": "Scientists found melanosomes in theropod feathers, which give feathers their color." + }, + { + "target": "question", + "targetId": "blank-10", + "position": "replace", + "html": "moveable bone" + } + ] + }, + { + "html": "

Evaluating the Evidence

Now that we've gathered the information, let's think about how convincing this evidence is:

  • Is it relevant? Yes, all the evidence directly relates to feather purposes.
  • Is it logical? The explanations make sense and follow a logical progression.
  • Is it sufficient? There are multiple pieces of evidence for each purpose.
  • Is it plausible? The evidence aligns with what we observe in modern birds.
  • Is it reliable? The information comes from paleontological discoveries and research.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "relevant", + "logical", + "sufficient", + "plausible", + "reliable" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Based on this evaluation, we can conclude that the evidence is generally convincing. Each theory is supported by both modern examples and fossil evidence. However, the flight theory might be considered the most convincing due to the detailed explanation of how feathers enable flight and the clear progression from non-flying to flying creatures in the fossil record.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "In your opinion, does the evidence help support the theories?", + "Do you think one theory is more convincing than the others?" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-11", + "position": "replace", + "html": "Yes, the evidence supports the theories. It's convincing because it combines fossil evidence with observations of modern birds, providing a logical explanation for feather evolution." + }, + { + "target": "question", + "targetId": "blank-12", + "position": "replace", + "html": "The flight theory seems most convincing due to detailed explanations and clear fossil progression from non-flying to flying creatures." + } + ] + }, + { + "html": "

Tip Application

The tip about evaluating evidence has been crucial in our analysis. By considering relevance, logic, sufficiency, plausibility, and reliability, we've been able to thoroughly assess the strength of the evidence presented for each feather purpose. This systematic approach helps in forming a well-reasoned opinion about the theories and their supporting evidence.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "relevant", + "logical", + "sufficient", + "plausible", + "reliable" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 170, + "tips": [ + { + "category": "Reading Skill", + "embedding": "Identifying Theories\n\nScience writers use verbs such as think, speculate, and suggest when they refer to theories. Writers also use words such as probably and perhaps to indicate theories.", + "text": "Identifying Theories\n\nScience writers use certain words and expressions to differentiate theories from facts. In science, a fact is an idea that has been proven to be true. A theory is an idea that is based on evidence and reasoning, but has not yet been proven. Scientists develop theories in order to explain why something happens, or happened in a particular way.\n\nScience writers use verbs such as think, speculate, and suggest when they refer to theories.\nSome paleontologists speculate that feathers started out as insulation.\nEvidence suggests that their special design and bright colors helped both dinosaurs and birds stay warm.\n\nWriters also use words such as probably and perhaps to indicate theories.\nBecause of this, Anchiornis probally wasn't able to fly.", + "html": "

Identifying Theories

Science writers use certain words and expressions to differentiate theories from facts. In science, a fact is an idea that has been proven to be true. A theory is an idea that is based on evidence and reasoning, but has not yet been proven. Scientists develop theories in order to explain why something happens, or happened in a particular way.

Science writers use verbs such as think, speculate, and suggest when they refer to theories.

Some paleontologists speculate that feathers started out as insulation.
Evidence suggests that their special design and bright colors helped both dinosaurs and birds stay warm.

Writers also use words such as probably and perhaps to indicate theories.

Because of this, Anchiornis probably wasn't able to fly.
", + "id": "54a8d94e-2f3c-45fa-8d60-e4ebecae79fa", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the information about a fossil discovery in China. Identify the theories and the words that introduce them.

", + "additional": "

New Discovery Suggests Dinosaurs Were Early Gliders

Many scientists think that a group of dinosaurs closely related to today's birds took the first steps toward flight when their limbs evolved to flap.1 They theorize that this arm flapping possibly led to flying as a result of running or jumping. But recently discovered fossils in China are showing a different picture.

Paleontologists discovered the fossils of a small, feathered dinosaur called Microraptor gui that lived between 120 and 110 million years ago. The Chinese team that studied the fossils doesn't think this animal ran or flapped well enough to take off from the ground. Instead, they think that this animal possibly flew by gliding2 from tree to tree. They further speculate that the feathers formed a sort of \"parachute\"3 that helped the animal stay in the air.

Not everyone agrees with this theory. Some researchers suggest that M. gui's feathers weren't useful for flight at all. They think that the feathers possibly helped the animal to attract a mate, or perhaps to make the tiny dinosaur look bigger.

1 If a bird or insect flaps its wings, the wings go up and down.2 When birds or airplanes glide, they float on air currents.3 A parachute is a device made of cloth that allows a person to jump safely from an airplane.
", + "segments": [ + { + "html": "

Analyzing the Fossil Discovery

Let's examine the information about the fossil discovery in China and identify the theories presented along with the words that introduce them. We'll go through this step-by-step:

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Identify the theories and the words that introduce them" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Theory 1: Traditional View of Flight Evolution

  • Introducing words: 'Many scientists think'
  • Theory: Dinosaurs evolved flight through arm flapping, possibly as a result of running or jumping
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Many scientists think", + "arm flapping possibly led to flying as a result of running or jumping" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Theory 1: Dinosaurs evolved flight through arm flapping while running or jumping. Introduced by: 'Many scientists think'" + } + ] + }, + { + "html": "

Theory 2: Gliding Theory

  • Introducing words: 'they think'
  • Theory: Microraptor gui possibly flew by gliding from tree to tree
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "they think", + "possibly flew by gliding from tree to tree" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Theory 2: Microraptor gui flew by gliding from tree to tree. Introduced by: 'they think'" + } + ] + }, + { + "html": "

Theory 3: Parachute Theory

  • Introducing words: 'They further speculate'
  • Theory: Feathers formed a sort of 'parachute' to help the animal stay in the air
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "They further speculate", + "feathers formed a sort of \"parachute\"" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "Theory 3: Feathers formed a 'parachute' to help stay airborne. Introduced by: 'They further speculate'" + } + ] + }, + { + "html": "

Theory 4: Non-Flight Purpose of Feathers

  • Introducing words: 'Some researchers suggest'
  • Theory: Feathers weren't useful for flight, but possibly helped in mate attraction or making the dinosaur look bigger
", + "wordDelay": 200, + "holdDelay": 6000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Some researchers suggest", + "weren't useful for flight at all", + "attract a mate", + "make the tiny dinosaur look bigger" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "Theory 4: Feathers weren't for flight, but for attraction or appearance. Introduced by: 'Some researchers suggest'" + } + ] + }, + { + "html": "

Evaluating the Evidence

Now that we've identified the theories, let's consider how to evaluate the evidence presented:

  • Relevance: All theories relate to the purpose of feathers in early dinosaurs.
  • Logic: Each theory presents a logical possibility based on the fossil evidence.
  • Sufficiency: The evidence seems limited, as it's based on a single fossil discovery.
  • Plausibility: All theories align with what we know about evolution and animal adaptations.
  • Reliability: The information comes from paleontologists studying the fossils, which lends credibility.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Paleontologists discovered", + "Chinese team that studied the fossils" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

This approach to evaluating evidence is crucial when analyzing scientific theories. By considering these aspects, we can better understand the strength of each theory and the overall state of knowledge about early flight evolution. It's important to note that in science, multiple theories can coexist until more evidence is found to support or refute them.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 172, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use involved.", + "text": "Use involved with: (in + n.) involved in a process, involved in an accident, involved in politics, involved in a relationship; (adv.) actively involved, deeply involved, directly involved, emotionally involved, heavily involved, personally involved.", + "html": "

Use involved with: (in + n.) involved in a process, involved in an accident, involved in politics, involved in a relationship; (adv.) actively involved, deeply involved, directly involved, emotionally involved, heavily involved, personally involved.

", + "id": "ba837a64-0e2c-4b48-93e7-7276ea935462", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 176, + "tips": [ + { + "category": "Strategy", + "embedding": "When you look for theories, scan for words like think, believe, suggest, feel, and theorize, as well as qualifiers like can, may, and might.", + "text": "When you look for theories, scan for words like think, believe, suggest, feel, and theorize, as well as qualifiers like can, may, and might.", + "html": "

When you look for theories, scan for words like think, believe, suggest, feel, and theorize, as well as qualifiers like can, may, and might.

", + "id": "fbe9c8b5-827c-4d2e-a572-b3457340531b", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Find two theories in \"Design by Nature\"

", + "additional": "

Design by Nature

ALL LIVING organisms are uniquely adapted to the environment in which they live. Scientists study their designs to get ideas for products and technologies for humans. This process is called biomimetics. Here are three examples—in the air, on land, and in the water.

Toucan Bills and Car Safety

Toucan bills are so enormous that it's surprising the birds don't fall on their faces. One species of toucan, the toco toucan, has an orange-yellow bill six to nine inches (15-23 centimeters) long. It's about a third of the bird's entire length. Biologists aren't sure why toucans have such large, colorful bills. Charles Darwin theorized that they attracted mates. Others suggest the bills are used for cutting open fruit, for fighting, or for warning predators to stay away.

One thing scientists are certain of is that the toucan's beak is well designed to be both strong and light. The surface is made of keratin, the same material in human fingernails and hair. But the outer layer isn't a solid structure. It's actually made of many layers of tiny overlapping pieces of keratin. The inside of the bill has a foam-like structure—a network of tiny holes held together by light, thin pieces of bone. This design makes the bill hard but very light.

Marc André Meyers is an engineering professor at the University of California, San Diego. He thinks that the automotive and aviation industries can use the design of the toucan bill to make cars and planes safer. “Panels that mimic toucan bills may offer better protection to motorists involved in crashes,” Meyers says.

Beetle Shells and Collecting Water

The Namib Desert in Angola, Africa, is one of the hottest places on Earth. A beetle called Stenocara survives there by using its shell to get drinking water from the air. Zoologist Andrew Parker of the University of Oxford has figured out how Stenocara collects water from desert air.

The surface of Stenocara's armor-like2 shell is covered with bumps. The top of each bump is smooth and attracts water. The sides of each bump and the areas in between the bumps repel water. As the little drops of water join together and become larger and heavier, they roll down the bumps into the areas between them. A channel3 connects these areas to a spot on the beetle's back that leads straight to its mouth.

Parker thinks Stenocara's bumpy armor can help humans survive better, too. He thinks the beetle's shell is a good model for designing inexpensive tent coverings. The shell might also be a model for roofs that can collect water for drinking and farming in dry parts of the world.

Shark Scales and Swimsuits

Sharks are covered in scales made from the same material as teeth. These flexible scales protect the shark and help it swim quickly in water. A shark can move the scales as it swims. This movement helps reduce the water's drag.4

Amy Lang, an aerospace engineer at the University of Alabama, studied the scales on the shortfin mako, a relative of the great white shark. Lang and her team discovered that the mako shark's scales differ in size and in flexibility in different parts of its body. For instance, the scales on the sides of the body are tapered—wide at one end and narrow at the other end. Because they are tapered, these scales move very easily. They can turn up or flatten to adjust to the flow of water around the shark and to reduce drag.

Lang feels that shark scales can inspire designs for machines that experience drag, such as airplanes. Designers are also getting ideas from shark scales for coating ship bottoms and designing swimwear.

1 Charles Darwin was a 19th-century English naturalist who developed a theory of evolution by natural selection.2 If something is armor-like, it is similar to the metal clothing that soldiers wore in the past to protect themselves in battle.3 A channel is a long, narrow passage for water or other liquids to flow along.4 Drag is a force that opposes the motion of an object moving in water or air.
", + "segments": [ + { + "html": "

Analyzing 'Design by Nature'

Let's examine the text to find two theories about nature's designs and how they inspire human technology. We'll look for words that indicate theories or hypotheses.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Find two theories" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Theory 1: Toucan Bill Function

  • Key phrases: 'theorized', 'suggest'
  • Theories about the purpose of toucan bills:
    • Charles Darwin: attract mates
    • Others: cutting fruit, fighting, warning predators
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Charles Darwin theorized", + "Others suggest", + "attracted mates", + "cutting open fruit", + "fighting", + "warning predators" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Theory 1: Toucan bill function - Darwin theorized they attract mates; others suggest they're for cutting fruit, fighting, or warning predators." + } + ] + }, + { + "html": "

Theory 2: Toucan Bill Design Application

  • Key phrase: 'thinks'
  • Theory: Toucan bill design can be used in automotive and aviation industries
  • Proposed application: Create safer cars and planes
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "thinks", + "automotive and aviation industries can use the design of the toucan bill", + "Panels that mimic toucan bills may offer better protection" + ] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "Theory 2: Toucan bill design application - Marc André Meyers thinks the design can be used to make safer cars and planes." + } + ] + }, + { + "html": "

Additional Theories in the Text

While not required for the exercise, it's worth noting other theories present:

  • Beetle shell design for water collection (Parker 'thinks')
  • Shark scale inspiration for reducing drag (Lang 'feels')
", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "Parker thinks", + "Lang feels" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

Applying the Tip

The tip provided is incredibly useful for identifying theories in scientific texts. Let's see how it helped us:

  • We found 'theorized' and 'suggest' for the toucan bill function theories
  • We identified 'thinks' for the toucan bill design application theory
  • We also spotted 'thinks' and 'feels' for the beetle and shark theories

By scanning for these key words, we can quickly pinpoint where scientists are presenting theories rather than established facts.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "additional" + ], + "phrases": [ + "theorized", + "suggest", + "thinks", + "feels" + ] + } + ], + "insertHTML": [] + }, + { + "html": "

This approach not only helps in identifying theories but also in understanding the tentative nature of scientific knowledge. It reminds us that many ideas in science are hypotheses that scientists are still exploring and testing. By recognizing these linguistic cues, we can better distinguish between established facts and developing theories in scientific literature.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": [ + "question" + ], + "phrases": [ + "Find two theories" + ] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 177, + "tips": [ + { + "category": "Language for Writing", + "embedding": "Using Synonyms\n\nWhen you write a summary of a passage, you should restate information as much as possible in your own words. One way to do this is to replace some of the original words or phrases with synonyms - words that have a similar meaning.", + "text": "Using Synonyms\n\nWhen you write a summary of a passage, you should restate information as much as possible in your own words. One way to do this is to replace some of the original words or phrases with synonyms - words that have a similar meaning. This is also known as paraphrasing. For example, look at the two sentences below:\nOriginal: Some paleontologists speculate that feathers began as a kind of insulation to keep animals or their young warm.\nParaphrase: Some experts think that feathers started as a way to keep warm.\npaleontologists ➝ experts\nbegan ➝ started\nspeculate ➝ think\ninsulation ➝ a way to keep warm\n(Note: You don't change words that don't have synonyms: feathers ➝ feathers.)\n\nOne way to find synonyms is to use a thesaurus, a type of dictionary that has synonyms and antonyms (words with opposite meaning). Not all synonyms are an exact match for a word, so it's important to understand the context in which you are using a word in order to choose the best synonym. For example, look at the following sentence:\nThe Stenocara beetle collects drinking water from the atmosphere.\nSynonyms in a thesaurus for atmosphere might include: air, sky, feeling, and mood. Only air is correct in this context.", + "html": "

Using Synonyms

When you write a summary of a passage, you should restate information as much as possible in your own words. One way to do this is to replace some of the original words or phrases with synonyms - words that have a similar meaning. This is also known as paraphrasing. For example, look at the two sentences below:

Original: Some paleontologists speculate that feathers began as a kind of insulation to keep animals or their young warm.

Paraphrase: Some experts think that feathers started as a way to keep warm.

paleontologists ➝ experts; began ➝ started
speculate ➝ think; insulation ➝ a way to keep warm

(Note: You don't change words that don't have synonyms: feathers ➝ feathers.)

One way to find synonyms is to use a thesaurus, a type of dictionary that has synonyms and antonyms (words with opposite meaning). Not all synonyms are an exact match for a word, so it's important to understand the context in which you are using a word in order to choose the best synonym. For example, look at the following sentence:

The Stenocara beetle collects drinking water from the atmosphere.

Synonyms in a thesaurus for atmosphere might include: air, sky, feeling, and mood. Only air is correct in this context.

", + "id": "4451576d-0fd4-4b21-ac96-ff0e2a092cfb", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the information in the box. Use the best synonym to complete the sentences (1-3).

  1. This design makes the bill hard but very light.
    • a. difficult
    • b. firm
  2. The bird's feathers are stiff on one side.
    • a. inflexible
    • b. formal
  3. The Stenocara beetle can survive in a very dry environment.
    • a. uninteresting
    • b. arid
", + "segments": [ + { + "html": "

Understanding Synonyms in Context

Let's approach this exercise by examining each sentence and choosing the most appropriate synonym based on the context. We'll analyze the meaning of each word in the given sentences and compare it with the provided options.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["Use the best synonym to complete the sentences"] + } + ], + "insertHTML": [] + }, + { + "html": "

1. 'This design makes the bill hard but very light.'

  • Options: a. difficult, b. firm
  • Analysis: In this context, 'hard' refers to the physical property of the bill, not the level of difficulty.
  • 'Firm' better describes the solid, unyielding nature of the bill's structure.
  • Therefore, the best synonym for 'hard' in this sentence is 'firm'.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["This design makes the bill hard but very light.", "b. firm"] + } + ], + "insertHTML": [] + }, + { + "html": "

2. 'The bird's feathers are stiff on one side.'

  • Options: a. inflexible, b. formal
  • Analysis: 'Stiff' here describes the physical property of the feathers, not a social behavior.
  • 'Inflexible' accurately captures the rigid, unyielding nature of the feathers.
  • 'Formal' is more related to social contexts and doesn't fit the physical description.
  • Therefore, the best synonym for 'stiff' in this sentence is 'inflexible'.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["The bird's feathers are stiff on one side.", "a. inflexible"] + } + ], + "insertHTML": [] + }, + { + "html": "

3. 'The Stenocara beetle can survive in a very dry environment.'

  • Options: a. uninteresting, b. arid
  • Analysis: 'Dry' in this context refers to the lack of moisture in the environment.
  • 'Arid' is a scientific term specifically used to describe extremely dry climates or environments.
  • 'Uninteresting' is unrelated to the environmental conditions being described.
  • Therefore, the best synonym for 'dry' in this sentence is 'arid'.
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["The Stenocara beetle can survive in a very dry environment.", "b. arid"] + } + ], + "insertHTML": [] + }, + { + "html": "

The Importance of Context in Choosing Synonyms

This exercise demonstrates the crucial role of context in selecting appropriate synonyms. Let's review the key points:

  • Words often have multiple meanings or connotations.
  • The correct synonym depends on how the word is used in the sentence.
  • Scientific or technical writing may require specific terminology (e.g., 'arid' for a dry climate).
  • Some synonyms may be more suitable for physical descriptions, while others are better for abstract concepts or social situations.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [] + }, + { + "html": "

Applying the Tip on Using Synonyms

The tip provided is particularly valuable for this exercise and for improving writing skills in general. Here's how it applies:

  • It encourages restating information in your own words, which deepens understanding.
  • It highlights the importance of context in choosing synonyms (e.g., 'atmosphere' → 'air' in the given example).
  • It suggests using a thesaurus as a tool for finding synonyms, but emphasizes the need for careful selection based on context.
  • The tip demonstrates how paraphrasing can maintain the original meaning while using different words (e.g., 'insulation' → 'a way to keep warm').

By applying this tip, you can enhance your vocabulary, improve your writing clarity, and develop a deeper understanding of language nuances.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["restate information as much as possible in your own words", "replace some of the original words or phrases with synonyms", "understand the context", "use a thesaurus", "Not all synonyms are an exact match for a word"] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 178, + "tips": [ + { + "category": "Strategy", + "embedding": "When you paraphrase, you can combine sentences into a single sentence.", + "text": "When you paraphrase, you can combine sentences into a single sentence. How many sentences from the original paragraph combine to make the last sentence in the summary?", + "html": "

When you paraphrase, you can combine sentences into a single sentence. How many sentences from the original paragraph combine to make the last sentence in the summary?

", + "id": "1939c5c4-cd9b-42df-9350-88e03b3dacf5", + "verified": true, + "standalone": false, + "exercise": { + "question": "

How many sentences from the original paragraph combine to make the last sentence in the summary?

", + "additional": "

Original

Scientists are studying the adaptations of living organisms in order to use their designs in products and technologies for humans. This process is called biomimetics. Velcro is one example of biomimetics. In 1948, a Swiss scientist, George de Mestral, removed a bur stuck to his dog's fur. De Mestral studied it under a microscope and noticed how well hooks on the bur stuck to things. He copied the design to make a two-piece fastening device. One piece has stiff hooks like the ones on the bur. The other piece has soft loops that allow the hooks to attach to it.

Summary

  • Biomimetics involves studying the ways in which plants and animals adapt to their environments in order to develop useful products and technologies for people.
  • An example of biomimetics is Velcro.
  • A Swiss scientist, George de Mestral, observed how well a bur attached to his dog's fur.
  • He created a two-part fastener by mimicking the loops on the bur and the softness of the dog's fur.
", + "segments": [ + { + "html": "

Understanding Sentence Combination in Paraphrasing

Let's analyze how the original paragraph has been summarized, focusing on sentence combination. We'll compare the original text with the summary to identify which sentences were combined.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["How many sentences from the original paragraph combine to make the last sentence in the summary?"] + } + ], + "insertHTML": [] + }, + { + "html": "

Analyzing the Original Paragraph

The original paragraph contains 7 sentences:

  1. Scientists are studying the adaptations of living organisms...
  2. This process is called biomimetics.
  3. Velcro is one example of biomimetics.
  4. In 1948, a Swiss scientist, George de Mestral, removed a bur stuck to his dog's fur.
  5. De Mestral studied it under a microscope and noticed how well hooks on the bur stuck to things.
  6. He copied the design to make a two-piece fastening device.
  7. One piece has stiff hooks like the ones on the bur. The other piece has soft loops that allow the hooks to attach to it.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["Scientists are studying the adaptations of living organisms", "This process is called biomimetics", "Velcro is one example of biomimetics", "In 1948, a Swiss scientist, George de Mestral, removed a bur stuck to his dog's fur", "De Mestral studied it under a microscope and noticed how well hooks on the bur stuck to things", "He copied the design to make a two-piece fastening device", "One piece has stiff hooks like the ones on the bur. The other piece has soft loops that allow the hooks to attach to it"] + } + ], + "insertHTML": [] + }, + { + "html": "

Examining the Summary

Now, let's look at the summary, particularly the last sentence:

d. He created a two-part fastener by mimicking the loops on the bur and the softness of the dog's fur.

This sentence combines information from the following original sentences:

  • Sentence 5: De Mestral studied it under a microscope and noticed how well hooks on the bur stuck to things.
  • Sentence 6: He copied the design to make a two-piece fastening device.
  • Sentence 7: One piece has stiff hooks like the ones on the bur. The other piece has soft loops that allow the hooks to attach to it.
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["He created a two-part fastener by mimicking the loops on the bur and the softness of the dog's fur", "De Mestral studied it under a microscope and noticed how well hooks on the bur stuck to things", "He copied the design to make a two-piece fastening device", "One piece has stiff hooks like the ones on the bur. The other piece has soft loops that allow the hooks to attach to it"] + } + ], + "insertHTML": [] + }, + { + "html": "

Conclusion

The last sentence in the summary combines information from 3 sentences in the original paragraph. It effectively condenses the key points about de Mestral's observation and invention into a single, concise sentence.

", + "wordDelay": 200, + "holdDelay": 7000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["How many sentences from the original paragraph combine to make the last sentence in the summary?"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "3 sentences" + } + ] + }, + { + "html": "

The Value of Sentence Combination in Paraphrasing

This exercise demonstrates the power of combining sentences when paraphrasing:

  • It allows for more concise summaries
  • It helps in identifying and focusing on key information
  • It encourages critical thinking about how ideas relate to each other
  • It can improve the flow and coherence of the paraphrased text

By mastering this skill, you can create more effective summaries and demonstrate a deeper understanding of the original text.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["When you paraphrase, you can combine sentences into a single sentence"] + } + ], + "insertHTML": [] + } + ] + } + } + ] + } + ] + }, + { + "unit": 10, + "title": "Mobile Revolution", + "pages": [ + { + "page": 186, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use challenge.", + "text": "Use challenge with (adj.) biggest challenge, new challenge; (v.) accept a challenge, face a challenge, present a challenge.", + "html": "

Use challenge with (adj.) biggest challenge, new challenge; (v.) accept a challenge, face a challenge, present a challenge.

", + "id": "858f56f5-7a1a-4e0c-8be1-5e6e5a2483ff", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 189, + "tips": [ + { + "category": "CT Focus", + "embedding": "Relating information to personal experience means comparing situations that you read about to experiences in your own life.", + "text": "Relating information to personal experience means comparing situations that you read about to experiences in your own life. Ask yourself questions: What would I do in that situation? Have I experienced something like that? How might this idea apply to my own life?", + "html": "

Relating information to personal experience means comparing situations that you read about to experiences in your own life. Ask yourself questions: What would I do in that situation? Have I experienced something like that? How might this idea apply to my own life?

", + "id": "2c9b9d65-e45d-4f7c-a053-8fe45babeb68", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the passage below and then think of situations in your past where you needed to get important information to a large group of people. How did you do it? What kind of technology did you use? Was it successful?

", + "additional": "

Changing the World with a Cell Phone

Ken Banks does not run health care programs in Africa. He also does not provide information to farmers in El Salvador. However, his computer software1 is helping people do those things—and more.

Simple Solutions for Big Problems

Banks was working in South Africa in 2003 and 2004. He saw that there were many organizations in Africa that were trying to help people. They were doing good work, but it was difficult for them to communicate over great distances. They didn't have much money, and many didn't have Internet access. But they did have cell phones.

Banks had an idea. He created some computer software called FrontlineSMS. “I wrote the software in five weeks at a kitchen table,” Banks says. The software allows users to send information from computers without using the Internet. It can work with any kind of computer. Users install the software on a computer. Then they connect a cell phone to the computer. To send information, users select the people they want to send it to and hit “send.” The cell phone sends the information as a text message from the computer.

Solving Problems around the World

FrontlineSMS software is free. It can work with an inexpensive laptop. It works with old cell phones, too. In fact, it can work almost anywhere in the world, even in places where electricity is not very dependable. Today, people are using FrontlineSMS to send important information in more than 50 nations.

For example, Nigerians used it to monitor their 2007 election2. Voters sent 10,000 texts to describe what was happening when they went to vote. In Malawi, a rural health care program uses FrontlineSMS to contact patients. As a result, workers no longer have to visit patients' homes to update medical records. The program saves thousands of hours of doctor time and thousands of dollars in fuel costs. In other parts of the world, such as Indonesia, Cambodia, Niger, and El Salvador, farmers now receive the most current prices for their crops3 by cell phone. As a result, the farmers can earn more money.

Making Ideas Reality

FrontlineSMS is an example of taking an idea and turning it into a successful reality. So, what should you do if you have an idea for making the world a better place? Banks advises first researching your idea thoroughly. Try to find out if your idea offers something that people really need. The best way to do this kind of research is to go into the community and talk to people. Then take advantage of social media tools such as blogs, he advises. They allow you to get your message out and connect with people who have similar ideas.

Technology is not a solution by itself, but it's a useful tool for solving many of the world's great challenges. Using today's technology, Banks says, makes it faster and easier than ever to make the world a better place.

1 Software is a computer program. 2 An election is a process in which people vote to choose a person or a group of people to hold an official position. 3 Crops are plants that are grown in large quantities to be harvested.
", + "segments": [ + { + "html": "

Understanding the Exercise

This exercise asks us to relate the information in the passage about FrontlineSMS to our personal experiences with communicating important information to large groups. Let's break down the task and then explore how we can approach it.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["think of situations in your past where you needed to get important information to a large group of people"] + } + ], + "insertHTML": [] + }, + { + "html": "

Key Points from the Passage

  • FrontlineSMS is software that allows mass communication via text messages
  • It works with basic technology (cell phones, inexpensive computers)
  • It's used in areas with limited internet access or unreliable electricity
  • Applications include election monitoring, healthcare updates, and sharing market information
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["FrontlineSMS", "send information from computers without using the Internet", "send important information in more than 50 nations"] + } + ], + "insertHTML": [] + }, + { + "html": "

Reflecting on Personal Experiences

Now, let's think about our own experiences with mass communication:

  • Consider situations where you needed to reach many people quickly (e.g., organizing an event, sharing urgent news)
  • Think about the technology you used (e.g., social media, email, text messages, phone calls)
  • Reflect on the effectiveness of your chosen method
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["How did you do it?", "What kind of technology did you use?", "Was it successful?"] + } + ], + "insertHTML": [] + }, + { + "html": "

Example Response

Here's an example of how you might answer these questions:

  1. Situation: Organizing a school fundraiser
  2. Method: Created a Facebook event and used group text messaging
  3. Technology: Smartphone, social media platforms
  4. Success: Mostly successful, reached many people quickly, but missed some who weren't on social media
  5. Comparison to FrontlineSMS: My method worked well in an area with good internet access, but wouldn't have been as effective in areas FrontlineSMS serves
", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [], + "insertHTML": [ + { + "target": "question", + "targetId": "blank", + "position": "replace", + "html": "

Situation: Organizing a school fundraiser

Method: Created a Facebook event and used group text messaging

Technology: Smartphone, social media platforms

Success: Mostly successful, reached many people quickly

Comparison to FrontlineSMS: Effective with good internet, but limited reach

" + } + ] + }, + { + "html": "

Analyzing Your Experience

After identifying your experience, consider these questions:

  • How does your method compare to FrontlineSMS in terms of accessibility and reach?
  • What challenges did you face that FrontlineSMS might have solved?
  • Are there situations where FrontlineSMS would be more effective than your method?
  • How might your approach need to change in areas with limited technology?
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["works with an inexpensive laptop", "can work almost anywhere in the world", "even in places where electricity is not very dependable"] + } + ], + "insertHTML": [] + }, + { + "html": "

The Value of Relating to Personal Experience

By connecting the passage to your own experiences, you can:

  • Better understand the challenges FrontlineSMS addresses
  • Appreciate the importance of adapting communication methods to different contexts
  • Recognize the impact of technology on global communication
  • Develop empathy for those facing communication challenges in resource-limited areas

This approach not only deepens your understanding of the text but also helps you critically evaluate your own communication strategies and their effectiveness in different scenarios.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["Relating information to personal experience", "What would I do in that situation?", "Have I experienced something like that?", "How might this idea apply to my own life?"] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 190, + "tips": [ + { + "category": "Strategy", + "embedding": "When you take notes, remember to only note the key points.", + "text": "When you take notes, remember to only note the key points. Don't write complete sentences. Try to use your own words as much as possible.", + "html": "When you take notes, remember to only note the key points. Don't write complete sentences. Try to use your own words as much as possible.", + "id": "fc18e82a-22d1-437a-8452-f7c7e68a84f8", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Complete the following table with notes on “Changing the World With a Cell Phone.”

Paragraph NumberMain IdeaSupporting Details
2How Banks got the idea for FrontlineSMS

Lived in S. Africa in 2003-04

Trouble communicating without electricity, Internet, etc., but did have cell phones

3
4
5
6
", + "additional": "

Changing the World with a Cell Phone

Ken Banks does not run health care programs in Africa. He also does not provide information to farmers in El Salvador. However, his computer software1 is helping people do those things—and more.

Simple Solutions for Big Problems

Banks was working in South Africa in 2003 and 2004. He saw that there were many organizations in Africa that were trying to help people. They were doing good work, but it was difficult for them to communicate over great distances. They didn't have much money, and many didn't have Internet access. But they did have cell phones.

Banks had an idea. He created some computer software called FrontlineSMS. “I wrote the software in five weeks at a kitchen table,” Banks says. The software allows users to send information from computers without using the Internet. It can work with any kind of computer. Users install the software on a computer. Then they connect a cell phone to the computer. To send information, users select the people they want to send it to and hit “send.” The cell phone sends the information as a text message from the computer.

Solving Problems around the World

FrontlineSMS software is free. It can work with an inexpensive laptop. It works with old cell phones, too. In fact, it can work almost anywhere in the world, even in places where electricity is not very dependable. Today, people are using FrontlineSMS to send important information in more than 50 nations.

For example, Nigerians used it to monitor their 2007 election2. Voters sent 10,000 texts to describe what was happening when they went to vote. In Malawi, a rural health care program uses FrontlineSMS to contact patients. As a result, workers no longer have to visit patients' homes to update medical records. The program saves thousands of hours of doctor time and thousands of dollars in fuel costs. In other parts of the world, such as Indonesia, Cambodia, Niger, and El Salvador, farmers now receive the most current prices for their crops3 by cell phone. As a result, the farmers can earn more money.

Making Ideas Reality

FrontlineSMS is an example of taking an idea and turning it into a successful reality. So, what should you do if you have an idea for making the world a better place? Banks advises first researching your idea thoroughly. Try to find out if your idea offers something that people really need. The best way to do this kind of research is to go into the community and talk to people. Then take advantage of social media tools such as blogs, he advises. They allow you to get your message out and connect with people who have similar ideas.

Technology is not a solution by itself, but it's a useful tool for solving many of the world's great challenges. Using today's technology, Banks says, makes it faster and easier than ever to make the world a better place.

1 Software is a computer program. 2 An election is a process in which people vote to choose a person or a group of people to hold an official position. 3 Crops are plants that are grown in large quantities to be harvested.
", + "segments": [ + { + "html": "

Analyzing 'Changing the World with a Cell Phone'

Let's break down the article paragraph by paragraph, focusing on the main ideas and supporting details. We'll use the note-taking strategy to capture key points concisely.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["Complete the following table with notes"] + } + ], + "insertHTML": [] + }, + { + "html": "

Paragraph 3: How FrontlineSMS Works

Main Idea: Description of FrontlineSMS software

Supporting Details:

  • Created in 5 weeks
  • Sends info without internet
  • Works with any computer
  • Connects cell phone to computer
  • Sends info as text message
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["He created some computer software called FrontlineSMS", "I wrote the software in five weeks at a kitchen table", "send information from computers without using the Internet", "can work with any kind of computer", "connect a cell phone to the computer", "cell phone sends the information as a text message"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "Description of FrontlineSMS software" + }, + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "

Created in 5 weeks

Sends info without internet

Works with any computer

Connects cell phone to computer

Sends info as text message

" + } + ] + }, + { + "html": "

Paragraph 4: FrontlineSMS Accessibility

Main Idea: FrontlineSMS is widely accessible

Supporting Details:

  • Free software
  • Works with cheap laptops and old phones
  • Functions in areas with unreliable electricity
  • Used in over 50 countries
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["FrontlineSMS software is free", "work with an inexpensive laptop", "works with old cell phones", "work almost anywhere in the world", "even in places where electricity is not very dependable", "send important information in more than 50 nations"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "FrontlineSMS is widely accessible" + }, + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "

Free software

Works with cheap laptops and old phones

Functions in areas with unreliable electricity

Used in over 50 countries

" + } + ] + }, + { + "html": "

Paragraph 5: Real-world Applications

Main Idea: Examples of FrontlineSMS usage worldwide

Supporting Details:

  • Nigeria: Election monitoring (10,000 texts)
  • Malawi: Rural healthcare (saves time and money)
  • Various countries: Farmers get crop prices
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["Nigerians used it to monitor their 2007 election", "Malawi, a rural health care program uses FrontlineSMS to contact patients", "In other parts of the world, such as Indonesia, Cambodia, Niger, and El Salvador, farmers now receive the most current prices for their crops"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-5", + "position": "replace", + "html": "Examples of FrontlineSMS usage worldwide" + }, + { + "target": "question", + "targetId": "blank-6", + "position": "replace", + "html": "

Nigeria: Election monitoring (10,000 texts)

Malawi: Rural healthcare (saves time and money)

Various countries: Farmers get crop prices

" + } + ] + }, + { + "html": "

Paragraph 6: Advice for Innovators

Main Idea: Banks' advice for turning ideas into reality

Supporting Details:

  • Research thoroughly
  • Ensure idea meets real needs
  • Talk to community members
  • Use social media to connect and share
", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["Banks advises first researching your idea thoroughly", "find out if your idea offers something that people really need", "go into the community and talk to people", "take advantage of social media tools such as blogs"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-7", + "position": "replace", + "html": "Banks' advice for turning ideas into reality" + }, + { + "target": "question", + "targetId": "blank-8", + "position": "replace", + "html": "

Research thoroughly

Ensure idea meets real needs

Talk to community members

Use social media to connect and share

" + } + ] + }, + { + "html": "

The Value of Effective Note-taking

This exercise demonstrates the importance of concise, focused note-taking:

  • Captures main ideas and key supporting details
  • Uses brief phrases instead of full sentences
  • Organizes information logically
  • Facilitates quick review and understanding
  • Encourages active engagement with the text

By practicing this skill, you can improve your ability to extract and retain essential information from any text you read.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["When you take notes, remember to only note the key points", "Don't write complete sentences", "Try to use your own words as much as possible"] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 193, + "tips": [ + { + "category": "Word Partners", + "embedding": "Use imagine.", + "text": "Use imagine with (v.) can / can't imagine something, try to imagine; (adj.) difficult / easy / hard to imagine; possible / impossible to imagine.", + "html": "

Use imagine with (v.) can / can't imagine something, try to imagine; (adj.) difficult / easy / hard to imagine; possible / impossible to imagine.

", + "id": "f5a3c6cb-9680-45c8-b38a-e5ebcaad4b68", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 197, + "tips": [ + { + "category": "Language for Writing", + "embedding": "Using Modals to Discuss Abilities and Possibilities\n\nSome modals express abilities and possibilities. These modals are useful for describing solutions. Can shows present ability. Will, could, may, and might show future possibility.", + "text": "Using Modals to Discuss Abilities and Possibilities\n\nSome modals express abilities and possibilities. These modals are useful for describing solutions.\nCan shows present ability: FrontlineSMS can work with any kind of computer.\n\nWill, could, may, and might show future possibility. The modal you choose depends on your degree of certainty. Will is most certain, could is less certain, and may and might are the least certain.\nRadio collars will solve the problem. (I'm certain of this.)\nRadio collars could solve the problem. (I'm less certain.)\nRadio collars might solve the problem. (I'm the least certain.)\nNote: Remember to use the base form of the verb after a modal.", + "html": "

Using Modals to Discuss Abilities and Possibilities

Some modals express abilities and possibilities. These modals are useful for describing solutions.

Can shows present ability: FrontlineSMS can work with any kind of computer.

Will, could, may, and might show future possibility. The modal you choose depends on your degree of certainty. Will is most certain, could is less certain, and may and might are the least certain.

Radio collars will solve the problem. (I'm certain of this.)
Radio collars could solve the problem. (I'm less certain.)
Radio collars might solve the problem. (I'm the least certain.)

Note: Remember to use the base form of the verb after a modal.

", + "id": "2c181ca9-44c9-4724-9c15-90b1b20946bf", + "verified": true, + "standalone": false, + "exercise": { + "question": "

Read the information in the box. Use the verbs in parentheses and the cues to complete the sentences (1-4).

  1. This solution ____________________ (save) people a lot of money. (future possibility; less certain)
  2. Technicians ____________________ (make) fewer mistakes with Ozcan's cell-phone microscope. (future possibility; least certain)
  3. FrontlineSMS ____________________ (help) farmers get better prices for their crops. (present ability)
  4. BBC Janala ____________________ (help) students who do not have the time or the money to attend classes. (future possibility; most certain)
", + "segments": [ + { + "html": "

Understanding Modal Verbs for Abilities and Possibilities

This exercise focuses on using modal verbs to express different levels of certainty about abilities and possibilities. Let's break down the task and approach each sentence step by step.

", + "wordDelay": 200, + "holdDelay": 5000, + "highlight": [ + { + "targets": ["question"], + "phrases": ["Use the verbs in parentheses and the cues to complete the sentences"] + } + ], + "insertHTML": [] + }, + { + "html": "

Sentence 1: Future Possibility (Less Certain)

  • Verb: save
  • Cue: future possibility; less certain
  • Appropriate modal: could
  • Correct answer: This solution could save people a lot of money.

We use 'could' here because it expresses a future possibility with less certainty than 'will' but more certainty than 'might' or 'may'.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["could is less certain"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-1", + "position": "replace", + "html": "could save" + } + ] + }, + { + "html": "

Sentence 2: Future Possibility (Least Certain)

  • Verb: make
  • Cue: future possibility; least certain
  • Appropriate modal: might or may
  • Correct answer: Technicians might make (or may make) fewer mistakes with Ozcan's cell-phone microscope.

We use 'might' or 'may' here because the cue specifies the least certain possibility.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["may and might are the least certain"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-2", + "position": "replace", + "html": "might make" + } + ] + }, + { + "html": "

Sentence 3: Present Ability

  • Verb: help
  • Cue: present ability
  • Appropriate modal: can
  • Correct answer: FrontlineSMS can help farmers get better prices for their crops.

'Can' is used here because it expresses a present ability or capability.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["Can shows present ability"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-3", + "position": "replace", + "html": "can help" + } + ] + }, + { + "html": "

Sentence 4: Future Possibility (Most Certain)

  • Verb: help
  • Cue: future possibility; most certain
  • Appropriate modal: will
  • Correct answer: BBC Janala will help students who do not have the time or the money to attend classes.

'Will' is used here because it expresses the highest level of certainty about a future possibility.

", + "wordDelay": 200, + "holdDelay": 8000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["Will is most certain"] + } + ], + "insertHTML": [ + { + "target": "question", + "targetId": "blank-4", + "position": "replace", + "html": "will help" + } + ] + }, + { + "html": "

The Importance of Modal Verbs

Understanding and correctly using modal verbs is crucial for effective communication:

  • They allow you to express different levels of certainty about future events
  • They help in discussing possibilities and abilities clearly
  • They add nuance to your statements, making your language more precise
  • In scientific or technical writing, they're essential for discussing hypotheses and potential outcomes

By mastering the use of modal verbs, you can communicate ideas about abilities and possibilities with greater accuracy and sophistication.

", + "wordDelay": 200, + "holdDelay": 10000, + "highlight": [ + { + "targets": ["additional"], + "phrases": ["Some modals express abilities and possibilities", "These modals are useful for describing solutions", "Remember to use the base form of the verb after a modal"] + } + ], + "insertHTML": [] + } + ] + } + } + ] + }, + { + "page": 198, + "tips": [ + { + "category": "Strategy", + "embedding": "Remember to use transition words and phrases when you describe a solution that involves a sequence of steps.", + "text": "Remember to use transition words and phrases when you describe a solution that involves a sequence of steps.", + "html": "

Remember to use transition words and phrases when you describe a solution that involves a sequence of steps.

", + "id": "b70928a7-b6fe-430e-bfc2-32b15e189186", + "verified": true, + "standalone": true + } + ] + }, + { + "page": 200, + "tips": [ + { + "category": "Strategy", + "embedding": "One way to provide support for your solution is to describe an alternative and say why it isn't as good as your solution.", + "text": "One way to provide support for your solution is to describe an alternative and say why it isn't as good as your solution.", + "html": "

One way to provide support for your solution is to describe an alternative and say why it isn't as good as your solution.

", + "id": "58d66b4a-aebe-445d-a329-4385e239d84f", + "verified": true, + "standalone": true + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/scripts/tips/prompt.txt b/scripts/tips/prompt.txt new file mode 100644 index 0000000..61f6943 --- /dev/null +++ b/scripts/tips/prompt.txt @@ -0,0 +1,62 @@ +I am going to give you an exercise and a tip, explain how to solve the exercise and how the tip is beneficial, +your response must be with this format: + +{ + "segments": [ + { + "html": "", + "wordDelay": 0, + "holdDelay"; 0, + "highlight": [ + { + "targets": [], + "phrases": [] + } + ], + "insertHTML": [ + { + "target": "", + "targetId": "", + "position": "replace", + "html": "" + } + ] + } + ] +} + +Basically you are going to produce multiple objects and place it in data with the format above to integrate with a react component that highlights passages and inserts html, +these objects are segments of your explanation that will be presented to a student. + +In the html field place a segment of your response that will be streamed to the component with a delay of "wordDelay" ms and in the end of that segment stream the phrases or words inside +"highlight" will be highlighted for "holdDelay" ms, and the cycle repeats until the whole data array is iterated. Make it so +that the delays are reasonable for the student have time to process the message your trying to send. Take note that +"wordDelay" is the time between words to display (always 200), and "holdDelay" (no less than 5000) is the total time the highlighter will highlight what you put +inside "highlight". + +There are 3 target areas: +- "question": where the question is placed +- "additional": where additional content is placed required to answer the question (this section is optional) +- "segment": a particular segment + +You can use these targets in highlight and insertHTML. 
In order for insertHTML to work, you will have to place an html element with an "id" attribute +in the targets you will reference and provide the id via the "targetId", by this I mean if you want to use insert you will need to provide me the +html I've sent you with either a placeholder element with an id set or set an id in an existing element. + +If there are already ids in the html I'm giving you then you must use insertHTML. + +Each segment html will be rendered in a div that has margins, you should condense the information don't give me just single short phrases that occupy a whole div. +As previously said this will be seen by a student so show some train of thought to solve the exercise. +All the segment's html must be wrapped in a div element, and again since this div element will be rendered with some margins make proper use of the segments html. + +Try to make bulletpoints. +Don't explicitly mention the tip right away at the beginning, aim more towards the end. + + +Tip: + + +Target: "question" + + +Target: "additional" diff --git a/scripts/tips/send_tips_to_firestore.py b/scripts/tips/send_tips_to_firestore.py new file mode 100644 index 0000000..b6fb7f6 --- /dev/null +++ b/scripts/tips/send_tips_to_firestore.py @@ -0,0 +1,34 @@ +import json +import os + +from dotenv import load_dotenv + +from pymongo import MongoClient + +load_dotenv() + +# staging: encoach-staging.json +# prod: storied-phalanx-349916.json + +mongo_db = MongoClient(os.getenv('MONGODB_URI'))[os.getenv('MONGODB_DB')] + +if __name__ == "__main__": + with open('pathways_2_rw.json', 'r', encoding='utf-8') as file: + book = json.load(file) + + tips = [] + for unit in book["units"]: + for page in unit["pages"]: + for tip in page["tips"]: + new_tip = { + "id": tip["id"], + "standalone": tip["standalone"], + "tipCategory": tip["category"], + "tipHtml": tip["html"] + } + if not tip["standalone"]: + new_tip["exercise"] = tip["exercise"] + tips.append(new_tip) + + for tip in tips: + doc_ref = 
mongo_db.walkthrough.insert_one(tip)