Didn't solve all conflicts in previous commit

This commit is contained in:
Carlos-Mesquita
2024-11-06 00:50:56 +00:00
14 changed files with 40070 additions and 105 deletions

1
.gitignore vendored
View File

@@ -4,3 +4,4 @@ __pycache__
.DS_Store .DS_Store
.venv .venv
_scripts _scripts
*.env

View File

@@ -1,30 +1,35 @@
import uuid import uuid
from typing import Optional from typing import Optional
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
class DemographicInfo(BaseModel): class DemographicInfo(BaseModel):
phone: str phone: str
passport_id: Optional[str] = None passport_id: Optional[str] = None
country: Optional[str] = None country: Optional[str] = None
class Entity(BaseModel):
class UserDTO(BaseModel): id: str
id: uuid.UUID = Field(default_factory=uuid.uuid4) role: str
email: str
name: str
type: str class UserDTO(BaseModel):
passport_id: str id: uuid.UUID = Field(default_factory=uuid.uuid4)
passwordHash: str email: str
passwordSalt: str name: str
groupName: Optional[str] = None type: str
corporate: Optional[str] = None passport_id: str
studentID: Optional[str | int] = None passwordHash: str
expiryDate: Optional[str] = None passwordSalt: str
demographicInformation: Optional[DemographicInfo] = None groupName: Optional[str] = None
corporate: Optional[str] = None
studentID: Optional[str] = None
class BatchUsersDTO(BaseModel): expiryDate: Optional[str] = None
makerID: str demographicInformation: Optional[DemographicInfo] = None
users: list[UserDTO] entities: list[dict] = Field(default_factory=list)
class BatchUsersDTO(BaseModel):
makerID: str
users: list[UserDTO]

View File

@@ -96,7 +96,9 @@ class TrainingService(ITrainingService):
for area in training_content.weak_areas: for area in training_content.weak_areas:
weak_areas["weak_areas"].append(area.dict()) weak_areas["weak_areas"].append(area.dict())
new_id = str(uuid.uuid4())
training_doc = { training_doc = {
'id': new_id,
'created_at': int(datetime.now().timestamp() * 1000), 'created_at': int(datetime.now().timestamp() * 1000),
**exam_map, **exam_map,
**usefull_tips.dict(), **usefull_tips.dict(),
@@ -105,6 +107,7 @@ class TrainingService(ITrainingService):
} }
doc_id = await self._db.save_to_db('training', training_doc) doc_id = await self._db.save_to_db('training', training_doc)
return { return {
"id": new_id
"id": doc_id "id": doc_id
} }

View File

@@ -7,6 +7,11 @@ import shortuuid
from datetime import datetime from datetime import datetime
from logging import getLogger from logging import getLogger
import pandas as pd
from typing import Dict
import shortuuid
from pymongo.database import Database from pymongo.database import Database
from app.dtos.user_batch import BatchUsersDTO, UserDTO from app.dtos.user_batch import BatchUsersDTO, UserDTO
@@ -49,6 +54,15 @@ class UserService(IUserService):
FileHelper.remove_file(path) FileHelper.remove_file(path)
return {"ok": True} return {"ok": True}
@staticmethod
def _map_to_batch(request_data: Dict) -> BatchUsersDTO:
users_list = [{**user} for user in request_data["users"]]
for user in users_list:
user["studentID"] = str(user["studentID"])
users: list[UserDTO] = [UserDTO(**user) for user in users_list]
return BatchUsersDTO(makerID=request_data["makerID"], users=users)
@staticmethod @staticmethod
def _generate_firebase_auth_csv(batch_dto: BatchUsersDTO, path: str): def _generate_firebase_auth_csv(batch_dto: BatchUsersDTO, path: str):
# https://firebase.google.com/docs/cli/auth#file_format # https://firebase.google.com/docs/cli/auth#file_format
@@ -119,12 +133,6 @@ class UserService(IUserService):
self._insert_new_user(user) self._insert_new_user(user)
code = self._create_code(user, maker_id) code = self._create_code(user, maker_id)
if user.type == "corporate":
self._set_corporate_default_groups(user)
if user.corporate:
self._assign_corporate_to_user(user, code)
if user.groupName and len(user.groupName.strip()) > 0: if user.groupName and len(user.groupName.strip()) > 0:
self._assign_user_to_group_by_name(user, maker_id) self._assign_user_to_group_by_name(user, maker_id)
@@ -144,7 +152,8 @@ class UserService(IUserService):
'isFirstLogin': False, 'isFirstLogin': False,
'isVerified': True, 'isVerified': True,
'registrationDate': datetime.now(), 'registrationDate': datetime.now(),
'subscriptionExpirationDate': user.expiryDate 'subscriptionExpirationDate': user.expiryDate,
'entities': user.entities
} }
self._db.users.insert_one(new_user) self._db.users.insert_one(new_user)
@@ -164,74 +173,6 @@ class UserService(IUserService):
}) })
return code return code
def _set_corporate_default_groups(self, user: UserDTO):
user_id = str(user.id)
default_groups = [
{
'admin': user_id,
'id': str(uuid.uuid4()),
'name': "Teachers",
'participants': [],
'disableEditing': True,
},
{
'admin': user_id,
'id': str(uuid.uuid4()),
'name': "Students",
'participants': [],
'disableEditing': True,
},
{
'admin': user_id,
'id': str(uuid.uuid4()),
'name': "Corporate",
'participants': [],
'disableEditing': True,
}
]
for group in default_groups:
self._db.groups.insert_one(group)
def _assign_corporate_to_user(self, user: UserDTO, code: str):
user_id = str(user.id)
corporate_user = self._db.users.find_one(
{"email": user.corporate}
)
if corporate_user:
self._db.codes.update_one(
{"id": code},
{"$set": {"creator": corporate_user["id"]}},
upsert=True
)
group_type = "Students" if user.type == "student" else "Teachers"
group = self._db.groups.find_one(
{
"admin": corporate_user["id"],
"name": group_type
}
)
if group:
participants = group['participants']
if user_id not in participants:
participants.append(user_id)
self._db.groups.update_one(
{"id": group["id"]},
{"$set": {"participants": participants}}
)
else:
group = {
'admin': corporate_user["id"],
'id': str(uuid.uuid4()),
'name': group_type,
'participants': [user_id],
'disableEditing': True,
}
self._db.groups.insert_one(group)
def _assign_user_to_group_by_name(self, user: UserDTO, maker_id: str): def _assign_user_to_group_by_name(self, user: UserDTO, maker_id: str):
user_id = str(user.id) user_id = str(user.id)

62
elai/AvatarEnum.py Normal file
View File

@@ -0,0 +1,62 @@
from enum import Enum
class AvatarEnum(Enum):
    """Catalog of Elai avatar presets used when rendering speaking videos.

    Each member's value is a dict consumed by the Elai video-creation API:
      - avatar_code: Elai code identifying the avatar
      - avatar_gender: "male" or "female"
      - avatar_url: image URL used as the avatar object's ``src`` on a slide
      - avatar_canvas: image URL used for the avatar's ``canvas`` field
      - voice_id: TTS voice identifier for the chosen provider
      - voice_provider: "elevenlabs" or "azure"
    """
    # Works  (manually verified to render correctly)
    GIA_BUSINESS = {
        "avatar_code": "gia.business",
        "avatar_gender": "female",
        "avatar_url": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/gia/business/gia_business.png",
        "avatar_canvas": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/gia/business/gia_business.png",
        "voice_id": "EXAVITQu4vr4xnSDxMaL",
        "voice_provider": "elevenlabs"
    }
    # Works  (manually verified to render correctly)
    VADIM_BUSINESS = {
        "avatar_code": "vadim.business",
        "avatar_gender": "male",
        "avatar_url": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/vadim/business/vadim_business.png",
        "avatar_canvas": "https://d3u63mhbhkevz8.cloudfront.net/common/vadim/business/vadim_business.png",
        "voice_id": "flq6f7yk4E4fJM5XTYuZ",
        "voice_provider": "elevenlabs"
    }
    ORHAN_BUSINESS = {
        "avatar_code": "orhan.business",
        "avatar_gender": "male",
        "avatar_url": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/orhan/business/orhan.png",
        "avatar_canvas": "https://d3u63mhbhkevz8.cloudfront.net/common/orhan/business/orhan.png",
        "voice_id": "en-US-AndrewMultilingualNeural",
        "voice_provider": "azure"
    }
    FLORA_BUSINESS = {
        "avatar_code": "flora.business",
        "avatar_gender": "female",
        "avatar_url": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/flora/business/flora_business.png",
        "avatar_canvas": "https://d3u63mhbhkevz8.cloudfront.net/common/flora/business/flora_business.png",
        "voice_id": "en-US-JaneNeural",
        "voice_provider": "azure"
    }
    SCARLETT_BUSINESS = {
        "avatar_code": "scarlett.business",
        "avatar_gender": "female",
        "avatar_url": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/scarlett/business/scarlett_business.png",
        "avatar_canvas": "https://d3u63mhbhkevz8.cloudfront.net/common/scarlett/business/scarlett_business.png",
        "voice_id": "en-US-NancyNeural",
        "voice_provider": "azure"
    }
    PARKER_CASUAL = {
        "avatar_code": "parker.casual",
        "avatar_gender": "male",
        "avatar_url": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/parker/casual/parker_casual.png",
        "avatar_canvas": "https://d3u63mhbhkevz8.cloudfront.net/common/parker/casual/parker_casual.png",
        "voice_id": "en-US-TonyNeural",
        "voice_provider": "azure"
    }
    ETHAN_BUSINESS = {
        "avatar_code": "ethan.business",
        "avatar_gender": "male",
        "avatar_url": "https://elai-avatars.s3.us-east-2.amazonaws.com/common/ethan/business/ethan_business_low.png",
        "avatar_canvas": "https://d3u63mhbhkevz8.cloudfront.net/common/ethan/business/ethan_business_low.png",
        "voice_id": "en-US-JasonNeural",
        "voice_provider": "azure"
    }

1965
elai/avatars.json Normal file

File diff suppressed because it is too large Load Diff

3386
elai/english_voices.json Normal file

File diff suppressed because it is too large Load Diff

18
elai/filter_json.py Normal file
View File

@@ -0,0 +1,18 @@
"""Filter the Elai English voice list down to the free (non-paid) voices."""
import json

# Input/output paths are fixed: this is a one-off helper script run
# from the elai/ directory next to the JSON files it processes.
input_filename = "english_voices.json"
output_filename = "free_english_voices.json"

with open(input_filename, "r", encoding="utf-8") as json_file:
    data = json.load(json_file)

# Keep only free voices (drop every entry flagged as paid).
# BUG FIX (comment): the original comment claimed this filtered on
# '"language": "English"', but the input file is already English-only;
# the actual predicate is `not entry["is_paid"]`.
filtered_list = [entry for entry in data["data"]["list"] if not entry["is_paid"]]
data["data"]["list"] = filtered_list

# Write the filtered structure to a new file, preserving the original layout.
with open(output_filename, "w", encoding="utf-8") as json_file:
    json.dump(data, json_file, indent=2)

print(f"Filtered JSON written to '{output_filename}'.")

26579
elai/voices.json Normal file

File diff suppressed because it is too large Load Diff

263
helper/elai_api.py Normal file
View File

@@ -0,0 +1,263 @@
import os
import random
import time
from logging import getLogger
import requests
from dotenv import load_dotenv
from helper.constants import *
from helper.firebase_helper import upload_file_firebase_get_url, save_to_db_with_id
from elai.AvatarEnum import AvatarEnum
load_dotenv()
logger = getLogger(__name__)
# Get ELAI token
TOKEN = os.getenv("ELAI_TOKEN")
FIREBASE_BUCKET = os.getenv('FIREBASE_BUCKET')
# POST TO CREATE VIDEO
POST_HEADER = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {TOKEN}"
}
GET_HEADER = {
"accept": "application/json",
"Authorization": f"Bearer {TOKEN}"
}
def _upload_rendered_video(text, avatar):
    """Render *text* as an Elai avatar video and upload the mp4 to Firebase.

    Returns a ``(firebase_path, public_url)`` tuple on success, or ``None``
    when video creation failed.
    """
    filename = create_video(text, avatar)
    if filename is None:
        return None
    local_path = VIDEO_FILES_PATH + filename
    firebase_path = FIREBASE_SPEAKING_VIDEO_FILES_PATH + filename
    url = upload_file_firebase_get_url(FIREBASE_BUCKET, firebase_path, local_path)
    return firebase_path, url


def create_videos_and_save_to_db(exercises, template, id):
    """Create avatar videos for the speaking exercises and persist the result.

    exercises: list of dicts; each dict's 'type' field (1, 2 or 3) selects
        which speaking part it belongs to.
    template: speaking template whose "exercises" slots [0..2] correspond to
        parts 1..3; slots with no matching exercise are removed at the end.
    id: document id used when saving to the 'speaking' collection.
    """
    # One avatar is chosen per exam so all three parts use the same presenter.
    avatar = random.choice(list(AvatarEnum))

    # Speaking part 1: one video per question.
    found_exercises_1 = [element for element in exercises if element.get('type') == 1]
    if found_exercises_1:
        exercise_1 = found_exercises_1[0]
        sp1_questions = []
        logger.info('Creating video for speaking part 1')
        for question in exercise_1["questions"]:
            uploaded = _upload_rendered_video(question, avatar)
            if uploaded is not None:
                firebase_file_path, url = uploaded
                sp1_questions.append({
                    "text": question,
                    "video_path": firebase_file_path,
                    "video_url": url
                })
            else:
                # BUG FIX: the original logged exercise_1["question"], a key
                # that does not exist (the dict only has "questions"), so the
                # error path itself raised KeyError. Log the failing question.
                logger.error("Failed to create video for part 1 question: " + question)
        template["exercises"][0]["prompts"] = sp1_questions
        template["exercises"][0]["first_title"] = exercise_1["first_topic"]
        template["exercises"][0]["second_title"] = exercise_1["second_topic"]

    # Speaking part 2: a single video for the cue-card question.
    found_exercises_2 = [element for element in exercises if element.get('type') == 2]
    if found_exercises_2:
        exercise_2 = found_exercises_2[0]
        logger.info('Creating video for speaking part 2')
        uploaded = _upload_rendered_video(exercise_2["question"], avatar)
        if uploaded is not None:
            sp2_video_path, sp2_video_url = uploaded
            template["exercises"][1]["prompts"] = exercise_2["prompts"]
            template["exercises"][1]["text"] = exercise_2["question"]
            template["exercises"][1]["title"] = exercise_2["topic"]
            template["exercises"][1]["video_url"] = sp2_video_url
            template["exercises"][1]["video_path"] = sp2_video_path
        else:
            logger.error("Failed to create video for part 2 question: " + exercise_2["question"])

    # Speaking part 3: one video per follow-up question.
    found_exercises_3 = [element for element in exercises if element.get('type') == 3]
    if found_exercises_3:
        exercise_3 = found_exercises_3[0]
        sp3_questions = []
        logger.info('Creating videos for speaking part 3')
        for question in exercise_3["questions"]:
            uploaded = _upload_rendered_video(question, avatar)
            if uploaded is not None:
                firebase_file_path, url = uploaded
                sp3_questions.append({
                    "text": question,
                    "video_path": firebase_file_path,
                    "video_url": url
                })
            else:
                logger.error("Failed to create video for part 3 question: " + question)
        template["exercises"][2]["prompts"] = sp3_questions
        template["exercises"][2]["title"] = exercise_3["topic"]

    # Remove unused slots from the highest index down so earlier pops don't
    # shift the indices of the later ones.
    if not found_exercises_3:
        template["exercises"].pop(2)
    if not found_exercises_2:
        template["exercises"].pop(1)
    if not found_exercises_1:
        template["exercises"].pop(0)

    save_to_db_with_id("speaking", template, id)
    logger.info('Saved speaking to DB with id ' + id + " : " + str(template))
def create_video(text, avatar):
    """Create, render, and download an Elai avatar video speaking *text*.

    avatar: an AvatarEnum member (what callers actually pass) or, for
        backward compatibility, a member name string.
    Returns the downloaded mp4 filename on success, or None on failure.
    Blocks while polling the Elai API every 10 seconds until rendering
    finishes or fails.
    """
    create_video_url = "https://apis.elai.io/api/v1/videos"

    # BUG FIX: callers pass an AvatarEnum *member*
    # (random.choice(list(AvatarEnum))), but the original did
    # AvatarEnum[avatar], which only accepts a member *name* and raises
    # KeyError for a member. Accept both forms.
    avatar_config = avatar.value if isinstance(avatar, AvatarEnum) else AvatarEnum[avatar].value
    avatar_url = avatar_config.get("avatar_url")
    avatar_code = avatar_config.get("avatar_code")
    avatar_gender = avatar_config.get("avatar_gender")
    avatar_canvas = avatar_config.get("avatar_canvas")
    voice_id = avatar_config.get("voice_id")
    voice_provider = avatar_config.get("voice_provider")

    data = {
        "name": "API test",
        "slides": [
            {
                "id": 1,
                "canvas": {
                    "objects": [
                        {
                            "type": "avatar",
                            "left": 151.5,
                            "top": 36,
                            "fill": "#4868FF",
                            "scaleX": 0.3,
                            "scaleY": 0.3,
                            "width": 1080,
                            "height": 1080,
                            "src": avatar_url,
                            "avatarType": "transparent",
                            "animation": {
                                "type": None,
                                "exitType": None
                            }
                        },
                        {
                            "type": "image",
                            "version": "5.3.0",
                            "originX": "left",
                            "originY": "top",
                            "left": 30,
                            "top": 30,
                            "width": 800,
                            "height": 600,
                            "fill": "rgb(0,0,0)",
                            "stroke": None,
                            "strokeWidth": 0,
                            "strokeDashArray": None,
                            "strokeLineCap": "butt",
                            "strokeDashOffset": 0,
                            "strokeLineJoin": "miter",
                            "strokeUniform": False,
                            "strokeMiterLimit": 4,
                            "scaleX": 0.18821429,
                            "scaleY": 0.18821429,
                            "angle": 0,
                            "flipX": False,
                            "flipY": False,
                            "opacity": 1,
                            "shadow": None,
                            "visible": True,
                            "backgroundColor": "",
                            "fillRule": "nonzero",
                            "paintFirst": "fill",
                            "globalCompositeOperation": "source-over",
                            "skewX": 0,
                            "skewY": 0,
                            "cropX": 0,
                            "cropY": 0,
                            "id": 676845479989,
                            # NOTE(review): signed CloudFront URL with an
                            # Expires policy — it will stop resolving after the
                            # expiry date; confirm whether this asset should be
                            # re-uploaded or served unsigned.
                            "src": "https://d3u63mhbhkevz8.cloudfront.net/production/uploads/66f5190349f943682dd776ff/"
                                   "en-coach-main-logo-800x600_sm1ype.jpg?Expires=1727654400&Policy=eyJTdGF0ZW1lbnQiOlt"
                                   "7IlJlc291cmNlIjoiaHR0cHM6Ly9kM3U2M21oYmhrZXZ6OC5jbG91ZGZyb250Lm5ldC9wcm9kdWN0aW9uL3"
                                   "VwbG9hZHMvNjZmNTE5MDM0OWY5NDM2ODJkZDc3NmZmL2VuLWNvYWNoLW1haW4tbG9nby04MDB4NjAwX3NtM"
                                   "XlwZS5qcGciLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE3Mjc2NTQ0"
                                   "MDB9fX1dfQ__&Signature=kTVzlDeS7cua2HiAE5G%7E-yFqbhu0bHraFH5SauUln7yuNXoX7vtiKIBYiL"
                                   "%7Eps3LCLEZS77arSZ7H%7EG8CKzabHDjAR-Y6Uc%7ELD5KQaMmk0jbAxbC3Wdoq6cfd0qIwEuodQYlC0It"
                                   "2WBidP8KsgOy3uUQ%7EvcBoqlb255yMFw4pHuptOBB1kPs%7EFyzDV0fnRNsKaYRcy0Fn2EFUp13axm0CZQ"
                                   "clazuLFM622AyCydKMy0vfxV%7Etny3sskwPaUe2OANGMFg07Q1pRuy6fUON0DsbhAh1tA2H6-nnem5KbFw"
                                   "iZK3IIwwYGBx3H41ovzC6Ejt80Fd0%7EPSHw7GzVBnUmtP-IA__&Key-Pair-Id=K1Y7U91AR6T7E5",
                            "crossOrigin": "anonymous",
                            "filters": [],
                            "_exists": True
                        }
                    ],
                    "background": "#ffffff",
                    "version": "4.4.0"
                },
                "avatar": {
                    "code": avatar_code,
                    "gender": avatar_gender,
                    "canvas": avatar_canvas
                },
                "animation": "fade_in",
                "language": "English",
                "speech": text,
                "voice": voice_id,
                "voiceType": "text",
                "voiceProvider": voice_provider
            }
        ]
    }

    response = requests.post(create_video_url, headers=POST_HEADER, json=data)
    logger.info(response.status_code)
    creation = response.json()  # parse the body once instead of twice
    logger.info(creation)

    # .get avoids a KeyError masking the real API error when creation failed.
    video_id = creation.get("_id")
    if not video_id:
        logger.error("Elai video creation returned no _id; aborting.")
        return None

    # Kick off rendering, then poll for completion.
    render_url = f"https://apis.elai.io/api/v1/videos/render/{video_id}"
    requests.post(render_url, headers=GET_HEADER)

    status_url = f"https://apis.elai.io/api/v1/videos/{video_id}"
    while True:
        response = requests.get(status_url, headers=GET_HEADER)
        response_data = response.json()
        if response_data['status'] == 'ready':
            logger.info(response_data)
            # Download the rendered mp4 next to the process.
            download_url = response_data.get('url')
            output_directory = 'download-video/'
            output_filename = video_id + '.mp4'
            response = requests.get(download_url)
            if response.status_code == 200:
                os.makedirs(output_directory, exist_ok=True)  # create if missing
                output_path = os.path.join(output_directory, output_filename)
                with open(output_path, 'wb') as f:
                    f.write(response.content)
                logger.info(f"File '{output_filename}' downloaded successfully.")
                return output_filename
            logger.error(f"Failed to download file. Status code: {response.status_code}")
            return None
        elif response_data['status'] == 'failed':
            # CONSISTENCY FIX: use the module logger, not print().
            logger.error('Video creation failed.')
            return None
        else:
            logger.info('Video is still processing. Checking again in 10 seconds...')
            time.sleep(10)

View File

@@ -0,0 +1,67 @@
# Adding new training content
If you're ever handed the grueling task of adding more tips from manuals, my condolences.
There are 4 components of a training content tip: the tip itself, the question, the additional and the segment.
The tip is the actual tip, if the manual doesn't have an exercise that relates to that tip fill this out:
```json
{
"category": "<the category of the tip that will be used to categorize the embeddings and also used in the tip header>",
"embedding": "<the relevant part of the tip that is needed to make the embedding (clean the tip of useless info that might mislead the queries)>",
"text": "<The text that the llm will use to assess whether the tip is relevant according to the performance of the student (most of the time just include all the text of the tip)>",
"html": "<The html that will be rendered in the tip component>",
"id": "<a uuid4>",
"verified": <this is just to keep track of the tips that were manually confirmed by you>,
"standalone": <if the tip doesn't have an exercise this is true else it's false>
}
```
If the manual does have an exercise that relates to the tip:
```json
{
// ...
"question": "<the exercise question(s) html>",
"additional": "<context of the question html>",
"segments": [
{
"html": "<the html of a segment, you MUST wrap the html in a single <div> >",
"wordDelay": <the speed at which letters will be placed on the segment, 200ms is a good one>,
"holdDelay": <the total time that the segment will be paused before moving onto the next segment, 5000ms is a good one>,
"highlight": [
{
"targets": ["<the target of the highlight can be: question, additional, segment, all>"],
"phrases": ["<the words/phrases/raw html you want to highlight>"]
}
],
"insertHTML": [
{
"target": "<the target of the insert can be: question, additional>",
"targetId": "<the id of an html element>",
"position": "<the position of the inserted html can be: replace, prepend and append. Most of the time you will only use replace>",
"html": "<the html to replace the element with targetId>"
},
]
}
]
}
```
In order to create these structures you will have to manually screenshot the tips, exercises, context and send them to an llm (gpt-4o or claude)
with a prompt like "get me the html for this", you will have to check whether the html is properly structured and then
paste them in the prompt.txt file of this directory and send it
back to an llm.
Afterwards you will have to check whether the default styles in /src/components/TrainingContent/FormatTip.ts are adequate; div
(except for the wrapper div of a segment) and span styles are not overridden, but you should aim to use the least amount of
styles in the tip itself and create custom reusable html elements
in FormatTip.ts.
After checking all of the tips render you will have to create new embeddings in the backend, you CAN'T change ids of existing tips since there
might be training tips that are already stored in firebase.
This is a very tedious task here's a recommendation for [background noise](https://www.youtube.com/watch?v=lDnva_3fcTc).
GL HF

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,62 @@
I am going to give you an exercise and a tip, explain how to solve the exercise and how the tip is beneficial,
your response must be with this format:
{
"segments": [
{
"html": "",
"wordDelay": 0,
"holdDelay"; 0,
"highlight": [
{
"targets": [],
"phrases": []
}
],
"insertHTML": [
{
"target": "",
"targetId": "",
"position": "replace",
"html": ""
}
]
}
]
}
Basically you are going to produce multiple objects and place it in data with the format above to integrate with a react component that highlights passages and inserts html,
these objects are segments of your explanation that will be presented to a student.
In the html field place a segment of your response that will be streamed to the component with a delay of "wordDelay" ms and in the end of that segment stream the phrases or words inside
"highlight" will be highlighted for "holdDelay" ms, and the cycle repeats until the whole data array is iterated. Make it so
that the delays are reasonable for the student to have time to process the message you're trying to send. Take note that
"wordDelay" is the time between words to display (always 200), and "holdDelay" (no less than 5000) is the total time the highlighter will highlight what you put
inside "highlight".
There are 3 target areas:
- "question": where the question is placed
- "additional": where additional content is placed required to answer the question (this section is optional)
- "segment": a particular segment
You can use these targets in highlight and insertHTML. In order for insertHTML to work, you will have to place an html element with an "id" attribute
in the targets you will reference and provide the id via the "targetId", by this I mean if you want to use insert you will need to provide me the
html I've sent you with either a placeholder element with an id set or set an id in an existent element.
If there are already id's in the html I'm giving you then you must use insertHtml.
Each segment's html will be rendered in a div that has margins; you should condense the information — don't give me just single short phrases that occupy a whole div.
As previously said this will be seen by a student, so show some train of thought to solve the exercise.
All the segment's html must be wrapped in a div element, and again since this div element will be rendered with some margins make proper use of the segments html.
Try to make bulletpoints.
Don't explicitly mention the tip right away at the beginning; aim more towards the end.
Tip:
Target: "question"
Target: "additional"

View File

@@ -0,0 +1,34 @@
import json
import os
from dotenv import load_dotenv
from pymongo import MongoClient
load_dotenv()
# staging: encoach-staging.json
# prod: storied-phalanx-349916.json
# Database handle resolved from env vars loaded by load_dotenv() above.
mongo_db = MongoClient(os.getenv('MONGODB_URI'))[os.getenv('MONGODB_DB')]


def _extract_walkthrough_tips(book):
    """Flatten the book's units/pages into walkthrough-tip documents."""
    tips = []
    for unit in book["units"]:
        for page in unit["pages"]:
            for tip in page["tips"]:
                new_tip = {
                    "id": tip["id"],
                    "standalone": tip["standalone"],
                    "tipCategory": tip["category"],
                    "tipHtml": tip["html"],
                }
                # Non-standalone tips carry the exercise they belong to.
                if not tip["standalone"]:
                    new_tip["exercise"] = tip["exercise"]
                tips.append(new_tip)
    return tips


if __name__ == "__main__":
    with open('pathways_2_rw.json', 'r', encoding='utf-8') as file:
        book = json.load(file)
    tips = _extract_walkthrough_tips(book)
    # insert_many performs a single round-trip instead of one insert_one per
    # tip (the original also bound the unused insert_one result to doc_ref).
    if tips:
        mongo_db.walkthrough.insert_many(tips)