Batch import wasn't updated

This commit is contained in:
Carlos-Mesquita
2024-11-06 11:01:39 +00:00
parent e51cd891d2
commit a2e96f8e54
18 changed files with 124 additions and 78 deletions

View File

@@ -4,6 +4,7 @@ from typing import Union
import aiofiles
from aiobotocore.client import BaseClient
from app.dtos.listening import Dialog
from app.services.abc import ITextToSpeechService
from app.configs.constants import NeuralVoices
@@ -22,14 +23,15 @@ class AWSPolly(ITextToSpeechService):
)
return await tts_response['AudioStream'].read()
async def text_to_speech(self, text: Union[list[str], str], file_name: str):
if isinstance(text, str):
audio_segments = await self._text_to_speech(text)
elif isinstance(text, list):
audio_segments = await self._conversation_to_speech(text)
else:
async def text_to_speech(self, dialog: Dialog) -> bytes:
if not dialog.conversation and not dialog.monologue:
raise ValueError("Unsupported argument for text_to_speech")
if not dialog.conversation:
audio_segments = await self._text_to_speech(dialog.monologue)
else:
audio_segments = await self._conversation_to_speech(dialog)
final_message = await self.synthesize_speech(
"This audio recording, for the listening exercise, has finished.",
"Stephen"
@@ -40,27 +42,26 @@ class AWSPolly(ITextToSpeechService):
# Combine the audio segments into a single audio file
combined_audio = b"".join(audio_segments)
# Save the combined audio to a single file
async with aiofiles.open(file_name, "wb") as f:
await f.write(combined_audio)
print("Speech segments saved to " + file_name)
return combined_audio
# Save the combined audio to a single file
#async with aiofiles.open(file_name, "wb") as f:
# await f.write(combined_audio)
#print("Speech segments saved to " + file_name)
async def _text_to_speech(self, text: str):
    """Synthesize a monologue into a list of audio segments.

    One neural voice is picked at random and used for every chunk of
    the divided text, so the whole monologue sounds like one speaker.
    Segments are returned in the order the chunks appear in the text.
    """
    chosen_voice = random.choice(NeuralVoices.ALL_NEURAL_VOICES)['Id']
    # Asynchronously synthesize each chunk with the same voice.
    return [
        await self.synthesize_speech(chunk, chosen_voice)
        for chunk in self._divide_text(text)
    ]
async def _conversation_to_speech(self, dialog: Dialog):
    """Synthesize every turn of a dialog's conversation.

    Each conversation payload carries its own text and voice, so turns
    can alternate between speakers. Audio segments are returned in the
    same order as the conversation turns.
    """
    return [
        await self.synthesize_speech(turn.text, turn.voice)
        for turn in dialog.conversation
    ]