Send the Speaking exercise recording to the backend and save it to Firebase

This commit is contained in:
Tiago Ribeiro
2023-07-11 00:29:32 +01:00
parent ce90de1b74
commit 9637cb6477
7 changed files with 130 additions and 23 deletions

View File

@@ -1,14 +1,10 @@
import {errorButtonStyle, infoButtonStyle} from "@/constants/buttonStyles";
import {SpeakingExercise, WritingExercise} from "@/interfaces/exam";
import {mdiArrowLeft, mdiArrowRight} from "@mdi/js";
import Icon from "@mdi/react";
import clsx from "clsx";
import {SpeakingExercise} from "@/interfaces/exam";
import {CommonProps} from ".";
import {Fragment, useEffect, useState} from "react";
import {toast} from "react-toastify";
import {BsCheckCircleFill, BsMicFill, BsPauseCircle, BsPlayCircle, BsTrashFill} from "react-icons/bs";
import dynamic from "next/dynamic";
import Button from "../Low/Button";
import axios from "axios";
const Waveform = dynamic(() => import("../Waveform"), {ssr: false});
const ReactMediaRecorder = dynamic(() => import("react-media-recorder").then((mod) => mod.ReactMediaRecorder), {
@@ -33,6 +29,30 @@ export default function Speaking({id, title, text, type, prompts, onNext, onBack
};
}, [isRecording]);
useEffect(() => {
  // Once a recording finishes, `mediaBlob` holds an object URL for the audio.
  // Fetch its bytes and POST them to the speaking-evaluation endpoint.
  const uploadFile = () => {
    axios
      .get(mediaBlob, {responseType: "arraybuffer"})
      .then((response) => {
        // Wrap the raw bytes in a File so the server receives a named WAV part.
        const audioBlob = Buffer.from(response.data, "binary");
        const audioFile = new File([audioBlob], "audio.wav", {type: "audio/wav"});
        const formData = new FormData();
        formData.append("audio", audioFile, "audio.wav");
        // Do NOT set Content-Type manually: a FormData body must be sent as
        // multipart/form-data with a boundary chosen by the browser/axios.
        // (The previous hard-coded "audio/mp3" header also mislabeled WAV audio.)
        return axios.post("/api/evaluate/speaking", formData);
      })
      .catch(() => {
        // Surface upload failures instead of silently dropping the recording.
        toast.error("Failed to upload the recording. Please try again.");
      });
  };
  if (mediaBlob) uploadFile();
}, [mediaBlob]);
return (
<div className="flex flex-col h-full w-full gap-9">
<div className="flex flex-col w-full gap-14 bg-mti-gray-smoke rounded-xl py-8 pb-12 px-16">