# GPT-3.5 IELTS Examiner — Streamlit app: transcribes a spoken answer with
# Whisper and grades it with the OpenAI ChatCompletion API.
# Standard library
import os

# Third-party
import openai
import whisper
from dotenv import load_dotenv

# Pull OPENAI_API_KEY (and any other settings) out of a local .env file
# before the key is read below.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def _whisper_model():
    # Lazily load the Whisper "base" model once and reuse it across calls;
    # reloading on every grade request is slow and wasteful.
    if getattr(_whisper_model, "_model", None) is None:
        _whisper_model._model = whisper.load_model("base")
    return _whisper_model._model


def _transcribe(audio_path):
    # Run Whisper on *audio_path* and return the plain transcript text.
    result = _whisper_model().transcribe(
        audio_path, fp16=False, language='English', verbose=True
    )
    return result["text"]


def correct_answer(
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    question_type,
    question,
    answer_path,
):
    """Transcribe a spoken IELTS answer and grade it with GPT-3.5.

    Parameters
    ----------
    max_tokens, temperature, top_p, frequency_penalty:
        Sampling hyperparameters forwarded to the ChatCompletion call.
        Coerced with int()/float(), so Streamlit slider values are accepted.
    question_type:
        The IELTS part being graded (e.g. "Speaking Part 1"); embedded in
        the grading prompt.
    question:
        The question text the candidate answered.
    answer_path:
        Filesystem path to the audio recording of the candidate's answer.

    Returns
    -------
    str
        The model's grade response (expected to be JSON-formatted per the
        prompt), or a human-readable error message when *answer_path*
        does not exist.
    """
    # Guard clause: bail out before loading any model when the audio file
    # is missing.  Returning a message (instead of the old implicit None)
    # gives the st.write() caller something visible to render.
    if not os.path.exists(answer_path):
        print("File not found:", answer_path)
        return f"File not found: {answer_path}"

    answer = _transcribe(answer_path)
    print(answer)

    # NOTE(review): openai.ChatCompletion is the pre-1.0 openai-python API;
    # kept as-is to match the installed dependency.
    res = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        frequency_penalty=float(frequency_penalty),
        messages=[
            {
                "role": "system",
                "content": "You are a IELTS examiner.",
            },
            {
                "role": "system",
                "content": f"The question you have to grade is of type {question_type} and is the following: {question}",
            },
            {
                "role": "user",
                "content": "Please provide a JSON object response with the overall grade and breakdown grades, "
                "formatted as follows: {'overall': 7.0, 'task_response': {'Fluency and Coherence': 8.0, "
                "'Lexical Resource': 6.5, 'Grammatical Range and Accuracy': 7.5, 'Pronunciation': "
                "6.0}}",
            },
            {
                "role": "user",
                "content": "If the answer is unrelated to the question give it the minimum grade.",
            },
            {
                "role": "user",
                "content": f"Evaluate this answer according to ielts grading system: {answer}",
            },
        ],
    )
    return res["choices"][0]["message"]["content"]
import streamlit as st

# Application title.
st.title("GPT-3.5 IELTS Examiner")

# Which IELTS speaking part the question belongs to.
question_type = st.selectbox(
    "What is the question type?",
    (
        "Speaking Part 1",
        "Speaking Part 2",
        "Speaking Part 3",
    ),
)

# The question to grade.  Examples:
#   PT-1: How do you usually spend your weekends? Why?
#   PT-2: Describe someone you know who does something well. You should say
#         who this person is, how you know them, what they do well and why
#         you think they are so good at it.
question = st.text_area("Enter the question:", height=100)

# Path to the recorded answer, e.g. audio-samples/mynameisjeff.wav
answer_path = st.text_area("Enter the answer path:", height=100)

# Model hyperparameters.
# max_tokens is an integer count, so use an int slider; minimum is 1
# because the API rejects max_tokens=0.
token = st.slider("Token", min_value=1, max_value=2000, value=1000, step=1)
temp = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01)
top_p = st.slider("Top_p", min_value=0.0, max_value=1.0, value=0.9, step=0.01)
f_pen = st.slider("Frequency Penalty", min_value=-1.0, max_value=1.0, value=0.5, step=0.01)

# Echo the currently selected parameters.
with st.expander("Current Parameter"):
    st.write("Current Token :", token)
    st.write("Current Temperature :", temp)
    st.write("Current Nucleus Sampling :", top_p)
    st.write("Current Frequency Penalty :", f_pen)

# Grade the answer on demand and render the model's response.
if st.button("Grade"):
    st.write(correct_answer(token, temp, top_p, f_pen, question_type, question, answer_path))