add playgrounds

This commit is contained in:
Cristiano Ferreira
2023-05-08 09:27:24 +01:00
parent 838411cbdd
commit d1bac041c7
12 changed files with 543 additions and 0 deletions

Binary file not shown.

Binary file not shown.

BIN
audio-samples/weekends.m4a Normal file

Binary file not shown.

102
reading_playground.py Normal file
View File

@@ -0,0 +1,102 @@
import openai
import os
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()
# Configure the OpenAI client key; this is None (no validation) if
# OPENAI_API_KEY is absent from the environment/.env file.
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_summarizer(
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    question_type,
    question,
    answer
):
    """Ask gpt-3.5-turbo to grade an IELTS answer and return the raw reply text.

    The numeric knobs come straight from the UI sliders (as floats) and are
    coerced to the types the API expects. The prompt pins the model to a
    JSON-only grading response.
    """
    grading_prompt = [
        {
            "role": "system",
            "content": "You are a IELTS examiner.",
        },
        {
            "role": "system",
            "content": f"The question you have to grade is of type {question_type} and is the following: {question}",
        },
        {
            "role": "system",
            "content": "Please provide a JSON object response with the overall grade and breakdown grades, "
            "formatted as follows: {'overall': 7.0, 'task_response': {'Task Achievement': 8.0, "
            "'Coherence and Cohesion': 6.5, 'Lexical Resource': 7.5, 'Grammatical Range and Accuracy': "
            "6.0}}",
        },
        {
            "role": "system",
            "content": "Don't give explanations for the grades, just provide the json with the grades.",
        },
        {
            "role": "user",
            "content": f"Evaluate this answer according to ielts grading system: {answer}",
        },
    ]
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        frequency_penalty=float(frequency_penalty),
        messages=grading_prompt,
    )
    return completion["choices"][0]["message"]["content"]
import streamlit as st

# Application header.
st.title("GPT-3.5 IELTS Examiner")

# Question-type selector (single-column layout).
question_type = st.selectbox(
    "What is the question type?",
    (
        "Listening",
        "Reading",
        "Writing Task 1",
        "Writing Task 2",
        "Speaking Part 1",
        "Speaking Part 2",
    ),
)

# Free-text inputs: the exam question and the candidate's answer.
question = st.text_area("Enter the question:", height=100)
answer = st.text_area("Enter the answer:", height=100)

# Model hyperparameter sliders.
token = st.slider("Token", min_value=0.0, max_value=2000.0, value=1000.0, step=1.0)
temp = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01)
top_p = st.slider("Top_p", min_value=0.0, max_value=1.0, value=0.9, step=0.01)
f_pen = st.slider("Frequency Penalty", min_value=-1.0, max_value=1.0, value=0.5, step=0.01)

# Echo the currently selected parameters in a collapsible panel.
with st.expander("Current Parameter"):
    for label, value in (
        ("Current Token :", token),
        ("Current Temperature :", temp),
        ("Current Nucleus Sampling :", top_p),
        ("Current Frequency Penalty :", f_pen),
    ):
        st.write(label, value)

# Run the grading call on demand and display the model's reply.
if st.button("Grade"):
    st.write(generate_summarizer(token, temp, top_p, f_pen, question_type, question, answer))

5
run.py Normal file
View File

@@ -0,0 +1,5 @@
from streamlit.web import bootstrap
# Script names for the two playgrounds this launcher knows about.
real_s_script = 'sp1_playground.py'
# NOTE(review): real_w_script is assigned but never launched — confirm whether
# the writing playground was meant to be run as well, or if this is the
# manual switch point.
real_w_script = 'wt2_playground.py'
# Launch the speaking playground under Streamlit programmatically (no CLI).
# Args: (script path, command line string, argv list, flag options dict).
bootstrap.run(real_s_script, f'run.py {real_s_script}', [], {})

109
sp1_playground.py Normal file
View File

@@ -0,0 +1,109 @@
import openai
import os
from dotenv import load_dotenv
import whisper
# Load variables from a local .env file into the process environment.
load_dotenv()
# Configure the OpenAI client key; this is None (no validation) if
# OPENAI_API_KEY is absent from the environment/.env file.
openai.api_key = os.getenv("OPENAI_API_KEY")
def correct_answer(
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    question_type,
    question,
    answer_path
):
    """Transcribe a spoken IELTS answer with Whisper and have gpt-3.5-turbo grade it.

    The numeric knobs come straight from the UI sliders (as floats) and are
    coerced to the types the API expects. `answer_path` is a filesystem path
    to an audio recording.

    Returns the model's raw reply text, or None (after printing a notice) when
    the audio file does not exist — matching the original implicit fall-through.
    """
    # Validate the input before any heavy work: the original loaded the Whisper
    # model even when the file was missing, paying a multi-second model load
    # (and possibly a download) for nothing.
    if not os.path.exists(answer_path):
        print("File not found:", answer_path)
        return None

    # Speech-to-text with Whisper's "base" model; fp16=False avoids the FP16
    # warning on CPU-only machines.
    model = whisper.load_model("base")
    result = model.transcribe(answer_path, fp16=False, language='English', verbose=True)
    answer = result["text"]
    print(answer)  # echo the transcription for debugging

    res = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        frequency_penalty=float(frequency_penalty),
        messages=
        [
            {
                "role": "system",
                "content": "You are a IELTS examiner.",
            },
            {
                "role": "system",
                "content": f"The question you have to grade is of type {question_type} and is the following: {question}",
            },
            {
                "role": "system",
                "content": "Please provide a JSON object response with the overall grade and breakdown grades, "
                "formatted as follows: {'overall': 7.0, 'task_response': {'Fluency and Coherence': 8.0, "
                "'Lexical Resource': 6.5, 'Grammatical Range and Accuracy': 7.5, 'Pronunciation': "
                "6.0}}",
            },
            {
                "role": "system",
                "content": "Don't give explanations for the grades, just provide the json with the grades.",
            },
            {
                "role": "system",
                "content": "If the answer is unrelated to the question give it the minimum grade.",
            },
            {
                "role": "user",
                "content": f"Evaluate this answer according to ielts grading system: {answer}",
            },
        ],
    )
    return res["choices"][0]["message"]["content"]
import streamlit as st

# Application header.
st.title("GPT-3.5 IELTS Examiner")

# Speaking-section question types only in this playground.
question_type = st.selectbox(
    "What is the question type?",
    (
        "Speaking Part 1",
        "Speaking Part 2",
        "Speaking Part 3",
    ),
)

# Sample prompts kept for manual testing:
#   PT-1: How do you usually spend your weekends? Why?
#   PT-2: Describe someone you know who does something well. You should say who
#         this person is, how do you know this person, what they do well and
#         explain why you think this person is so good at doing this.
question = st.text_area("Enter the question:", height=100)

# Path to the recorded answer, e.g. audio-samples/mynameisjeff.wav
answer_path = st.text_area("Enter the answer path:", height=100)

# Model hyperparameter sliders.
token = st.slider("Token", min_value=0.0, max_value=2000.0, value=1000.0, step=1.0)
temp = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01)
top_p = st.slider("Top_p", min_value=0.0, max_value=1.0, value=0.9, step=0.01)
f_pen = st.slider("Frequency Penalty", min_value=-1.0, max_value=1.0, value=0.5, step=0.01)

# Echo the currently selected parameters in a collapsible panel.
with st.expander("Current Parameter"):
    for label, value in (
        ("Current Token :", token),
        ("Current Temperature :", temp),
        ("Current Nucleus Sampling :", top_p),
        ("Current Frequency Penalty :", f_pen),
    ):
        st.write(label, value)

# Transcribe + grade on demand and display the model's reply.
if st.button("Grade"):
    st.write(correct_answer(token, temp, top_p, f_pen, question_type, question, answer_path))

102
sp2_playground.py Normal file
View File

@@ -0,0 +1,102 @@
import openai
import os
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()
# Configure the OpenAI client key; this is None (no validation) if
# OPENAI_API_KEY is absent from the environment/.env file.
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_summarizer(
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    question_type,
    question,
    answer
):
    """Ask gpt-3.5-turbo to grade an IELTS answer and return the raw reply text.

    The numeric knobs come straight from the UI sliders (as floats) and are
    coerced to the types the API expects. The prompt pins the model to a
    JSON-only grading response.
    """
    grading_prompt = [
        {
            "role": "system",
            "content": "You are a IELTS examiner.",
        },
        {
            "role": "system",
            "content": f"The question you have to grade is of type {question_type} and is the following: {question}",
        },
        {
            "role": "system",
            "content": "Please provide a JSON object response with the overall grade and breakdown grades, "
            "formatted as follows: {'overall': 7.0, 'task_response': {'Task Achievement': 8.0, "
            "'Coherence and Cohesion': 6.5, 'Lexical Resource': 7.5, 'Grammatical Range and Accuracy': "
            "6.0}}",
        },
        {
            "role": "system",
            "content": "Don't give explanations for the grades, just provide the json with the grades.",
        },
        {
            "role": "user",
            "content": f"Evaluate this answer according to ielts grading system: {answer}",
        },
    ]
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        frequency_penalty=float(frequency_penalty),
        messages=grading_prompt,
    )
    return completion["choices"][0]["message"]["content"]
import streamlit as st

# Application header.
st.title("GPT-3.5 IELTS Examiner")

# Question-type selector (single-column layout).
question_type = st.selectbox(
    "What is the question type?",
    (
        "Listening",
        "Reading",
        "Writing Task 1",
        "Writing Task 2",
        "Speaking Part 1",
        "Speaking Part 2",
    ),
)

# Free-text inputs: the exam question and the candidate's answer.
question = st.text_area("Enter the question:", height=100)
answer = st.text_area("Enter the answer:", height=100)

# Model hyperparameter sliders.
token = st.slider("Token", min_value=0.0, max_value=2000.0, value=1000.0, step=1.0)
temp = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01)
top_p = st.slider("Top_p", min_value=0.0, max_value=1.0, value=0.9, step=0.01)
f_pen = st.slider("Frequency Penalty", min_value=-1.0, max_value=1.0, value=0.5, step=0.01)

# Echo the currently selected parameters in a collapsible panel.
with st.expander("Current Parameter"):
    for label, value in (
        ("Current Token :", token),
        ("Current Temperature :", temp),
        ("Current Nucleus Sampling :", top_p),
        ("Current Frequency Penalty :", f_pen),
    ):
        st.write(label, value)

# Run the grading call on demand and display the model's reply.
if st.button("Grade"):
    st.write(generate_summarizer(token, temp, top_p, f_pen, question_type, question, answer))

12
testing.py Normal file
View File

@@ -0,0 +1,12 @@
import whisper
import os
from pydub import AudioSegment

# Quick manual check: transcribe a sample audio file with Whisper's "base" model.
model = whisper.load_model("base")
file_path = "audio-samples/mynameisjeff.wav"

if os.path.exists(file_path):
    # BUG FIX: the pydub load used to happen before the existence check, so a
    # missing file raised inside AudioSegment.from_file and the "File not
    # found" branch below was unreachable. Decode inside the guarded branch so
    # container/codec problems only surface for files that exist.
    audio_file = AudioSegment.from_file(file_path)
    # fp16=False avoids the FP16 warning on CPU-only machines.
    result = model.transcribe(file_path, fp16=False, language='English', verbose=True)
    print(result["text"])
else:
    print("File not found:", file_path)

View File

@@ -0,0 +1,9 @@
Q: It is important for children to learn the difference between right and wrong at an early age. Punishment is necessary to help them learn this distinction.
To what extent do you agree or disagree with this opinion?
What sort of punishment should parents and teachers be allowed to use to teach good behaviour to children?
A: In today's world, moral values and ethics play a vital role in shaping the character of an individual. Children are the building blocks of society, and it is important to inculcate a sense of right and wrong in them from an early age. While punishment can be an effective tool in teaching the difference between right and wrong, it should not be the only approach. In my opinion, punishment should be used in moderation, and parents and teachers should focus on positive reinforcement and guidance to teach good behavior.
Punishment can be used to correct behavior and to help children understand the consequences of their actions. However, excessive punishment can be counterproductive and can even have harmful effects on children. Physical punishment, such as hitting or spanking, should be avoided as it can lead to physical and emotional trauma. Instead, parents and teachers should consider alternative forms of punishment such as time-outs, loss of privileges or extra chores. These methods can be effective in conveying the message without causing physical harm.
Furthermore, parents and teachers should focus on positive reinforcement to encourage good behavior. Praising children when they exhibit good behavior can be an effective way to motivate them to continue behaving well. Teachers can use stickers or small rewards to encourage students to work hard and behave well in class. Parents can use similar methods at home to reinforce good behavior.
In conclusion, it is important for children to learn the difference between right and wrong at an early age. Punishment can be a useful tool in teaching this distinction, but it should not be the only approach. Parents and teachers should use positive reinforcement and guidance to encourage good behavior, and should only resort to punishment in moderation and when necessary. Any form of punishment should be non-violent and should not cause physical or emotional harm to the child.

102
wt1_playground.py Normal file
View File

@@ -0,0 +1,102 @@
import openai
import os
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()
# Configure the OpenAI client key; this is None (no validation) if
# OPENAI_API_KEY is absent from the environment/.env file.
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_summarizer(
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    question_type,
    question,
    answer
):
    """Ask gpt-3.5-turbo to grade an IELTS answer and return the raw reply text.

    The numeric knobs come straight from the UI sliders (as floats) and are
    coerced to the types the API expects. The prompt pins the model to a
    JSON-only grading response.
    """
    grading_prompt = [
        {
            "role": "system",
            "content": "You are a IELTS examiner.",
        },
        {
            "role": "system",
            "content": f"The question you have to grade is of type {question_type} and is the following: {question}",
        },
        {
            "role": "system",
            "content": "Please provide a JSON object response with the overall grade and breakdown grades, "
            "formatted as follows: {'overall': 7.0, 'task_response': {'Task Achievement': 8.0, "
            "'Coherence and Cohesion': 6.5, 'Lexical Resource': 7.5, 'Grammatical Range and Accuracy': "
            "6.0}}",
        },
        {
            "role": "system",
            "content": "Don't give explanations for the grades, just provide the json with the grades.",
        },
        {
            "role": "user",
            "content": f"Evaluate this answer according to ielts grading system: {answer}",
        },
    ]
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        frequency_penalty=float(frequency_penalty),
        messages=grading_prompt,
    )
    return completion["choices"][0]["message"]["content"]
import streamlit as st

# Application header.
st.title("GPT-3.5 IELTS Examiner")

# Question-type selector (single-column layout).
question_type = st.selectbox(
    "What is the question type?",
    (
        "Listening",
        "Reading",
        "Writing Task 1",
        "Writing Task 2",
        "Speaking Part 1",
        "Speaking Part 2",
    ),
)

# Free-text inputs: the exam question and the candidate's answer.
question = st.text_area("Enter the question:", height=100)
answer = st.text_area("Enter the answer:", height=100)

# Model hyperparameter sliders.
token = st.slider("Token", min_value=0.0, max_value=2000.0, value=1000.0, step=1.0)
temp = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01)
top_p = st.slider("Top_p", min_value=0.0, max_value=1.0, value=0.9, step=0.01)
f_pen = st.slider("Frequency Penalty", min_value=-1.0, max_value=1.0, value=0.5, step=0.01)

# Echo the currently selected parameters in a collapsible panel.
with st.expander("Current Parameter"):
    for label, value in (
        ("Current Token :", token),
        ("Current Temperature :", temp),
        ("Current Nucleus Sampling :", top_p),
        ("Current Frequency Penalty :", f_pen),
    ):
        st.write(label, value)

# Run the grading call on demand and display the model's reply.
if st.button("Grade"):
    st.write(generate_summarizer(token, temp, top_p, f_pen, question_type, question, answer))

102
wt2_playground.py Normal file
View File

@@ -0,0 +1,102 @@
import openai
import os
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()
# Configure the OpenAI client key; this is None (no validation) if
# OPENAI_API_KEY is absent from the environment/.env file.
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_summarizer(
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    question_type,
    question,
    answer
):
    """Ask gpt-3.5-turbo to grade an IELTS answer and return the raw reply text.

    The numeric knobs come straight from the UI sliders (as floats) and are
    coerced to the types the API expects. The prompt pins the model to a
    JSON-only grading response.
    """
    grading_prompt = [
        {
            "role": "system",
            "content": "You are a IELTS examiner.",
        },
        {
            "role": "system",
            "content": f"The question you have to grade is of type {question_type} and is the following: {question}",
        },
        {
            "role": "system",
            "content": "Please provide a JSON object response with the overall grade and breakdown grades, "
            "formatted as follows: {'overall': 7.0, 'task_response': {'Task Achievement': 8.0, "
            "'Coherence and Cohesion': 6.5, 'Lexical Resource': 7.5, 'Grammatical Range and Accuracy': "
            "6.0}}",
        },
        {
            "role": "system",
            "content": "Don't give explanations for the grades, just provide the json with the grades.",
        },
        {
            "role": "user",
            "content": f"Evaluate this answer according to ielts grading system: {answer}",
        },
    ]
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        frequency_penalty=float(frequency_penalty),
        messages=grading_prompt,
    )
    return completion["choices"][0]["message"]["content"]
import streamlit as st

# Application header.
st.title("GPT-3.5 IELTS Examiner")

# Question-type selector (single-column layout).
question_type = st.selectbox(
    "What is the question type?",
    (
        "Listening",
        "Reading",
        "Writing Task 1",
        "Writing Task 2",
        "Speaking Part 1",
        "Speaking Part 2",
    ),
)

# Free-text inputs: the exam question and the candidate's answer.
question = st.text_area("Enter the question:", height=100)
answer = st.text_area("Enter the answer:", height=100)

# Model hyperparameter sliders.
token = st.slider("Token", min_value=0.0, max_value=2000.0, value=1000.0, step=1.0)
temp = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01)
top_p = st.slider("Top_p", min_value=0.0, max_value=1.0, value=0.9, step=0.01)
f_pen = st.slider("Frequency Penalty", min_value=-1.0, max_value=1.0, value=0.5, step=0.01)

# Echo the currently selected parameters in a collapsible panel.
with st.expander("Current Parameter"):
    for label, value in (
        ("Current Token :", token),
        ("Current Temperature :", temp),
        ("Current Nucleus Sampling :", top_p),
        ("Current Frequency Penalty :", f_pen),
    ):
        st.write(label, value)

# Run the grading call on demand and display the model's reply.
if st.button("Grade"):
    st.write(generate_summarizer(token, temp, top_p, f_pen, question_type, question, answer))