From 838411cbdd60801da64c75d67af83c4c73194834 Mon Sep 17 00:00:00 2001 From: Cristiano Ferreira Date: Wed, 19 Apr 2023 23:35:53 +0100 Subject: [PATCH] first commit --- .env | 1 + .idea/{flaskProject.iml => ielts-be.iml} | 2 +- .idea/misc.xml | 2 +- .idea/modules.xml | 2 +- .idea/vcs.xml | 6 ++ app.py | 35 +++++++- playground.py | 102 +++++++++++++++++++++++ 7 files changed, 144 insertions(+), 6 deletions(-) create mode 100644 .env rename .idea/{flaskProject.iml => ielts-be.iml} (90%) create mode 100644 .idea/vcs.xml create mode 100644 playground.py diff --git a/.env b/.env new file mode 100644 index 0000000..a847142 --- /dev/null +++ b/.env @@ -0,0 +1 @@ +OPENAI_API_KEY=sk-REDACTED-LEAKED-KEY-MUST-BE-ROTATED-NEVER-COMMIT-SECRETS \ No newline at end of file diff --git a/.idea/flaskProject.iml b/.idea/ielts-be.iml similarity index 90% rename from .idea/flaskProject.iml rename to .idea/ielts-be.iml index 688ba73..a750edf 100644 --- a/.idea/flaskProject.iml +++ b/.idea/ielts-be.iml @@ -7,7 +7,7 @@ - + diff --git a/.idea/misc.xml b/.idea/misc.xml index 5fc5ec2..d56657a 100644 --- a/.idea/misc.xml +++ b/.idea/misc.xml @@ -1,4 +1,4 @@ - + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml index 2c2d842..31940e2 100644 --- a/.idea/modules.xml +++ b/.idea/modules.xml @@ -2,7 +2,7 @@ - + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..94a25f7 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/app.py b/app.py index 5d20a01..d90183e 100644 --- a/app.py +++ b/app.py @@ -1,12 +1,41 @@ +import openai from flask import Flask +import os +from dotenv import load_dotenv app = Flask(__name__) +load_dotenv() +openai.api_key = os.getenv("OPENAI_API_KEY") @app.route('/') -def hello_world(): # put application's code here - return 'Hello World!' 
- +def generate_summarizer( + max_tokens, + temperature, + top_p, + frequency_penalty, + prompt, + person_type, +): + res = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + max_tokens=100, + temperature=0.7, + top_p=0.5, + frequency_penalty=0.5, + messages= + [ + { + "role": "system", + "content": "You are a helpful assistant for text summarization.", + }, + { + "role": "user", + "content": f"Summarize this for a {person_type}: {prompt}", + }, + ], + ) + return res["choices"][0]["message"]["content"] if __name__ == '__main__': app.run() diff --git a/playground.py b/playground.py new file mode 100644 index 0000000..7e1dd96 --- /dev/null +++ b/playground.py @@ -0,0 +1,102 @@ +import openai +import os +from dotenv import load_dotenv + +load_dotenv() +openai.api_key = os.getenv("OPENAI_API_KEY") + + +def generate_summarizer( + max_tokens, + temperature, + top_p, + frequency_penalty, + question_type, + question, + answer +): + res = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + max_tokens=int(max_tokens), + temperature=float(temperature), + top_p=float(top_p), + frequency_penalty=float(frequency_penalty), + messages= + [ + { + "role": "system", + "content": "You are a IELTS examiner.", + }, + { + "role": "system", + "content": f"The question you have to grade is of type {question_type} and is the following: {question}", + }, + { + "role": "system", + "content": "Please provide a JSON object response with the overall grade and breakdown grades, " + "formatted as follows: {'overall': 7.0, 'task_response': {'Task Achievement': 8.0, " + "'Coherence and Cohesion': 6.5, 'Lexical Resource': 7.5, 'Grammatical Range and Accuracy': " + "6.0}}", + }, + { + "role": "system", + "content": "Don't give explanations for the grades, just provide the json with the grades.", + }, + { + "role": "user", + "content": f"Evaluate this answer according to ielts grading system: {answer}", + }, + ], + ) + return res["choices"][0]["message"]["content"] + + +import streamlit as 
st + +# Set the application title +st.title("GPT-3.5 IELTS Examiner") + +# qt_col, q_col = st.columns(2) + +# Selection box to select the question type +# with qt_col: +question_type = st.selectbox( + "What is the question type?", + ( + "Listening", + "Reading", + "Writing Task 1", + "Writing Task 2", + "Speaking Part 1", + "Speaking Part 2" + ), +) + +# Provide the input area for question to be answered +# with q_col: +question = st.text_area("Enter the question:", height=100) + +# Provide the input area for text to be summarized +answer = st.text_area("Enter the answer:", height=100) + +# Initiate two columns for section to be side-by-side +# col1, col2 = st.columns(2) + +# Slider to control the model hyperparameter +# with col1: +token = st.slider("Token", min_value=0.0, max_value=2000.0, value=1000.0, step=1.0) +temp = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01) +top_p = st.slider("Top_p", min_value=0.0, max_value=1.0, value=0.9, step=0.01) +f_pen = st.slider("Frequency Penalty", min_value=-1.0, max_value=1.0, value=0.5, step=0.01) + +# Showing the current parameter used for the model +# with col2: +with st.expander("Current Parameter"): + st.write("Current Token :", token) + st.write("Current Temperature :", temp) + st.write("Current Nucleus Sampling :", top_p) + st.write("Current Frequency Penalty :", f_pen) + +# Creating button for execute the text summarization +if st.button("Grade"): + st.write(generate_summarizer(token, temp, top_p, f_pen, question_type, question, answer))