From 07e68e26502917af75efb7dd70baa86ba3421b14 Mon Sep 17 00:00:00 2001 From: Cristiano Ferreira Date: Tue, 20 Jun 2023 23:01:01 +0100 Subject: [PATCH] Api for generating writing task 2 questions and add postman collection. --- app.py | 43 ++++++++- ...rocess_response.py => openai_interface.py} | 21 ++-- postman/ielts-be.postman_environment.json | 15 +++ postman/ielts.postman_collection.json | 95 +++++++++++++++++++ 4 files changed, 159 insertions(+), 15 deletions(-) rename helper/{process_response.py => openai_interface.py} (70%) create mode 100644 postman/ielts-be.postman_environment.json create mode 100644 postman/ielts.postman_collection.json diff --git a/app.py b/app.py index cbc2e03..c24a135 100644 --- a/app.py +++ b/app.py @@ -2,7 +2,7 @@ from flask import Flask, request from flask_jwt_extended import JWTManager, jwt_required from functools import reduce from helper.token_counter import count_tokens -from helper.process_response import make_openai_call +from helper.openai_interface import make_openai_call import os from dotenv import load_dotenv @@ -14,6 +14,11 @@ app = Flask(__name__) app.config['JWT_SECRET_KEY'] = os.getenv("JWT_SECRET_KEY") jwt = JWTManager(app) +GRADING_TEMPERATURE = 0.1 +GEN_QUESTION_TEMPERATURE = 0.7 +WRITING_TASK_2_POST_FIELDS = ['overall', 'comment', 'task_response'] +WRITING_TASK_2_GET_FIELDS = ['question'] + @app.route('/writing_task2', methods=['POST']) @jwt_required() def grade_writing_task(): @@ -58,7 +63,41 @@ def grade_writing_task(): ] token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) - response = make_openai_call(messages, token_count) + response = make_openai_call(messages, token_count, WRITING_TASK_2_POST_FIELDS, GRADING_TEMPERATURE) + return response + +@app.route('/writing_task2', methods=['GET']) +@jwt_required() +def get_writing_task_question(): + messages = [ + { + "role": "system", + "content": "You are a IELTS 
program that generates questions for the exams.", + }, + { + "role": "system", + "content": "The question you have to generate is of type Writing Task 2 and is the following.", + }, + { + "role": "user", + "content": "It is mandatory for you to provide your response with the question " + "in the following json format: {'question': 'question'}", + }, + { + "role": "user", + "content": "Example output: { 'question': 'We are becoming increasingly dependent on computers. " + "They are used in businesses, hospitals, crime detection and even to fly planes. What things will " + "they be used for in the future? Is this dependence on computers a good thing or should we he more " + "auspicious of their benefits?'}", + }, + { + "role": "user", + "content": "Generate a question for IELTS exam Writing Task 2.", + }, + ] + token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], + map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) + response = make_openai_call(messages, token_count, WRITING_TASK_2_GET_FIELDS, GEN_QUESTION_TEMPERATURE) return response diff --git a/helper/process_response.py b/helper/openai_interface.py similarity index 70% rename from helper/process_response.py rename to helper/openai_interface.py index 8c5f633..8fa8a08 100644 --- a/helper/process_response.py +++ b/helper/openai_interface.py @@ -7,8 +7,8 @@ from dotenv import load_dotenv load_dotenv() openai.api_key = os.getenv("OPENAI_API_KEY") + MAX_TOKENS = 4097 -TEMPERATURE = 0.1 TOP_P = 0.9 FREQUENCY_PENALTY = 0.5 @@ -26,28 +26,23 @@ def process_response(input_string): return json_obj +def check_fields(obj, fields): + return all(field in obj for field in fields) -def check_fields(obj): - if "overall" in obj and "task_response" in obj and "comment" in obj: - return True - else: - return False - - -def make_openai_call(messages, token_count): +def make_openai_call(messages, token_count, fields_to_check, temperature): global try_count result = 
openai.ChatCompletion.create( model="gpt-3.5-turbo", - max_tokens=int(MAX_TOKENS - token_count - 500), - temperature=float(TEMPERATURE), + max_tokens=int(MAX_TOKENS - token_count - 300), + temperature=float(temperature), top_p=float(TOP_P), frequency_penalty=float(FREQUENCY_PENALTY), messages=messages ) processed_response = process_response(result["choices"][0]["message"]["content"]) - if check_fields(processed_response) is False and try_count < TRY_LIMIT: + if check_fields(processed_response, fields_to_check) is False and try_count < TRY_LIMIT: try_count = try_count + 1 - return make_openai_call(messages, token_count) + return make_openai_call(messages, token_count, fields_to_check, temperature) elif try_count >= TRY_LIMIT: try_count = 0 return result["choices"][0]["message"]["content"] diff --git a/postman/ielts-be.postman_environment.json b/postman/ielts-be.postman_environment.json new file mode 100644 index 0000000..f5f9262 --- /dev/null +++ b/postman/ielts-be.postman_environment.json @@ -0,0 +1,15 @@ +{ + "id": "e841db7c-7a8e-46ab-b199-6a14a1ec175b", + "name": "ielts-be", + "values": [ + { + "key": "jwt_token", + "value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0In0.Emrs2D3BmMP4b3zMjw0fJTPeyMwWEBDbxx2vvaWguO0", + "type": "secret", + "enabled": true + } + ], + "_postman_variable_scope": "environment", + "_postman_exported_at": "2023-06-20T21:57:42.427Z", + "_postman_exported_using": "Postman/10.15.1" +} \ No newline at end of file diff --git a/postman/ielts.postman_collection.json b/postman/ielts.postman_collection.json new file mode 100644 index 0000000..14fa7c7 --- /dev/null +++ b/postman/ielts.postman_collection.json @@ -0,0 +1,95 @@ +{ + "info": { + "_postman_id": "2e0eed9d-6a6f-4785-9972-087d51ac0265", + "name": "ielts", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "26107457" + }, + "item": [ + { + "name": "Grade Answer Writing Task 2", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { 
+ "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"question\": \"The average standard of people's health is likely to be lower in the future than it is now. To what extent do you agree or disagree with this statement?\",\r\n \"answer\": \"I completly disagree with the written statment. I believe that most of the people in the world have more information about their health and also about how they can improve their healthy conditions. Nowadays, information about how harmful is to smoke for our bodies can be seen in many packets of cigars. This is a clear example how things can change from our recent past. There is a clear trend in the diminishing of smokers and if this continues it will have a positive impact in our health. On the other hand, the alimentation habbits are changing all over the world and this can affect people’s health. However every one can choose what to eat every day. Mostly everybody, from developed societies, know the importance of having a healthy diet. Advances such as the information showed in the menus of fast food restaurants will help people to have a clever choice before they choose what to eat. Another important issue that I would like to mention is how medicine is changing. 
There are new discovers and treatments almost every week and that is an inequivoque sintom of how things are changing in order to improve the world’s health.\"\r\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/writing_task2", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "writing_task2" + ] + } + }, + "response": [] + }, + { + "name": "Gen Question Writing Task 2", + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{jwt_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "body": { + "mode": "raw", + "raw": "", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "http://127.0.0.1:5000/writing_task2", + "protocol": "http", + "host": [ + "127", + "0", + "0", + "1" + ], + "port": "5000", + "path": [ + "writing_task2" + ] + } + }, + "response": [] + } + ] +} \ No newline at end of file