"""Helpers for calling the OpenAI chat API and validating its JSON responses."""

import json
import os

import openai
from dotenv import load_dotenv

# Pull OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# gpt-3.5-turbo context window (prompt + completion tokens combined).
MAX_TOKENS = 4097
# Nucleus-sampling cutoff passed to the API.
TOP_P = 0.9
# Penalty applied to tokens already seen in the completion.
FREQUENCY_PENALTY = 0.5

# Maximum number of retries when a response is missing required fields.
TRY_LIMIT = 1

# Module-level retry counter mutated by make_openai_call.
try_count = 0
def process_response(input_string):
    """Parse a model response string into a JSON object.

    First tries to parse ``input_string`` as-is; if that fails, retries after
    a best-effort cleanup (single quotes -> double quotes, doubled newlines
    collapsed to spaces) to handle Python-repr-style pseudo-JSON the model
    sometimes emits.

    Returns:
        The parsed object, or ``{}`` when the string cannot be parsed
        (an "Invalid JSON string!" message is printed in that case).
    """
    try:
        # Fast path: the response is already valid JSON.  This also protects
        # valid JSON containing apostrophes from the quote-swap heuristic
        # below, which would otherwise corrupt it.
        return json.loads(input_string)
    except json.JSONDecodeError:
        pass

    # Heuristic cleanup for Python-dict-style output ('single quotes') and
    # stray blank lines embedded in the payload.
    parsed_string = input_string.replace("'", "\"").replace("\n\n", " ")
    try:
        return json.loads(parsed_string)
    except json.JSONDecodeError:
        print("Invalid JSON string!")
        return {}
def check_fields(obj, fields):
    """Return True iff every name in *fields* is present as a key of *obj*."""
    for required in fields:
        if required not in obj:
            return False
    return True
def make_openai_call(messages, token_count, fields_to_check, temperature):
    """Call the chat-completion API and validate required response fields.

    Args:
        messages: Chat messages to send to the model.
        token_count: Tokens already consumed by the prompt; the completion
            budget is ``MAX_TOKENS - token_count - 300`` (300 kept as headroom).
        fields_to_check: Keys that must appear in the parsed JSON response.
        temperature: Sampling temperature forwarded to the API.

    Returns:
        The parsed response dict when all required fields are present;
        otherwise, after ``TRY_LIMIT`` retries, the raw response text so the
        caller can inspect it.
    """
    global try_count

    result = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        # Reserve 300 tokens of headroom below the model's context window.
        max_tokens=int(MAX_TOKENS - token_count - 300),
        temperature=float(temperature),
        top_p=float(TOP_P),
        frequency_penalty=float(FREQUENCY_PENALTY),
        messages=messages,
    )

    raw_content = result["choices"][0]["message"]["content"]
    processed_response = process_response(raw_content)

    if check_fields(processed_response, fields_to_check):
        # Success: reset the retry counter and return the parsed dict.
        # BUG FIX: the original checked try_count >= TRY_LIMIT *before* this
        # case, so a successful retry still returned the raw text instead of
        # the validated dict.
        try_count = 0
        return processed_response

    if try_count < TRY_LIMIT:
        # Required fields missing: retry the whole call.
        # BUG FIX: the original recursive call omitted `temperature`,
        # raising TypeError on every retry.
        try_count = try_count + 1
        return make_openai_call(messages, token_count, fields_to_check, temperature)

    # Out of retries: reset the counter and hand back the raw text.
    try_count = 0
    return raw_content