Fix the overall writing grade so it cannot fall outside the range of the task-response sub-scores; when it does, replace it with the rounded average of those sub-scores.
This commit is contained in:
9
app.py
9
app.py
@@ -252,6 +252,7 @@ def grade_writing_task_1():
|
|||||||
response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count,
|
response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count,
|
||||||
["comment"],
|
["comment"],
|
||||||
GRADING_TEMPERATURE)
|
GRADING_TEMPERATURE)
|
||||||
|
response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
|
||||||
response['fixed_text'] = get_fixed_text(answer)
|
response['fixed_text'] = get_fixed_text(answer)
|
||||||
return response
|
return response
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -324,11 +325,19 @@ def grade_writing_task_2():
|
|||||||
response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count,
|
response = make_openai_instruct_call(GPT_3_5_TURBO_INSTRUCT, message, token_count,
|
||||||
["comment"],
|
["comment"],
|
||||||
GEN_QUESTION_TEMPERATURE)
|
GEN_QUESTION_TEMPERATURE)
|
||||||
|
response["overall"] = fix_writing_overall(response["overall"], response["task_response"])
|
||||||
response['fixed_text'] = get_fixed_text(answer)
|
response['fixed_text'] = get_fixed_text(answer)
|
||||||
return response
|
return response
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
return str(e)
|
return str(e)
|
||||||
|
|
||||||
|
def fix_writing_overall(overall: float, task_response: dict):
    """Clamp an implausible overall writing grade back into a sane range.

    If ``overall`` lies outside the [min, max] range of the individual
    task-response sub-scores, replace it with the sub-scores' average
    rounded to the nearest whole band; otherwise return it unchanged.

    Args:
        overall: The model-reported overall grade.
        task_response: Mapping of criterion name -> numeric sub-score.
            Presumably grading criteria from the LLM response — the
            visible code only shows that its values are numeric.

    Returns:
        ``overall`` when it is consistent with the sub-scores (or when
        there are no sub-scores to validate against), otherwise the
        rounded average of the sub-scores as a float.
    """
    # Materialize once instead of calling .values() repeatedly.
    scores = list(task_response.values())
    if not scores:
        # Nothing to validate against — keep the reported grade.
        # (The original body would raise ValueError on max()/min()
        # of an empty collection here.)
        return overall
    if overall > max(scores) or overall < min(scores):
        # Out-of-range overall: fall back to the average sub-score.
        # NOTE: round() uses round-half-to-even (banker's rounding),
        # so e.g. an average of 6.5 rounds to 6.0, not 7.0.
        return round(sum(scores) / len(scores), 0)
    return overall
|
||||||
|
|
||||||
@app.route('/writing_task2_general', methods=['GET'])
|
@app.route('/writing_task2_general', methods=['GET'])
|
||||||
@jwt_required()
|
@jwt_required()
|
||||||
|
|||||||
Reference in New Issue
Block a user