Add verification for words in writing grading.

This commit is contained in:
Cristiano Ferreira
2023-09-05 21:18:42 +01:00
parent 00375489e8
commit c275cb887d

46
app.py
View File

@@ -128,11 +128,23 @@ def grade_writing_task_1():
question = data.get('question') question = data.get('question')
context = data.get('context') context = data.get('context')
answer = data.get('answer') answer = data.get('answer')
messages = get_grading_messages(QuestionType.WRITING_TASK_1, question, answer, context) if has_words(answer):
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], messages = get_grading_messages(QuestionType.WRITING_TASK_1, question, answer, context)
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE) map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
return response response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
return response
else:
return {
'comment': "The answer does not contain any English words.",
'overall': 0,
'task_response': {
'Coherence and Cohesion': 0,
'Grammatical Range and Accuracy': 0,
'Lexical Resource': 0,
'Task Achievement': 0
}
}
except Exception as e: except Exception as e:
return str(e) return str(e)
@@ -144,11 +156,23 @@ def grade_writing_task_2():
data = request.get_json() data = request.get_json()
question = data.get('question') question = data.get('question')
answer = data.get('answer') answer = data.get('answer')
messages = get_grading_messages(QuestionType.WRITING_TASK_2, question, answer) if has_words(answer):
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], messages = get_grading_messages(QuestionType.WRITING_TASK_2, question, answer)
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE) map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)
return response response = make_openai_call(GPT_3_5_TURBO, messages, token_count, GRADING_FIELDS, GRADING_TEMPERATURE)
return response
else:
return {
'comment': "The answer does not contain any English words.",
'overall': 0,
'task_response': {
'Coherence and Cohesion': 0,
'Grammatical Range and Accuracy': 0,
'Lexical Resource': 0,
'Task Achievement': 0
}
}
except Exception as e: except Exception as e:
return str(e) return str(e)
@@ -200,7 +224,7 @@ def grade_speaking_task_1():
download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name) download_firebase_file(FIREBASE_BUCKET, answer_firebase_path, sound_file_name)
answer = speech_to_text(sound_file_name) answer = speech_to_text(sound_file_name)
if has_words(answer): if has_words(answer):
messages = get_grading_messages(QuestionType.SPEAKING_1, question, answer) messages = get_grading_messages(QuestionType.SPEAKING_1, question, answer)
token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'], token_count = reduce(lambda count, item: count + count_tokens(item)['n_tokens'],
map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0) map(lambda x: x["content"], filter(lambda x: "content" in x, messages)), 0)