-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathevaluate.py
39 lines (35 loc) · 1.52 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
from argparse import ArgumentParser
def evaluate_results(test_file_path, correct_test_file_path):
    """Compare model-tagged sentences against a gold reference file.

    Both files contain one sentence per line, each word formatted as
    ``token/TAG``. Sentences are compared line-by-line, word-by-word.
    On the first token mismatch a warning is printed and comparison stops
    (the files are assumed out of sync beyond that point).

    Prints the number of correctly tagged words and the accuracy.

    :param test_file_path: path to the model's tagged output file
    :param correct_test_file_path: path to the gold-standard tagged file
    :return: ``(correct_marked_tags, total_words)`` tuple (also printed)
    """
    total_words = 0
    correct_marked_tags = 0
    is_mismatch = False
    with open(test_file_path, "r") as tf, open(correct_test_file_path, "r") as cf:
        for tsent, csent in zip(tf, cf):
            # .split() with no argument tolerates tabs, multiple spaces,
            # and trailing newlines, unlike split(" ") on a strip()ed line.
            for tword, cword in zip(tsent.split(), csent.split()):
                # rsplit on the LAST slash: tokens like "and/or" would
                # make a plain split("/") raise ValueError (3 parts).
                ttoken, ttag = tword.rsplit("/", 1)
                ctoken, ctag = cword.rsplit("/", 1)
                if ttoken == ctoken:
                    if ttag == ctag:
                        correct_marked_tags += 1
                    total_words += 1
                else:
                    is_mismatch = True
                    break
            if is_mismatch:
                print("Refactor Mismatch in test and reference file!")
                break
    print("{0} words tag correctly Out of {1}".format(correct_marked_tags, total_words))
    # Guard against empty input: the original crashed with ZeroDivisionError
    # when no comparable words were found.
    if total_words:
        print("Accuracy: {}".format(round(correct_marked_tags / total_words, 2)))
    else:
        print("Accuracy: undefined (no comparable words)")
    return correct_marked_tags, total_words
def get_args():
    """Build the CLI parser and return the parsed arguments.

    Requires two options:
      -t / --test_output    : model-generated tagged sentences
      -c / --correct_output : gold-standard tagged sentences
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        '-t', '--test_output',
        required=True,
        help='Test Output file contains tagged sentences generated by Model',
    )
    arg_parser.add_argument(
        '-c', '--correct_output',
        required=True,
        help='Test Output file contains correct tagged sentence',
    )
    return arg_parser.parse_args()
def main():
    """Entry point: parse CLI options and run the evaluation."""
    cli_args = get_args()
    evaluate_results(cli_args.test_output, cli_args.correct_output)
# Run the evaluation only when executed as a script, not on import.
if __name__ == '__main__':
    main()