Merge branch 'main' of https://huggingface.co/datasets/adsabs/FOCAL into main
scoring_scripts/score_focal_labels_only.py
ADDED
@@ -0,0 +1,53 @@
# imports
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import classification_report

# global param
label_list = ['Background', 'Motivation', 'Uses', 'Extends', 'Similarities', 'Differences', 'Compare/Contrast', 'Future Work', 'Unclear']

# start of function
def evaluate_FOCAL_labels(references_jsonl, predictions_jsonl, print_report=False):
    '''
    Computes precision, recall and f1-scores for the labels of citations,
    without looking at the location of these labels in the paragraph,
    between two datasets loaded from jsonl (lists of dicts with the same keys).
    In plain English, this checks that you correctly predicted the reason(s) a given citation was made,
    without checking whether you correctly found the parts of the paragraph that explain the function of the citation.
    '''

    # sort the refs and preds by unique ID
    references_jsonl = sorted(references_jsonl, key=lambda x: x['Identifier'])
    predictions_jsonl = sorted(predictions_jsonl, key=lambda x: x['Identifier'])

    # assert that the paragraphs match
    ref_paragraphs = [e['Paragraph'] for e in references_jsonl]
    pred_paragraphs = [e['Paragraph'] for e in predictions_jsonl]
    assert ref_paragraphs == pred_paragraphs

    # build y_true and y_pred; the column order is fixed by classes=label_list,
    # so we fit once and reuse transform() for the predictions
    mlb = MultiLabelBinarizer(classes=label_list)
    y_true = mlb.fit_transform([e['Functions Label'] for e in references_jsonl])
    y_pred = mlb.transform([e['Functions Label'] for e in predictions_jsonl])

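    # e.g. with label_list above, mlb.transform([['Uses', 'Background'], []]) gives
    # [[1, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]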
    # build report for printing
    report_string = classification_report(y_true=y_true,
                                          y_pred=y_pred,
                                          target_names=label_list,
                                          zero_division=0.0,
                                          output_dict=False)

    # build the same report as a dict (classification_report can't return both at once, so we call it twice; a slight waste of compute)
    report_dict = classification_report(y_true=y_true,
                                        y_pred=y_pred,
                                        target_names=label_list,
                                        zero_division=0.0,
                                        output_dict=True)

    if print_report:
        print(report_string)

    return report_dict
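For context, a minimal usage sketch (not part of the commit; the file names are hypothetical placeholders for FOCAL-style jsonl files carrying the 'Identifier', 'Paragraph' and 'Functions Label' keys the function reads):

import json

with open('references.jsonl') as f:    # hypothetical path
    references = [json.loads(line) for line in f]
with open('predictions.jsonl') as f:   # hypothetical path
    predictions = [json.loads(line) for line in f]

report = evaluate_FOCAL_labels(references, predictions, print_report=True)
print(report['micro avg']['f1-score'])

With multilabel targets, sklearn's report dict exposes one entry per label plus 'micro avg', 'macro avg', 'weighted avg' and 'samples avg' aggregates.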
scoring_scripts/score_focal_seqeval.py
ADDED
@@ -0,0 +1,179 @@
from seqeval.metrics import classification_report
from seqeval.scheme import IOB2
import numpy as np
import spacy

# preload the tokenizer
nlp = spacy.load("en_core_web_sm")
tokenizer = nlp.tokenizer

def evaluate_FOCAL_seqeval(references_jsonl, predictions_jsonl, print_reports=False):
    '''
    Computes SEQEVAL scores.
    1. convert the text into 'word' tokens using the default spaCy tokenizer
    2. turn the references and the predictions into IOB2-style labels (one label per token, 'O' by default)
    3. compute f1-scores using SEQEVAL

    Returns 2 dictionaries in classification_report style: the first with full seqeval scores,
    the second with all labels converted to a single generic LABEL.

    In plain English, this 2nd one checks that you correctly found the parts of the paragraph that explain the function of the citation,
    without checking whether you correctly predicted the reason(s) a given citation was made (the function labels).
    '''

    # sort the refs and preds by unique ID
    references_jsonl = sorted(references_jsonl, key=lambda x: x['Identifier'])
    predictions_jsonl = sorted(predictions_jsonl, key=lambda x: x['Identifier'])

    # lists of columns for easier manipulation
    ref_functions_texts = [e['Functions Text'] for e in references_jsonl]
    ref_functions_labels = [e['Functions Label'] for e in references_jsonl]
    ref_functions_start_end = [e['Functions Start End'] for e in references_jsonl]
    ref_paragraphs = [e['Paragraph'] for e in references_jsonl]

    pred_functions_texts = [e['Functions Text'] for e in predictions_jsonl]
    pred_functions_labels = [e['Functions Label'] for e in predictions_jsonl]
    pred_functions_start_end = [e['Functions Start End'] for e in predictions_jsonl]
    pred_paragraphs = [e['Paragraph'] for e in predictions_jsonl]

    # what will be used by classification_report
    y_true_all = []
    y_pred_all = []
    y_true_generic = []
    y_pred_generic = []

    # check that ref and pred text is the same
    assert ref_paragraphs == pred_paragraphs

    # go through each paragraph
    for i, p in enumerate(ref_paragraphs):

        # assign to each character a ref_label and a pred_label
        ref_labels_char = ['O' for _ in p]
        pred_labels_char = ['O' for _ in p]

        # go through each ref function to verify the data
        for j, (start, end) in enumerate(ref_functions_start_end[i]):
            # check that the text of the ref function matches the paragraph section defined by the ref's start:end
            assert p[start:end] == ref_functions_texts[i][j]

            # fill in the char-level labels
            ref_labels_char[start] = 'B-' + ref_functions_labels[i][j]
            for position in range(start + 1, end):
                ref_labels_char[position] = 'I-' + ref_functions_labels[i][j]

        # do the same for the pred functions
        for j, (start, end) in enumerate(pred_functions_start_end[i]):
            # check that the text of the pred function matches the paragraph section defined by the pred's start:end
            assert p[start:end] == pred_functions_texts[i][j]

            # fill in the char-level labels
            pred_labels_char[start] = 'B-' + pred_functions_labels[i][j]
            for position in range(start + 1, end):
                pred_labels_char[position] = 'I-' + pred_functions_labels[i][j]

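        # e.g. for p = 'We build on [1].' with a single 'Uses' function spanning (3, 11) ('build on'),
        # ref_labels_char[3] == 'B-Uses' and ref_labels_char[4:11] == ['I-Uses'] * 7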
        # tokenize the text
        tokens = tokenizer(p)

        # assign to each token a ref_label and a pred_label
        ref_labels_tokens = ['O' for _ in tokens]
        pred_labels_tokens = ['O' for _ in tokens]
        # same, but collapsing all labels into one generic label
        ref_labels_tokens_generic = ['O' for _ in tokens]
        pred_labels_tokens_generic = ['O' for _ in tokens]

        for token_idx, token in enumerate(tokens):
            # note that token_idx is the position in tokens
            # and token.idx the position in characters

            # heuristic to assign a label:
            # take the first non-'O' label found among the token's characters

            # for refs
            label = next((x for x in ref_labels_char[token.idx: token.idx + len(token)] if x != 'O'), 'O')
            if label != 'O':
                # if the span starts on a whitespace we might miss the B-, since the tokenizer often skips whitespaces;
                # check whether we need to change an I- into a B-
                if label[:2] == 'I-':
                    if token_idx == 0 or ref_labels_tokens[token_idx - 1][2:] != label[2:]:
                        label = 'B-' + label[2:]
                ref_labels_tokens[token_idx] = label
                # use the B- or I- portion of the label for the generic label
                ref_labels_tokens_generic[token_idx] = label[:2] + 'LABEL'

            # by construction, we should never have an I- label without either an I- or B- label before it
            if token_idx == 0:
                assert label == 'O' or label.startswith('B-')
            else:
                if label.startswith('I-'):
                    # check that the previous label is of the same type
                    assert label[2:] == ref_labels_tokens[token_idx - 1][2:]

            # for preds
            label = next((x for x in pred_labels_char[token.idx: token.idx + len(token)] if x != 'O'), 'O')
            if label != 'O':
                # same whitespace heuristic: change an I- into a B- when it opens a new span
                if label[:2] == 'I-':
                    if token_idx == 0 or pred_labels_tokens[token_idx - 1][2:] != label[2:]:
                        label = 'B-' + label[2:]
                pred_labels_tokens[token_idx] = label
                # use the B- or I- portion of the label for the generic label
                pred_labels_tokens_generic[token_idx] = label[:2] + 'LABEL'

            # by construction, we should never have an I- label without either an I- or B- label before it
            if token_idx == 0:
                assert label == 'O' or label.startswith('B-')
            else:
                if label.startswith('I-'):
                    # check that the previous label is of the same type
                    assert label[2:] == pred_labels_tokens[token_idx - 1][2:]

        y_true_all.append(ref_labels_tokens)
        y_pred_all.append(pred_labels_tokens)

        y_true_generic.append(ref_labels_tokens_generic)
        y_pred_generic.append(pred_labels_tokens_generic)
+
|
141 |
+
|
142 |
+
|
143 |
+
# now we can evaluate using seqeval
|
144 |
+
# build report for printing
|
145 |
+
report_string_all = classification_report(y_true=y_true_all,
|
146 |
+
y_pred=y_pred_all,
|
147 |
+
scheme=IOB2,
|
148 |
+
zero_division=0.0,
|
149 |
+
output_dict=False
|
150 |
+
)
|
151 |
+
|
152 |
+
# return report as dict (can't do both at the same time? slight waste of compute)
|
153 |
+
report_dict_all = classification_report(y_true=y_true_all,
|
154 |
+
y_pred=y_pred_all,
|
155 |
+
scheme=IOB2,
|
156 |
+
zero_division=0.0,
|
157 |
+
output_dict=True
|
158 |
+
)
|
159 |
+
if print_reports:
|
160 |
+
print(report_string_all)
|
161 |
+
|
162 |
+
report_string_generic = classification_report(y_true=y_true_generic,
|
163 |
+
y_pred=y_pred_generic,
|
164 |
+
scheme=IOB2,
|
165 |
+
zero_division=0.0,
|
166 |
+
output_dict=False
|
167 |
+
)
|
168 |
+
|
169 |
+
# return report as dict (can't do both at the same time? slight waste of compute)
|
170 |
+
report_dict_generic = classification_report(y_true=y_true_generic,
|
171 |
+
y_pred=y_pred_generic,
|
172 |
+
scheme=IOB2,
|
173 |
+
zero_division=0.0,
|
174 |
+
output_dict=True
|
175 |
+
)
|
176 |
+
if print_reports:
|
177 |
+
print(report_string_generic)
|
178 |
+
|
179 |
+
return(report_dict_all, report_dict_generic)
|
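A matching usage sketch for this script (again not part of the commit; file names are hypothetical, and the records are assumed to carry the 'Identifier', 'Paragraph', 'Functions Text', 'Functions Label' and 'Functions Start End' keys the function reads):

import json

with open('references.jsonl') as f:    # hypothetical path
    references = [json.loads(line) for line in f]
with open('predictions.jsonl') as f:   # hypothetical path
    predictions = [json.loads(line) for line in f]

report_all, report_generic = evaluate_FOCAL_seqeval(references, predictions, print_reports=True)

# per-label span scores, e.g. report_all['Uses']['f1-score'],
# and location-only scores under the generic label, e.g. report_generic['LABEL']['f1-score']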