data.py
import numpy as np


def tokenize(example, tokenizer, label2id, max_length):
    # Rebuild the original text from word-level tokens, expanding each
    # word-level label to one label per character so character offsets can
    # be mapped back to labels after subword tokenization.
    text = []
    labels = []
    for t, l, ws in zip(
        example["tokens"], example["provided_labels"], example["trailing_whitespace"]
    ):
        text.append(t)
        labels.extend([l] * len(t))
        if ws:
            text.append(" ")
            labels.append("O")

    text = "".join(text)
    labels = np.array(labels)

    # Actual tokenization: truncation=True is required for max_length to take
    # effect, and offset_mapping gives the character span of each subword token.
    tokenized = tokenizer(
        text,
        return_offsets_mapping=True,
        truncation=True,
        max_length=max_length,
    )

    token_labels = []
    for start_idx, end_idx in tokenized.offset_mapping:
        # Special tokens such as CLS have a (0, 0) offset: label them "O".
        if start_idx == 0 and end_idx == 0:
            token_labels.append(label2id["O"])
            continue

        # If the token starts with whitespace, shift to the first real character.
        if text[start_idx].isspace():
            start_idx += 1

        token_labels.append(label2id[labels[start_idx]])

    length = len(tokenized.input_ids)

    return {**tokenized, "labels": token_labels, "length": length}
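

if __name__ == "__main__":
    # Usage sketch, not part of the original file: one way tokenize() might be
    # applied with Hugging Face `datasets` and `transformers`. The checkpoint
    # name, the label set, and the max_length value below are illustrative
    # assumptions, not values taken from this repository.
    from datasets import Dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")

    # Hypothetical label set; a real run would derive it from the training data.
    all_labels = ["O", "B-NAME_STUDENT", "I-NAME_STUDENT"]
    label2id = {label: i for i, label in enumerate(all_labels)}

    ds = Dataset.from_dict(
        {
            "tokens": [["Alice", "lives", "here", "."]],
            "provided_labels": [["B-NAME_STUDENT", "O", "O", "O"]],
            "trailing_whitespace": [[True, True, False, False]],
        }
    )
    ds = ds.map(
        tokenize,
        fn_kwargs={"tokenizer": tokenizer, "label2id": label2id, "max_length": 512},
    )
    print(ds[0]["labels"], ds[0]["length"])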