-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcreateNERModel.py
More file actions
132 lines (101 loc) · 2.89 KB
/
createNERModel.py
File metadata and controls
132 lines (101 loc) · 2.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
import spacy
from TASTEset.src.utils import prepare_data, ENTITIES
# Quick sanity check that the script started.
print("hello world!")

# Load recipe texts and their character-offset entity annotations
# from the TASTEset CSV.
recipes, entities = prepare_data("TASTEset/data/TASTEset.csv")

# Inspect the first example: raw text, the label inventory, the raw
# (start, end, label) triples, and the decoded entity substrings.
print(recipes[0])
print(ENTITIES)
print(entities[0])
first_decoded = [f"{recipes[0][start:end]}: {ent}" for start, end, ent in entities[0]]
print(first_decoded)

# Flatten each recipe onto a single line (newlines would shift the
# character offsets' meaning for display) and pair it with its entities.
annotations = []
for recipe, ents in zip(recipes, entities):
    annotations.append({'text': ' '.join(recipe.splitlines()), 'entities': ents})

# Bundle in the shape spaCy annotation tooling conventionally uses.
training_data = {'classes': ENTITIES, 'annotations': annotations}
from spacy.tokens import DocBin
# Blank English pipeline: tokenizer only, used just to build Doc objects.
nlp = spacy.blank("en")


def doc_from_annotations(annotations):
    """Convert annotated examples into a spaCy DocBin.

    Parameters
    ----------
    annotations : list[dict]
        Each dict has 'text' (str) and 'entities', a list of
        (start_char, end_char, label) triples.

    Returns
    -------
    DocBin
        One Doc per example with its entity spans attached.

    Notes
    -----
    ``Doc.char_span`` returns ``None`` when the character offsets do not
    line up with token boundaries. The original code appended that
    ``None`` to ``ents``, so the ``doc.ents = ents`` assignment raised
    ``TypeError`` and a single misaligned annotation crashed the whole
    conversion. Misaligned spans are now reported and skipped instead.
    """
    doc_bin = DocBin()
    for example in annotations:
        doc = nlp.make_doc(example['text'])
        ents = []
        for start, end, label in example['entities']:
            span = doc.char_span(start, end, label)
            if span is None:
                # Offsets fall inside a token; skip rather than crash.
                print(f"Skipping misaligned entity ({start}, {end}, {label})")
                continue
            ents.append(span)
        # NOTE(review): overlapping spans would still raise here —
        # assumed the TASTEset annotations are non-overlapping; confirm.
        doc.ents = ents
        doc_bin.add(doc)
    return doc_bin
# Hold out the last 20% of the annotated recipes as the dev set.
split_idx = int(0.8 * len(annotations))
print("training length=", split_idx)

# Serialize each partition to the .spacy format expected by `spacy train`.
train_bin = doc_from_annotations(annotations[:split_idx])
train_bin.to_disk("nerfr_train.spacy")

dev_bin = doc_from_annotations(annotations[split_idx:])
dev_bin.to_disk("nerfr_dev.spacy")
# Base config for efficiency optimization
# This is an auto-generated partial config. To use it with 'spacy train'
# you can run spacy init fill-config to auto-fill all default settings:
# python -m spacy init fill-config ./base_config.cfg ./config.cfg
BASE_CONFIG = """[paths]
train = nerfr_train.spacy
dev = nerfr_dev.spacy
vectors = null
[system]
gpu_allocator = null
[nlp]
lang = "en"
pipeline = ["tok2vec","ner"]
batch_size = 1000
[components]
[components.tok2vec]
factory = "tok2vec"
[components.tok2vec.model]
@architectures = "spacy.Tok2Vec.v2"
[components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = ${components.tok2vec.model.encode.width}
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
rows = [5000, 1000, 2500, 2500]
include_static_vectors = false
[components.tok2vec.model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = 96
depth = 4
window_size = 1
maxout_pieces = 3
[components.ner]
factory = "ner"
[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = true
nO = null
[components.ner.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}
[corpora]
[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 0
[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
[training]
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
[training.optimizer]
@optimizers = "Adam.v1"
[training.batcher]
@batchers = "spacy.batch_by_words.v1"
discard_oversize = false
tolerance = 0.2
[training.batcher.size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001
[initialize]
vectors = ${paths.vectors}
"""

# Write the partial config for `spacy init fill-config`. Explicit UTF-8
# avoids depending on the platform/locale default encoding; the content
# is ASCII, so output bytes are unchanged where UTF-8 was the default.
with open("base_config.cfg", 'w', encoding="utf-8") as f:
    f.write(BASE_CONFIG)