harsh13333 committed on
Commit 55175ae
1 Parent(s): 931262a

Create shipping_label_ner.py

Files changed (1)
  1. shipping_label_ner.py +172 -0
shipping_label_ner.py ADDED
@@ -0,0 +1,172 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """The WNUT 17 Emerging Entities Dataset."""
+
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{derczynski-etal-2017-results,
+     title = "Results of the {WNUT}2017 Shared Task on Novel and Emerging Entity Recognition",
+     author = "Derczynski, Leon and
+       Nichols, Eric and
+       van Erp, Marieke and
+       Limsopatham, Nut",
+     booktitle = "Proceedings of the 3rd Workshop on Noisy User-generated Text",
+     month = sep,
+     year = "2017",
+     address = "Copenhagen, Denmark",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/W17-4418",
+     doi = "10.18653/v1/W17-4418",
+     pages = "140--147",
+     abstract = "This shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
+     Named entities form the basis of many modern approaches to other tasks (like event clustering and summarization),
+     but recall on them is a real problem in noisy text - even among annotators.
+     This drop tends to be due to novel entities and surface forms.
+     Take for example the tweet {``}so.. kktny in 30 mins?!{''} {--} even human experts find the entity {`}kktny{'}
+     hard to detect and resolve. The goal of this task is to provide a definition of emerging and of rare entities,
+     and based on that, also datasets for detecting these entities. The task as described in this paper evaluated the
+     ability of participating entries to detect and classify novel and emerging named entities in noisy text.",
+ }
+ """
+
+ _DESCRIPTION = """\
+ WNUT 17: Emerging and Rare entity recognition
+
+ This shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
+ Named entities form the basis of many modern approaches to other tasks (like event clustering and summarisation),
+ but recall on them is a real problem in noisy text - even among annotators. This drop tends to be due to novel entities and surface forms.
+ Take for example the tweet “so.. kktny in 30 mins?” - even human experts find entity kktny hard to detect and resolve.
+ This task will evaluate the ability to detect and classify novel, emerging, singleton named entities in noisy text.
+
+ The goal of this task is to provide a definition of emerging and of rare entities, and based on that, also datasets for detecting these entities.
+ """
+
+ _URL = "https://raw.githubusercontent.com/SanghaviHarshPankajkumar/shipping_label_project/main/"
+ _TRAINING_FILE = "train.txt"
+ _TEST_FILE = "test.txt"
+
+
+ class WNUT_17Config(datasets.BuilderConfig):
+     """The WNUT 17 Emerging Entities Dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for WNUT 17.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(WNUT_17Config, self).__init__(**kwargs)
+
+
+ class WNUT_17(datasets.GeneratorBasedBuilder):
+     """The WNUT 17 Emerging Entities Dataset."""
+
+     BUILDER_CONFIGS = [
+         WNUT_17Config(
+             name="wnut_17", version=datasets.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-corporation",
+                                 "I-corporation",
+                                 "B-creative-work",
+                                 "I-creative-work",
+                                 "B-group",
+                                 "I-group",
+                                 "B-location",
+                                 "I-location",
+                                 "B-person",
+                                 "I-person",
+                                 "B-product",
+                                 "I-product",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="http://noisy-text.github.io/2017/emerging-rare-entities.html",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             current_tokens = []
+             current_labels = []
+             sentence_counter = 0
+             for row in f:
+                 row = row.rstrip()
+                 if row:
+                     token, label = row.split("\t")
+                     current_tokens.append(token)
+                     current_labels.append(label)
+                 else:
+                     # New sentence
+                     if not current_tokens:
+                         # Consecutive empty lines will cause empty sentences
+                         continue
+                     assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_tokens,
+                             "ner_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_tokens = []
+                     current_labels = []
+                     yield sentence
+             # Don't forget last sentence in dataset 🧐
+             if current_tokens:
+                 yield sentence_counter, {
+                     "id": str(sentence_counter),
+                     "tokens": current_tokens,
+                     "ner_tags": current_labels,
+                 }
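
For reference, _generate_examples above expects the downloaded train.txt and test.txt to be CoNLL-style plain text: one token and its tag per line, separated by a single tab character, with a blank line closing each sentence. A hypothetical fragment (tokens and tags invented for illustration, using the tag set declared in _info):

    Acme	B-corporation
    Logistics	I-corporation
    ships	O
    to	O
    Copenhagen	B-location

    John	B-person
    Smith	I-person
    signed	O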
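And a minimal loading sketch (assumptions: the script is saved locally as shipping_label_ner.py, the train.txt/test.txt files are reachable at _URL, and the installed datasets version still supports loader scripts):

    from datasets import load_dataset

    # Point load_dataset at the local builder script; the train and test
    # splits come from _split_generators above.
    dataset = load_dataset("shipping_label_ner.py")

    example = dataset["train"][0]
    print(example["id"])        # "0"
    print(example["tokens"])    # list of token strings
    print(example["ner_tags"])  # integer indices into the ClassLabel names from _info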