Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
dipteshkanojia committed on
Commit
ecaed14
1 Parent(s): 7923436

Rename PLOD-CW_old.py to PLOD-CW.py

Browse files
Files changed (1) hide show
  1. PLOD-CW_old.py → PLOD-CW.py +1 -80
PLOD-CW_old.py → PLOD-CW.py RENAMED
@@ -81,83 +81,4 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
81
  homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
82
  citation=_CITATION,
83
  )
84
- _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-CW/resolve/main/data/"
85
- _URLS = {
86
- "train": _URL + "PLOS-train70-filtered-pos_bio.json",
87
- "dev": _URL + "PLOS-val15-filtered-pos_bio.json",
88
- "test": _URL + "PLOS-test15-filtered-pos_bio.json"
89
- }
90
-
91
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
92
- urls_to_download = self._URLS
93
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
94
-
95
- return [
96
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
97
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
98
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
99
- ]
100
-
101
- def _generate_examples(self, filepath):
102
- """This function returns the examples in the raw (text) form."""
103
- logger.info("generating examples from = %s", filepath)
104
- with open(filepath) as f:
105
- plod = json.load(f)
106
- for object in plod:
107
- id_ = int(object['id'])
108
- yield id_, {
109
- "id": str(id_),
110
- "tokens": object['tokens'],
111
- "pos_tags": object['pos_tags'],
112
- "ner_tags": object['ner_tags'],
113
- }
114
- _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-CW/resolve/main/data/"
115
- _URLS = {
116
- "train": _URL + "train.conll",
117
- "dev": _URL + "dev.conll",
118
- "test": _URL + "test.conll"
119
- }
120
-
121
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
122
- urls_to_download = self._URLS
123
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
124
-
125
- return [
126
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
127
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
128
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
129
- ]
130
-
131
- def _generate_examples(self, filepath):
132
- logger.info("⏳ Generating examples from = %s", filepath)
133
- with open(filepath, encoding="utf-8") as f:
134
- guid = 0
135
- tokens = []
136
- pos_tags = []
137
- ner_tags = []
138
- for line in f:
139
- if line.startswith("-DOCSTART-") or line == "" or line == "\n":
140
- if tokens:
141
- yield guid, {
142
- "id": str(guid),
143
- "tokens": tokens,
144
- "pos_tags": pos_tags,
145
- "ner_tags": ner_tags,
146
- }
147
- guid += 1
148
- tokens = []
149
- pos_tags = []
150
- ner_tags = []
151
- else:
152
- splits = line.split(" ")
153
- tokens.append(splits[0])
154
- pos_tags.append(splits[1])
155
- ner_tags.append(splits[2].rstrip())
156
- # last example
157
- if tokens:
158
- yield guid, {
159
- "id": str(guid),
160
- "tokens": tokens,
161
- "pos_tags": pos_tags,
162
- "ner_tags": ner_tags,
163
- }
 
81
  homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
82
  citation=_CITATION,
83
  )
84
+