Tasks: Token Classification (Sub-task: named-entity-recognition)
Modalities: Text
Formats: parquet
Languages: English
Size: 1K - 10K
dipteshkanojia committed
Commit: a5649cf
Parent(s): 497dc80

Rename PLOD-CW.py to PLOD-CW_old.py

Files changed: PLOD-CW.py → PLOD-CW_old.py (+29 -0)
PLOD-CW.py → PLOD-CW_old.py
RENAMED
@@ -81,7 +81,36 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
             homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
             citation=_CITATION,
         )
+    _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-CW/resolve/main/data/"
+    _URLS = {
+        "train": _URL + "PLOS-train70-filtered-pos_bio.json",
+        "dev": _URL + "PLOS-val15-filtered-pos_bio.json",
+        "test": _URL + "PLOS-test15-filtered-pos_bio.json"
+    }
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        urls_to_download = self._URLS
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
+        ]
+
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        logger.info("generating examples from = %s", filepath)
+        with open(filepath) as f:
+            plod = json.load(f)
+            for object in plod:
+                id_ = int(object['id'])
+                yield id_, {
+                    "id": str(id_),
+                    "tokens": object['tokens'],
+                    "pos_tags": object['pos_tags'],
+                    "ner_tags": object['ner_tags'],
+                }
     _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-CW/resolve/main/data/"
     _URLS = {
         "train": _URL + "train.conll",