# PLOD-filtered / PLOD-filtered.py
# Loading script for the surrey-nlp/PLOD-filtered dataset (PLOD: abbreviation
# detection, LREC 2022) on the Hugging Face Hub.
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """
"""
_DESCRIPTION = """
This is the dataset repository for the PLOD dataset, accepted for publication at LREC 2022.
The dataset can help build sequence-labelling models for the task of abbreviation detection.
"""
_TRAINING_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-train70-filtered-pos_bio.json"
_DEV_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-val15-filtered-pos_bio.json"
_TEST_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-test15-filtered-pos_bio.json"
class PLODfilteredConfig(datasets.BuilderConfig):
    """BuilderConfig for the PLOD-filtered dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for PLOD-filtered.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(PLODfilteredConfig, self).__init__(**kwargs)
class PLODfiltered(datasets.GeneratorBasedBuilder):
"""PLOD Filtered dataset."""
BUILDER_CONFIGS = [
PLODfilteredConfig(name="PLODfiltered", version=datasets.Version("0.0.2"), description="PLOD filtered dataset"),
]
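    # A single default configuration: with only one entry in BUILDER_CONFIGS,
    # datasets.load_dataset selects it automatically when no config name is given.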
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"pos_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"ADJ",
"ADP",
"ADV",
"AUX",
"CONJ",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
"SPACE"
]
)
),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"B-O",
"B-AC",
"I-AC",
"B-LF",
"I-LF"
]
)
),
}
),
supervised_keys=None,
homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
citation=_CITATION,
)
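    # Decoding sketch (assumption: illustrative only): ClassLabel features store
    # integer ids; the string names can be recovered from the feature metadata.
    #
    #   ner_names = dataset["train"].features["ner_tags"].feature.names
    #   tags = [ner_names[i] for i in dataset["train"][0]["ner_tags"]]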
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
downloaded_train = dl_manager.download_and_extract(_TRAINING_FILE_URL)
downloaded_val = dl_manager.download_and_extract(_DEV_FILE_URL)
downloaded_test = dl_manager.download_and_extract(_TEST_FILE_URL)
        # Point each split at the locally cached download, not the bare filename.
        data_files = {
            "train": downloaded_train,
            "dev": downloaded_val,
            "test": downloaded_test,
        }
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
]
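    # Expected file format (assumption, inferred from the parsing below): despite
    # the .json extension, each split file is read as CoNLL-style plain text with
    # one space-separated "token pos_tag ner_tag" triple per line and blank lines
    # between sentences, e.g.:
    #
    #   Protein NOUN B-LF
    #   Data PROPN I-LF
    #   Bank PROPN I-LF
    #   ( PUNCT B-O
    #   PDB PROPN B-AC
    #   ) PUNCT B-O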
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
pos_tags = []
ner_tags = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"pos_tags": pos_tags,
"ner_tags": ner_tags,
}
guid += 1
tokens = []
pos_tags = []
ner_tags = []
else:
                    # CoNLL-style line: space-separated "token pos_tag ner_tag" columns
splits = line.split(" ")
tokens.append(splits[0])
pos_tags.append(splits[1].strip())
ner_tags.append(splits[2].strip())
            # last example (guarded so a trailing blank line does not yield an
            # empty example)
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "ner_tags": ner_tags,
                }
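# Local smoke-test sketch (assumption: run from a directory containing this
# script; load_dataset also accepts a path to a loading script):
#
#   from datasets import load_dataset
#   ds = load_dataset("PLOD-filtered/PLOD-filtered.py")
#   print(ds)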