import os

import datasets

_CITATION = """\
@article{shahshahani2018peyma,
title={PEYMA: A Tagged Corpus for Persian Named Entities},
author={Mahsa Sadat Shahshahani and Mahdi Mohseni and Azadeh Shakery and Heshaam Faili},
year=2018,
journal={ArXiv},
volume={abs/1801.09936}
}
"""
_DESCRIPTION = """PEYMA dataset includes 7,145 sentences with a total of 302,530 tokens from which 41,148 tokens are tagged with seven different classes."""
_DATA_PATH = {
'train': os.path.join('data', 'train.txt'),
'test': os.path.join('data', 'test.txt'),
'val': os.path.join('data', 'dev.txt')
}
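# Each data file is pipe-delimited: one "token|tag" pair per line, with a blank
# line between sentences (the format parsed by _generate_examples below).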


class PEYMAConfig(datasets.BuilderConfig):
    """BuilderConfig for PEYMA; all options come from the base BuilderConfig."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class PEYMA(datasets.GeneratorBasedBuilder):
    """Dataset builder for PEYMA, a Persian named-entity recognition corpus."""

    BUILDER_CONFIGS = [
        PEYMAConfig(name="PEYMA", version=datasets.Version("1.0.0"), description="Persian NER dataset"),
    ]

    def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
            # Feature schema: a token sequence and an aligned sequence of NER tags.
features=datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"tags": datasets.Sequence(
datasets.ClassLabel(
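                            # Seven entity classes in IOB format with "_" as the
                            # separator: date, location, money, organization,
                            # percent, person, time.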
names=[
"O",
"B_DAT",
"B_LOC",
"B_MON",
"B_ORG",
"B_PCT",
"B_PER",
"B_TIM",
"I_DAT",
"I_LOC",
"I_MON",
"I_ORG",
"I_PCT",
"I_PER",
"I_TIM",
]
)
),
}
),
supervised_keys=('tokens', 'tags'),
# Homepage of the dataset for documentation
homepage="https://hooshvare.github.io/docs/datasets/ner#peyma",
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": _DATA_PATH["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": _DATA_PATH["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": _DATA_PATH["val"],
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        with open(filepath, "r", encoding="utf-8") as f:
            id_ = 0
            tokens = []
            ner_labels = []
            for line in f:
                stripped_line = line.strip(" \n")  # strip spaces AND newline characters
                if len(stripped_line) == 0:
                    # An empty line marks the end of a sentence, so yield the
                    # accumulated tokens and labels.
                    if len(tokens) > 0 and len(ner_labels) > 0:
                        yield id_, {
                            "tokens": tokens,
                            "tags": ner_labels,
                        }
                    else:
                        # Do not yield if tokens or ner_labels is empty, which
                        # happens when several empty lines are contiguous.
                        continue
                    # Then increment the id and reset the tokens and ner_labels lists.
                    id_ += 1
                    tokens = []
                    ner_labels = []
                else:
                    try:
                        # Split the stripped line so the label does not carry a
                        # trailing newline (splitting the raw line would).
                        token, ner_label = stripped_line.split("|")
                        tokens.append(token)
                        ner_labels.append(ner_label)
                    except ValueError:
                        # Skip malformed lines without exactly one "|" separator.
                        continue
            # Yield the last sentence if the file does not end with a blank line.
            if len(tokens) > 0 and len(ner_labels) > 0:
                yield id_, {
                    "tokens": tokens,
                    "tags": ner_labels,
                }
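

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the loading script).
# It assumes this file and its data/ directory are available locally and
# loads the script by path rather than by a hub id, which is not confirmed
# here. The datasets API calls below (load_dataset, ClassLabel.names) are
# standard.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    peyma = load_dataset("PEYMA.py")  # adjust the path to this script as needed
    sample = peyma["train"][0]
    tag_names = peyma["train"].features["tags"].feature.names
    print(sample["tokens"])
    print([tag_names[i] for i in sample["tags"]])  # decode ClassLabel ids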