hojjat-m committed on
Commit
135883e
1 Parent(s): eafc946

Create PEYMA.py

Files changed (1)
  1. PEYMA.py +79 -0
PEYMA.py ADDED
@@ -0,0 +1,79 @@
import os

import datasets
import pandas as pd
import requests

_CITATION = """\
@article{shahshahani2018peyma,
  title={PEYMA: A Tagged Corpus for Persian Named Entities},
  author={Mahsa Sadat Shahshahani and Mahdi Mohseni and Azadeh Shakery and Heshaam Faili},
  year=2018,
  journal={ArXiv},
  volume={abs/1801.09936}
}
"""

_DESCRIPTION = """\
The PEYMA dataset includes 7,145 sentences with a total of 302,530 tokens, of which 41,148 are tagged with seven different classes.
"""

_DRIVE_URL = "https://drive.google.com/uc?export=download&id=1WZxpFRtEs5HZWyWQ2Pyg9CCuIBs1Kmvx"


class PEYMAConfig(datasets.BuilderConfig):
    """BuilderConfig for PEYMA."""

    def __init__(self, **kwargs):
        super(PEYMAConfig, self).__init__(**kwargs)


class PEYMA(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        PEYMAConfig(name="PEYMA", version=datasets.Version("1.0.0"), description="Persian NER dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # Two string columns: the token and its NER tag.
            features=datasets.Features(
                {
                    "token": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage="https://hooshvare.github.io/docs/datasets/ner#peyma",
            citation=_CITATION,
        )

    def custom_dataset(self, src_url, dest_path):
        # Download the archive from Google Drive to the path chosen by the download manager.
        response = requests.get(src_url)
        response.raise_for_status()

        with open(dest_path, "wb") as f:
            f.write(response.content)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager used to
        # download and extract URLs.
        downloaded_file = dl_manager.download_custom(_DRIVE_URL, self.custom_dataset)
        extracted_file = dl_manager.extract(downloaded_file)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(extracted_file, "peyma/train.txt")}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(extracted_file, "peyma/test.txt")}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(extracted_file, "peyma/dev.txt")}),
        ]

    def _generate_examples(self, filepath):
        # Each line of the source files is a pipe-separated "token|label" pair;
        # malformed lines are skipped (on_bad_lines requires pandas >= 1.3).
        df = pd.read_csv(filepath, sep="|", names=["token", "label"],
                         engine="python", on_bad_lines="skip")
        for idx, row in df.iterrows():
            yield idx, {
                "token": row["token"],
                "label": row["label"],
            }
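
Once the script above is saved locally, the builder can be exercised directly. A minimal usage sketch, assuming the file sits in the working directory as PEYMA.py and a `datasets` version that still supports loading local dataset scripts:

from datasets import load_dataset

# Build all three splits (train/test/validation) from the local loading script.
peyma = load_dataset("PEYMA.py")
print(peyma["train"][0])  # e.g. {"token": "...", "label": "..."}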