hojjat-m committed on
Commit a5c71bc
1 Parent(s): ae2bb3b

python file fixed

Files changed (2):
  1. PEYMA.py +128 -0
  2. README.md +36 -0
PEYMA.py ADDED
@@ -0,0 +1,128 @@
+ import json
+ import datasets
+ import os
+
+ _CITATION = """\
+ @article{shahshahani2018peyma,
+   title={PEYMA: A Tagged Corpus for Persian Named Entities},
+   author={Mahsa Sadat Shahshahani and Mahdi Mohseni and Azadeh Shakery and Heshaam Faili},
+   year=2018,
+   journal={ArXiv},
+   volume={abs/1801.09936}
+ }
+ """
+
+ _DESCRIPTION = """The PEYMA dataset includes 7,145 sentences with a total of 302,530 tokens, of which 41,148 are tagged with seven different classes."""
+
+ # Relative paths of the split files inside the dataset repository.
+ _DATA_PATH = {
+     "train": os.path.join("PEYMA", "data", "train.txt"),
+     "test": os.path.join("PEYMA", "data", "test.txt"),
+     "val": os.path.join("PEYMA", "data", "dev.txt"),
+ }
+
+
+ class PEYMAConfig(datasets.BuilderConfig):
+     """BuilderConfig for PEYMA."""
+
+     def __init__(self, **kwargs):
+         super(PEYMAConfig, self).__init__(**kwargs)
+
+
+ class PEYMA(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         PEYMAConfig(name="PEYMA", version=datasets.Version("1.0.0"), description="Persian NER dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "tags": datasets.Sequence(
+                         datasets.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B_DAT",
+                                 "B_LOC",
+                                 "B_MON",
+                                 "B_ORG",
+                                 "B_PCT",
+                                 "B_PER",
+                                 "B_TIM",
+                                 "I_DAT",
+                                 "I_LOC",
+                                 "I_MON",
+                                 "I_ORG",
+                                 "I_PCT",
+                                 "I_PER",
+                                 "I_TIM",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=("tokens", "tags"),
+             # Homepage of the dataset for documentation
+             homepage="https://hooshvare.github.io/docs/datasets/ner#peyma",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": _DATA_PATH["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": _DATA_PATH["test"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": _DATA_PATH["val"],
+                     "split": "validation",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         # Each data line is expected to look like "token|tag"; sentences are
+         # separated by blank lines.
+         with open(filepath, "r", encoding="utf-8") as f:
+             id_ = 0
+             tokens = []
+             ner_labels = []
+             for line in f:
+                 stripped_line = line.strip(" \n")  # strip away spaces AND newline characters
+                 if len(stripped_line) == 0:
+                     # An empty line marks the end of a sentence, so yield the
+                     # accumulated tokens and labels.
+                     if len(tokens) > 0 and len(ner_labels) > 0:
+                         yield id_, {
+                             "tokens": tokens,
+                             "tags": ner_labels,
+                         }
+                     else:
+                         # Do not yield if tokens or ner_labels is empty; this
+                         # happens when several empty lines are contiguous.
+                         continue
+                     # Increment the id and reset the tokens and ner_labels lists.
+                     id_ += 1
+                     tokens = []
+                     ner_labels = []
+                 else:
+                     try:
+                         # Split the stripped line so the label does not keep a trailing newline.
+                         token, ner_label = stripped_line.split("|")  # retrieve token and label
+                         tokens.append(token)
+                         ner_labels.append(ner_label)
+                     except ValueError:
+                         # Skip malformed lines that do not contain exactly one "|" separator.
+                         continue
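
For reference, here is a minimal usage sketch for this loading script. It assumes the script sits in a local clone of this dataset repository together with the PEYMA/data/{train,test,dev}.txt files referenced by _DATA_PATH; the path below is a placeholder, and newer versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

```python
# Minimal sketch (placeholder path): load the dataset via the PEYMA.py script.
from datasets import load_dataset

dataset = load_dataset("path/to/local/peyma-repo/PEYMA.py")  # hypothetical local path

print(dataset)                            # DatasetDict with train, test, and validation splits
print(dataset["train"].features["tags"])  # Sequence(ClassLabel(names=["O", "B_DAT", ...]))
print(dataset["train"][0]["tokens"][:5])  # first tokens of the first training sentence
```
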
README.md CHANGED
@@ -1,3 +1,39 @@
  ---
  license: apache-2.0
+ dataset_info:
+   config_name: PEYMA
+   features:
+   - name: tokens
+     sequence: string
+   - name: tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B_DAT
+           '2': B_LOC
+           '3': B_MON
+           '4': B_ORG
+           '5': B_PCT
+           '6': B_PER
+           '7': B_TIM
+           '8': I_DAT
+           '9': I_LOC
+           '10': I_MON
+           '11': I_ORG
+           '12': I_PCT
+           '13': I_PER
+           '14': I_TIM
+   splits:
+   - name: train
+     num_bytes: 4885030
+     num_examples: 8028
+   - name: test
+     num_bytes: 648919
+     num_examples: 1026
+   - name: validation
+     num_bytes: 535910
+     num_examples: 925
+   download_size: 0
+   dataset_size: 6069859
  ---
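
The names mapping in this metadata mirrors the ClassLabel order declared in PEYMA.py, so the integer ids stored in the generated splits decode back to these label strings. A small illustrative sketch of that mapping (built only from the label list above; variable names are ours):

```python
# Illustrative sketch: the id <-> label mapping implied by the metadata above.
from datasets import ClassLabel

tag_labels = ClassLabel(names=[
    "O", "B_DAT", "B_LOC", "B_MON", "B_ORG", "B_PCT", "B_PER", "B_TIM",
    "I_DAT", "I_LOC", "I_MON", "I_ORG", "I_PCT", "I_PER", "I_TIM",
])

print(tag_labels.str2int("B_PER"))  # 6, matching '6': B_PER above
print(tag_labels.int2str(8))        # 'I_DAT'
```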