Mohammad Al-Fetyani committed on
Commit
8f287d0
1 Parent(s): 1701327

Add dataset loader

Browse files
Files changed (1) hide show
  1. names.py +81 -0
names.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Dataset that includes list of Arabic first names with meaning and origin of most names"""
16
+
17
+
18
+ import csv
19
+ import os
20
+
21
+ import datasets
22
+
23
# BibTeX entry crediting the Maha processing library, the source of this
# dataset (string content is emitted verbatim in DatasetInfo — do not edit).
_CITATION = """\
@software{Al-Fetyani_Maha_Processing_Library_2021,
author = {Al-Fetyani, Mohammad},
month = {11},
title = {{Maha Processing Library}},
url = {https://github.com/TRoboto/Maha},
year = {2021}
}
"""

# One-line human-readable summary used as DatasetInfo.description.
_DESCRIPTION = """\
List of Arabic first names with meaning and origin of most names
"""

# File name of the training split, resolved under the "data" directory
# by Names._split_generators.
_TRAIN_FILE_NAME = "train.tsv"
38
+
39
+
40
class Names(datasets.GeneratorBasedBuilder):
    """List of Arabic first names with meaning and origin of most names."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata.

        Each example has three string columns: ``name``, ``description``
        (the meaning of the name) and ``origin``.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "name": datasets.Value("string"),
                    "description": datasets.Value("string"),
                    "origin": datasets.Value("string"),
                }
            ),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Declare the single TRAIN split backed by data/train.tsv.

        NOTE(review): the file is expected to ship alongside the script
        (no dl_manager download) — confirm the repo layout provides it.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join("data", _TRAIN_FILE_NAME),
                    "split": datasets.Split.TRAIN,
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(row_id, example)`` pairs from the TSV at *filepath*.

        Rows with any missing field are skipped.

        BUG FIX: the original used ``csv.reader`` (which yields *lists*)
        yet indexed rows by column name (``row["name"]``) and iterated
        ``for field in row`` using cell values as subscripts — a
        ``TypeError`` on every data row. ``csv.DictReader`` provides the
        mapping access the code expects and consumes the header line
        itself, replacing the manual ``next()``.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for row_id, row in enumerate(reader):
                # DictReader maps absent trailing fields to None; skip
                # any incomplete row rather than emit partial examples.
                if any(value is None for value in row.values()):
                    continue
                yield row_id, {
                    "name": row["name"],
                    "description": row["description"],
                    "origin": row["origin"],
                }