parquet-converter committed
Commit 822f8d5 • 1 Parent(s): ee42464

Update parquet files

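This commit replaces the zip archives and the mg2_data.py loading script with parquet shards under default/, so the dataset loads directly through the datasets library instead of executing custom code. A minimal sketch of loading the converted dataset, assuming a hypothetical repo id "owner/mgb2" (the actual repository name does not appear in this commit):

    # Minimal sketch: load the parquet-converted dataset.
    # "owner/mgb2" is a placeholder repo id, not taken from this commit.
    from datasets import load_dataset

    ds = load_dataset("owner/mgb2")
    print(ds)                         # expected splits: train / validation / test
    print(ds["test"][0]["sentence"])  # "sentence" column from the old script's schema
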
Data/archives/mgb2_wav.test.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9ac233a5615a8f7cb80bb2fa23baf3440cd3cf8abca00498df036505fbf1530b
- size 973854088

Data/archives/mgb2_wav.train.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c6b09a2e4c8462a3311a2007292e60866a771525a1094ee0e06d69c92b7abb7b
- size 1878461786

Data/archives/mgb2_txt.test.zip → default/mg2_data-test-00000-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b902d250f308e7c88f5916c04a0a1c00117c0847430c47d622d2a4467cea5e1f
- size 1990292
+ oid sha256:42ad0ec6d545136bcb0c0ae2bae6b6a0d65e1be7ea381ea012abf3f5213f29f9
+ size 610061470

Data/archives/mgb2_txt.train.zip → default/mg2_data-test-00001-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:193d19de9e5f41fae1a006c16b69667bcba87e7036b5fd9d59c27cf90f4ced8c
- size 2345723
+ oid sha256:442adaa8f01f1ef08ac2346f25bd5f0c05aabb0db79ea2acfa9f8e1723fda46e
+ size 551615745

Data/archives/mgb2_wav.dev.zip → default/mg2_data-train-00000-of-00004.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a5a873f99a623edfd87f9095269db3b4e52a171f757db4e53fba1c557a70480
- size 980264478
+ oid sha256:297e57699d696163d36ba0040b199a8889a13020d5276bb20df51cdc9f638fcc
+ size 630919911

default/mg2_data-train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2266911a421fc4b5d89da14a8d2c016ff0a2e9a127549e2e5160c9299ded5886
+ size 712639960

default/mg2_data-train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce6ce23881009a89fdf51d00c09e8dfa129c31a6d176cd62b84e871c2b51ec32
+ size 732015172

Data/archives/mgb2_txt.dev.zip → default/mg2_data-train-00003-of-00004.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5f384ebed4dc134c872db84f513c4034098cd8f622d787d23703e555cec6eca8
- size 1973980
+ oid sha256:ba8eb9dbe5b1cc0c4708f5a98092c131932efd1a103456980f3f805a679f850c
+ size 69129734

default/mg2_data-validation-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a7cd7425162f2bf0f6b83d0619571514a2a2917c7afd4093010ab6d44dce57c
+ size 579600253

default/mg2_data-validation-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f0a17fe8492fb691adbb4c14724db3059dab3169d00dcd71fc0102f21a9cab4
+ size 562415466

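Note that the .parquet entries above are Git LFS pointer files (version / oid / size), not the parquet data itself. A small sketch of inspecting one shard locally, assuming the LFS objects have already been fetched (e.g. via git lfs pull):

    # Sketch: inspect one converted shard with pyarrow, assuming the
    # LFS object behind the pointer file has been downloaded.
    import pyarrow.parquet as pq

    table = pq.read_table("default/mg2_data-test-00000-of-00002.parquet")
    print(table.schema)     # expected columns: path, audio, sentence
    print(table.num_rows)
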
mg2_data.py DELETED
@@ -1,250 +0,0 @@
- # -*- coding: utf-8 -*-
- """Untitled2.ipynb
-
- Automatically generated by Colaboratory.
-
- Original file is located at
-     https://colab.research.google.com/drive/1Jy8fwFO774TM_FTwK-0to2L0qHoUAT-U
- """
-
- # -*- coding: utf-8 -*-
- """MGB2.ipynb
- Automatically generated by Colaboratory.
- Original file is located at
-     https://colab.research.google.com/drive/15ejoy2EWN9bj2s5ORQRZb5aTmFlcgA9d
- """
-
- import datasets
- import os
-
-
- _DESCRIPTION = "MGB2 speech recognition dataset AR"
- _HOMEPAGE = "https://arabicspeech.org/mgb2/"
- _LICENSE = "MGB-2 License agreement"
- _CITATION = """@misc{https://doi.org/10.48550/arxiv.1609.05625,
-   doi = {10.48550/ARXIV.1609.05625},
-   url = {https://arxiv.org/abs/1609.05625},
-   author = {Ali, Ahmed and Bell, Peter and Glass, James and Messaoui, Yacine and Mubarak, Hamdy and Renals, Steve and Zhang, Yifan},
-   keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
-   title = {The MGB-2 Challenge: Arabic Multi-Dialect Broadcast Media Recognition},
-   publisher = {arXiv},
-   year = {2016},
-   copyright = {arXiv.org perpetual, non-exclusive license}
- }
- """
- _DATA_ARCHIVE_ROOT = "Data/archives/"
- _DATA_URL = {
-     "test": _DATA_ARCHIVE_ROOT + "mgb2_wav.test.zip",
-     "dev": _DATA_ARCHIVE_ROOT + "mgb2_wav.dev.zip",
-     "train": _DATA_ARCHIVE_ROOT + "mgb2_wav.train.zip",
-     # "train": [_DATA_ARCHIVE_ROOT + f"mgb2_wav_{x}.train.tar.gz" for x in range(48)],  # we have 48 archives
- }
- _TEXT_URL = {
-     "test": _DATA_ARCHIVE_ROOT + "mgb2_txt.test.zip",
-     "dev": _DATA_ARCHIVE_ROOT + "mgb2_txt.dev.zip",
-     "train": _DATA_ARCHIVE_ROOT + "mgb2_txt.train.zip",
- }
-
-
- def absoluteFilePaths(directory):
-     for dirpath, _, filenames in os.walk(directory):
-         for f in filenames:
-             yield os.path.abspath(os.path.join(dirpath, f))
-
-
- class MGDB2Dataset(datasets.GeneratorBasedBuilder):
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "path": datasets.Value("string"),
-                     "audio": datasets.Audio(sampling_rate=16_000),
-                     "sentence": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         wav_archive = dl_manager.download(_DATA_URL)
-         txt_archive = dl_manager.download(_TEXT_URL)
-         test_dir = "dataset/test"
-         dev_dir = "dataset/dev"
-         train_dir = "dataset/train"
-
-         print("Starting write datasets.........................................................")
-
-         if dl_manager.is_streaming:
-             print("from streaming.........................................................")
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "path_to_txt": test_dir + "/txt",
-                         "path_to_wav": test_dir + "/wav",
-                         "wav_files": dl_manager.iter_archive(wav_archive['test']),
-                         "txt_files": dl_manager.iter_archive(txt_archive['test']),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "path_to_txt": dev_dir + "/txt",
-                         "path_to_wav": dev_dir + "/wav",
-                         "wav_files": dl_manager.iter_archive(wav_archive['dev']),
-                         "txt_files": dl_manager.iter_archive(txt_archive['dev']),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "path_to_txt": train_dir + "/txt",
-                         "path_to_wav": train_dir + "/wav",
-                         "wav_files": dl_manager.iter_archive(wav_archive['train']),
-                         "txt_files": dl_manager.iter_archive(txt_archive['train']),
-                     },
-                 ),
-             ]
-         else:
-             print("from non streaming.........................................................")
-
-             dstZipFileName = txt_archive['test']
-             sz = os.path.getsize(dstZipFileName)
-             print("file size=", sz)
-
-             # test_txt_files = dl_manager.extract(txt_archive['test'])
-             # flist = os.listdir(test_txt_files)
-             # print(flist)
-             # f = open(test_txt_files, 'r')
-             # file_contents = f.read()
-             # print(file_contents)
-             # f.close()
-
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "path_to_txt": test_dir + "/txt",
-                         "path_to_wav": test_dir + "/wav",
-                         "wav_files": absoluteFilePaths(dl_manager.extract(wav_archive['test'])),
-                         "txt_files": absoluteFilePaths(dl_manager.extract(txt_archive['test'])),
-                         "data_type": 2,
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={
-                         "path_to_txt": dev_dir + "/txt",
-                         "path_to_wav": dev_dir + "/wav",
-                         "wav_files": absoluteFilePaths(dl_manager.extract(wav_archive['dev'])),
-                         "txt_files": absoluteFilePaths(dl_manager.extract(txt_archive['dev'])),
-                         "data_type": 1,
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "path_to_txt": train_dir + "/txt",
-                         "path_to_wav": train_dir + "/wav",
-                         "wav_files": absoluteFilePaths(dl_manager.extract(wav_archive['train'])),
-                         "txt_files": absoluteFilePaths(dl_manager.extract(txt_archive['train'])),
-                         "data_type": 0,
-                     },
-                 ),
-             ]
-         print("end of generation.........................................................")  # unreachable: both branches return above
-
-     # data_type: 0 --> train, 1 --> validation, 2 --> test
-     def _generate_examples(self, path_to_txt, path_to_wav, wav_files, txt_files, data_type):
-         """
-         This assumes that the text directory alphabetically precedes the wav dir.
-         The file names for wav and text match and are unique,
-         so we can use them as keys in the dictionary matching them.
-         """
-         print("start of generate examples.........................................................")
-         print("txt file names............................", txt_files)
-         print("wav_files names....................................", wav_files)
-
-         examples = {}
-         id_ = 0
-         # need to prepare the transcript - wave map
-         for item in txt_files:
-             # print("copying txt file...............", item)
-             if type(item) is tuple:
-                 # iter_archive will return path and file
-                 path, f = item
-                 txt = f.read().decode(encoding="utf-8").strip()
-             else:
-                 # extract will return path only
-                 path = item
-                 with open(path, encoding="utf-8") as f:
-                     txt = f.read().strip()
-
-             # if os.path.exists(path_to_txt) == False:
-             #     os.makedirs(path_to_txt)
-             # if path.find(path_to_txt) > -1:
-             # construct the wav path, which is used as an identifier
-             wav_path = os.path.split(path)[1].replace("_utf8", "").replace(".txt", ".wav").strip()
-             # print(wav_path)
-             examples[wav_path] = {
-                 "sentence": txt,
-                 "path": wav_path,
-             }
-
-         # for wf in wav_files:
-         #     print(wf)
-         for item in wav_files:
-             # print(item)
-             if type(item) is tuple:
-                 path, f = item
-                 wav_data = f.read()
-             else:
-                 path = item
-                 with open(path, "rb") as f:
-                     wav_data = f.read()
-             # if os.path.exists(path_to_wav) == False:
-             #     os.makedirs(path_to_wav)
-             # if path.find(path_to_wav) > -1:
-             wav_path = os.path.split(path)[1].strip()
-             if not (wav_path in examples):
-                 print("wav file mismatch:", wav_path)
-                 continue
-             audio = {"path": path, "bytes": wav_data}
-             yield id_, {**examples[wav_path], "audio": audio}
-             id_ += 1
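
For reference, the pairing rule the deleted script relied on: each transcript file name, minus the "_utf8" suffix and with .txt swapped for .wav, is the key under which the matching audio file is looked up. A standalone sketch of that transform (the sample path is made up for illustration):

    import os

    def txt_to_wav_key(txt_path):
        # Same transform as the deleted script: drop "_utf8",
        # swap the .txt extension for .wav.
        name = os.path.split(txt_path)[1]
        return name.replace("_utf8", "").replace(".txt", ".wav").strip()

    # hypothetical file name, for illustration only
    print(txt_to_wav_key("dataset/test/txt/EXAMPLE_utf8.txt"))  # -> EXAMPLE.wav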