Modalities: Audio, Text
Formats: parquet
Libraries: Datasets, Dask

add slue-vp_nel config to slue-phase-2.py

#5 by ankitap - opened

Files changed (1): slue-phase-2.py (+112 -27)
slue-phase-2.py CHANGED
 
@@ -3,6 +3,7 @@ import os
 import csv
 import ast
 import gzip
+import json

 import datasets
 from datasets.utils.logging import get_logger
 
@@ -14,6 +15,7 @@ _URL = "https://asappresearch.github.io/slue-toolkit/"
 _DL_URLS = {
     "slue-hvb": "data/slue-hvb_blind.zip",
     "slue-sqa5": "data/slue-sqa5_blind.zip",
+    "slue-vp_nel": "data/slue-vp_nel_blind.zip",
 }

 _LICENSE = """
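
With the archive registered here and the matching "vp_nel" builder config added further down, the new subset should load the same way as the existing ones. A usage sketch, assuming the script is hosted as asapp/slue-phase-2 on the Hub (the repository id is not stated in this diff; recent datasets releases may additionally require trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    # Repository id is an assumption, not part of this PR.
    dev = load_dataset("asapp/slue-phase-2", "vp_nel", split="validation")
    print(dev[0]["id"], dev[0]["normalized_text"])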
 
@@ -56,6 +58,11 @@ For questions from the other 4 datasets, their question texts, answer strings, a

 SLUE-SQA-5 also contains a subset of Spoken Wikipedia, including the audios placed in “document” directories and their transcripts (document_text and normalized_document_text column in .tsv files). Additionally, we provide the text-to-speech alignments (.txt files in “word2time” directories). These contents are licensed with the same Creative Commons (CC BY-SA 4.0) license as Spoken Wikipedia.
 =======================================================
+SLUE-VP-NEL Dataset
+
+SLUE-VP-NEL includes word-level time stamps for the dev and test splits of the SLUE-voxpopuli corpus.
+For the dev split, the dataset also contains named entity annotations and the corresponding time stamps in a tsv format.
+=======================================================

 """
 
@@ -97,6 +104,26 @@ def load_word2time(word2time_file):
     )
     return word2time

+def parse_nel_time_spans(nel_timestamps):
+    nel_timestamps = ast.literal_eval(nel_timestamps)
+    return [
+        {
+            "ne_label": ne,
+            "start_char_idx": start,
+            "char_offset": off,
+            "start_sec": t0,
+            "end_sec": t1,
+        }
+        for ne, start, off, t0, t1 in nel_timestamps
+    ]
+
+def read_word_timestamps(word_alignments_fn):
+    data = json.loads(open(word_alignments_fn).read())
+    return [
+        {"word": word, "start_sec": start, "end_sec": end}
+        for word, start, end in data["timestamps"]
+    ]
+
 class SLUE2Config(datasets.BuilderConfig):
     """BuilderConfig for SLUE."""
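
The two helpers above assume specific input shapes: parse_nel_time_spans takes a Python-literal string of (label, start_char_idx, char_offset, start_sec, end_sec) tuples, and read_word_timestamps takes a JSON file whose top-level "timestamps" list holds (word, start_sec, end_sec) entries. A self-contained sketch with made-up values (not taken from the real annotation files) showing what the helpers produce:

    import ast
    import json

    # Illustrative inputs only; the real values come from slue-vp_nel_blind.zip.
    nel_cell = "[('PLACE', 0, 6, 0.25, 0.71)]"
    word_json = '{"timestamps": [["Berlin", 0.25, 0.71], ["is", 0.75, 0.84]]}'

    # Equivalent of parse_nel_time_spans(nel_cell):
    spans = [
        {"ne_label": ne, "start_char_idx": start, "char_offset": off, "start_sec": t0, "end_sec": t1}
        for ne, start, off, t0, t1 in ast.literal_eval(nel_cell)
    ]
    assert spans[0]["ne_label"] == "PLACE"

    # Equivalent of read_word_timestamps() applied to a file containing word_json:
    words = [
        {"word": w, "start_sec": s, "end_sec": e}
        for w, s, e in json.loads(word_json)["timestamps"]
    ]
    assert words[0] == {"word": "Berlin", "start_sec": 0.25, "end_sec": 0.71}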
 
 
@@ -128,6 +155,10 @@ class SLUE2(datasets.GeneratorBasedBuilder):
             name="sqa5",
             description="SLUE-SQA-5 set which includes Spoken Question Answering task.",
         ),
+        SLUE2Config(
+            name="vp_nel",
+            description="SLUE-VP-NEL set with named entity labels and time-stamps.",
+        ),
     ]

     def _info(self):
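
Once the new SLUE2Config is registered, the config should also show up when enumerating configurations; a quick check, again assuming the asapp/slue-phase-2 repository id:

    from datasets import get_dataset_config_names

    # Expected to list "hvb", "sqa5" and, with this PR, "vp_nel".
    print(get_dataset_config_names("asapp/slue-phase-2"))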
 
@@ -175,6 +206,30 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                     }
                 ),
             }
+        elif self.config.name == "vp_nel":
+            features = {
+                "id": datasets.Value("string"),
+                "split": datasets.Value("string"),
+                "audio": datasets.Audio(sampling_rate=16_000),
+                "speaker_id": datasets.Value("string"),
+                "normalized_text": datasets.Value("string"),
+                "word_timestamps": datasets.Sequence(
+                    {
+                        "word": datasets.Value("string"),
+                        "start_sec": datasets.Value("float64"),
+                        "end_sec": datasets.Value("float64"),
+                    }
+                ),
+                "normalized_nel": datasets.Sequence(
+                    {
+                        "ne_label": datasets.Value("string"),
+                        "start_char_idx": datasets.Value("int32"),
+                        "char_offset": datasets.Value("int32"),
+                        "start_sec": datasets.Value("float64"),
+                        "end_sec": datasets.Value("float64"),
+                    }
+                ),
+            }
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features(features),
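
Because word_timestamps and normalized_nel are declared as datasets.Sequence over a dict, a loaded example exposes them as a dict of parallel lists rather than a list of dicts. A sketch of reading one dev example (repository id assumed as above):

    from datasets import load_dataset

    dev = load_dataset("asapp/slue-phase-2", "vp_nel", split="validation")
    ex = dev[0]

    # Parallel lists, one entry per word.
    for word, t0, t1 in zip(
        ex["word_timestamps"]["word"],
        ex["word_timestamps"]["start_sec"],
        ex["word_timestamps"]["end_sec"],
    ):
        print(f"{t0:7.2f} {t1:7.2f}  {word}")

    # Same layout for the named-entity annotations (populated on the dev split).
    print(ex["normalized_nel"]["ne_label"])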
 
@@ -194,33 +249,42 @@ class SLUE2(datasets.GeneratorBasedBuilder):
         data_dir = os.path.join(dl_dir, config_name)
         print(data_dir)

-        splits = [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir or "", f"{config_name}_fine-tune.tsv"
-                    ),
-                    "data_dir": data_dir,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir or "", f"{config_name}_dev.tsv"),
-                    "data_dir": data_dir,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir or "", f"{config_name}_test_blind.tsv"
-                    ),
-                    "data_dir": data_dir,
-                },
-            ),
-        ]
+        splits = []
+        if self.config.name in ["hvb", "sqa5"]:
+            splits.append(
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            data_dir or "", f"{config_name}_fine-tune.tsv"
+                        ),
+                        "data_dir": data_dir,
+                    },
+                )
+            )
+        if self.config.name in ["hvb", "sqa5", "vp_nel"]:
+            splits.append(
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            data_dir or "", f"{config_name}_dev.tsv"
+                        ),
+                        "data_dir": data_dir,
+                    },
+                ),
+            )
+        splits.append(
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": os.path.join(
+                        data_dir or "", f"{config_name}_test_blind.tsv"
+                    ),
+                    "data_dir": data_dir,
+                },
+            ),
+        )
         if self.config.name == "sqa5":
             splits.append(
                 datasets.SplitGenerator(
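
Note the effect of the reworked split logic: the vp_nel config gets no TRAIN generator, so once merged it should expose only validation and test splits (the additional generators appended below remain sqa5-only). A quick check, repository id assumed as above:

    from datasets import load_dataset

    splits = load_dataset("asapp/slue-phase-2", "vp_nel")
    print(sorted(splits))  # expected: ['test', 'validation']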
 
@@ -288,4 +352,25 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                     "word2time": load_word2time(word2time_file),
                     "answer_spans": parse_qa_answer_spans(row.get("answer_spans", "[]")),
                 }
+            elif self.config.name == "vp_nel":
+                split = "test" if "test" in filepath else "dev"
+                utt_id = row["id"]
+                word_alignments_fn = os.path.join(
+                    data_dir, "word_timestamps", split, f"{utt_id}.json"
+                )
+                audio_file = os.path.join(
+                    data_dir,
+                    split,
+                    f"{utt_id}.ogg",
+                )
+                example = {
+                    "id": utt_id,
+                    "audio": audio_file,
+                    "speaker_id": row["speaker_id"],
+                    "normalized_text": row["normalized_text"],
+                    "normalized_nel": parse_nel_time_spans(
+                        row.get("normalized_nel", "[]")
+                    ),
+                    "word_timestamps": read_word_timestamps(word_alignments_fn),
+                }
             yield idx, example
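
The new branch resolves per-utterance files relative to data_dir, so the extracted archive is expected to contain word_timestamps/<split>/<utt_id>.json alongside <split>/<utt_id>.ogg. A sketch that uses the resulting fields to cut one named-entity segment out of the decoded audio (illustrative only; repository id assumed as above):

    from datasets import load_dataset

    dev = load_dataset("asapp/slue-phase-2", "vp_nel", split="validation")
    ex = dev[0]

    audio = ex["audio"]["array"]            # decoded by the Audio feature
    sr = ex["audio"]["sampling_rate"]       # declared as 16 kHz in _info()

    nel = ex["normalized_nel"]              # dict of parallel lists, possibly empty
    if nel["ne_label"]:
        t0, t1 = nel["start_sec"][0], nel["end_sec"][0]
        segment = audio[int(t0 * sr):int(t1 * sr)]
        print(nel["ne_label"][0], f"{t1 - t0:.2f}s", len(segment), "samples")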