system HF staff committed on
Commit 9999447
1 Parent(s): 4d8fc47

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2)
  1. README.md +1 -0
  2. empathetic_dialogues.py +28 -29
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: EmpatheticDialogues
 languages:
 - en
 paperswithcode_id: empatheticdialogues
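
The README change only adds a pretty_name field to the card's YAML front matter. As an illustration (not part of the commit), here is a minimal sketch of reading that field from a local copy of the card; it assumes PyYAML is installed, and the file path and helper name are hypothetical:

import yaml  # assumption: PyYAML is available

def read_front_matter(path="README.md"):
    """Return the YAML block between the leading '---' markers as a dict."""
    with open(path, encoding="utf-8") as f:
        text = f.read()
    # The front matter sits between the first two '---' delimiters.
    _, block, _ = text.split("---", 2)
    return yaml.safe_load(block)

meta = read_front_matter()
print(meta.get("pretty_name"))  # EmpatheticDialogues
print(meta.get("languages"))    # ['en']
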
empathetic_dialogues.py CHANGED
@@ -2,7 +2,6 @@


 import csv
-import os

 import datasets

@@ -61,47 +60,47 @@ class EmpatheticDialogues(datasets.GeneratorBasedBuilder):
         # TODO(empathetic_dialogues): Downloads the data and defines the splits
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "empatheticdialogues")
+        archive = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "train.csv")},
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "split_file": "empatheticdialogues/train.csv"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "valid.csv")},
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "split_file": "empatheticdialogues/valid.csv"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test.csv")},
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "split_file": "empatheticdialogues/test.csv"},
             ),
         ]

-    def _generate_examples(self, filepath):
+    def _generate_examples(self, files, split_file):
         """Yields examples."""
-        # TODO(empathetic_dialogues): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = csv.DictReader(f)
-            for id_, row in enumerate(data):
-                utterance = row["utterance"]
-                speaker_id = int(row["speaker_idx"])
-                context = row["context"]
-                conv_id = row["conv_id"]
-                tags = row["tags"] if row["tags"] else ""
-                selfeval = row["selfeval"] if row["selfeval"] else ""
-                utterance_id = int(row["utterance_idx"])
-                prompt = row["prompt"]
-                yield id_, {
-                    "utterance": utterance,
-                    "utterance_idx": utterance_id,
-                    "context": context,
-                    "speaker_idx": speaker_id,
-                    "conv_id": conv_id,
-                    "selfeval": selfeval,
-                    "prompt": prompt,
-                    "tags": tags,
-                }
+        for path, f in files:
+            if split_file == path:
+                data = csv.DictReader(line.decode("utf-8") for line in f)
+                for id_, row in enumerate(data):
+                    utterance = row["utterance"]
+                    speaker_id = int(row["speaker_idx"])
+                    context = row["context"]
+                    conv_id = row["conv_id"]
+                    tags = row["tags"] if row["tags"] else ""
+                    selfeval = row["selfeval"] if row["selfeval"] else ""
+                    utterance_id = int(row["utterance_idx"])
+                    prompt = row["prompt"]
+                    yield id_, {
+                        "utterance": utterance,
+                        "utterance_idx": utterance_id,
+                        "context": context,
+                        "speaker_idx": speaker_id,
+                        "conv_id": conv_id,
+                        "selfeval": selfeval,
+                        "prompt": prompt,
+                        "tags": tags,
+                    }
+                break
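
For context (an illustration, not part of the commit): replacing download_and_extract() and os.path.join() with download() and iter_archive() lets _generate_examples read the CSV files straight out of the downloaded archive, which is what the 1.16.0 script updates rely on for streaming support. A minimal usage sketch, assuming datasets >= 1.16.0 is installed and this loading script is the one resolved by load_dataset:

from datasets import load_dataset

# Regular load: the archive is downloaded and the examples are materialized.
train = load_dataset("empathetic_dialogues", split="train")
print(train[0]["utterance"])

# Streaming load: iter_archive() yields (path, file handle) pairs from the
# tarball, so examples are read lazily without extracting it to disk first.
streamed = load_dataset("empathetic_dialogues", split="train", streaming=True)
print(next(iter(streamed))["prompt"])
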