Datasets: asahi417/qg_squadshifts
Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: English
Size: 10K - 100K
Tags: question-generation

Commit message: update

Files changed:
- data/processed/amazon.test00.jsonl +0 -0
- data/processed/amazon.test01.jsonl +0 -0
- data/processed/amazon.test02.jsonl +0 -0
- data/processed/amazon.test03.jsonl +0 -0
- data/processed/amazon.test04.jsonl +0 -0
- data/processed/amazon.test05.jsonl +0 -0
- data/processed/amazon.test06.jsonl +0 -0
- data/processed/new_wiki.test00.jsonl +0 -0
- data/processed/new_wiki.test01.jsonl +0 -0
- data/processed/new_wiki.test02.jsonl +0 -0
- data/processed/new_wiki.test03.jsonl +0 -0
- data/processed/new_wiki.test04.jsonl +0 -0
- data/processed/new_wiki.test05.jsonl +0 -0
- data/processed/nyt.test00.jsonl +0 -0
- data/processed/nyt.test01.jsonl +0 -0
- data/processed/nyt.test02.jsonl +0 -0
- data/processed/nyt.test03.jsonl +0 -0
- data/processed/nyt.test04.jsonl +0 -0
- data/processed/nyt.test05.jsonl +0 -0
- data/processed/nyt.test06.jsonl +0 -0
- data/processed/reddit.test00.jsonl +0 -0
- data/processed/reddit.test01.jsonl +0 -0
- data/processed/reddit.test02.jsonl +0 -0
- data/processed/reddit.test03.jsonl +0 -0
- data/processed/reddit.test04.jsonl +0 -0
- data/processed/reddit.test05.jsonl +0 -0
- data/processed/reddit.test06.jsonl +0 -0
- generate_reference_files.py +5 -1
- process.py +3 -2
- qg_squadshifts.py +3 -2
The following processed JSONL files changed; their diffs are too large to render (see the raw diffs):

data/processed/amazon.test00.jsonl
data/processed/amazon.test01.jsonl
data/processed/amazon.test02.jsonl
data/processed/amazon.test03.jsonl
data/processed/amazon.test04.jsonl
data/processed/amazon.test05.jsonl
data/processed/amazon.test06.jsonl
data/processed/new_wiki.test00.jsonl
data/processed/new_wiki.test01.jsonl
data/processed/new_wiki.test02.jsonl
data/processed/new_wiki.test03.jsonl
data/processed/new_wiki.test04.jsonl
data/processed/new_wiki.test05.jsonl
data/processed/nyt.test00.jsonl
data/processed/nyt.test01.jsonl
data/processed/nyt.test02.jsonl
data/processed/nyt.test03.jsonl
data/processed/nyt.test04.jsonl
data/processed/nyt.test05.jsonl
data/processed/nyt.test06.jsonl
data/processed/reddit.test00.jsonl
data/processed/reddit.test01.jsonl
data/processed/reddit.test02.jsonl
data/processed/reddit.test03.jsonl
data/processed/reddit.test04.jsonl
data/processed/reddit.test05.jsonl
data/processed/reddit.test06.jsonl
generate_reference_files.py
CHANGED
@@ -9,5 +9,9 @@ for split in ['test']:
     dataset = load_dataset('asahi417/qg_squadshifts', domain, split=split)
     for data in ['question', 'answer', 'sentence', 'paragraph']:
         with open('./reference_files/{}-{}.{}txt'.format(data, split, "" if domain == 'default' else f"{domain}."), 'w') as f:
-            f.write('\n'.join(dataset[data]))
+            if data == 'paragraph':
+                f.write('\n'.join(dataset['paragraph_id']))
+            else:
+                f.write('\n'.join(dataset[data]))
+
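
With this change the paragraph reference file holds one paragraph_id per line, while the question, answer and sentence reference files keep raw text. A minimal sketch of checking a generated file against the split it was built from; the 'amazon' domain and the output path are only examples that follow the format string above:

from datasets import load_dataset

# Sketch only: verify a generated reference file line-for-line against the split.
dataset = load_dataset('asahi417/qg_squadshifts', 'amazon', split='test')
with open('./reference_files/paragraph-test.amazon.txt') as f:
    paragraph_ids = f.read().split('\n')

assert len(paragraph_ids) == len(dataset)          # one entry per example
assert paragraph_ids == dataset['paragraph_id']    # ids written in dataset order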
process.py
CHANGED
@@ -101,5 +101,6 @@ if __name__ == '__main__':
             assert type(answer_str) is str, answer_str
             assert type(question_str) is str, question_str
             assert type(paragraph_str) is str, paragraph_str
-            tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
-            f.write(json.dumps(tmp_data) + '\n')
+            tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
+            tmp_data['paragraph_id'] = single_data['id']
+            f.write(json.dumps(tmp_data) + '\n')
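
Each processed record now also carries the id of its source paragraph, so evaluation can group generated questions by paragraph. A hedged sketch of reading one of the processed files back; the key names other than paragraph_id are assumptions based on the features declared in qg_squadshifts.py:

import json

# Sketch only: inspect the JSONL written by process.py.
with open('data/processed/amazon.test00.jsonl') as f:
    records = [json.loads(line) for line in f]

print(records[0]['paragraph_id'])
print(sorted(records[0].keys()))
# assumed keys: answer, paragraph, paragraph_answer, paragraph_id,
# paragraph_sentence, question, sentence, sentence_answer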
qg_squadshifts.py
CHANGED
@@ -31,7 +31,7 @@ class QGSQuADShiftsConfig(datasets.BuilderConfig):
 class QGSQuADShifts(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [QGSQuADShiftsConfig(name="default", description="All domain.")]
-    BUILDER_CONFIGS += [QGSQuADShiftsConfig(name=i, description=i) for i in sorted(_FILES.keys())]
+    BUILDER_CONFIGS += [QGSQuADShiftsConfig(name=i, description=f"Domain {i}") for i in sorted(_FILES.keys())]
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -44,7 +44,8 @@ class QGSQuADShifts(datasets.GeneratorBasedBuilder):
                     "paragraph": datasets.Value("string"),
                     "sentence_answer": datasets.Value("string"),
                     "paragraph_answer": datasets.Value("string"),
-                    "paragraph_sentence": datasets.Value("string")
+                    "paragraph_sentence": datasets.Value("string"),
+                    "paragraph_id": datasets.Value("string")
                 }
             ),
             supervised_keys=None,
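
Because paragraph_id is declared as a plain string feature, every config should expose it, including the per-domain configs built from _FILES. A minimal sketch of a sanity check; the domain names are taken from the processed file prefixes above:

from datasets import load_dataset

# Sketch only: every config should now expose the new paragraph_id column.
for config in ['default', 'amazon', 'new_wiki', 'nyt', 'reddit']:
    split = load_dataset('asahi417/qg_squadshifts', config, split='test')
    assert 'paragraph_id' in split.features
    assert split.features['paragraph_id'].dtype == 'string'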