Datasets:
Tasks:
Text Generation
Modalities:
Text
Sub-tasks:
language-modeling
Languages:
English
Size:
10K - 100K
ArXiv:
Tags:
question-generation
License:
"""Export the qg_squadshifts dataset splits to plain-text reference files.

For every (split, domain) pair this script downloads the
``asahi417/qg_squadshifts`` dataset and writes one newline-delimited
``.txt`` file per field into ``./reference_files/``.  Newlines embedded
inside a record are replaced with ``.`` so that each output line
corresponds to exactly one record.
"""
import os
from glob import glob  # used only by the commented-out sanity check below

from datasets import load_dataset

dataset_name = 'asahi417/qg_squadshifts'

os.makedirs('./reference_files', exist_ok=True)

for split in ['test', 'validation']:
    for domain in ["default", 'new_wiki', 'nyt', 'reddit', 'amazon']:
        # force_redownload: never trust a stale local cache for reference files
        dataset = load_dataset(dataset_name, domain, split=split, download_mode='force_redownload')
        print(dataset, split, domain)
        for data in ['question', 'answer', 'sentence', 'paragraph']:
            # The 'default' domain gets no infix (question-test.txt); shifted
            # domains do (question-test.nyt.txt).
            path = './reference_files/{}-{}.{}txt'.format(
                data, split, "" if domain == 'default' else f"{domain}.")
            with open(path, 'w') as f:
                # NOTE(review): the 'paragraph' file is filled from the
                # 'paragraph_id' column, not the paragraph text itself —
                # presumably intentional; confirm against downstream readers.
                if data == 'paragraph':
                    tmp_data = dataset['paragraph_id']
                else:
                    tmp_data = dataset[data]
                # One record per line; flatten any embedded newlines.
                f.write('\n'.join([i.replace('\n', '.') for i in tmp_data]))

# Sanity check (disabled): all files of a given domain should have the same
# number of lines.
# for domain in ['new_wiki', 'nyt', 'reddit', 'amazon']:
#     length = [len(open(i).read().split('\n')) for i in glob(f'reference_files/*{domain}*.txt')]
#     assert len(list(set(length))) == 1, length