Dataset: Skylion007/openwebtext

Tasks: Text Generation, Fill-Mask
Sub-tasks: language-modeling, masked-language-modeling
Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
License: cc0-1.0
openwebtext / openwebtext.py (2.73 kB)
lhoestq (HF Staff): Make the dataset streamable (commit 39d1a6d, 6 months ago)
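The loading script itself follows. For reference, here is a minimal way to consume it with the `datasets` library (a sketch: the "train" split name comes from `_split_generators` below, and `streaming=True` relies on the streamable iteration this commit introduced). Each example is a dict with a single "text" string field, matching the features declared in `_info`:

from datasets import load_dataset

# Stream examples without materializing every shard archive on disk first.
ds = load_dataset("Skylion007/openwebtext", split="train", streaming=True)
print(next(iter(ds))["text"][:200])
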
"""The Open WebText Corpus""" |
|
|
|
import re |
|
|
|
import datasets |
|
from glob import glob |
|
|
|
_CITATION = """\ |
|
Dummy text |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
An open-source replication of the WebText dataset from OpenAI. |
|
""" |
|
|
|
_N_DATA_FILES = 1 |
|
_DATA_FILES = [f for f in glob("data/*.tar")] |
|
|
|
|
|
class Openwebtext(datasets.GeneratorBasedBuilder): |
|
"""The Open WebText dataset.""" |
|
|
|
BUILDER_CONFIGS = [ |
|
datasets.BuilderConfig( |
|
name="plain_text", |
|
description="Plain text", |
|
version=datasets.Version("1.0.0"), |
|
) |
|
] |
|
|
|
def _info(self): |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=datasets.Features({"text": datasets.Value("string")}), |
|
homepage="", |
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
archives = dl_manager.download(_DATA_FILES) |
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={ |
|
"archive_iterators": [ |
|
dl_manager.iter_archive(archive) for archive in archives |
|
], |
|
"iter_archive": dl_manager.iter_archive |
|
}), |
|
] |
|
|
|
def _generate_examples(self, archive_iterators, iter_archive): |
|
"""Yields examples.""" |
|
for archive_iterator in archive_iterators: |
|
for xz_filepath, xz_f in archive_iterator: |
|
if not xz_filepath.endswith(".xz"): |
|
continue |
|
for txt_filepath, txt_f in iter_archive(xz_f): |
|
if not txt_filepath.endswith(".txt"): |
|
continue |
|
idx = f"{xz_filepath}/{txt_filepath}" |
|
yield idx, {"text": re.sub("\n\n\n+", "\n\n", txt_f.read().decode("utf-8")).strip()} |
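
For intuition, the nested iteration above is roughly what the following standalone sketch does using only the standard library. The shard name is hypothetical, and it assumes each .xz member is an xz-compressed tar of .txt documents, which is what the nested iter_archive call implies:

import tarfile

# Open one shard: a .tar whose members are .xz archives of .txt files.
with tarfile.open("data/shard00.tar") as outer:  # hypothetical shard name
    for member in outer:
        if not member.name.endswith(".xz"):
            continue
        # Stream-decompress the inner xz tar without extracting it to disk.
        with tarfile.open(fileobj=outer.extractfile(member), mode="r|xz") as inner:
            for txt in inner:
                if not txt.name.endswith(".txt"):
                    continue
                text = inner.extractfile(txt).read().decode("utf-8")
                print(f"{member.name}/{txt.name}: {len(text)} characters")

The actual script routes this through dl_manager instead, so the same code path serves both regular downloads and streaming.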