Dataset: Skylion007/openwebtext
Tasks: Text Generation, Fill-Mask
Sub-tasks: language-modeling, masked-language-modeling
Languages: English
Multilinguality: monolingual
Size category: 1M<n<10M
Language creators: found
Annotations creators: no-annotation
Source datasets: original
License: cc0-1.0
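A minimal usage sketch for the card above, assuming a recent release of the `datasets` library; the repo id `Skylion007/openwebtext` comes from the card, and everything else is the library's standard API:

from datasets import load_dataset

# Download and prepare the full train split (the script below defines no other split).
ds = load_dataset("Skylion007/openwebtext", split="train")
print(ds[0]["text"][:200])  # each example is {"text": <document string>}

# Streaming mode (see the "Make the dataset streamable" commit below):
# examples are read straight out of the tar shards, without a full download.
streamed = load_dataset("Skylion007/openwebtext", split="train", streaming=True)
print(next(iter(streamed))["text"][:200])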
openwebtext / openwebtext.py (2.73 kB)
Last commit: "Make the dataset streamable (#3)" by lhoestq (HF staff), 39d1a6d, 6 months ago
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import re
import datasets
from glob import glob
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
  title={OpenWebText Corpus},
  author={Aaron Gokaslan and Vanya Cohen},
  howpublished={http://Skylion007.github.io/OpenWebTextCorpus},
  year={2019}
}
"""

_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_N_DATA_FILES = 1  # number of tar shards expected under data/
# Collect the shards; sorted() keeps the file order deterministic across platforms.
_DATA_FILES = sorted(glob("data/*.tar"))


class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
    ]

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="",
citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # download() returns local paths (or lazy streams in streaming mode);
        # iter_archive() then yields (path, file-object) pairs from each tar
        # without extracting it to disk.
        archives = dl_manager.download(_DATA_FILES)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
"archive_iterators": [
dl_manager.iter_archive(archive) for archive in archives
],
"iter_archive": dl_manager.iter_archive
}),
        ]

    def _generate_examples(self, archive_iterators, iter_archive):
        """Yields examples."""
        # Outer loop: members of each downloaded .tar shard. The .xz members are
        # themselves compressed tars of documents, so each is reopened with iter_archive.
        for archive_iterator in archive_iterators:
for xz_filepath, xz_f in archive_iterator:
if not xz_filepath.endswith(".xz"):
continue
for txt_filepath, txt_f in iter_archive(xz_f):
if not txt_filepath.endswith(".txt"):
continue
                    idx = f"{xz_filepath}/{txt_filepath}"
                    # Collapse runs of three or more newlines into a single blank line.
                    yield idx, {"text": re.sub("\n\n\n+", "\n\n", txt_f.read().decode("utf-8")).strip()}
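As a rough sketch of what `_generate_examples` does with the nested archives, the same traversal can be written with the standard library alone. The shard path `data/subset00.tar` is hypothetical and only needs to match the `data/*.tar` layout assumed by `_DATA_FILES`:

import re
import tarfile

with tarfile.open("data/subset00.tar") as outer:  # hypothetical shard name
    for member in outer:
        if not member.name.endswith(".xz"):
            continue
        # Each .xz member is itself an xz-compressed tar of plain-text documents;
        # mode "r|xz" lets tarfile decompress it as a stream.
        with tarfile.open(fileobj=outer.extractfile(member), mode="r|xz") as inner:
            for doc in inner:
                if not doc.name.endswith(".txt"):
                    continue
                text = inner.extractfile(doc).read().decode("utf-8")
                text = re.sub("\n\n\n+", "\n\n", text).strip()
                print(f"{member.name}/{doc.name}: {len(text)} chars")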