Upload dummy-text.py
dummy-text.py +135 -0
dummy-text.py
ADDED
@@ -0,0 +1,135 @@
# Source: openwebtext.py from the Hugging Face dataset Skylion007/openwebtext
# (commit 39d1a6d, "Make the dataset streamable (#3)" by lhoestq).
# Dataset card tags: Text Generation, Fill-Mask; English; license cc0-1.0.
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""

import re
from glob import glob

import datasets

_CITATION = """\
Dummy text
"""

_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""

_N_DATA_FILES = 1
# glob() resolves against the current working directory at import time, so the
# .tar archives must already exist locally under data/; sorted for determinism.
_DATA_FILES = sorted(glob("data/*.tar"))


class Openwebtext(datasets.GeneratorBasedBuilder):
    """The Open WebText dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download (or locate) the archives and pass lazy iterators over their
        # members, so examples can be generated without extracting everything.
        archives = dl_manager.download(_DATA_FILES)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archive_iterators": [
                        dl_manager.iter_archive(archive) for archive in archives
                    ],
                    "iter_archive": dl_manager.iter_archive,
                },
            ),
        ]

    def _generate_examples(self, archive_iterators, iter_archive):
        """Yields examples."""
        # Each outer .tar archive contains .xz archives, which in turn contain
        # the .txt documents that become individual examples.
        for archive_iterator in archive_iterators:
            for xz_filepath, xz_f in archive_iterator:
                if not xz_filepath.endswith(".xz"):
                    continue
                for txt_filepath, txt_f in iter_archive(xz_f):
                    if not txt_filepath.endswith(".txt"):
                        continue
                    idx = f"{xz_filepath}/{txt_filepath}"
                    # Collapse runs of three or more newlines into one blank line.
                    yield idx, {"text": re.sub(r"\n\n\n+", "\n\n", txt_f.read().decode("utf-8")).strip()}
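
For reference, a minimal usage sketch (an assumption, not part of this upload): it supposes the script is saved locally as dummy-text.py with the .tar archives it globs for under an adjacent data/ directory, and uses the standard datasets.load_dataset API.

from datasets import load_dataset

# "plain_text" is the script's only builder config.
dataset = load_dataset("dummy-text.py", "plain_text", split="train")

# Each example carries a single "text" field.
print(dataset[0]["text"][:200])

The upstream openwebtext.py is streamable (per the commit title above); this dummy variant globs its archives at import time, so they must exist locally even if streaming=True is passed.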