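"""Hugging Face `datasets` loading script for the peS2o corpus.

Example usage (a sketch; assumes this script is served from the Hugging Face
Hub, e.g. as "allenai/peS2o"):

    from datasets import load_dataset

    ds = load_dataset("allenai/peS2o", "v2", split="validation", streaming=True)
    print(next(iter(ds))["text"][:100])
"""
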
import gzip
import json

import datasets

logger = datasets.logging.get_logger(__name__)


_HOMEPAGE = "https://github.com/allenai/peS2o"

_DESCRIPTION = "\
The peS2o dataset is a collection of ~40M Creative Commons licensed academic \
papers, cleaned, filtered, and formatted for pre-training of language models. \
It is derived from the Semantic Scholar Open Research Corpus (Lo et al., 2020), \
or S2ORC.\
"

_LICENSE = "odc-by"

_VARIANTS = {
    "v1": {
        "version": "1.0.0",
        "download_size": 100702002904,
"dataset_size": 67787014, |
|
"splits": { |
|
"train": { |
|
"num_bytes": 100145555091, |
|
"num_examples": 67624463, |
|
"files": [ |
|
"data/v1/train-00000-of-00020.json.gz", |
|
"data/v1/train-00001-of-00020.json.gz", |
|
"data/v1/train-00002-of-00020.json.gz", |
|
"data/v1/train-00003-of-00020.json.gz", |
|
"data/v1/train-00004-of-00020.json.gz", |
|
"data/v1/train-00005-of-00020.json.gz", |
|
"data/v1/train-00006-of-00020.json.gz", |
|
"data/v1/train-00007-of-00020.json.gz", |
|
"data/v1/train-00008-of-00020.json.gz", |
|
"data/v1/train-00009-of-00020.json.gz", |
|
"data/v1/train-00010-of-00020.json.gz", |
|
"data/v1/train-00011-of-00020.json.gz", |
|
"data/v1/train-00012-of-00020.json.gz", |
|
"data/v1/train-00013-of-00020.json.gz", |
|
"data/v1/train-00014-of-00020.json.gz", |
|
"data/v1/train-00015-of-00020.json.gz", |
|
"data/v1/train-00016-of-00020.json.gz", |
|
"data/v1/train-00017-of-00020.json.gz", |
|
"data/v1/train-00018-of-00020.json.gz", |
|
"data/v1/train-00019-of-00020.json.gz", |
|
], |
|
}, |
|
"validation": { |
|
"num_bytes": 556447813, |
|
"num_examples": 162551, |
|
"files": [ |
|
"data/v1/validation-00000-of-00002.json.gz", |
|
"data/v1/validation-00001-of-00002.json.gz", |
|
], |
|
}, |
|
}, |
|
}, |
|
"v2": { |
|
"version": "1.0.0", |
|
"download_size": 87129236480, |
|
"dataset_size": 38972211, |
|
"splits": { |
|
"train": { |
|
"num_bytes": 86572382178, |
|
"num_examples": 38811179, |
|
"files": [ |
|
"data/v2/train-00000-of-00020.json.gz", |
|
"data/v2/train-00001-of-00020.json.gz", |
|
"data/v2/train-00002-of-00020.json.gz", |
|
"data/v2/train-00003-of-00020.json.gz", |
|
"data/v2/train-00004-of-00020.json.gz", |
|
"data/v2/train-00005-of-00020.json.gz", |
|
"data/v2/train-00006-of-00020.json.gz", |
|
"data/v2/train-00007-of-00020.json.gz", |
|
"data/v2/train-00008-of-00020.json.gz", |
|
"data/v2/train-00009-of-00020.json.gz", |
|
"data/v2/train-00010-of-00020.json.gz", |
|
"data/v2/train-00011-of-00020.json.gz", |
|
"data/v2/train-00012-of-00020.json.gz", |
|
"data/v2/train-00013-of-00020.json.gz", |
|
"data/v2/train-00014-of-00020.json.gz", |
|
"data/v2/train-00015-of-00020.json.gz", |
|
"data/v2/train-00016-of-00020.json.gz", |
|
"data/v2/train-00017-of-00020.json.gz", |
|
"data/v2/train-00018-of-00020.json.gz", |
|
"data/v2/train-00019-of-00020.json.gz", |
|
], |
|
}, |
|
"validation": { |
|
"num_bytes": 556854302, |
|
"num_examples": 161032, |
|
"files": [ |
|
"data/v2/validation-00000-of-00002.json.gz", |
|
"data/v2/validation-00001-of-00002.json.gz", |
|
], |
|
}, |
|
}, |
|
}, |
|
} |
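
# Illustrative sanity check: each variant's download_size should equal the
# sum of its splits' num_bytes (this holds for the figures above).
for _variant in _VARIANTS.values():
    assert _variant["download_size"] == sum(
        _split["num_bytes"] for _split in _variant["splits"].values()
    )
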
_FEATURES = datasets.Features(
    added=datasets.Value("string"),
    created=datasets.Value("string"),
    id=datasets.Value("string"),
    source=datasets.Value("string"),
    text=datasets.Value("string"),
    version=datasets.Value("string"),
)
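
# Each shard is JSON Lines: one object per line with the fields in _FEATURES.
# An illustrative record (made-up values) might look like:
#   {"added": "2023-01-10", "created": "2020-06-01", "id": "0",
#    "source": "s2orc", "text": "...", "version": "v2"}
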
_CITATION = """\ |
|
@techreport{peS2o, |
|
author = {Luca Soldaini and Kyle Lo}, |
|
year = 2023, |
|
title = {{peS2o (Pretraining Efficiently on S2ORC) Dataset}}, |
|
institution = {{Allen Institute for AI}}, |
|
note = {ODC-By, \url{https://github.com/allenai/pes2o}} |
|
} |
|
""" |
|
|
|
|
|
class PeS2o(datasets.GeneratorBasedBuilder):
    """Pretraining Efficiently on S2ORC!"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=config["version"])
        for name, config in _VARIANTS.items()
    ]

    DEFAULT_CONFIG_NAME = "v2"
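
    # One BuilderConfig per variant: e.g. load_dataset(<path>, "v1") or "v2";
    # omitting the config name falls back to DEFAULT_CONFIG_NAME.
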
    def _info(self):
        """Return dataset metadata and feature types."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            dataset_size=_VARIANTS[self.config.name]["dataset_size"],
            download_size=_VARIANTS[self.config.name]["download_size"],
        )

    def _split_generators(self, dl_manager):
        train_downloaded_files = dl_manager.download(
            _VARIANTS[self.config.name]["splits"]["train"]["files"]
        )
        validation_downloaded_files = dl_manager.download(
            _VARIANTS[self.config.name]["splits"]["validation"]["files"]
        )
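        # The shards are downloaded but not extracted: _generate_examples
        # decompresses them on the fly, which also keeps streaming mode cheap.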
        return [
            datasets.SplitGenerator(
                name=str(datasets.Split.TRAIN),
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
            datasets.SplitGenerator(
                name=str(datasets.Split.VALIDATION),
                gen_kwargs={"filepaths": validation_downloaded_files},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield examples in raw (text) form, iterating over all files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # Wrapping a binary handle in gzip.open keeps this compatible with
            # streaming, where `datasets` patches `open` to read remote files.
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1