import datasets
from datasets.tasks import TextClassification


_DESCRIPTION = """\
Movie Review Dataset.

This is a dataset containing 4,265 positive and 4,265 negative processed
sentences from Rotten Tomatoes movie reviews.
"""

_CITATION = """\
@InProceedings{Pang+Lee:05a,
  author =    {Bo Pang and Lillian Lee},
  title =     {Seeing stars: Exploiting class relationships for sentiment
               categorization with respect to rating scales},
  booktitle = {Proceedings of the ACL},
  year =      2005
}
"""

_DOWNLOAD_URL = "https://testerstories.com/files/ai_learn/rt-polaritydata.tar.gz"


class RottenTomatoesReviews(datasets.GeneratorBasedBuilder):
    """Rotten Tomatoes movie review sentiment dataset (positive/negative sentences)."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["neg", "pos"]),
                }
            ),
            supervised_keys=("text", "label"),
            homepage="http://www.cs.cornell.edu/people/pabo/movie-review-data/",
            citation=_CITATION,
            task_templates=[
                TextClassification(text_column="text", label_column="label")
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the archive and define the train/validation/test splits."""
        archive = dl_manager.download(_DOWNLOAD_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_key": "train",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_key": "validation",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split_key": "test",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    def _get_examples_from_split(self, split_key, files):
        """Read the pos/neg review files from the archive and apply an 80/10/10 split."""
        data_dir = "rt-polaritydata/"
        pos_samples, neg_samples = None, None

        for path, f in files:
            if path == data_dir + "rt-polarity.pos":
                pos_samples = [line.decode("latin-1").strip() for line in f]
            elif path == data_dir + "rt-polarity.neg":
                neg_samples = [line.decode("latin-1").strip() for line in f]

            if pos_samples is not None and neg_samples is not None:
                break

        # The pos and neg files hold the same number of sentences, so the split
        # indices computed from the positive list apply to both.
        i1 = int(len(pos_samples) * 0.8 + 0.5)
        i2 = int(len(pos_samples) * 0.9 + 0.5)

        train_samples = pos_samples[:i1] + neg_samples[:i1]
        train_labels = (["pos"] * i1) + (["neg"] * i1)

        validation_samples = pos_samples[i1:i2] + neg_samples[i1:i2]
        validation_labels = (["pos"] * (i2 - i1)) + (["neg"] * (i2 - i1))

        test_samples = pos_samples[i2:] + neg_samples[i2:]
        test_labels = (["pos"] * (len(pos_samples) - i2)) + (
            ["neg"] * (len(pos_samples) - i2)
        )

        if split_key == "train":
            return (train_samples, train_labels)
        if split_key == "validation":
            return (validation_samples, validation_labels)
        if split_key == "test":
            return (test_samples, test_labels)
        raise ValueError(f"Invalid split key {split_key}")

    def _generate_examples(self, split_key, files):
        """Yield (key, example) pairs for the requested split."""
        split_text, split_labels = self._get_examples_from_split(split_key, files)

        for text, label in zip(split_text, split_labels):
            data_key = split_key + "_" + text
            feature_dict = {"text": text, "label": label}

            yield data_key, feature_dict
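

# Minimal usage sketch, not part of the builder itself: it assumes this file is
# saved locally, the archive at _DOWNLOAD_URL is reachable, and the installed
# `datasets` release still supports script-based builders and `datasets.tasks`.
if __name__ == "__main__":
    from datasets import load_dataset

    # load_dataset accepts a path to a loading script; __file__ points at this one.
    ds = load_dataset(__file__)
    print(ds)  # DatasetDict with train/validation/test splits
    print(ds["train"][0])  # a single example, e.g. {'text': '...', 'label': 1}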