|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""The Multilingual SemEval2016 Task5 Reviews Corpus""" |
|
|
|
import datasets |
|
|
|
_CITATION = """\ |
|
@inproceedings{pontiki2016semeval, |
|
title={Semeval-2016 task 5: Aspect based sentiment analysis}, |
|
author={Pontiki, Maria and Galanis, Dimitrios and Papageorgiou, Haris and Androutsopoulos, Ion and Manandhar, Suresh and Al-Smadi, Mohammad and Al-Ayyoub, Mahmoud and Zhao, Yanyan and Qin, Bing and De Clercq, Orph{\'e}e and others}, |
|
booktitle={International workshop on semantic evaluation}, |
|
pages={19--30}, |
|
year={2016} |
|
} |
|
""" |
|
|
|
_LICENSE = """\ |
|
Please click on the homepage URL for license details. |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis. |
|
""" |
|
|
|
_CONFIG = [ |
|
|
|
"restaurants_english", |
|
"restaurants_french", |
|
"restaurants_spanish", |
|
"restaurants_russian", |
|
"restaurants_dutch", |
|
"restaurants_turkish", |
|
|
|
|
|
"hotels_arabic", |
|
|
|
|
|
"mobilephones_dutch", |
|
"mobilephones_chinese", |
|
"laptops_english", |
|
"digitalcameras_chinese" |
|
] |
|
|
|
_VERSION = "0.1.0" |
|
|
|
_HOMEPAGE_URL = "https://alt.qcri.org/semeval2016/task5/index.php?id=data-and-tools/" |
|
_DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5Corrected/{split}/{domain}_{split}_{lang}.xml" |
|
|
|
|
|
class SemEval2016Task5NLTKConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for SemEval2016Config.""" |
|
|
|
def __init__(self, _CONFIG, **kwargs): |
|
super(SemEval2016Task5NLTKConfig, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs), |
|
self.configs = _CONFIG |
|
|
|
|
|
class SemEval2016Task5NLTK(datasets.GeneratorBasedBuilder): |
|
"""The Multilingual SemEval2016 ABSA Corpus""" |
|
|
|
BUILDER_CONFIGS = [ |
|
SemEval2016Task5NLTKConfig( |
|
name="All", |
|
_CONFIG=_CONFIG, |
|
description="A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.", |
|
) |
|
] + [ |
|
SemEval2016Task5NLTKConfig( |
|
name=config, |
|
_CONFIG=[config], |
|
description=f"{config} of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis", |
|
) |
|
for config in _CONFIG |
|
] |
|
|
|
BUILDER_CONFIG_CLASS = SemEval2016Task5NLTKConfig |
|
DEFAULT_CONFIG_NAME = "restaurants_english" |
|
|
|
def _info(self): |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=datasets.Features( |
|
{'text': datasets.Value(dtype='string'), |
|
'opinions': [ |
|
{'category': datasets.Value(dtype='string'), |
|
'from': datasets.Value(dtype='string'), |
|
'polarity': datasets.Value(dtype='string'), |
|
'target': datasets.Value(dtype='string'), |
|
'to': datasets.Value(dtype='string')} |
|
], |
|
'tokens': datasets.Sequence(datasets.Value(dtype='string')), |
|
'ATESP_BIEOS_tags': datasets.Sequence(datasets.ClassLabel(num_classes=17, names=['B-NEG', 'B-NEU', 'B-POS', 'B-CON', 'I-NEG', 'I-NEU', 'I-POS', 'I-CON', 'E-NEG', 'E-NEU', 'E-POS', 'E-CON', 'S-NEG', 'S-NEU', 'S-POS', 'S-CON', 'O'])), |
|
'ATESP_BIO_tags': datasets.Sequence(datasets.ClassLabel(num_classes=9, names=['B-NEG', 'B-NEU', 'B-POS', 'B-CON', 'I-NEG', 'I-NEU', 'I-POS', 'I-CON', 'O'])), |
|
'ATE_BIEOS_tags': datasets.Sequence(datasets.ClassLabel(num_classes=5, names=['B', 'I', 'E', 'O', 'S'])), |
|
'ATE_BIO_tags': datasets.Sequence(datasets.ClassLabel(num_classes=3, names=['B', 'I', 'O'])), |
|
|
|
'domain': datasets.Value(dtype='string'), |
|
'reviewId': datasets.Value(dtype='string'), |
|
'sentenceId': datasets.Value(dtype='string') |
|
} |
|
), |
|
supervised_keys=None, |
|
license=_LICENSE, |
|
homepage=_HOMEPAGE_URL, |
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
|
|
lang_list = [] |
|
domain_list = [] |
|
|
|
for config in self.config.configs: |
|
domain_list.append(config.split('_')[0]) |
|
lang_list.append(config.split('_')[1]) |
|
|
|
train_urls = [_DOWNLOAD_URL.format(split="train", domain=config.split('_')[0], lang=config.split('_')[1]) for config in self.config.configs] |
|
dev_urls = [_DOWNLOAD_URL.format(split="trial", domain=config.split('_')[0], lang=config.split('_')[1]) for config in self.config.configs] |
|
test_urls = [_DOWNLOAD_URL.format(split="test", domain=config.split('_')[0], lang=config.split('_')[1]) for config in self.config.configs] |
|
|
|
train_paths = dl_manager.download_and_extract(train_urls) |
|
dev_paths = dl_manager.download_and_extract(dev_urls) |
|
test_paths = dl_manager.download_and_extract(test_urls) |
|
|
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, "lang_list": lang_list, "domain_list": domain_list}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, "lang_list": lang_list, "domain_list": domain_list}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, "lang_list": lang_list, "domain_list": domain_list}), |
|
] |
|
|
|
def _generate_examples(self, file_paths, lang_list, domain_list): |
|
row_count = 0 |
|
assert len(file_paths)==len(lang_list) and len(lang_list)==len(domain_list) |
|
|
|
for i in range(len(file_paths)): |
|
file_path, domain, language = file_paths[i], domain_list[i], lang_list[i] |
|
semEvalDataset = SemEvalXMLDataset(file_path, language, domain) |
|
|
|
for example in semEvalDataset.SentenceWithOpinions: |
|
|
|
yield row_count, example |
|
row_count += 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
from xml.dom.minidom import parse |
|
|
|
class SemEvalXMLDataset(): |
|
def __init__(self, file_name, language, domain): |
|
|
|
|
|
self.SentenceWithOpinions = [] |
|
self.xml_path = file_name |
|
|
|
self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence') |
|
|
|
for sentenceXml in self.sentenceXmlList: |
|
reviewId = sentenceXml.getAttribute("id").split(':')[0] |
|
sentenceId = sentenceXml.getAttribute("id") |
|
if len(sentenceXml.getElementsByTagName("text")[0].childNodes) < 1: |
|
|
|
continue |
|
text = sentenceXml.getElementsByTagName("text")[0].childNodes[0].nodeValue |
|
OpinionXmlList = sentenceXml.getElementsByTagName("Opinion") |
|
Opinions = [] |
|
for opinionXml in OpinionXmlList: |
|
|
|
target = opinionXml.getAttribute("target") |
|
category = opinionXml.getAttribute("category") |
|
polarity = opinionXml.getAttribute("polarity") |
|
from_ = opinionXml.getAttribute("from") |
|
to = opinionXml.getAttribute("to") |
|
|
|
opinionDict = { |
|
"target": target, |
|
"category": category, |
|
"polarity": polarity, |
|
"from": from_, |
|
"to": to |
|
} |
|
Opinions.append(opinionDict) |
|
|
|
|
|
example = { |
|
"text": text, |
|
"opinions": Opinions, |
|
"domain": domain, |
|
"reviewId": reviewId, |
|
"sentenceId": sentenceId |
|
} |
|
example = addTokenAndLabel(example) |
|
self.SentenceWithOpinions.append(example) |
|
|
|
import nltk |
|
|
|
|
|
def clearOpinion(example): |
|
opinions = example['opinions'] |
|
skipNullOpinions = [] |
|
|
|
for opinion in opinions: |
|
targetKey = 'target' |
|
target = opinion[targetKey] |
|
from_ = opinion['from'] |
|
to = opinion['to'] |
|
|
|
if target.lower() == 'null' or target == '' or from_ == to: |
|
continue |
|
skipNullOpinions.append(opinion) |
|
|
|
|
|
skipNullOpinions.sort(key=lambda x: int(x['from'])) |
|
UniOpinions = [] |
|
for opinion in skipNullOpinions: |
|
if len(UniOpinions) < 1: |
|
UniOpinions.append(opinion) |
|
else: |
|
if opinion['from'] != UniOpinions[-1]['from'] and opinion['to'] != UniOpinions[-1]['to']: |
|
UniOpinions.append(opinion) |
|
return UniOpinions |
|
|
|
|
|
def addTokenAndLabel(example): |
|
tokens = [] |
|
labels = [] |
|
|
|
text = example['text'] |
|
UniOpinions = clearOpinion(example) |
|
text_begin = 0 |
|
|
|
for aspect in UniOpinions: |
|
polarity = aspect['polarity'][:3].upper() |
|
pre_O_tokens = nltk.word_tokenize(text[text_begin: int(aspect['from'])]) |
|
tokens.extend(pre_O_tokens) |
|
labels.extend(['O']*len(pre_O_tokens)) |
|
|
|
BIES_tokens = nltk.word_tokenize(text[int(aspect['from']): int(aspect['to'])]) |
|
tokens.extend(BIES_tokens) |
|
|
|
assert len(BIES_tokens) > 0, print(f'error in BIES_tokens length: {tokens}') |
|
|
|
if len(BIES_tokens)==1: |
|
labels.append('S-'+polarity) |
|
elif len(BIES_tokens)==2: |
|
labels.append('B-'+polarity) |
|
labels.append('E-'+polarity) |
|
else: |
|
labels.append('B-'+polarity) |
|
labels.extend(['I-'+polarity]*(len(BIES_tokens)-2)) |
|
labels.append('E-'+polarity) |
|
|
|
text_begin = int(aspect['to']) |
|
|
|
|
|
pre_O_tokens = nltk.word_tokenize(text[text_begin: ]) |
|
labels.extend(['O']*len(pre_O_tokens)) |
|
tokens.extend(pre_O_tokens) |
|
|
|
example['tokens'] = tokens |
|
example['ATESP_BIEOS_tags'] = labels |
|
|
|
ATESP_BIO_labels = [] |
|
for label in labels: |
|
ATESP_BIO_labels.append(label.replace('E-', 'I-').replace('S-', 'B-')) |
|
example['ATESP_BIO_tags'] = ATESP_BIO_labels |
|
|
|
|
|
ATE_BIEOS_labels = [] |
|
for label in labels: |
|
ATE_BIEOS_labels.append(label[0]) |
|
example['ATE_BIEOS_tags'] = ATE_BIEOS_labels |
|
|
|
ATE_BIO_labels = [] |
|
for label in ATESP_BIO_labels: |
|
ATE_BIO_labels.append(label[0]) |
|
example['ATE_BIO_tags'] = ATE_BIO_labels |
|
|
|
return example |