# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Multilingual SemEval2016 Task5 Reviews Corpus"""
from xml.dom.minidom import parse

import datasets
import nltk

# nltk.word_tokenize needs the "punkt" tokenizer data; fetch it once with:
# nltk.download('punkt')

_CITATION = """\
@inproceedings{pontiki2016semeval,
title={Semeval-2016 task 5: Aspect based sentiment analysis},
author={Pontiki, Maria and Galanis, Dimitrios and Papageorgiou, Haris and Androutsopoulos, Ion and Manandhar, Suresh and Al-Smadi, Mohammad and Al-Ayyoub, Mahmoud and Zhao, Yanyan and Qin, Bing and De Clercq, Orph{\'e}e and others},
booktitle={International workshop on semantic evaluation},
pages={19--30},
year={2016}
}
"""
_LICENSE = """\
See the homepage URL for license details.
"""
_DESCRIPTION = """\
A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.
"""
_CONFIG = [
    # restaurants domain
    "restaurants_english",
    "restaurants_french",
    "restaurants_spanish",
    "restaurants_russian",
    "restaurants_dutch",
    "restaurants_turkish",
    # hotels domain
    "hotels_arabic",
    # consumer electronics domain
    "mobilephones_dutch",
    "mobilephones_chinese",
    "laptops_english",
    "digitalcameras_chinese",
]
_VERSION = "0.1.0"
_HOMEPAGE_URL = "https://alt.qcri.org/semeval2016/task5/index.php?id=data-and-tools/"
_DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5Corrected/{split}/{domain}_{split}_{lang}.xml"
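# For example, the "restaurants_english" train split resolves to:
# https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5Corrected/train/restaurants_train_english.xml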
class SemEval2016Task5NLTKConfig(datasets.BuilderConfig):
    """BuilderConfig for SemEval2016Task5NLTK."""

    def __init__(self, _CONFIG, **kwargs):
        super(SemEval2016Task5NLTKConfig, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.configs = _CONFIG
class SemEval2016Task5NLTK(datasets.GeneratorBasedBuilder):
    """The Multilingual SemEval2016 ABSA Corpus"""

    BUILDER_CONFIGS = [
        SemEval2016Task5NLTKConfig(
            name="All",
            _CONFIG=_CONFIG,
            description="A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.",
        )
    ] + [
        SemEval2016Task5NLTKConfig(
            name=config,
            _CONFIG=[config],
            description=f"{config} of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis",
        )
        for config in _CONFIG
    ]
    BUILDER_CONFIG_CLASS = SemEval2016Task5NLTKConfig
    DEFAULT_CONFIG_NAME = "restaurants_english"
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'text': datasets.Value(dtype='string'),
                    'opinions': [
                        {
                            'category': datasets.Value(dtype='string'),
                            'from': datasets.Value(dtype='string'),
                            'polarity': datasets.Value(dtype='string'),
                            'target': datasets.Value(dtype='string'),
                            'to': datasets.Value(dtype='string'),
                        }
                    ],
                    'tokens': datasets.Sequence(datasets.Value(dtype='string')),
                    'ATESP_BIEOS_tags': datasets.Sequence(datasets.ClassLabel(num_classes=17, names=['B-NEG', 'B-NEU', 'B-POS', 'B-CON', 'I-NEG', 'I-NEU', 'I-POS', 'I-CON', 'E-NEG', 'E-NEU', 'E-POS', 'E-CON', 'S-NEG', 'S-NEU', 'S-POS', 'S-CON', 'O'])),
                    'ATESP_BIO_tags': datasets.Sequence(datasets.ClassLabel(num_classes=9, names=['B-NEG', 'B-NEU', 'B-POS', 'B-CON', 'I-NEG', 'I-NEU', 'I-POS', 'I-CON', 'O'])),
                    'ATE_BIEOS_tags': datasets.Sequence(datasets.ClassLabel(num_classes=5, names=['B', 'I', 'E', 'O', 'S'])),
                    'ATE_BIO_tags': datasets.Sequence(datasets.ClassLabel(num_classes=3, names=['B', 'I', 'O'])),
                    'domain': datasets.Value(dtype='string'),
                    'reviewId': datasets.Value(dtype='string'),
                    'sentenceId': datasets.Value(dtype='string'),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )
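    # Illustrative tagging example (hypothetical sentence): for the text
    # "The pizza was great" with a positive opinion on "pizza", the fields are
    #   tokens           -> ['The', 'pizza', 'was', 'great']
    #   ATESP_BIEOS_tags -> ['O', 'S-POS', 'O', 'O']
    #   ATESP_BIO_tags   -> ['O', 'B-POS', 'O', 'O']
    #   ATE_BIEOS_tags   -> ['O', 'S', 'O', 'O']
    #   ATE_BIO_tags     -> ['O', 'B', 'O', 'O']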
    def _split_generators(self, dl_manager):
        domain_list = [config.split('_')[0] for config in self.config.configs]
        lang_list = [config.split('_')[1] for config in self.config.configs]
        train_urls = [_DOWNLOAD_URL.format(split="train", domain=domain, lang=lang) for domain, lang in zip(domain_list, lang_list)]
        dev_urls = [_DOWNLOAD_URL.format(split="trial", domain=domain, lang=lang) for domain, lang in zip(domain_list, lang_list)]
        test_urls = [_DOWNLOAD_URL.format(split="test", domain=domain, lang=lang) for domain, lang in zip(domain_list, lang_list)]
        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, "lang_list": lang_list, "domain_list": domain_list}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, "lang_list": lang_list, "domain_list": domain_list}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, "lang_list": lang_list, "domain_list": domain_list}),
        ]
    def _generate_examples(self, file_paths, lang_list, domain_list):
        assert len(file_paths) == len(lang_list) == len(domain_list)
        row_count = 0
        for file_path, domain, language in zip(file_paths, domain_list, lang_list):
            semEvalDataset = SemEvalXMLDataset(file_path, language, domain)
            for example in semEvalDataset.SentenceWithOpinions:
                yield row_count, example
                row_count += 1
# Input: path to a SemEval XML file.
# Output: a dataset whose examples each contain (reviewId, sentenceId, text, opinions),
# where `opinions` is a list holding the details of each individual opinion.
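# The parser below assumes the standard SemEval-2016 Task 5 XML layout; the id
# and offsets here are hypothetical:
# <sentence id="1004293:0">
#     <text>The pizza was great.</text>
#     <Opinions>
#         <Opinion target="pizza" category="FOOD#QUALITY" polarity="positive" from="4" to="9"/>
#     </Opinions>
# </sentence>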
class SemEvalXMLDataset:
    def __init__(self, file_name, language, domain):
        # Build SentenceWithOpinions: a list of examples, each holding
        # (reviewId, sentenceId, text, opinions).
        self.SentenceWithOpinions = []
        self.xml_path = file_name
        self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence')
        for sentenceXml in self.sentenceXmlList:
            reviewId = sentenceXml.getAttribute("id").split(':')[0]
            sentenceId = sentenceXml.getAttribute("id")
            if len(sentenceXml.getElementsByTagName("text")[0].childNodes) < 1:
                # skip sentences with an empty <text> element
                continue
            text = sentenceXml.getElementsByTagName("text")[0].childNodes[0].nodeValue
            OpinionXmlList = sentenceXml.getElementsByTagName("Opinion")
            Opinions = []
            for opinionXml in OpinionXmlList:
                # a sentence may carry no <Opinion> elements at all
                target = opinionXml.getAttribute("target")
                category = opinionXml.getAttribute("category")
                polarity = opinionXml.getAttribute("polarity")
                from_ = opinionXml.getAttribute("from")
                to = opinionXml.getAttribute("to")
                opinionDict = {
                    "target": target,
                    "category": category,
                    "polarity": polarity,
                    "from": from_,
                    "to": to,
                }
                Opinions.append(opinionDict)
            example = {
                "text": text,
                "opinions": Opinions,
                "domain": domain,
                "reviewId": reviewId,
                "sentenceId": sentenceId,
            }
            example = addTokenAndLabel(example)
            self.SentenceWithOpinions.append(example)
def clearOpinion(example):
    opinions = example['opinions']
    skipNullOpinions = []
    # drop NULL / empty-target opinions and zero-length spans
    for opinion in opinions:
        target = opinion['target']
        from_ = opinion['from']
        to = opinion['to']
        if target.lower() == 'null' or target == '' or from_ == to:
            continue
        skipNullOpinions.append(opinion)
    # drop duplicate opinions: sort by start offset (ascending), then keep an
    # opinion only if both of its boundaries differ from the previous kept one
    skipNullOpinions.sort(key=lambda x: int(x['from']))
    UniOpinions = []
    for opinion in skipNullOpinions:
        if len(UniOpinions) < 1:
            UniOpinions.append(opinion)
        else:
            if opinion['from'] != UniOpinions[-1]['from'] and opinion['to'] != UniOpinions[-1]['to']:
                UniOpinions.append(opinion)
    return UniOpinions
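# Illustrative example (hypothetical offsets): given opinions
#   [{'target': 'NULL',  'from': '0', 'to': '0', ...},
#    {'target': 'pizza', 'from': '4', 'to': '9', ...},
#    {'target': 'pizza', 'from': '4', 'to': '9', ...}]
# clearOpinion returns a single {'target': 'pizza', 'from': '4', 'to': '9', ...}.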
def addTokenAndLabel(example):
    tokens = []
    labels = []
    text = example['text']
    UniOpinions = clearOpinion(example)
    text_begin = 0
    for aspect in UniOpinions:
        # 'positive' -> 'POS', 'negative' -> 'NEG', 'neutral' -> 'NEU', 'conflict' -> 'CON'
        polarity = aspect['polarity'][:3].upper()
        # tokens before the aspect span are tagged 'O'
        pre_O_tokens = nltk.word_tokenize(text[text_begin: int(aspect['from'])])
        tokens.extend(pre_O_tokens)
        labels.extend(['O'] * len(pre_O_tokens))
        # tokens inside the aspect span get B/I/E/S boundary tags plus the polarity
        BIES_tokens = nltk.word_tokenize(text[int(aspect['from']): int(aspect['to'])])
        tokens.extend(BIES_tokens)
        assert len(BIES_tokens) > 0, f'error in BIES_tokens length: {tokens}'
        if len(BIES_tokens) == 1:
            labels.append('S-' + polarity)
        elif len(BIES_tokens) == 2:
            labels.append('B-' + polarity)
            labels.append('E-' + polarity)
        else:
            labels.append('B-' + polarity)
            labels.extend(['I-' + polarity] * (len(BIES_tokens) - 2))
            labels.append('E-' + polarity)
        text_begin = int(aspect['to'])
    # tokens after the last aspect span are tagged 'O'
    pre_O_tokens = nltk.word_tokenize(text[text_begin:])
    labels.extend(['O'] * len(pre_O_tokens))
    tokens.extend(pre_O_tokens)
    example['tokens'] = tokens
    example['ATESP_BIEOS_tags'] = labels
    # BIEOS -> BIO: map E to I and S to B
    ATESP_BIO_labels = [label.replace('E-', 'I-').replace('S-', 'B-') for label in labels]
    example['ATESP_BIO_tags'] = ATESP_BIO_labels
    # aspect-term-only tags: keep just the boundary letter, drop the polarity
    example['ATE_BIEOS_tags'] = [label[0] for label in labels]
    example['ATE_BIO_tags'] = [label[0] for label in ATESP_BIO_labels]
    return example
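
# A minimal smoke test of the labelling pipeline on a toy example. The values
# below are hypothetical; running it assumes `nltk` and its "punkt" tokenizer
# data are installed. The dataset itself would normally be loaded with
# datasets.load_dataset, pointing at this script and a config name such as
# "restaurants_english".
if __name__ == "__main__":
    demo = {
        "text": "The pizza was great.",
        "opinions": [
            {"target": "pizza", "category": "FOOD#QUALITY",
             "polarity": "positive", "from": "4", "to": "9"}
        ],
        "domain": "restaurants",
        "reviewId": "demo",
        "sentenceId": "demo:0",
    }
    demo = addTokenAndLabel(demo)
    print(demo["tokens"])            # ['The', 'pizza', 'was', 'great', '.']
    print(demo["ATESP_BIEOS_tags"])  # ['O', 'S-POS', 'O', 'O', 'O']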