# Lint as: python3
"""REBEL"""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import re

import datasets
_DESCRIPTION = """\
REBEL-Portuguese is a Portuguese adaptation of the REBEL relation extraction dataset.
"""
_URL = "https://huggingface.co/datasets/ju-resplande/rebel-pt/resolve/main/pt.zip"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
# Raw string so the BibTeX accent escape {\'\i} stays literal.
_CITATION = r"""
@inproceedings{huguet-cabot-navigli-2021-rebel,
title = "REBEL: Relation Extraction By End-to-end Language generation",
author = "Huguet Cabot, Pere-Llu{\'\i}s and
Navigli, Roberto",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
month = nov,
year = "2021",
address = "Online and in the Barceló Bávaro Convention Centre, Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://github.com/Babelscape/rebel/blob/main/docs/EMNLP_2021_REBEL__Camera_Ready_.pdf",
}
"""
_HOMEPAGE = "https://github.com/ju-resplande/crocodile"
class RebelConfig(datasets.BuilderConfig):
"""BuilderConfig for REBEL."""
def __init__(self, **kwargs):
"""BuilderConfig for REBEL.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)
class Rebel(datasets.GeneratorBasedBuilder):
"""Rebel 1.0"""
BUILDER_CONFIGS = [
RebelConfig(
name="REBEL",
version=datasets.Version("1.0.0"),
description=_DESCRIPTION,
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"triplets": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
)
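    # A generated example has roughly this shape (illustrative values only,
    # not drawn from the actual data):
    #     {"id": "<uri>-0", "title": "<article title>", "context": "<sentence text>",
    #      "triplets": "<triplet> SUBJ <subj> OBJ <obj> REL"}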
def _split_generators(self, dl_manager):
if self.config.data_dir:
data_dir = self.config.data_dir
else:
data_dir = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(name='pt', gen_kwargs={"filepath": os.path.join(data_dir, "pt.jsonl")})
#datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "en_train.jsonl")}),
#datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir,"en_val.jsonl")}),
#datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir,"en_test.jsonl")}),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logging.info("generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
for id_, row in enumerate(f):
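                # Each line of the JSONL file is one JSON-serialized article with its text, entities, and triples.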
article = json.loads(row)
prev_len = 0
if len(article['triples']) == 0:
continue
count = 0
for text_paragraph in article['text'].split('\n'):
if len(text_paragraph) == 0:
continue
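                    # Split the paragraph into sentences and greedily accumulate them into a text window.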
sentences = re.split(r'(?<=[.])\s', text_paragraph)
text = ''
for sentence in sentences:
text += sentence + ' '
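                        # Keep extending the window while a sentence boundary falls inside an entity span.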
                        if any(entity['boundaries'][0] < len(text) + prev_len < entity['boundaries'][1] for entity in article['entities']):
                            continue
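                        # Entities whose spans end inside the current window, ordered by start offset.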
entities = sorted([entity for entity in article['entities'] if prev_len < entity['boundaries'][1] <= len(text)+prev_len], key=lambda tup: tup['boundaries'][0])
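                        # Linearize as "<triplet> SUBJ <subj> OBJ <obj> REL", with extra
                        # "<subj> OBJ <obj> REL" segments when one subject has several triples.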
decoder_output = '<triplet> '
for int_ent, entity in enumerate(entities):
                            triplets = sorted([triplet for triplet in article['triples'] if triplet['subject'] == entity and prev_len < triplet['subject']['boundaries'][1] <= len(text) + prev_len and prev_len < triplet['object']['boundaries'][1] <= len(text) + prev_len], key=lambda tup: tup['object']['boundaries'][0])
if len(triplets) == 0:
continue
decoder_output += entity['surfaceform'] + ' <subj> '
for triplet in triplets:
decoder_output += triplet['object']['surfaceform'] + ' <obj> ' + triplet['predicate']['surfaceform'] + ' <subj> '
decoder_output = decoder_output[:-len(' <subj> ')]
decoder_output += ' <triplet> '
decoder_output = decoder_output[:-len(' <triplet> ')]
count += 1
prev_len += len(text)
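                        # Skip windows that produced no linearized triplets.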
if len(decoder_output) == 0:
text = ''
continue
                        # Pad punctuation with spaces, then collapse repeated whitespace.
                        text = re.sub(r'([\[\].,!?()])', r' \1 ', text.replace('()', ''))
                        text = re.sub(r'\s{2,}', ' ', text)
yield article['docid'] + '-' + str(count), {
"title": article['title'],
"context": text,
"id": article['uri'] + '-' + str(count),
"triplets": decoder_output,
}
text = ''
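# Illustrative helper (an assumption, not part of the original script): one way
# to parse the linearized `triplets` string emitted above back into
# (subject, predicate, object) tuples. Usage sketch, assuming the dataset loads
# with the 'pt' split defined in _split_generators:
#
#     from datasets import load_dataset
#     example = load_dataset("ju-resplande/rebel-pt", split="pt")[0]
#     parse_triplets(example["triplets"])
def parse_triplets(linearized):
    """Parse "<triplet> SUBJ <subj> OBJ <obj> REL ..." into (subject, predicate, object) tuples."""
    triples = []
    for chunk in linearized.split('<triplet>'):
        chunk = chunk.strip()
        if not chunk:
            continue
        subject, *pairs = chunk.split('<subj>')
        for pair in pairs:
            if '<obj>' not in pair:
                continue
            obj, predicate = pair.split('<obj>', 1)
            triples.append((subject.strip(), predicate.strip(), obj.strip()))
    return triples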