"""ssj500k is a partially annotated training corpus for multiple syntactic and semantic tasks."""
import re
import xml.etree.ElementTree as ET
import os
import datasets
_CITATION = """\
@InProceedings{krek2020ssj500k,
title = {The ssj500k Training Corpus for Slovene Language Processing},
author={Krek, Simon and Erjavec, Tomaž and Dobrovoljc, Kaja and Gantar, Polona and Arhar Holdt, Špela and Čibej, Jaka and Brank, Janez},
booktitle={Proceedings of the Conference on Language Technologies and Digital Humanities},
year={2020},
pages={24-33}
}
"""
_DESCRIPTION = """\
The ssj500k training corpus contains about 500 000 tokens manually annotated on the levels of tokenisation,
sentence segmentation, morphosyntactic tagging, and lemmatisation. About half of the corpus is also manually annotated
with syntactic dependencies, named entities, and verbal multiword expressions. About a quarter of the corpus is also
annotated with semantic role labels. The morphosyntactic tags and syntactic dependencies are included both in the
JOS/MULTEXT-East framework, as well as in the framework of Universal Dependencies.
"""
_HOMEPAGE = "http://hdl.handle.net/11356/1434"
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
_URLS = {
"ssj500k-en.tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1434/ssj500k-en.TEI.zip"
}
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
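# Sentinel values used throughout the loader: IDX_ROOT_WORD marks the head index of a
# sentence's syntactic root, while IDX_NA_HEAD and NA_TAG stand in for head indices and
# labels of sentences that lack the corresponding annotation layer.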
IDX_ROOT_WORD = -1
IDX_NA_HEAD = -2
NA_TAG = "N/A"
def namespace(element):
# https://stackoverflow.com/a/12946675
m = re.match(r'\{.*\}', element.tag)
return m.group(0) if m else ''
def word_information(w_or_pc_el):
    """Extract the token ID, surface form, lemma, and MSD tag from a <w> (word) or <pc> (punctuation) element."""
    id_word = w_or_pc_el.attrib[f"{XML_NAMESPACE}id"]
    form = w_or_pc_el.text.strip()
    msd = w_or_pc_el.attrib["msd"]
    if w_or_pc_el.tag.endswith("pc"):
        # Punctuation carries no lemma attribute; use the symbol itself as the lemma
        lemma = form
    else:  # word - <w>
        lemma = w_or_pc_el.attrib["lemma"]
    return id_word, form, lemma, msd
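# For orientation, token elements in the TEI body look roughly like the following
# (an illustrative sketch, not copied verbatim from the corpus):
#   <w xml:id="ssj1.1.1.t1" lemma="danes" msd="...">Danes</w>
#   <pc xml:id="ssj1.1.1.t5" msd="...">.</pc>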
class Ssj500k(datasets.GeneratorBasedBuilder):
"""ssj500k is a partially annotated training corpus for multiple syntactic and semantic tasks."""
VERSION = datasets.Version("2.3.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="all_data", version=VERSION,
description="The entire dataset with all annotations, in some cases partially missing."),
datasets.BuilderConfig(name="named_entity_recognition", version=VERSION,
description="The data subset with annotated named entities."),
datasets.BuilderConfig(name="dependency_parsing_ud", version=VERSION,
description="The data subset with annotated dependencies (UD schema)."),
datasets.BuilderConfig(name="dependency_parsing_jos", version=VERSION,
description="The data subset with annotated dependencies (JOS schema)."),
datasets.BuilderConfig(name="semantic_role_labeling", version=VERSION,
description="The data subset with annotated semantic roles."),
datasets.BuilderConfig(name="multiword_expressions", version=VERSION,
description="The data subset with annotated named entities.")
]
DEFAULT_CONFIG_NAME = "all_data"
def _info(self):
features_dict = {
"id_doc": datasets.Value("string"),
"idx_par": datasets.Value("int32"),
"idx_sent": datasets.Value("int32"),
"id_words": datasets.Sequence(datasets.Value("string")),
"words": datasets.Sequence(datasets.Value("string")),
"lemmas": datasets.Sequence(datasets.Value("string")),
"msds": datasets.Sequence(datasets.Value("string"))
}
ret_all_data = self.config.name == "all_data"
if ret_all_data:
features_dict.update({
"has_ne_ann": datasets.Value("bool"), "has_ud_dep_ann": datasets.Value("bool"),
"has_jos_dep_ann": datasets.Value("bool"), "has_srl_ann": datasets.Value("bool"),
"has_mwe_ann": datasets.Value("bool")
})
if ret_all_data or self.config.name == "named_entity_recognition":
features_dict["ne_tags"] = datasets.Sequence(datasets.Value("string"))
if ret_all_data or self.config.name == "dependency_parsing_ud":
features_dict.update({
"ud_dep_head": datasets.Sequence(datasets.Value("int32")),
"ud_dep_rel": datasets.Sequence(datasets.Value("string"))
})
if ret_all_data or self.config.name == "dependency_parsing_jos":
features_dict.update({
"jos_dep_head": datasets.Sequence(datasets.Value("int32")),
"jos_dep_rel": datasets.Sequence(datasets.Value("string"))
})
if ret_all_data or self.config.name == "semantic_role_labeling":
features_dict.update({
"srl_info": [{
"idx_arg": datasets.Value("uint32"),
"idx_head": datasets.Value("uint32"),
"role": datasets.Value("string")
}]
})
if ret_all_data or self.config.name == "multiword_expressions":
features_dict["mwe_info"] = [{
"type": datasets.Value("string"),
"word_indices": datasets.Sequence(datasets.Value("uint32"))
}]
features = datasets.Features(features_dict)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
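    # For illustration, an example yielded under the "named_entity_recognition" config has roughly
    # this shape (the values below are made up, not taken from the corpus):
    #   {"id_doc": "ssj1", "idx_par": 0, "idx_sent": 0,
    #    "id_words": ["ssj1.1.1.t1", ...], "words": ["Danes", ...], "lemmas": ["danes", ...],
    #    "msds": ["..."], "ne_tags": ["O", ...]}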
def _split_generators(self, dl_manager):
urls = _URLS["ssj500k-en.tei"]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"file_path": os.path.join(data_dir, "ssj500k-en.TEI", "ssj500k-en.body.xml")}
)
]
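    # Note: the TEI release is a single file without official splits, so this loader exposes the
    # whole corpus as one `train` split; any dev/test partitioning has to be done downstream.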
def _generate_examples(self, file_path):
ret_all_data = self.config.name == "all_data"
ret_ne_only = self.config.name == "named_entity_recognition"
ret_ud_dep_only = self.config.name == "dependency_parsing_ud"
ret_jos_dep_only = self.config.name == "dependency_parsing_jos"
ret_srl_only = self.config.name == "semantic_role_labeling"
ret_mwe_only = self.config.name == "multiword_expressions"
        doc_tree = ET.parse(file_path)
        root = doc_tree.getroot()
NAMESPACE = namespace(root)
idx_example = 0
for idx_doc, curr_doc in enumerate(root.iterfind(f"{NAMESPACE}div")):
id_doc = curr_doc.attrib[f"{XML_NAMESPACE}id"]
doc_metadata = {}
metadata_el = curr_doc.find(f"{NAMESPACE}bibl")
if metadata_el is not None:
for child in metadata_el:
if child.tag.endswith("term"):
if child.attrib[f"{XML_NAMESPACE}lang"] != "en":
continue
parts = child.text.strip().split(" / ")
attr_name = parts[0]
attr_value = " / ".join(parts[1:])
elif child.tag.endswith("note"):
attr_name = child.attrib["type"]
attr_value = child.text.strip()
else:
attr_name = child.tag[len(NAMESPACE):]
attr_value = child.text.strip()
doc_metadata[attr_name] = attr_value
            # IMPORTANT: This is a workaround, because the data itself does not mark which documents carry
            # NE, MWE, and SRL annotations. The numbers of annotated documents are taken from the paper in
            # `_CITATION` (Table 1).
has_ne = idx_doc < 498
has_mwe = idx_doc < 754
has_srl = idx_doc < 228
for idx_par, curr_par in enumerate(curr_doc.iterfind(f"{NAMESPACE}p")):
for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
id2position = {}
id_words, words, lemmas, msds = [], [], [], []
# Optional (partial) annotations
named_ents = []
has_ud_dep, ud_dep_heads, ud_dep_rels = False, [], []
has_jos_dep, jos_dep_heads, jos_dep_rels = False, [], []
srl_info = []
mwe_info = []
# Note: assuming that all words of a sentence are observed before processing the optional annotations
# i.e., that <w> and <pc> elements come first, then the optional <linkGroup> annotations
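                    # Dependency, SRL, and MWE annotations arrive as <linkGrp> elements whose <link> children
                    # look roughly like (an illustrative sketch): <link ana="ud-syn:nsubj" target="#<head_id> #<dependant_id>"/>
                    # where the first target is the head token; MWE links may list more than two target tokens.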
for curr_el in curr_sent:
# Words
if curr_el.tag.endswith(("w", "pc")):
id_word, word, lemma, msd = word_information(curr_el)
id2position[id_word] = len(id2position)
id_words.append(id_word)
words.append(word)
lemmas.append(lemma)
msds.append(msd)
named_ents.append("O")
# Named entities
elif curr_el.tag.endswith("seg"):
has_ne = True
ne_type = curr_el.attrib["subtype"] # {"per", "loc", "org", "misc", "deriv-per"}
if ne_type.startswith("deriv-"):
ne_type = ne_type[len("deriv-"):]
ne_type = ne_type.upper()
num_ne_tokens = 0
for curr_child in curr_el:
num_ne_tokens += 1
id_word, word, lemma, msd = word_information(curr_child)
id2position[id_word] = len(id2position)
id_words.append(id_word)
words.append(word)
lemmas.append(lemma)
msds.append(msd)
assert num_ne_tokens > 0
nes = [f"B-{ne_type.upper()}"] + [f"I-{ne_type.upper()}" for _ in range(num_ne_tokens - 1)]
named_ents.extend(nes)
elif curr_el.tag.endswith("linkGrp"):
# UD dependencies
if curr_el.attrib["type"] == "UD-SYN":
has_ud_dep = True
ud_dep_heads = [None for _ in range(len(words))]
ud_dep_rels = [None for _ in range(len(words))]
for link in curr_el:
dep_rel = link.attrib["ana"].split(":")[-1]
id_head_word, id_dependant = tuple(map(
lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
link.attrib["target"].split(" ")
))
idx_head_word = id2position[id_head_word] if dep_rel != "root" else IDX_ROOT_WORD
idx_dep_word = id2position[id_dependant]
ud_dep_heads[idx_dep_word] = idx_head_word
ud_dep_rels[idx_dep_word] = dep_rel
# JOS dependencies
elif curr_el.attrib["type"] == "JOS-SYN":
has_jos_dep = True
jos_dep_heads = [None for _ in range(len(words))]
jos_dep_rels = [None for _ in range(len(words))]
for link in curr_el:
dep_rel = link.attrib["ana"].split(":")[-1]
id_head_word, id_dependant = tuple(map(
lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
link.attrib["target"].split(" ")
))
idx_head_word = id2position[id_head_word] if dep_rel != "Root" else IDX_ROOT_WORD
idx_dep_word = id2position[id_dependant]
jos_dep_heads[idx_dep_word] = idx_head_word
jos_dep_rels[idx_dep_word] = dep_rel
# Semantic role labels
elif curr_el.attrib["type"] == "SRL":
for link in curr_el:
sem_role = link.attrib["ana"].split(":")[-1]
id_head_word, id_arg_word = tuple(map(
lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
link.attrib["target"].split(" ")
))
idx_head_word = id2position[id_head_word]
idx_arg_word = id2position[id_arg_word]
srl_info.append({
"idx_arg": idx_arg_word,
"idx_head": idx_head_word,
"role": sem_role
})
# Multi-word expressions
elif curr_el.attrib["type"] == "MWE":
has_mwe = True
# Follow the KOMET/G-KOMET format, i.e. list of {"type": ..., "word_indices": ...}
for link in curr_el:
mwe_type = link.attrib["ana"].split(":")[-1]
involved_words = list(map(
lambda _t_id: _t_id[1:] if _t_id.startswith("#") else _t_id,
link.attrib["target"].split(" "))
)
word_indices = [id2position[_curr_tok] for _curr_tok in involved_words]
mwe_info.append({"type": mwe_type, "word_indices": word_indices})
                # The selected config expects only annotated instances; skip this sentence if it lacks the required annotations
if (ret_ne_only and not has_ne) or (ret_ud_dep_only and not has_ud_dep) or \
(ret_jos_dep_only and not has_jos_dep) or (ret_srl_only and not has_srl) or \
(ret_mwe_only and not has_mwe):
continue
instance_dict = {
"id_doc": id_doc,
"idx_par": idx_par,
"idx_sent": idx_sent,
"id_words": id_words,
"words": words,
"lemmas": lemmas,
"msds": msds
}
if ret_ne_only or ret_all_data:
if not has_ne:
named_ents = [NA_TAG for _ in range(len(words))]
instance_dict["ne_tags"] = named_ents
if ret_ud_dep_only or ret_all_data:
if not has_ud_dep:
ud_dep_heads = [IDX_NA_HEAD for _ in range(len(words))]
ud_dep_rels = [NA_TAG for _ in range(len(words))]
instance_dict["ud_dep_head"] = ud_dep_heads
instance_dict["ud_dep_rel"] = ud_dep_rels
if ret_jos_dep_only or ret_all_data:
if not has_jos_dep:
jos_dep_heads = [IDX_NA_HEAD for _ in range(len(words))]
jos_dep_rels = [NA_TAG for _ in range(len(words))]
instance_dict["jos_dep_head"] = jos_dep_heads
instance_dict["jos_dep_rel"] = jos_dep_rels
if ret_srl_only or ret_all_data:
instance_dict["srl_info"] = srl_info
if ret_mwe_only or ret_all_data:
instance_dict["mwe_info"] = mwe_info
                # When all data is returned, some instances are unannotated or partially annotated, so mark each instance with annotation flags
if ret_all_data:
instance_dict.update({
"has_ne_ann": has_ne, "has_ud_dep_ann": has_ud_dep, "has_jos_dep_ann": has_jos_dep,
"has_srl_ann": has_srl, "has_mwe_ann": has_mwe
})
yield idx_example, instance_dict
idx_example += 1
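

if __name__ == "__main__":
    # A minimal usage sketch (assumes the installed `datasets` version still supports loading local
    # dataset scripts and that the CLARIN.SI URL above is reachable); running it downloads and
    # extracts the TEI archive, then prints the tokens and NE tags of the first example.
    ner_data = datasets.load_dataset(__file__, "named_entity_recognition", split="train")
    first_example = ner_data[0]
    print(first_example["words"])
    print(first_example["ne_tags"])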