import logging
import os
import re
import xml.etree.ElementTree as ET
from typing import Optional

import datasets

_CITATION = """\ |
|
@misc{solar3.0, |
|
title = {Developmental corpus {\v S}olar 3.0}, |
|
author = {Arhar Holdt, {\v S}pela and Rozman, Tadeja and Stritar Ku{\v c}uk, Mojca and Krek, Simon and Krap{\v s} Vodopivec, Irena and Stabej, Marko and Pori, Eva and Goli, Teja and Lavri{\v c}, Polona and Laskowski, Cyprian and Kocjan{\v c}i{\v c}, Polonca and Klemenc, Bojan and Krsnik, Luka and Kosem, Iztok}, |
|
url = {http://hdl.handle.net/11356/1589}, |
|
note = {Slovenian language resource repository {CLARIN}.{SI}}, |
|
year = {2022} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
Šolar is a developmental corpus of 5485 school texts (e.g., essays), written by students in Slovenian secondary schools |
|
(age 15-19) and pupils in the 7th-9th grade of primary school (13-15), with a small percentage also from the 6th grade. |
|
Part of the corpus (2,094 texts) is annotated with teachers' corrections using a system of labels described in the |
|
document available at https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Smernice-za-oznacevanje-korpusa-Solar_V1.1.pdf (in Slovenian). |
|
""" |
|
|
|
_HOMEPAGE = "http://hdl.handle.net/11356/1589" |
|
|
|
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)" |
|
|
|
_URLS = { |
|
"solar_tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Solar.TEI.zip" |
|
} |
|
|
|
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}" |
|
|
|
|
|
def namespace(element):
    """Extracts the XML namespace (the leading "{...}" part) of an element's tag."""
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''


def resolve_element(tag_el, ne_tag: Optional[str] = "O"):
    """Flattens a <w>, <pc> or <seg> element into a list of (id, form, lemma, msd, ne_tag) tuples."""
    if not tag_el.tag.endswith(("w", "pc", "seg")):
        logging.info(f"Skipping tag {tag_el.tag}")
        return []

    if tag_el.tag.endswith(("w", "pc")):
        form = tag_el.text.strip()
        # Punctuation (<pc>) has no lemma attribute, so its surface form doubles as the lemma.
        lemma = tag_el.text.strip() if tag_el.tag.endswith("pc") else tag_el.attrib["lemma"]
        msd = tag_el.attrib["ana"]
        ret_ne_tag = ne_tag
        id_tag = tag_el.attrib[f"{XML_NAMESPACE}id"]

        return [(id_tag, form, lemma, msd, ret_ne_tag)]

    elif tag_el.tag.endswith("seg"):
        # A <seg> groups tokens under a named-entity label (its subtype); recurse into the
        # children and propagate the upper-cased label to each of them.
        anns = []
        ret_ne_tag = tag_el.attrib["subtype"].upper()
        for curr_child in tag_el:
            anns.extend(resolve_element(curr_child, ne_tag=ret_ne_tag))

        return anns


def extract_sent_id(tok_id):
    """Maps a token ID (possibly prefixed with "#") to the ID of the sentence it belongs to."""
    _tok_id = tok_id[1:] if tok_id.startswith("#") else tok_id
    return ".".join(_tok_id.split(".")[:-1])
def find_involved_sents(correction_group_el):
    """Collects the source and target sentence IDs referenced by a group of correction links."""
    src_sent_ids = set()
    tgt_sent_ids = set()
    for _curr_corr in correction_group_el:
        sent_ids = list(map(lambda _tok_id: extract_sent_id(_tok_id),
                            _curr_corr.attrib["target"].split(" ")))

        for _s_id in sent_ids:
            # Target (corrected) sentence IDs are distinguished by a "t" in the ID.
            if "t" in _s_id:
                tgt_sent_ids.add(_s_id)
            else:
                src_sent_ids.add(_s_id)

    return sorted(list(src_sent_ids)), sorted(list(tgt_sent_ids))


def read_data(data_path):
    """Parses a TEI file and returns a mapping from sentence ID to its token-level annotations."""
    data = {}
    tree = ET.parse(data_path)
    root = tree.getroot()
    NAMESPACE = namespace(root)

    for curr_text in root.iterfind(f".//{NAMESPACE}div"):
        id_text = curr_text.attrib[f"{XML_NAMESPACE}id"]
        bibl_el = curr_text.find(f"{NAMESPACE}bibl")
        if bibl_el is None:
            text_title = "Unknown_title"
            logging.warning(f"The following text does not have a 'bibl' element: {curr_text.attrib}. "
                            f"Setting title to 'Unknown_title'")
            is_manually_validated = False
        else:
            text_title = bibl_el.attrib["n"]
            note_el = bibl_el.find(f"{NAMESPACE}note")
            is_manually_validated = note_el.text == "DA"

        for idx_par, curr_par in enumerate(curr_text.iterfind(f".//{NAMESPACE}p")):
            for idx_sent, curr_sent in enumerate(curr_par.iterfind(f".//{NAMESPACE}s")):
                id_sent = curr_sent.attrib[f"{XML_NAMESPACE}id"]
                ids, forms, lemmas, msds, nes = [], [], [], [], []
                for curr_el in curr_sent:
                    curr_annotations = resolve_element(curr_el)
                    for curr_ann in curr_annotations:
                        ids.append(curr_ann[0])
                        forms.append(curr_ann[1])
                        lemmas.append(curr_ann[2])
                        msds.append(curr_ann[3])
                        nes.append(curr_ann[4])

                data[id_sent] = {
                    "id_doc": id_text,
                    "doc_title": text_title,
                    "id_token": ids, "form": forms, "lemma": lemmas, "msd": msds, "ne_tag": nes,
                    "is_manually_validated": is_manually_validated
                }

    return data
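
# A sketch of the structure returned by read_data (field names follow the code above; the
# sentence and document IDs are illustrative):
#
#     {"doc1.1.1": {"id_doc": "doc1", "doc_title": "...",
#                   "id_token": [...], "form": [...], "lemma": [...], "msd": [...],
#                   "ne_tag": [...], "is_manually_validated": True}}

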
class Solar3(datasets.GeneratorBasedBuilder):
    """Šolar is a developmental corpus of school texts (e.g., essays), annotated with metadata and (partially)
    with teachers' corrections."""

    VERSION = datasets.Version("3.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id_doc": datasets.Value("string"),
                "doc_title": datasets.Value("string"),
                "is_manually_validated": datasets.Value("bool"),
                "id_src_tokens": datasets.Sequence(datasets.Value("string")),
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "id_tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "corrections": [
                    {
                        "idx_src": datasets.Sequence(datasets.Value("int32")),
                        "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                        "corr_types": datasets.Sequence(datasets.Value("string"))
                    }
                ]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["solar_tei"]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "source_path": os.path.join(data_dir, "Solar.TEI", "solar-orig.xml"),
                    "target_path": os.path.join(data_dir, "Solar.TEI", "solar-corr.xml"),
                    "links_path": os.path.join(data_dir, "Solar.TEI", "solar-errs.xml")
                }
            )
        ]

    def _generate_examples(self, source_path, target_path, links_path):
        source_data = read_data(source_path)
        target_data = read_data(target_path)

        data = ET.parse(links_path)
        root = data.getroot()
        NAMESPACE = namespace(root)

        for idx_corr, corrected_sent in enumerate(root.iterfind(f"{NAMESPACE}linkGrp")):
            involved_sents = corrected_sent.attrib["corresp"].split(" ")

            involved_src_sents, involved_tgt_sents = find_involved_sents(corrected_sent)

            id_doc, doc_title, is_manually_validated = None, None, False
            src_sent_data, tgt_sent_data = {}, {}
            tok2position = {}
            assert len(involved_src_sents) > 0 or len(involved_tgt_sents) > 0

            if len(involved_src_sents) > 0:
                # Concatenate the token-level annotations of all involved source sentences.
                src_sent_data = source_data[involved_src_sents[0]]
                for src_sent_id in involved_src_sents[1:]:
                    curr_sent_data = source_data[src_sent_id]
                    src_sent_data["id_token"].extend(curr_sent_data["id_token"])
                    src_sent_data["form"].extend(curr_sent_data["form"])
                    src_sent_data["lemma"].extend(curr_sent_data["lemma"])
                    src_sent_data["msd"].extend(curr_sent_data["msd"])
                    src_sent_data["ne_tag"].extend(curr_sent_data["ne_tag"])

                id_doc = src_sent_data["id_doc"]
                doc_title = src_sent_data["doc_title"]
                is_manually_validated |= src_sent_data["is_manually_validated"]
                for _pos, _tok in enumerate(src_sent_data["id_token"]):
                    tok2position[_tok] = _pos

            if len(involved_tgt_sents) > 0:
                # Do the same for the target (corrected) sentences.
                tgt_sent_data = target_data[involved_tgt_sents[0]]
                for tgt_sent_id in involved_tgt_sents[1:]:
                    curr_sent_data = target_data[tgt_sent_id]
                    tgt_sent_data["id_token"].extend(curr_sent_data["id_token"])
                    tgt_sent_data["form"].extend(curr_sent_data["form"])
                    tgt_sent_data["lemma"].extend(curr_sent_data["lemma"])
                    tgt_sent_data["msd"].extend(curr_sent_data["msd"])
                    tgt_sent_data["ne_tag"].extend(curr_sent_data["ne_tag"])

                id_doc = tgt_sent_data["id_doc"]
                doc_title = tgt_sent_data["doc_title"]
                is_manually_validated |= tgt_sent_data["is_manually_validated"]
                for _pos, _tok in enumerate(tgt_sent_data["id_token"]):
                    tok2position[_tok] = _pos

            corr_data = []
            for token_info in corrected_sent.findall(f"{NAMESPACE}link"):
                connections = token_info.attrib["target"].split(" ")

                corrections = token_info.attrib["type"]
                if corrections == "ID":
                    # Links typed "ID" carry no correction label, so skip them.
                    continue

                src_inds, tgt_inds = [], []
                corr_types = []
                for curr_corr in corrections.split("|"):
                    corr_types.append(curr_corr)

                for curr_tok in connections:
                    # Map each referenced token to its position inside the concatenated sentence.
                    idx_tok = tok2position[curr_tok[1:]]
                    if "t" in curr_tok:
                        tgt_inds.append(idx_tok)
                    else:
                        src_inds.append(idx_tok)

                corr_data.append({"idx_src": src_inds, "idx_tgt": tgt_inds, "corr_types": corr_types})

            yield idx_corr, {
                # The final character of id_doc distinguishes the source/target variant of the
                # same text, so it is stripped to obtain a shared document ID.
                "id_doc": id_doc[:-1],
                "doc_title": doc_title,
                "is_manually_validated": is_manually_validated,
                "id_src_tokens": src_sent_data.get("id_token", []),
                "src_tokens": src_sent_data.get("form", []),
                "id_tgt_tokens": tgt_sent_data.get("id_token", []),
                "tgt_tokens": tgt_sent_data.get("form", []),
                "corrections": corr_data
            }
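

# Minimal usage sketch (not part of the original script; whether loading from a local
# script is enabled depends on the installed `datasets` version): point load_dataset at
# this file and inspect one aligned source/target pair with its corrections.
if __name__ == "__main__":
    solar = datasets.load_dataset(__file__, split="train")
    first = solar[0]
    print(first["src_tokens"])
    print(first["tgt_tokens"])
    print(first["corrections"])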