import xml.etree.ElementTree as ET

import datasets
import pandas as pd
from huggingface_hub import hf_hub_url

logger = datasets.logging.get_logger(__name__)

class LaCourConfig(datasets.BuilderConfig):
    """BuilderConfig for the LaCour dataset."""

    def __init__(self, **kwargs):
        super(LaCourConfig, self).__init__(**kwargs)

class LaCourDataset(datasets.GeneratorBasedBuilder):
    """
    Dataset builder for LaCour.

    The "transcripts" configuration parses hearing transcripts from XML files;
    the "documents" configuration loads the linked documents associated with
    each webcast.

    Attributes
    ----------
    VERSION : datasets.Version
        a version number for the dataset
    BUILDER_CONFIGS : list
        a list of BuilderConfig instances

    Methods
    -------
    _info():
        Returns the dataset information.
    _split_generators(dl_manager: datasets.DownloadManager):
        Returns SplitGenerators.
    _generate_examples(filepaths):
        Yields examples.
    """

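    # Usage sketch (assumes this script is resolved from the TrustHLT/LaCour
    # dataset repository on the Hugging Face Hub):
    #   transcripts = datasets.load_dataset("TrustHLT/LaCour", "transcripts", split="train")
    #   documents = datasets.load_dataset("TrustHLT/LaCour", "documents", split="train")
    # Note: the nested "data" field of a transcript example is exposed as a dict
    # of lists, e.g. example["data"]["text"] is a list of strings.
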
    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="transcripts",
            version=VERSION,
            description="transcript dataset based on xml files",
        ),
        datasets.BuilderConfig(
            name="documents",
            version=VERSION,
            description="linked documents associated with the webcast",
        ),
    ]

    DEFAULT_CONFIG_NAME = "transcripts"

    def _info(self):
        """
        Returns the dataset information.

        Returns
        -------
        datasets.DatasetInfo
            a DatasetInfo instance containing information about the dataset
        """
        if self.config.name == "transcripts":
            return datasets.DatasetInfo(
                features=datasets.Features(
                    {
                        "id": datasets.Value("int32"),
                        "webcast_id": datasets.Value("string"),
                        "segment_id": datasets.Value("int32"),
                        "speaker_name": datasets.Value("string"),
                        "speaker_role": datasets.Value("string"),
                        "data": datasets.features.Sequence(
                            {
                                "begin": datasets.Value("float32"),
                                "end": datasets.Value("float32"),
                                "language": datasets.Value("string"),
                                "text": datasets.Value("string"),
                            }
                        ),
                    }
                ),
                supervised_keys=None,
            )
        else:
            return datasets.DatasetInfo(
                features=datasets.Features(
                    {
                        "id": datasets.Value("int32"),
                        "webcast_id": datasets.Value("string"),
                        "hearing_title": datasets.Value("string"),
                        "hearing_date": datasets.Value("string"),
                        "hearing_type": datasets.Value("string"),
                        "application_number": datasets.features.Sequence(datasets.Value("string")),
                        "case_id": datasets.Value("string"),
                        "case_name": datasets.Value("string"),
                        "case_url": datasets.Value("string"),
                        "ecli": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "document_date": datasets.Value("string"),
                        "importance": datasets.Value("int32"),
                        "articles": datasets.features.Sequence(datasets.Value("string")),
                        "respondent_government": datasets.features.Sequence(datasets.Value("string")),
                        "issue": datasets.Value("string"),
                        "strasbourg_caselaw": datasets.Value("string"),
                        "external_sources": datasets.Value("string"),
                        "conclusion": datasets.Value("string"),
                        "separate_opinion": datasets.Value("bool"),
                    }
                ),
                supervised_keys=None,
            )

    def _split_generators(self, dl_manager):
        """
        Returns SplitGenerators.

        Parameters
        ----------
        dl_manager : datasets.DownloadManager
            a DownloadManager instance

        Returns
        -------
        list
            a list of SplitGenerator instances
        """
        base_url_xml = hf_hub_url("TrustHLT/LaCour", filename="lacourxml.tar.gz", repo_type="dataset")
        base_url_json = hf_hub_url("TrustHLT/LaCour", filename="lacour_linked_documents.json", repo_type="dataset")

        if self.config.name == "transcripts":
            path = dl_manager.download(base_url_xml)
            xmlpath = dl_manager.iter_archive(path)
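            # iter_archive yields (path-inside-archive, file object) pairs lazily;
            # _generate_examples unpacks them as `for fpath, file in filepaths`.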
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": xmlpath}),
            ]
        else:
            jsonpath = dl_manager.download(base_url_json)
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": jsonpath}),
            ]

    def _generate_examples(self, filepaths):
        """
        Reads the provided files, parses the data, and yields examples in a structured format.

        For the "transcripts" configuration, it reads XML files and extracts speaker
        segments and associated metadata. For the "documents" configuration, it reads
        the linked-documents JSON file and yields one example per document.

        Parameters
        ----------
        filepaths : iterable or str
            An iterator over (path, file object) pairs from the XML archive, or the
            path to the JSON file, depending on the configuration.

        Yields
        ------
        tuple
            A tuple of an integer ID and a dictionary with the example data. For
            transcripts, the "data" field is a list of segment dictionaries with
            "begin", "end", "language", and "text" entries.
        """
        if self.config.name == "transcripts":
            id_ = 0
            for fpath, file in filepaths:
                logger.info("generating examples from = %s", fpath)
                tree = ET.parse(file)
                root = tree.getroot()
                segment_id = 0
                for speakerSegment in root.findall('SpeakerSegment'):
                    text_segments = []
                    for segment in speakerSegment.findall('Segment'):
                        meta_data = segment.find('meta_data')
                        text_segments.append({
                            "begin": meta_data.findtext('TimestampBegin', ''),
                            "end": meta_data.findtext('TimestampEnd', ''),
                            "language": meta_data.findtext('Language', ''),
                            "text": segment.findtext('text', '').strip(),
                        })
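                    # Assumption: the xml file names follow a <prefix>_<part1>_<part2>.xml
                    # pattern, so the webcast id is rebuilt from the second and third
                    # underscore-separated pieces of the file name. Speaker name and role
                    # are read from the meta_data of the last parsed segment.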
                    feature = id_, {
                        "id": id_,
                        "webcast_id": fpath.split('_')[1] + "_" + fpath.split('_')[2].split('.')[0],
                        "segment_id": segment_id,
                        "speaker_role": meta_data.findtext('Role', ''),
                        "speaker_name": meta_data.findtext('Name', ''),
                        "data": text_segments,
                    }
                    yield feature
                    id_ += 1
                    segment_id += 1

        elif self.config.name == "documents":
            id_ = 0
            df = pd.read_json(filepaths, orient="index", dtype={"webcast_id": str})
            logger.info("generating examples from = %s", filepaths)
            cols = df.columns.tolist()
            cols.remove('appno')

            df['judges'] = df['judges'].fillna('')

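            # Rows that differ only in their application number are merged: every other
            # column forms the groupby key (the judges column is filled above so missing
            # values do not drop rows), the appno values are joined with ';' and later
            # split back into a list for the "application_number" feature.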
            df = df.groupby(cols)['appno'].apply(';'.join).reset_index()
            for _, row in df.iterrows():
                feature = id_, {
                    "id": id_,
                    "webcast_id": row["webcast_id"],
                    "hearing_title": row["hearing_title"],
                    "hearing_date": row["hearing_date"],
                    "hearing_type": row["hearing_type"],
                    "application_number": row["appno"].split(';'),
                    "case_id": row["case_id"],
                    "case_name": row["case_name"],
                    "case_url": row["case_url"],
                    "ecli": row["ecli"],
                    "type": row["type"],
                    "document_date": row["document_date"],
                    "importance": row["importance"],
                    "articles": row["articles"].split(';'),
                    "respondent_government": row["respondent"].split(';'),
                    "issue": row["issue"],
                    "strasbourg_caselaw": row["strasbourg_caselaw"],
                    "external_sources": row["external_sources"],
                    "conclusion": row["conclusion"],
                    "separate_opinion": row["separate_opinion"],
                }
                yield feature
                id_ += 1
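

# Minimal local smoke test, kept out of the loader itself: a sketch that assumes a
# datasets version which still supports loading community scripts by path (newer
# releases may additionally require trust_remote_code=True).
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "transcripts", split="train")
    print(ds[0])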