yassine-thlija committed on
Commit 823603f
1 Parent(s): 957a82f

Init LaCourFinished
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ lacourxml.tar.gz filter=lfs diff=lfs merge=lfs -text
LaCourfinished.py ADDED
@@ -0,0 +1,223 @@
+ import xml.etree.ElementTree as ET
+ import datasets
+ import pandas as pd
+ from huggingface_hub import hf_hub_url
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ class LaCourConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super(LaCourConfig, self).__init__(**kwargs)
+
+
+ class LaCourDataset(datasets.GeneratorBasedBuilder):
+     """
+     A class used to represent the LaCour! dataset.
+
+     Attributes
+     ----------
+     VERSION : datasets.Version
+         a version number for the dataset
+     BUILDER_CONFIGS : list
+         a list of BuilderConfig instances
+
+     Methods
+     -------
+     _info():
+         Returns the dataset information.
+     _split_generators(dl_manager: datasets.DownloadManager):
+         Returns SplitGenerators.
+     _generate_examples(filepaths):
+         Yields examples.
+     """
+     # Version history
+     # 0.1.0 initial release
+     VERSION = datasets.Version("0.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="transcripts",
+             version=VERSION,
+             description="transcript dataset based on xml files",
+         ),
+         datasets.BuilderConfig(
+             name="documents",
+             version=VERSION,
+             description="linked documents associated with the webcasts",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "transcripts"
+
+     def _info(self):
+         """
+         Returns the dataset information.
+
+         Returns
+         -------
+         datasets.DatasetInfo
+             a DatasetInfo instance containing information about the dataset
+         """
+         if self.config.name == "transcripts":
+             return datasets.DatasetInfo(
+                 features=datasets.Features(
+                     {
+                         "id": datasets.Value("int32"),
+                         "webcast_id": datasets.Value("string"),
+                         "segment_id": datasets.Value("int32"),
+                         "speaker_name": datasets.Value("string"),
+                         "speaker_role": datasets.Value("string"),
+                         "data": datasets.features.Sequence(
+                             {
+                                 "begin": datasets.Value("float32"),
+                                 "end": datasets.Value("float32"),
+                                 "language": datasets.Value("string"),
+                                 "text": datasets.Value("string"),
+                             }
+                         ),
+                     }
+                 ),
+                 supervised_keys=None,
+             )
+         else:
+             return datasets.DatasetInfo(
+                 features=datasets.Features(
+                     {
+                         "id": datasets.Value("int32"),
+                         "webcast_id": datasets.Value("string"),
+                         "hearing_title": datasets.Value("string"),
+                         "hearing_date": datasets.Value("string"),
+                         "hearing_type": datasets.Value("string"),
+                         "application_number": datasets.features.Sequence(datasets.Value("string")),
+                         "case_id": datasets.Value("string"),
+                         "case_name": datasets.Value("string"),
+                         "case_url": datasets.Value("string"),
+                         "ecli": datasets.Value("string"),
+                         "type": datasets.Value("string"),
+                         "document_date": datasets.Value("string"),
+                         "importance": datasets.Value("int32"),
+                         "articles": datasets.features.Sequence(datasets.Value("string")),
+                         "respondent_government": datasets.features.Sequence(datasets.Value("string")),
+                         "issue": datasets.Value("string"),
+                         "strasbourg_caselaw": datasets.Value("string"),
+                         "external_sources": datasets.Value("string"),
+                         "conclusion": datasets.Value("string"),
+                         "separate_opinion": datasets.Value("bool"),
+                     }
+                 ),
+                 supervised_keys=None,
+             )
+
+     def _split_generators(self, dl_manager):
+         """
+         Returns SplitGenerators.
+
+         Parameters
+         ----------
+         dl_manager : datasets.DownloadManager
+             a DownloadManager instance
+
+         Returns
+         -------
+         list
+             a list of SplitGenerator instances
+         """
+         base_url_xml = hf_hub_url("TrustHLT-ECALP/LaCourfinished", filename="lacourxml.tar.gz", repo_type="dataset")
+         base_url_json = hf_hub_url("TrustHLT-ECALP/LaCourfinished", filename="lacour_linked_documents.json", repo_type="dataset")
+
+         if self.config.name == "transcripts":
+             path = dl_manager.download(base_url_xml)
+             xmlpath = dl_manager.iter_archive(path)
+             return [
+                 datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": xmlpath}),
+             ]
+         else:
+             jsonpath = dl_manager.download(base_url_json)
+             return [
+                 datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": jsonpath}),
+             ]
+
+     def _generate_examples(self, filepaths):
+         """
+         Reads the provided files, parses the data, and yields it in a structured format.
+
+         For the configuration "transcripts", it reads XML files and extracts
+         speaker segments and associated metadata. For "documents", it reads
+         the linked-documents JSON file.
+
+         Parameters
+         ----------
+         filepaths : iterable of (str, file) or str
+             An archive iterator (transcripts) or the path to the JSON file (documents).
+
+         Yields
+         ------
+         tuple
+             A tuple containing an ID and a dictionary with the example data,
+             where 'data' is a sequence of per-line fields.
+         """
+         if self.config.name == "transcripts":
+             id_ = 0
+             for fpath, file in filepaths:
+                 logger.info("generating examples from = %s", fpath)
+                 tree = ET.parse(file)
+                 root = tree.getroot()
+                 segment_id = 0
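+                 # Expected XML layout, sketched from the queries below (element
+                 # names are the ones actually parsed; the values are illustrative).
+                 # Note that Name/Role are read from the meta_data of the last
+                 # parsed Segment of each turn:
+                 # <SpeakerSegment>
+                 #   <Segment>
+                 #     <meta_data>
+                 #       <TimestampBegin>12.48</TimestampBegin>
+                 #       <TimestampEnd>13.36</TimestampEnd>
+                 #       <Language>fr</Language>
+                 #       <Name>UNK</Name>
+                 #       <Role>Announcer</Role>
+                 #     </meta_data>
+                 #     <text>La Cour!</text>
+                 #   </Segment>
+                 # </SpeakerSegment>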
+                 for speakerSegment in root.findall('SpeakerSegment'):
+                     text_segments = []
+                     for segment in speakerSegment.findall('Segment'):
+                         meta_data = segment.find('meta_data')
+                         text_segments.append({
+                             "begin": meta_data.findtext('TimestampBegin', ''),
+                             "end": meta_data.findtext('TimestampEnd', ''),
+                             "language": meta_data.findtext('Language', ''),
+                             "text": segment.findtext('text', '').strip(),
+                         })
+                     feature = id_, {
+                         "id": id_,
+                         "webcast_id": fpath.split('_')[1] + "_" + fpath.split('_')[2].split('.')[0],
+                         "segment_id": segment_id,
+                         "speaker_role": meta_data.findtext('Role', ''),
+                         "speaker_name": meta_data.findtext('Name', ''),
+                         "data": text_segments,
+                     }
+                     yield feature
+                     id_ += 1
+                     segment_id += 1
+
+         elif self.config.name == "documents":
+             id_ = 0
+             df = pd.read_json(filepaths, orient="index", dtype={"webcast_id": str})
+             logger.info("generating examples from = %s", filepaths)
+             cols = df.columns.tolist()
+             cols.remove('appno')
+             # group appnos to avoid duplicates
+             df = df.groupby(cols)['appno'].apply(';'.join).reset_index()
+             for _, row in df.iterrows():
+                 feature = id_, {
+                     "id": id_,
+                     "webcast_id": row["webcast_id"],
+                     "hearing_title": row["hearing_title"],
+                     "hearing_date": row["hearing_date"],
+                     "hearing_type": row["hearing_type"],
+                     "application_number": row["appno"].split(';'),
+                     "case_id": row["case_id"],
+                     "case_name": row["case_name"],
+                     "case_url": row["case_url"],
+                     "ecli": row["ecli"],
+                     "type": row["type"],
+                     "document_date": row["document_date"],
+                     "importance": row["importance"],
+                     "articles": row["articles"].split(';'),
+                     "respondent_government": row["respondent"].split(';'),
+                     "issue": row["issue"],
+                     "strasbourg_caselaw": row["strasbourg_caselaw"],
+                     "external_sources": row["external_sources"],
+                     "conclusion": row["conclusion"],
+                     "separate_opinion": row["separate_opinion"],
+                 }
+                 yield feature
+                 id_ += 1
README.md ADDED
@@ -0,0 +1,208 @@
+ ---
+ language:
+ - en
+ - fr
+ - ru
+ - es
+ - hr
+ - it
+ - pt
+ - tr
+ - pl
+ - lt
+ - de
+ - uk
+ - hu
+ - nl
+ - sq
+ - ro
+ - sr
+ pretty_name: LaCour!
+ language_creators:
+ - found
+ - machine-generated
+ license:
+ - cc-by-sa-4.0
+ multilinguality:
+ - multilingual
+ size_categories:
+ - 1K<n<10K
+ - n<1K
+ tags:
+ - legal
+ - hearing
+ - oral argument
+ - transcript
+ - echr
+ - dialog
+ ---
+
+ ## Dataset Description
+
+ - **Homepage:** https://trusthlt.org/lacour
+ - **Repository:** https://github.com/trusthlt/lacour-corpus
+ - **Paper:** tbd
+
+ ### Dataset Summary
+
+ This dataset contains transcribed court hearings sourced from official hearings of the __European Court of Human Rights__ ([https://www.echr.coe.int/webcasts-of-hearings](https://www.echr.coe.int/webcasts-of-hearings)). The hearings are 154 selected webcasts (videos) from 2012-2022 in their original language (no interpretation). With manual annotation of language labels and automatic processing of the extracted audio with [pyannote](https://huggingface.co/pyannote/speaker-diarization) and [whisper-large-v2](https://huggingface.co/openai/whisper-large-v2), the resulting dataset contains 4000 speaker turns and 88920 individual lines. The dataset consists of two subsets: the transcripts and the metadata with linked documents. The transcripts are additionally available as .txt or .xml.
+
+ ### Languages
+
+ The largest portions of the transcripts are in:
+
+ English, French
+
+ A smaller portion also contains the following languages:
+
+ Russian, Spanish, Croatian, Italian, Portuguese, Turkish, Polish, Lithuanian, German, Ukrainian, Hungarian, Dutch, Albanian, Romanian, Serbian
+
+ The collected metadata is in:
+
+ English
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ Each instance in transcripts represents an entire speaker segment of a transcript, similar to a conversation turn in a dialog.
+
+ ```
+ {
+     'id': 0,
+     'webcast_id': '1021112_29112017',
+     'segment_id': 0,
+     'speaker_name': 'UNK',
+     'speaker_role': 'Announcer',
+     'data': {
+         'begin': [12.479999542236328],
+         'end': [13.359999656677246],
+         'language': ['fr'],
+         'text': ['La Cour!']
+     }
+ }
+ ```
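+
+ The fields in `data` are parallel lists with one entry per spoken line. As a minimal sketch of how a turn can be reassembled (assuming the dataset was loaded as `lacour_transcripts`, as in the snippet under Additional Information):
+
+ ```
+ # sketch: print each line of one speaker turn with its timing and language
+ turn = lacour_transcripts["train"][0]
+ for begin, end, lang, text in zip(
+     turn["data"]["begin"], turn["data"]["end"],
+     turn["data"]["language"], turn["data"]["text"],
+ ):
+     print(f"[{begin:.2f}-{end:.2f}] ({lang}) {text}")
+ ```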
+
+ Each instance in documents represents information on a document in hudoc associated with a hearing, together with the metadata of that hearing. The actual document is linked and can also be found in [hudoc](https://hudoc.echr.coe.int) with the case_id. Note: `hearing_type` states the type of the hearing, `type` states the type of the document. If the hearing is a "Grand Chamber hearing", the "CHAMBER" document refers to a different hearing.
+
+ ```
+ {
+     'id': 16,
+     'webcast_id': '1232311_02102012',
+     'hearing_title': 'Michaud v. France (nos. 12323/11)',
+     'hearing_date': '2012-10-02 00:00:00',
+     'hearing_type': 'Chamber hearing',
+     'application_number': ['12323/11'],
+     'case_id': '001-115377',
+     'case_name': 'CASE OF MICHAUD v. FRANCE',
+     'case_url': 'https://hudoc.echr.coe.int/eng?i=001-115377',
+     'ecli': 'ECLI:CE:ECHR:2012:1206JUD001232311',
+     'type': 'CHAMBER',
+     'document_date': '2012-12-06 00:00:00',
+     'importance': 1,
+     'articles': ['8', '8-1', '8-2', '34', '35'],
+     'respondent_government': ['FRA'],
+     'issue': 'Decision of the National Bar Council of 12 July 2007 “adopting regulations on internal procedures for implementing the obligation to combat money laundering and terrorist financing, and an internal supervisory mechanism to guarantee compliance with those procedures” ; Article 21-1 of the Law of 31 December 1971 ; Law no. 2004-130 of 11 February 2004 ; Monetary and Financial Code',
+     'strasbourg_caselaw': 'André and Other v. France, no 18603/03, 24 July 2008;Bosphorus Hava Yollari Turizm ve Ticaret Anonim Sirketi v. Ireland [GC], no 45036/98, ECHR 2005-VI;[...]',
+     'external_sources': 'Directive 91/308/EEC, 10 June 1991;Article 6 of the Treaty on European Union;Charter of Fundamental Rights of the European Union;Articles 169, 170, 173, 175, 177, 184 and 189 of the Treaty establishing the European Community;Recommendations 12 and 16 of the financial action task force (“FATF”) on money laundering;Council of Europe Convention on Laundering, Search, Seizure and Confiscation of the Proceeds from Crime and on the Financing of Terrorism (16 May 2005)',
+     'conclusion': 'Remainder inadmissible;No violation of Article 8 - Right to respect for private and family life (Article 8-1 - Respect for correspondence;Respect for private life)',
+     'separate_opinion': True
+ }
+ ```
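+
+ Since `case_url` follows the pattern visible above, a document's hudoc link can also be rebuilt from `case_id` alone; a small sketch:
+
+ ```
+ # sketch: derive the hudoc link from a case_id (pattern taken from case_url above)
+ def hudoc_url(case_id: str) -> str:
+     return f"https://hudoc.echr.coe.int/eng?i={case_id}"
+
+ assert hudoc_url("001-115377") == "https://hudoc.echr.coe.int/eng?i=001-115377"
+ ```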
+
+ ### Data Fields
+
+ transcripts:
+
+ * id: the identifier
+ * webcast_id: the identifier for the hearing
+ * segment_id: the identifier of the current speaker segment in the current hearing
+ * speaker_name: the name of the speaker (not given for Applicant, Government or Third Party)
+ * speaker_role: the role/party the speaker represents (`Announcer` for announcements, `Judge` for judges, `JudgeP` for the judge president, `Applicant` for representatives of the applicant, `Government` for representatives of the respondent government, `ThirdParty` for representatives of third party interveners)
+ * data: a sequence of the following fields
+   * begin: the timestamp for the beginning of the line (in seconds)
+   * end: the timestamp for the end of the line (in seconds)
+   * language: the language spoken (in ISO 639-1)
+   * text: the spoken line
+
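+ The `speaker_role` values make it easy to slice the corpus by party; a minimal sketch (again assuming `lacour_transcripts` from the loading snippet under Additional Information):
+
+ ```
+ # sketch: keep only turns spoken by judges or the judge president
+ judge_turns = lacour_transcripts["train"].filter(
+     lambda t: t["speaker_role"] in ("Judge", "JudgeP")
+ )
+ ```
+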
+ documents:
+
+ * id: the identifier
+ * webcast_id: the identifier for the hearing (allows linking to transcripts; see the sketch below)
+ * hearing_title: the title of the hearing
+ * hearing_date: the date of the hearing
+ * hearing_type: the type of hearing (Grand Chamber, Chamber or Grand Chamber Judgment Hearing)
+ * application_number: the application numbers which are associated with the hearing and case
+ * case_id: the id of the case
+ * case_name: the name of the case
+ * case_url: the direct link to the document
+ * ecli: the ECLI (European Case Law Identifier)
+ * type: the type of the document
+ * document_date: the date of the document
+ * importance: the importance score of the case (1 is the highest importance, key case)
+ * articles: the articles of the Convention on Human Rights concerned by the case
+ * respondent_government: the code of the respondent government(s) (in ISO-3166 Alpha-3)
+ * issue: the references to the issue of the case
+ * strasbourg_caselaw: the list of cases in the ECHR which are relevant to the current case
+ * external_sources: the relevant references outside of the ECHR
+ * conclusion: the short textual description of the conclusion
+ * separate_opinion: indicates whether there is a separate opinion
+
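+ Since `webcast_id` is shared by both configurations, documents can be attached to transcript turns; a minimal sketch (assuming both subsets were loaded as in the snippet under Additional Information):
+
+ ```
+ # sketch: index the documents by webcast_id, then look them up for a turn
+ docs_by_webcast = {}
+ for doc in lacour_documents["train"]:
+     docs_by_webcast.setdefault(doc["webcast_id"], []).append(doc)
+
+ turn = lacour_transcripts["train"][0]
+ linked_docs = docs_by_webcast.get(turn["webcast_id"], [])
+ ```
+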
+ ### Data Splits
+
+ The dataset only provides a single train split.
+
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ This dataset provides partly corrected transcripts of the webcasts to enable the processing of hearings in legal NLP. No specific task is prescribed.
+
+ ### Source Data
+
+ #### Data Collection
+
+ The data was collected by transcribing the publicly available [webcasts of the ECHR](https://www.echr.coe.int/webcasts-of-hearings) with the help of [pyannote](https://huggingface.co/pyannote/speaker-diarization) and [whisper-large-v2](https://huggingface.co/openai/whisper-large-v2). The documents were sourced from the [ECHR hudoc database](https://hudoc.echr.coe.int).
+
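+ As a rough illustration of such a setup (a sketch only, not the authors' exact pipeline; it assumes the `pyannote.audio` and `openai-whisper` packages and a local `audio.wav` file):
+
+ ```
+ import whisper
+ from pyannote.audio import Pipeline
+
+ # diarize: who speaks when (the model may require an access token)
+ pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization")
+ diarization = pipeline("audio.wav")
+ for segment, _, speaker in diarization.itertracks(yield_label=True):
+     print(f"{speaker}: {segment.start:.2f}-{segment.end:.2f}")
+
+ # transcribe the same audio with whisper-large-v2
+ model = whisper.load_model("large-v2")
+ result = model.transcribe("audio.wav")
+ print(result["text"])
+ ```
+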
+ #### Who are the source producers?
+
+ Participants in hearings before the ECHR for the audio and video material; employees and judges of the ECHR for the documents.
+
+ ### Annotations
+
+ #### Annotation process
+
+ **language identification** Spoken languages were manually identified by research assistants. Disagreements were discussed to reach the final language label.
+
+ **transcript correction** All parts spoken by a Judge or Judge President in English or French were corrected by research assistants with high proficiency in the respective language.
+
+ #### Personal and Sensitive Information
+
+ The dataset contains the names of judges and other participants in the hearings. Since those names are available in the public court material, we did not remove them. The machine-generated transcripts may also contain names, which were neither checked nor removed. For sensitive information, we rely on the protections in the provided material (names which should not have been mentioned in webcasts are occasionally bleeped out, and the documents are appropriately anonymized).
+
+
+ ## Additional Information
+
+ Download the transcripts and linked documents:
+
+ ```
+ from datasets import load_dataset
+
+ lacour_transcripts = load_dataset("TrustHLT/LaCour", "transcripts")  # default config
+ lacour_documents = load_dataset("TrustHLT/LaCour", "documents")
+ ```
+
+ Formatted versions of the transcripts in .txt and .xml, as well as more information on the collection and creation, can be found on [github](https://github.com/trusthlt/lacour-corpus).
+
+
+ ### Citation Information
+
+ Please cite this data using:
+
+ ```
+ [tbd]
+ ```
lacour_linked_documents.json ADDED
The diff for this file is too large to render. See raw diff
 
lacourxml.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d4b37dadbb952c48ff4bcc023ec585d7fb7d057e3d63f688a9b596f347a5a64
+ size 5475134