Datasets: catalonia_independence
Tasks:
Text Classification
Modalities:
Text
Formats:
parquet
Size:
10K - 100K
Tags:
stance-detection
License: cc-by-nc-sa-4.0
Commit • 5727a4c
1 Parent(s): 7dd6f2f
Delete loading script
- catalonia_independence.py +0 -120
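Note: with the loading script removed, the dataset is served from the parquet files directly and should still be loadable through load_dataset. A minimal sketch, assuming the repository id and the config names ("catalan", "spanish") from the deleted script remain unchanged:

from datasets import load_dataset

# Repo id and config name are assumptions carried over from the deleted script.
dataset = load_dataset("catalonia_independence", "catalan")
print(dataset["train"][0])  # expected keys: id_str, TWEET, LABEL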
catalonia_independence.py
DELETED
@@ -1,120 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This dataset contains two corpora in Spanish and Catalan that consist of annotated Twitter messages for automatic stance detection."""


import csv
import os

import datasets


_CITATION = """\
@inproceedings{zotova-etal-2020-multilingual,
    title = "Multilingual Stance Detection in Tweets: The {C}atalonia Independence Corpus",
    author = "Zotova, Elena and
      Agerri, Rodrigo and
      Nunez, Manuel and
      Rigau, German",
    booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://www.aclweb.org/anthology/2020.lrec-1.171",
    pages = "1368--1375",
    abstract = "Stance detection aims to determine the attitude of a given text with respect to a specific topic or claim. While stance detection has been fairly well researched in the last years, most of the work has been focused on English. This is mainly due to the relative lack of annotated data in other languages. The TW-10 referendum Dataset released at IberEval 2018 is a previous effort to provide multilingual stance-annotated data in Catalan and Spanish. Unfortunately, the TW-10 Catalan subset is extremely imbalanced. This paper addresses these issues by presenting a new multilingual dataset for stance detection in Twitter for the Catalan and Spanish languages, with the aim of facilitating research on stance detection in multilingual and cross-lingual settings. The dataset is annotated with stance towards one topic, namely, the independence of Catalonia. We also provide a semi-automatic method to annotate the dataset based on a categorization of Twitter users. We experiment on the new corpus with a number of supervised approaches, including linear classifiers and deep learning methods. Comparison of our new corpus with the TW-10 dataset shows both the benefits and potential of a well balanced corpus for multilingual and cross-lingual research on stance detection. Finally, we establish new state-of-the-art results on the TW-10 dataset, both for Catalan and Spanish.",
    language = "English",
    ISBN = "979-10-95546-34-4",
}
"""

_DESCRIPTION = """\
This dataset contains two corpora in Spanish and Catalan that consist of annotated Twitter messages for automatic stance detection. The data was collected over 12 days during February and March of 2019 from tweets posted in Barcelona, and during September of 2018 from tweets posted in the town of Terrassa, Catalonia.

Each corpus is annotated with three classes: AGAINST, FAVOR and NEUTRAL, which express the stance towards the target - independence of Catalonia.
"""

_HOMEPAGE = "https://github.com/ixa-ehu/catalonia-independence-corpus"

_LICENSE = "CC BY-NC-SA 4.0"

_URLs = {
    "catalan": "https://github.com/ixa-ehu/catalonia-independence-corpus/raw/master/01_CIC_CA.zip",
    "spanish": "https://github.com/ixa-ehu/catalonia-independence-corpus/raw/master/02_CIC_ES.zip",
}


class CataloniaIndependence(datasets.GeneratorBasedBuilder):
    """This dataset contains two corpora in Spanish and Catalan that consist of annotated Twitter messages for automatic stance detection."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="catalan",
            version=VERSION,
            description="This part of the corpus contains annotated tweets posted in Catalan.",
        ),
        datasets.BuilderConfig(
            name="spanish",
            version=VERSION,
            description="This part of the corpus contains annotated tweets posted in Spanish.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "catalan"

    def _info(self):
        features = datasets.Features(
            {
                "id_str": datasets.Value("string"),
                "TWEET": datasets.Value("string"),
                "LABEL": datasets.ClassLabel(names=["AGAINST", "FAVOR", "NEUTRAL"]),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, f"{self.config.name}_train.csv")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, f"{self.config.name}_test.csv")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir, f"{self.config.name}_val.csv")},
            ),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter="\t")
            # skip header
            next(csv_reader)
            for _id, row in enumerate(csv_reader):
                yield _id, {"id_str": row[0], "TWEET": row[1], "LABEL": row[2]}