jcblaise commited on
Commit
d9f1fca
1 Parent(s): 5a05e8c

Create hatespeech_filipino.py

Browse files
Files changed (1) hide show
  1. hatespeech_filipino.py +111 -0
hatespeech_filipino.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
"""Hate Speech Text Classification Dataset in Filipino."""

import csv
import os

import datasets
from datasets.tasks import TextClassification


# Short human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
Contains 10k tweets (training set) that are labeled as hate speech or non-hate speech. Released with 4,232 validation and 4,232 testing samples. Collected during the 2016 Philippine Presidential Elections.
"""

# BibTeX entry for the paper that introduced the dataset.
_CITATION = """\
@article{Cabasag-2019-hate-speech,
title={Hate speech in Philippine election-related tweets: Automatic detection and classification using natural language processing.},
author={Neil Vicente Cabasag, Vicente Raphael Chan, Sean Christian Lim, Mark Edward Gonzales, and Charibeth Cheng},
journal={Philippine Computing Journal},
volume={XIV},
number={1},
month={August},
year={2019}
}
"""

# Project page of the Filipino text benchmark suite this dataset belongs to.
_HOMEPAGE = "https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# Zip archive; extracts to a "hatespeech/" directory of per-split CSV files
# (train.csv / valid.csv are referenced below — confirm test.csv on the hub).
_URL = "https://huggingface.co/datasets/jcblaise/hatespeech_filipino/resolve/main/hatespeech_raw.zip"
48
class HateSpeechFilipino(datasets.GeneratorBasedBuilder):
    """Hate Speech Text Classification Dataset in Filipino.

    Binary classification of tweets collected during the 2016 Philippine
    Presidential Elections: label 0 = non-hate speech, 1 = hate speech.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: features, provenance, and the task template."""
        # Labels: 0="Non-hate Speech", 1="Hate Speech"
        features = datasets.Features(
            {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["0", "1"])}
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare train/test/validation splits.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch and unpack `_URL`.

        Returns:
            A list of `datasets.SplitGenerator`, one per split.
        """
        data_dir = dl_manager.download_and_extract(_URL)
        split_dir = os.path.join(data_dir, "hatespeech")
        # BUG FIX: test_path previously pointed at train.csv, so the TEST
        # split silently duplicated the training data.
        train_path = os.path.join(split_dir, "train.csv")
        test_path = os.path.join(split_dir, "test.csv")
        validation_path = os.path.join(split_dir, "valid.csv")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": validation_path,
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, {"text", "label"}) examples from one split's CSV file.

        Rows that do not unpack into exactly (text, label) are skipped on
        purpose — best-effort parsing of the raw CSV. Skipped rows still
        consume an id from `enumerate`, matching the original numbering.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            next(csv_reader)  # skip the header row
            for id_, row in enumerate(csv_reader):
                # Keep the try body minimal: only the unpack can raise ValueError.
                try:
                    text, label = row
                except ValueError:
                    continue
                yield id_, {"text": text, "label": label}