holylovenia committed on
Commit
596f5f8
1 Parent(s): 97593c0

Upload toxicity_200.py with huggingface_hub

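The commit message says the script was pushed with the `huggingface_hub` client. A minimal sketch of such an upload, assuming a hypothetical target repo id (the actual dataset repo is not shown on this page):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="toxicity_200.py",   # local copy of the loader script
    path_in_repo="toxicity_200.py",      # destination path inside the repo
    repo_id="your-org/toxicity_200",     # assumption: replace with the real dataset repo id
    repo_type="dataset",
    commit_message="Upload toxicity_200.py with huggingface_hub",
)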
Files changed (1)
  1. toxicity_200.py +140 -0
toxicity_200.py ADDED
@@ -0,0 +1,140 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+ from zipfile import ZipFile
+
+ import datasets
+
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME
+
+ _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
+ _UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
+ _LOCAL = False
+
+ _CITATION = """\
+ @article{nllb2022,
+     author = {NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Jeff Wang},
+     title = {No Language Left Behind: Scaling Human-Centered Machine Translation},
+     year = {2022}
+ }
+ """
+
+ _DATASETNAME = "toxicity_200"
+
+ _DESCRIPTION = """\
+ Toxicity-200 is a wordlist to detect toxicity in 200 languages. It contains files that include frequent words and phrases generally considered toxic because they represent: 1) frequently used profanities; 2) frequently used insults and hate speech terms, or language used to bully, denigrate, or demean; 3) pornographic terms; and 4) terms for body parts associated with sexual activity.
+ """
+
+ _HOMEPAGE = "https://github.com/facebookresearch/flores/blob/main/toxicity"
+
+ _LICENSE = "CC-BY-SA 4.0"
+ _LANGUAGES = ["ind", "ace", "bjn", "bug", "jav"]
+ _LANGUAGE_MAP = {"ind": "Indonesia", "ace": "Aceh", "bjn": "Banjar", "bug": "Bugis", "jav": "Java"}
+ _URLS = {
+     "toxicity_200": "https://tinyurl.com/NLLB200TWL",
+ }
+ _PASS = "tL4nLLb"
+ _SUPPORTED_TASKS = [] # [Tasks.SELF_SUPERVISED_PRETRAINING] # example: [Tasks.TRANSLATION, Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ def seacrowd_config_constructor(lang, schema, version):
+     if lang == "":
+         raise ValueError(f"Invalid lang {lang}")
+
+     if schema != "source":
+         raise ValueError(f"Invalid schema: {schema}")
+
+     return SEACrowdConfig(
+         name="toxicity_200_{lang}_{schema}".format(lang=lang, schema=schema),
+         version=datasets.Version(version),
+         description="toxicity 200 with {schema} schema for {lang} language".format(lang=_LANGUAGE_MAP[lang], schema=schema),
+         schema=schema,
+         subset_id="toxicity_200",
+     )
+
+ def extract_toxic_zip(filepath):
+     with ZipFile(filepath, "r") as zip_file:
+         zip_file.extractall(path=filepath[:-4], pwd=_PASS.encode("utf-8"))
+     return filepath[:-4]
+
+ class Toxicity200(datasets.GeneratorBasedBuilder):
+     """Toxicity-200: wordlists of frequent toxic terms for detecting toxicity in 200 languages."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [seacrowd_config_constructor(lang, "source", _SOURCE_VERSION) for lang in _LANGUAGES]
+
+     DEFAULT_CONFIG_NAME = "toxicity_200_ind_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features({"id": datasets.Value("string"), "toxic_word": [datasets.Value("string")]})
+         else:
+             raise NotImplementedError()
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = Path(dl_manager.download_and_extract(urls)) / "NLLB-200_TWL"
+
+         data_subdir = {
+             "ind": os.path.join(data_dir, "ind_Latn_twl.zip"),
+             "ace": os.path.join(data_dir, "ace_Latn_twl.zip"),
+             "bjn": os.path.join(data_dir, "bjn_Latn_twl.zip"),
+             "bug": os.path.join(data_dir, "bug_Latn_twl.zip"),
+             "jav": os.path.join(data_dir, "jav_Latn_twl.zip"),
+         }
+
+         lang = self.config.name.split("_")[2]
+         text_dir = extract_toxic_zip(data_subdir[lang])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": {"text_file": os.path.join(text_dir, lang + "_Latn_twl.txt")},
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(filepath["text_file"], "r") as text_file:
+             text = text_file.readlines()
+         word_list = list(map(str.strip, text))
+         if self.config.schema == "source":
+             for id, word in enumerate(word_list):
+                 row = {"id": str(id), "toxic_word": [word]}
+                 yield id, row
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
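
Once the script is available (on the Hub or checked out locally next to the SEACrowd utilities it imports), a source config can be loaded with the standard `datasets` API. A sketch, assuming the script is loaded from a local path:

import datasets

# Load the Indonesian wordlist with the source schema defined above.
# The local path is an assumption about where the script was checked out.
dset = datasets.load_dataset(
    "toxicity_200.py",
    name="toxicity_200_ind_source",
    trust_remote_code=True,
)
print(dset["train"][0])  # e.g. {"id": "0", "toxic_word": ["..."]}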