File size: 4,572 Bytes
918b382 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 |
import os
from pathlib import Path
from typing import Dict, List, Tuple
import datasets
import pandas as pd
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks
# Canonical SeaCrowd dataset identifier; also used as `subset_id` in the configs below.
_DATASETNAME = "su_emot"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
# ISO 639-3 code for Sundanese.
_LANGUAGES = ["sun"]
# Data is fetched from a public GitHub raw URL, not a local path.
_LOCAL = False
_CITATION = """\
@INPROCEEDINGS{
9297929,
author={Putra, Oddy Virgantara and Wasmanson, Fathin Muhammad and Harmini, Triana and Utama, Shoffin Nahwa},
booktitle={2020 International Conference on Computer Engineering, Network, and Intelligent Multimedia (CENIM)},
title={Sundanese Twitter Dataset for Emotion Classification},
year={2020},
volume={},
number={},
pages={391--395},
doi={10.1109/CENIM51130.2020.9297929}
}
"""
_DESCRIPTION = """\
This is a dataset for emotion classification of Sundanese text. The dataset is gathered from Twitter API between January and March 2019 with 2518 tweets in total.
The tweets filtered by using some hashtags which are represented Sundanese emotion, for instance, #persib, #corona, #saredih, #nyakakak, #garoblog, #sangsara, #gumujeng, #bungah, #sararieun, #ceurik, and #hariwang.
This dataset contains four distinctive emotions: anger, joy, fear, and sadness. Each tweet is annotated using related emotion. For data
validation, the authors consulted a Sundanese language teacher for expert validation.
"""
_HOMEPAGE = "https://github.com/virgantara/sundanese-twitter-dataset"
# Upstream repository declares no license.
_LICENSE = "UNKNOWN"
# Single CSV containing the full dataset (no predefined splits upstream).
_URLS = {
    "datasets": "https://raw.githubusercontent.com/virgantara/sundanese-twitter-dataset/master/newdataset.csv"
}
_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
class SuEmot(datasets.GeneratorBasedBuilder):
    """Emotion classification of Sundanese tweets.

    2518 tweets gathered from the Twitter API between January and March 2019,
    each labeled with one of four emotions: anger, joy, fear, or sadness.
    Exposes two schemas: the raw "source" schema and the unified
    "seacrowd_text" schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="su_emot_source",
            version=SOURCE_VERSION,
            description="Sundanese Twitter Dataset for Emotion source schema",
            schema="source",
            subset_id="su_emot",
        ),
        SEACrowdConfig(
            name="su_emot_seacrowd_text",
            version=SEACROWD_VERSION,
            description="Sundanese Twitter Dataset for Emotion Nusantara schema",
            schema="seacrowd_text",
            subset_id="su_emot",
        ),
    ]

    DEFAULT_CONFIG_NAME = "su_emot_source"

    def _info(self) -> datasets.DatasetInfo:
        """Build dataset metadata with features matching the active schema.

        Raises:
            ValueError: if the config carries a schema this loader does not
                implement (previously this surfaced as an opaque NameError).
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "data": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            # Unified text-classification schema with the fixed label set.
            features = schemas.text_features(["anger", "joy", "fear", "sadness"])
        else:
            raise ValueError(f"Unsupported schema: {self.config.schema}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the single upstream CSV and expose it as the train split."""
        # download_and_extract returns the local path of the cached CSV file.
        csv_path = Path(dl_manager.download_and_extract(_URLS["datasets"]))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": csv_path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from the downloaded CSV.

        The CSV has a header row and two columns (label, text); ``reset_index``
        prepends a 0-based integer index column, and the three columns are then
        renamed to index/label/data.
        """
        df = pd.read_csv(filepath, sep=",", header="infer").reset_index()
        df.columns = ["index", "label", "data"]
        # Decide the schema once, outside the row loop (was duplicated per branch).
        is_source = self.config.schema == "source"
        for row in df.itertuples():
            # `row.index` is the renamed 0-based column, not tuple.index().
            if is_source:
                yield row.index, {"index": str(row.index + 1), "data": row.data, "label": row.label}
            else:
                yield row.index, {"id": str(row.index + 1), "text": row.data, "label": row.label}
|