Languages: Vietnamese
holylovenia committed
Commit: a006cd3
1 parent: 5ccb04f

Upload uit_vsmec.py with huggingface_hub

Files changed (1): uit_vsmec.py (+130, -0)
uit_vsmec.py ADDED
@@ -0,0 +1,130 @@
# coding=utf-8
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{ho2020emotion,
    title={Emotion recognition for vietnamese social media text},
    author={Ho, Vong Anh and Nguyen, Duong Huynh-Cong and Nguyen, Danh Hoang and Pham, Linh Thi-Van and Nguyen, Duc-Vu and Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy},
    booktitle={Computational Linguistics: 16th International Conference of the Pacific Association for Computational Linguistics, PACLING 2019, Hanoi, Vietnam, October 11--13, 2019, Revised Selected Papers 16},
    pages={319--333},
    year={2020},
    organization={Springer}
}
"""

_DATASETNAME = "uit_vsmec"

_DESCRIPTION = """\
This dataset consists of Vietnamese Facebook comments that were manually annotated for emotion.
There are seven possible emotion labels: enjoyment, sadness, fear, anger, disgust, surprise, or other (for comments with no or neutral emotion).
Two rounds of manual annotation were carried out to train annotators on the tagging and editing guidelines.
Annotation continued until inter-annotator agreement reached at least 80%.
"""

_HOMEPAGE = "https://drive.google.com/drive/folders/1HooABJyrddVGzll7fgkJ6VzkG_XuWfRu"

_LICENSE = Licenses.UNKNOWN.value

_LANGUAGES = ["vie"]

_LOCAL = False

_URLS = {
    "train": "https://docs.google.com/spreadsheets/export?id=10VYzfK7JLg-vfmqH0UmKX62z_uaXU-Hp&format=csv",
    "valid": "https://docs.google.com/spreadsheets/export?id=1EsSFZ94fj2yTvFKO6EyxM0wBRcG0s1KE&format=csv",
    "test": "https://docs.google.com/spreadsheets/export?id=1D16FCKKgJ0T6t2aSA3biWVwvD9fa4G9a&format=csv",
}

_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class UITVSMECDataset(datasets.GeneratorBasedBuilder):
    """
    This is the main class of the SEACrowd dataloader for UIT-VSMEC, focusing on the emotion classification task.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_text",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    LABEL_NAMES = ["Other", "Disgust", "Enjoyment", "Anger", "Surprise", "Sadness", "Fear"]
    DEFAULT_CONFIG_NAME = "uit_vsmec_source"
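
    # Note: under the seacrowd_text schema, each example's label is yielded as the
    # integer index of its emotion name in LABEL_NAMES (e.g., "Enjoyment" -> 2).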

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features({"Emotion": datasets.Value("string"), "Sentence": datasets.Value("string")})

        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(self.LABEL_NAMES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        path_dict = dl_manager.download_and_extract(_URLS)
        train_path, valid_path, test_path = path_dict["train"], path_dict["valid"], path_dict["test"]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": valid_path,
                },
            ),
        ]
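
    # Each split's CSV provides "Emotion" and "Sentence" columns; examples are keyed
    # by the per-file row number that reset_index() exposes as the "index" column.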
    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        df = pd.read_csv(filepath).reset_index()
        if self.config.schema == "source":
            for row in df.itertuples():
                ex = {"Emotion": row.Emotion, "Sentence": row.Sentence}
                yield row.index, ex

        elif self.config.schema == "seacrowd_text":
            for row in df.itertuples():
                ex = {"id": str(row.index), "text": row.Sentence, "label": self.LABEL_NAMES.index(row.Emotion)}
                yield row.index, ex
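
For reference, a minimal usage sketch (an assumption, not part of the commit): it supposes the seacrowd package is importable, this script is saved locally as uit_vsmec.py, and a datasets version recent enough to accept trust_remote_code.

import datasets

# The seacrowd_text config yields labels as integer indices into LABEL_NAMES;
# use name="uit_vsmec_source" instead for the raw "Emotion"/"Sentence" columns.
dset = datasets.load_dataset("uit_vsmec.py", name="uit_vsmec_seacrowd_text", trust_remote_code=True)
print(dset["train"][0])  # e.g. {"id": "0", "text": "...", "label": 2}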