holylovenia committed
Commit • 33231c1
1 Parent(s): e21af22

Upload paracotta_id.py with huggingface_hub

paracotta_id.py CHANGED (+26 -14)
@@ -4,12 +4,17 @@ from typing import Dict, List, Tuple
 
 import datasets
 
-from 
-from 
-from 
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks
+from seacrowd.utils import schemas
 import jsonlines
 from nltk.tokenize.treebank import TreebankWordDetokenizer
 
+try:
+    import gdown
+except:
+    print("Please install `gdown` to proceed.")
+
 
 _CITATION = """\
 @article{aji2022paracotta,
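The new try/except guard makes gdown a soft dependency, but a bare except only prints a message and lets the module load with gdown undefined, which would later surface as a NameError inside _split_generators. A minimal alternative sketch (not part of this commit) that fails fast instead:

    try:
        import gdown
    except ImportError as err:  # catch only the missing-package case
        raise ImportError("Please install `gdown` to proceed: pip install gdown") from err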
@@ -41,7 +46,7 @@ _SUPPORTED_TASKS = [Tasks.PARAPHRASING]
 
 # Dataset does not have versioning
 _SOURCE_VERSION = "1.0.0"
-
+_SEACROWD_VERSION = "2024.06.20"
 
 
 class ParaCotta(datasets.GeneratorBasedBuilder):
@@ -49,21 +54,21 @@ class ParaCotta(datasets.GeneratorBasedBuilder):
     """
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
     BUILDER_CONFIGS = [
-
+        SEACrowdConfig(
             name="paracotta_id_source",
             version=SOURCE_VERSION,
             description="paracotta_id source schema",
             schema="source",
             subset_id="paracotta_id",
         ),
-
-        name="
-        version=
+        SEACrowdConfig(
+            name="paracotta_id_seacrowd_t2t",
+            version=SEACROWD_VERSION,
             description="paracotta_id Nusantara schema",
-        schema="
+            schema="seacrowd_t2t",
             subset_id="paracotta_id",
         ),
     ]
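The two SEACrowdConfig entries expose the dataset under a raw source schema and the seacrowd_t2t text-to-text schema. A hypothetical usage sketch, assuming the loader script sits at ./paracotta_id.py (the path is an assumption, not confirmed by this diff):

    import datasets

    # load by config name; "train" mirrors the single TRAIN split declared in _split_generators below
    src = datasets.load_dataset("paracotta_id.py", name="paracotta_id_source", split="train")
    t2t = datasets.load_dataset("paracotta_id.py", name="paracotta_id_seacrowd_t2t", split="train")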
@@ -79,7 +84,7 @@ class ParaCotta(datasets.GeneratorBasedBuilder):
                     "tgt": datasets.Value("string"),
                 }
             )
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
             features = schemas.text2text_features
 
         return datasets.DatasetInfo(
@@ -93,13 +98,20 @@ class ParaCotta(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         """Returns SplitGenerators."""
         urls = _URLS[_DATASETNAME]
+        # download data from gdrive
+        output_dir = Path.cwd() / "data" / _DATASETNAME
+        output_dir.mkdir(parents=True, exist_ok=True)
+        output_file = output_dir / f"{_DATASETNAME}.tsv"
+        if not output_file.exists():
+            gdown.download(urls, str(output_file), fuzzy=True)
+        else:
+            print(f"File already downloaded: {str(output_file)}")
 
-        data_dir = Path(dl_manager.download(urls))
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": 
+                    "filepath": output_file,
                     "split": "test",
                 },
             ),
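_split_generators now bypasses dl_manager and fetches the TSV from Google Drive itself, caching it under ./data/paracotta_id so reruns skip the download; fuzzy=True lets gdown extract the file id from a share link. A standalone sketch of the same step, with a placeholder URL rather than the real _URLS entry:

    from pathlib import Path

    import gdown

    url = "https://drive.google.com/file/d/FILE_ID/view"  # placeholder share link
    output_file = Path.cwd() / "data" / "paracotta_id" / "paracotta_id.tsv"
    output_file.parent.mkdir(parents=True, exist_ok=True)
    if not output_file.exists():
        gdown.download(url, str(output_file), fuzzy=True)  # fuzzy=True parses the id out of the link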
@@ -120,7 +132,7 @@ class ParaCotta(datasets.GeneratorBasedBuilder):
                 id += 1
                 yield id, ex
 
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
             with open(filepath, 'r') as f:
                 data = f.readlines()
             id = 0
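_generate_examples branches on the schema name, so the same TSV yields either raw src/tgt pairs or seacrowd_t2t records. A hedged sketch of the record shape the t2t branch presumably emits, assuming schemas.text2text_features uses its usual field names (not shown in this diff):

    # hypothetical seacrowd_t2t record; field names and values are assumptions
    example = {
        "id": "0",
        "text_1": "kalimat sumber",     # source sentence
        "text_2": "parafrasa kalimat",  # paraphrase
        "text_1_name": "src",
        "text_2_name": "tgt",
    }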