Datasets: Sigurdur/nqii_ruqad
Tasks: Question Answering
Modalities: Text
Formats: parquet
Languages: Icelandic
Size: 10K - 100K
License:
Commit: Upload make_hf_dataset.py
Files changed: make_hf_dataset.py (added, +65 lines)

make_hf_dataset.py
from datasets import Dataset, DatasetDict, load_dataset, concatenate_datasets
import pandas as pd
import os


def prepare_nqii(path, split="train"):
    # Load the requested split of the NQiI dataset from the Hub.
    ds = load_dataset(path)
    ds = ds[split]

    # Convert to a DataFrame for column manipulation.
    df = pd.DataFrame(ds)
    df = df.drop(columns=["title"])

    # Flatten the SQuAD-style answers dict into separate columns:
    # a list of answer texts and a list of character start offsets.
    df["start"] = df["answers"].apply(lambda x: x["answer_start"])
    df["answers"] = df["answers"].apply(lambda x: x["text"])

    # Convert back to a datasets.Dataset.
    ds = Dataset.from_pandas(df)

    return ds


def prepare_ruquad(path):
    df = pd.read_json(path)

    # Keep only questions answered with a span in the paragraph.
    df = df[df["type"] == "ANSWERED_WITH_SPAN"]

    # Drop the old index so from_pandas does not add an index column.
    df = df.reset_index(drop=True)

    # Rename and drop columns to match the NQiI schema.
    df = df.rename(columns={"question_id": "id", "paragraph": "context", "span": "answers"})
    df = df.drop(columns=["type", "answer_id", "article_id", "end", "source", "answer"])

    df["start"] = df["start"].astype(int)

    # Wrap single answers in lists so the schema matches NQiI.
    df["answers"] = df["answers"].apply(lambda x: [x])
    df["start"] = df["start"].apply(lambda x: [x])

    ds = Dataset.from_pandas(df)

    return ds


def download_ruquad():
    # Fetch and unpack the RUQuAD release from CLARIN-IS.
    os.system("curl --remote-name-all https://repository.clarin.is/repository/xmlui/bitstream/handle/20.500.12537/310{/RUQuAD1.zip}")
    os.system("unzip RUQuAD1.zip")


if __name__ == "__main__":
    # Training split: NQiI train + RUQuAD train.
    nqii_train = prepare_nqii("vesteinn/icelandic-qa-NQiI", split="train")
    ruquad_train = prepare_ruquad("train.json")
    train = concatenate_datasets([nqii_train, ruquad_train])

    # Validation split comes from NQiI only.
    val = prepare_nqii("vesteinn/icelandic-qa-NQiI", split="validation")

    # Test split: NQiI test + RUQuAD test.
    nqi_test = prepare_nqii("vesteinn/icelandic-qa-NQiI", split="test")
    ruquad_test = prepare_ruquad("test.json")
    test = concatenate_datasets([nqi_test, ruquad_test])

    dataset = DatasetDict({"train": train, "validation": val, "test": test})
    print(dataset)

    dataset.push_to_hub("Sigurdur/nqii_ruqad")
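
Once the push completes, the combined dataset can be pulled back from the Hub like any other datasets repo. A minimal usage sketch, assuming the push above succeeded and the repo is accessible to the caller:

from datasets import load_dataset

# Load the combined NQiI + RUQuAD question-answering dataset produced by the script.
ds = load_dataset("Sigurdur/nqii_ruqad")

# Inspect the splits and the harmonized columns produced by the script
# (id, question, context, answers as a list of strings, start as a list of offsets).
print(ds)
print(ds["train"][0])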