Add passage generation
wikipedia_snippets_streamed.py CHANGED
@@ -1,5 +1,5 @@
 """Wikipedia snippets in parquet format"""
-import
+import math
 import pyarrow.parquet as pq
 import datasets
 
@@ -25,16 +25,20 @@ _LICENSE = (
 
 
 class WikipediaSnippetsStreamed(datasets.GeneratorBasedBuilder):
-    """Bengali wikipedia from 03/20/2021"""
 
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
-                    "
-                    "
-                    "
+                    "wiki_id": datasets.Value("string"),
+                    "start_paragraph": datasets.Value("int32"),
+                    "start_character": datasets.Value("int32"),
+                    "end_paragraph": datasets.Value("int32"),
+                    "end_character": datasets.Value("int32"),
+                    "article_title": datasets.Value("string"),
+                    "section_title": datasets.Value("string"),
+                    "passage_text": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
@@ -49,10 +53,53 @@ class WikipediaSnippetsStreamed(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
         ]
 
+    def wiki40b_article_snippets(self, article, passage_len=100, overlap=0):
+        paragraphs = article["text"].split("\n")
+        article_idx = paragraphs.index("_START_ARTICLE_") + 1
+        article_title = paragraphs[article_idx] if article_idx < len(paragraphs) else ""
+        section_indices = [i + 1 for i, par in enumerate(paragraphs[:-1]) if par == "_START_SECTION_"]
+        par_tabs = [par.split(" ") for par in paragraphs]
+        # map every word to a (paragraph index, start character, word) triple
+        word_map = [
+            (i, len(" ".join(par[:j])), w)
+            for i, par in enumerate(par_tabs)
+            if i > 0 and not par[0].startswith("_START_")
+            for j, w in enumerate(par)
+        ]
+        step_size = passage_len - overlap
+        passages = []
+        # slide a passage_len-word window over the article, advancing by step_size words
+        for i in range(math.ceil(len(word_map) / step_size)):
+            pre_toks = word_map[i * step_size : i * step_size + passage_len]
+            start_section_id = max([0] + [j for j in section_indices if j <= pre_toks[0][0]])
+            section_ids = [j for j in section_indices if start_section_id <= j <= pre_toks[-1][0]]
+            section_ids = section_ids if len(section_ids) > 0 else [0]
+            passage_text = " ".join([w for p_id, s_id, w in pre_toks])
+            passages.append(
+                {
+                    "article_title": article_title,
+                    "section_title": " & ".join([paragraphs[j] for j in section_ids]),
+                    "wiki_id": article["wikidata_id"],
+                    "start_paragraph": pre_toks[0][0],
+                    "start_character": pre_toks[0][1],
+                    "end_paragraph": pre_toks[-1][0],
+                    "end_character": pre_toks[-1][1] + len(pre_toks[-1][2]) + 1,
+                    "passage_text": passage_text.replace("_NEWLINE_", "\n"),
+                }
+            )
+        return passages
+
     def _generate_examples(self, filepath):
         logger.info("generating examples from = %s", filepath)
+        passage_counter = 0
         with open(filepath, "rb") as f:
             pf = pq.ParquetFile(f)
             for i in range(pf.num_row_groups):
-
-
+                # read one row group at a time to keep memory bounded
+                batch_table_dict = pf.read_row_group(i).to_pydict()
+                for idx, article_text in enumerate(batch_table_dict["text"]):
+                    article = {"text": article_text, "wikidata_id": batch_table_dict["wikidata_id"][idx]}
+                    passages = self.wiki40b_article_snippets(article)
+                    for passage in passages:
+                        passage_counter += 1
+                        yield passage_counter, passage