Update files from the datasets library (from 1.6.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.0
- wikipedia.py +5 -18
wikipedia.py CHANGED
@@ -16,27 +16,19 @@
 # Lint as: python3
 """Wikipedia dataset containing cleaned articles of all languages."""
 
-from __future__ import absolute_import, division, print_function
 
+import bz2
 import codecs
 import json
 import re
 import xml.etree.cElementTree as etree
 
-import six
-
 import datasets
 
 
 logger = datasets.logging.get_logger(__name__)
 
 
-if six.PY3:
-    import bz2  # pylint:disable=g-import-not-at-top
-else:
-    # py2's built-in bz2 package does not support reading from file objects.
-    import bz2file as bz2  # pylint:disable=g-import-not-at-top
-
 _CITATION = """\
 @ONLINE {wikidump,
     author = {Wikimedia Foundation},
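The six / bz2file shim could be dropped because the library is Python 3 only: the built-in bz2 module reads from already-open file objects, which is exactly the capability the Python 2 fallback existed to provide. A minimal sketch of that behaviour, using an in-memory buffer in place of the Beam filesystem handle the script actually opens:

import bz2
import io

# Python 3's bz2.BZ2File accepts a file object via its filename parameter,
# so no bz2file backport is needed.
buf = io.BytesIO(bz2.compress(b"<page>example</page>"))
with bz2.BZ2File(filename=buf) as f:
    assert f.read() == b"<page>example</page>"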
@@ -466,13 +458,8 @@ class Wikipedia(datasets.BeamBasedBuilder):
             logger.info("generating examples from = %s", filepath)
             with beam.io.filesystems.FileSystems.open(filepath) as f:
                 f = bz2.BZ2File(filename=f)
-                if six.PY3:
-                    # Workaround due to:
-                    # https://github.com/tensorflow/tensorflow/issues/33563
-                    utf_f = codecs.getreader("utf-8")(f)
-                else:
-                    utf_f = f
-
+                # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
+                utf_f = codecs.getreader("utf-8")(f)
                 context = etree.iterparse(utf_f, events=("end",))
                 for unused_event, elem in context:
                     if not elem.tag.endswith("page"):
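Only the dead Python 2 branch goes away here; the utf-8 StreamReader workaround for the TensorFlow issue is kept and now applies unconditionally. A sketch of the resulting decompress-decode-iterparse pipeline, assuming an in-memory dump in place of beam.io.filesystems:

import bz2
import codecs
import io
import xml.etree.ElementTree as etree  # modern spelling; the cElementTree alias was removed in Python 3.9

dump = io.BytesIO(bz2.compress(b"<mediawiki><page><title>A</title></page></mediawiki>"))
f = bz2.BZ2File(filename=dump)
utf_f = codecs.getreader("utf-8")(f)  # the workaround kept by this change
for _event, elem in etree.iterparse(utf_f, events=("end",)):
    if elem.tag.endswith("page"):
        print(elem.find("title").text)  # prints: A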
@@ -533,10 +520,10 @@ def _parse_and_clean_wikicode(raw_content, parser):
     re_rm_wikilink = re.compile("^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
 
     def rm_wikilink(obj):
-        return bool(re_rm_wikilink.match(six.text_type(obj.title)))
+        return bool(re_rm_wikilink.match(str(obj.title)))
 
     def rm_tag(obj):
-        return six.text_type(obj.tag) in {"ref", "table"}
+        return str(obj.tag) in {"ref", "table"}
 
     def rm_template(obj):
         return obj.name.lower() in {"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur", "notelist-lg"}
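six.text_type is simply str on Python 3, so both predicates keep their behaviour. A quick check of rm_wikilink as a sketch, assuming the parser argument is mwparserfromhell as in this script:

import re

import mwparserfromhell  # assumed installed; the parser this script passes in

re_rm_wikilink = re.compile("^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)

def rm_wikilink(obj):
    return bool(re_rm_wikilink.match(str(obj.title)))

# Media-namespace links match and get dropped; ordinary article links survive.
wikicode = mwparserfromhell.parse("[[File:Logo.png|thumb]] see [[Paris]]")
for link in wikicode.filter_wikilinks():
    print(str(link.title), rm_wikilink(link))
# File:Logo.png True
# Paris False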