""" |
|
Wikipedia URLs Extractor: |
|
|
|
Script to download the Wikipedia dataset from Hugging Face, extract URLs, |
|
and save them to a text file for further processing. |
|
|
|
Required: |
|
pip install datasets |
|
|
|
Using MIN_LENGTH = 1400 results in approximately 1,100,000 URLs. |
|
Using MIN_LENGTH = 1000 results in approximately 1,800,000 URLs. |
|
|
|
Author : Guillaume Eckendoerffer |
|
Date : 14-09-23 |
|
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/ |
|
https://huggingface.co/datasets/eckendoerffer/wikipedia_fr |
|
""" |

import os

from datasets import load_dataset

# Minimum article length, in characters, for an article's URL to be kept.
MIN_LENGTH = 1400

# Skip list and abbreviation articles (matched by title prefix) as well as
# disambiguation pages, whose titles end with "(homonymie)".
EXCLUDE_TITLE_PREFIXES = ('Liste ', 'Abréviations ')
EXCLUDE_TITLE_SUFFIXES = ('(homonymie)',)

PATH = os.path.dirname(os.path.abspath(__file__))
URL_FILEPATH = os.path.join(PATH, "wiki_link.txt")
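
# Each line of wiki_link.txt will hold a single article URL, e.g.
# (illustrative value; the actual URLs come from the dataset's `url` field):
#   https://fr.wikipedia.org/wiki/Antoine_Meillet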

# Load the pre-processed 2022-03-01 French Wikipedia dump from Hugging Face
# (downloaded and cached locally on first run).
dataset = load_dataset('wikipedia', '20220301.fr')
subset = dataset["train"]
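
# Alternative (a sketch, not used below): if local disk space is a concern,
# the `datasets` streaming mode iterates the rows without materialising the
# full dump on disk, at the cost of re-reading from the Hub on each run:
#   dataset = load_dataset('wikipedia', '20220301.fr', streaming=True)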

add = 0
with open(URL_FILEPATH, 'w', encoding="utf8") as url_file:
    for row in subset:
        text = row["text"]
        title = row["title"]
        url = row["url"]

        # Keep sufficiently long articles whose titles match none of the
        # exclusion rules.
        if (len(text) >= MIN_LENGTH
                and not any(title.startswith(p) for p in EXCLUDE_TITLE_PREFIXES)
                and not any(title.endswith(s) for s in EXCLUDE_TITLE_SUFFIXES)):
            add += 1
            print(f"{add} : {len(text)} : {url} : {title}")
            url_file.write(f"{url.strip()}\n")