# -*- coding: utf-8 -*-

"""
Wikipedia Source Extractor:
This script retrieves the source code of Wikipedia pages based on URLs found
in a text file. Instead of saving the entire HTML of the page, it trims the
content, focusing on the main article section, thereby limiting the size of
each record.

Required:
    pip install aiohttp aiofiles

Usage:
- Ensure you have a file named "wiki_link.txt" in the same directory as the
  script.
- The file should contain one Wikipedia URL per line.
- Run the script.
- Extracted content will be saved under the "sources/html_wiki" directory
  with the name format "{index}.txt".

Author     : Guillaume Eckendoerffer
Date       : 14-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""

import os
import asyncio

import aiohttp
import aiofiles

# Index of the first link in "wiki_link.txt" to process (allows resuming a
# previous run without re-downloading earlier pages).
START_INDEX = 0

# Absolute directory containing this script; used to resolve relative paths.
path = os.path.dirname(os.path.abspath(__file__))


async def fetch_page_content(session, link):
    """Fetch the raw page source for *link*.

    Args:
        session: An open ``aiohttp.ClientSession`` used to issue the GET.
        link: URL of the Wikipedia page to download.

    Returns:
        The decoded response body as ``str``, or ``None`` if the request
        failed.
    """
    try:
        async with session.get(link) as response:
            return await response.text()
    # Narrowed from a bare ``except:``: a bare except in a coroutine also
    # swallows asyncio.CancelledError (breaking task cancellation) and hides
    # genuine programming errors. We catch only the failures a download can
    # legitimately produce, and include the reason in the message.
    except (aiohttp.ClientError, asyncio.TimeoutError, UnicodeDecodeError) as exc:
        print(f"Error fetching content from {link}: {exc}")
        return None


def extract_content(source):
    """Extracts the main article section from the full page source."""
    # NOTE(review): the remainder of this function was truncated in this
    # chunk of the file. The original body began with the fragment below;
    # restore the full implementation from the upstream repository before
    # running this script.
    # start_idx = source.find('