# -*- coding: utf-8 -*-

"""
Wikipedia Source Extractor:

This script retrieves the source code of Wikipedia pages based on URLs found in a text file.
Instead of saving the entire HTML of the page, it trims the content, focusing on the main article 
section, thereby limiting the size of each record.

Requirements:
pip install aiohttp aiofiles

Usage:
- Ensure you have a file named "wiki_link.txt" in the same directory as the script.
- The file should contain one Wikipedia URL per line (see the example below).
- Run the script.
- Extracted content will be saved under the "sources/html_wiki" directory with the name format "{index}.txt".
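
Example "wiki_link.txt" contents (hypothetical URLs; any Wikipedia article URLs work):
    https://fr.wikipedia.org/wiki/Python_(langage)
    https://fr.wikipedia.org/wiki/Encyclopédie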

Author     : Guillaume Eckendoerffer
Date       : 14-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""

import os
import asyncio
import aiohttp
import aiofiles

START_INDEX = 0  # index of the first link to process (lets you resume a partial run)
path = os.path.dirname(os.path.abspath(__file__))

async def fetch_page_content(session, link):
    """Fetches the page content given a URL."""
    try:
        async with session.get(link) as response:
            return await response.text()
    except (aiohttp.ClientError, asyncio.TimeoutError) as err:
        print(f"Error fetching content from {link}: {err}")
        return None

def extract_content(source):
    """Extracts the main article section from the full page source."""
    start_idx = source.find('<div id="siteSub"')
    if start_idx == -1:
        return None

    source = source[start_idx:]
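    # French Wikipedia section anchors ("Notes et références", "Articles connexes") mark the end of the article body.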
    end_markers = ['id="Notes_et_références"', 'id="Articles_connexes"']
    for marker in end_markers:
        end_idx = source.find(marker)
        if end_idx != -1:
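            # Cut just before the end-section anchor; the trailing '>' closes the tag that was truncated mid-attribute.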
            source = source[:end_idx] + '>'
            break
    return source

async def main():
    """Main async function to process each link."""
    # Ensure the output directory exists before writing any files.
    os.makedirs(os.path.join(path, "sources", "html_wiki"), exist_ok=True)

    async with aiohttp.ClientSession() as session:
        with open(os.path.join(path, "wiki_link.txt"), "r", encoding="utf-8") as f:
            links = f.readlines()
        
        for i, link in enumerate(links[START_INDEX:], start=START_INDEX+1):
            print(f"Processing link {i}/{len(links)}")
            
            html_content = await fetch_page_content(session, link.strip())
            if not html_content:
                continue

            content = extract_content(html_content)
            if not content:
                print(f"Unable to extract content from {link}")
                continue

            # Each record is saved as "{index}.txt", where index is the 1-based line number in wiki_link.txt.
            output_file_path = os.path.join(path, "sources", "html_wiki", f"{i}.txt")
            async with aiofiles.open(output_file_path, "w", encoding="utf-8") as out_file:
                await out_file.write(content)

if __name__ == "__main__":
    asyncio.run(main())