# -*- coding: utf-8 -*-

"""
Wikipedia Text Extractor:

This script runs in two main stages: extracting the text content from the raw HTML source code, then testing and filtering the lines to retain.

Required:
pip install beautifulsoup4 langid

Step 1: /html_wiki/ -> /txt_wiki/
- Removal of undesired tags and classes.
- Extraction of text content from the HTML code.

Step 2: /txt_wiki/ -> /txt_wiki_lines/
- Sentence-by-sentence testing to exclude lines based on criteria such as special-character counts, language detection, etc.
- Language testing is performed on each line and also for every element within parentheses.

The two stages have been deliberately separated. Numerous log files are also generated to aid in debugging, testing, and refining the exclusion parameters.
The goal here is a fruitful harvest of quality text juice, not hitting the lottery jackpot.

Author     : Guillaume Eckendoerffer
Date       : 22-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""

import os, re
from bs4 import BeautifulSoup
from langid.langid import LanguageIdentifier, model
identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)

MAX_LENGTH             = 330            # max words per output line
MAX_SENTENCE_LENGTH    = 350            # max words per sentence
# LANG EXCLUDE
ALLOWED_LANGUAGE       = ['fr']    
# MATH EXCLUDE
MAX_NUM_COUNT          = 26
MAX_PLUS_SIGN_COUNT    = 5
MAX_EQUALS_SIGN_COUNT  = 5
# CHAR EXCLUDE
MAX_DOUBLE_QUOTE_COUNT = 18
MAX_PARENTHESIS_COUNT  = 14
MAX_BRACKET_COUNT      = 12
MAX_COMMA_COUNT        = 40
MAX_DOLLAR_COUNT       = 5
# LONG EXCLUDE
LONG_WORD_CHARS        = 29             # LONG_WORD size in CHARS
MAXIMUM_LONG_WORDS     = 3              # Remove line if LONG_WORDS count > MAXIMUM_LONG_WORDS
# CLASS EXCLUDE
CLASSES_TO_REMOVE      = ['bandeau-container', 'bandeau-section', 'metadata', 'bandeau-niveau-information', 'gallery', 'infobox_v3']
# TAG EXCLUDE
TAG_TO_REMOVE          = ['nav', 'menu', 'ul', 'ol', 'table', 'h1', 'h2', 'h3', 'h4', 'h5']
# PATH
PATH                   = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/') 
HTML_PATH              = PATH + '/sources/html_wiki/'
TARGET_LONG            = PATH + "/excluded_long.txt"
TARGET_LANG            = PATH + "/excluded_lang.txt"
TARGET_MATH            = PATH + "/excluded_math.txt"
TARGET_CHARS           = PATH + "/excluded_chars.txt"
TARGET_PARENTH         = PATH + "/excluded_parentheses.txt"
TARGET_LATEX           = PATH + "/excluded_latex.txt"
FILES = [f for f in os.listdir(HTML_PATH) if os.path.isfile(os.path.join(HTML_PATH, f))]
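
# The output directories are assumed to sit alongside html_wiki/ (see the main
# loop below); create them if missing so the file writes do not fail.
os.makedirs(HTML_PATH.replace('html_wiki', 'txt_wiki'), exist_ok=True)
os.makedirs(HTML_PATH.replace('html_wiki', 'txt_wiki_lines'), exist_ok=True)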

# Truncate the exclusion log files from any previous run
for target in (TARGET_LONG, TARGET_LANG, TARGET_MATH, TARGET_CHARS, TARGET_PARENTH, TARGET_LATEX):
    with open(target, 'w', encoding="utf8") as f:
        f.write("")

def extract_wikipedia_text(html_path, txt_path):
    """ Step 1: Extraction of text content from the HTML code """
    with open(html_path, "r", encoding="utf-8") as f:
        wiki_content = f.read()
    soup = BeautifulSoup(wiki_content, 'html.parser')

    # Remove menus, sub-menus, lists, and tables
    for tag in soup.find_all(TAG_TO_REMOVE):
        tag.decompose()

    # Find and remove the divs from classes to remove
    for class_name in CLASSES_TO_REMOVE:
        for div in soup.find_all("div", class_=class_name):
            div.decompose()

    # Retrieve only the text
    text = soup.get_text()
    text = text.replace(chr(8217), "'")
    text = text.replace("`", "'")
    text = text.replace("‘", "'")
    text = re.sub(r'\[\d+\]', ' ', text)
    text = re.sub(r'\{[^\}]*\}', ' ', text)
    text = re.sub(r'\[[^\]]*\]', ' ', text)

    with open(txt_path, "w", encoding="utf-8") as f:
        f.write(text)
    return len(text)

def split_into_sentences(text):
    """ Split on sentence terminators: . ; ! ?, a closing French quote, or ' -- ' """
    sentences = re.split(r'([.;!?]\s*\u00BB|[.;!?\u00BB]\s*(?!\u00BB)|\s*--\s*)', text)
    # re.split with a capture group interleaves text and separators; the zip below
    # pairs them back up and drops any trailing fragment with no terminator.
    sentences = ["".join(i) for i in zip(sentences[0::2], sentences[1::2])]
    return sentences
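
# Illustrative example (assumed input; output traced by hand):
#   split_into_sentences("Il pleut. Elle sort ! Fin")
#   -> ['Il pleut. ', 'Elle sort ! ']   # 'Fin' has no terminator and is dropped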

def text_standardize(text):
    """ Normalize dashes and ellipses, then pad punctuation with spaces """
    text = text.replace('—', '-')
    text = text.replace('–', '-')
    text = text.replace('―', '-')
    text = text.replace('…', '...')
    text = re.sub(r'(~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|/+|\*+|\[+|\]+|}+|{+|\|+|_+)', r' \1 ', text)
    text = re.sub(r'\s*\n\s*', ' \n ', text)
    text = re.sub(r'[^\S\n]+', ' ', text)
    text = re.sub(r'\s{2,}', ' ', text)
    return text.strip()
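
# Illustrative example (assumed input):
#   text_standardize("Vive (la) pluie !")  ->  "Vive ( la ) pluie !"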

def has_repeated_uniq_char(text):
    """ Detect 5+ single alphanumeric characters in a row separated by spaces, e.g. 'a b c d e f' """
    return bool(re.search(r'([a-zA-Z0-9] ){5,}', text))

def countLongText(text):
    """ Count words longer than LONG_WORD_CHARS that contain a hyphen """
    nb_long = 0
    for w in text.split():
        if len(w) > LONG_WORD_CHARS and '-' in w:
            nb_long += 1
    return nb_long
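
# Illustrative example: only over-long tokens that contain a hyphen are counted:
#   countLongText('mot ' + 'a' * 30 + '-fin')  ->  1
#   countLongText('mot ' + 'b' * 35)           ->  0   (no hyphen)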

def remove_latex(text):
    """ Blank out a string dominated by LaTeX-like markup characters """
    sp_chars = sum(text.count(c) for c in '(){}_/ ')
    if sp_chars > len(text) / 2 and len(text) > 4:
        text = ''
    return text
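
# Illustrative example (assumed input): markup chars outnumber half the length:
#   remove_latex('{ x } / { y }')        ->  ''
#   remove_latex('Une phrase normale.')  ->  'Une phrase normale.'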

def extract_parentheses(text):
    """ Return (start, end) index pairs of matched parentheses, innermost pairs first """
    stack = []
    results = []
    for i, char in enumerate(text):
        if char == '(':
            stack.append(i)
        elif char == ')' and stack:
            start = stack.pop()
            results.append((start, i))
    return results
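
# Illustrative example: nested pairs are returned innermost first:
#   extract_parentheses('a (b (c) d)')  ->  [(5, 7), (2, 10)]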

def is_date_or_year_range(content):
    """ True for a bare year ('1995') or a year range ('1914-1918') """
    return bool(re.match(r'^\d{4}(-\d{4})?$', content.strip()))

def remove_language_in_parentheses(line, target_file_parentheses):
    """ Drop parenthesised segments whose detected language is not in ALLOWED_LANGUAGE """
    for start, end in reversed(extract_parentheses(line)):
        match = line[start+1:end]
        if is_date_or_year_range(match):
            continue
        lang = identifier.classify(match)
        if lang[0] not in ALLOWED_LANGUAGE:
            line = line[:start] + line[end+1:]
            target_file_parentheses.write(f'({match})' + "\n")
    return line
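
# Illustrative example ('log' is a placeholder file handle; langid output may
# vary on short strings, the 'en' classification is assumed here):
#   remove_language_in_parentheses("Paris (the City of Light) est belle.", log)
#   -> "Paris  est belle."  with "(the City of Light)" written to the log;
#   the caller squeezes the leftover double space afterwards.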

def test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses):
    """ Route a candidate line to the matching exclusion log, or keep it in the target file """
    nb_words_line = len(line_add.split())
    if countLongText(line_add.strip()) > MAXIMUM_LONG_WORDS \
        or len( re.findall( r'\d+ ', line_add.strip() ) ) > MAX_NUM_COUNT \
        or line_add.count('=') > MAX_EQUALS_SIGN_COUNT or line_add.count('+') > MAX_PLUS_SIGN_COUNT:
        target_file_math.write(f"{line_add.strip()} \n")
    elif line_add.count('"') > MAX_DOUBLE_QUOTE_COUNT or line_add.count('(') > MAX_PARENTHESIS_COUNT \
        or line_add.count('[') > MAX_BRACKET_COUNT \
        or line_add.count(',') > MAX_COMMA_COUNT \
        or line_add.count('$') > MAX_DOLLAR_COUNT:
        target_file_chars.write(f"{line_add.strip()} \n")
    else:
        lang = identifier.classify(line_add)
        if lang[0] not in ALLOWED_LANGUAGE:
            target_file_lang.write(f"[{lang[0]}] {line_add.strip()} \n")
        else:
            if nb_words_line > MAX_SENTENCE_LENGTH or has_repeated_uniq_char(line_add.strip()):
                target_file_long.write(f"[{nb_words_line}] {line_add.strip()} \n")
            else:
                line_add = re.sub(r"\s{2,}", " ",remove_language_in_parentheses(line_add.strip(), target_file_parentheses))
                target_file.write(f"{line_add} \n")

def test_line(full_path, full_target_path):
    """ Step 2: Exclusion based on criteria """
    nb_words_line = 0
    line_add = ""

    with open(full_path, "r", encoding="utf8", errors="ignore") as source_file, \
        open(full_target_path, "w", encoding="utf8") as target_file, \
        open(TARGET_LONG, "a", encoding="utf8") as target_file_long, \
        open(TARGET_LANG, "a", encoding="utf8") as target_file_lang, \
        open(TARGET_MATH, "a", encoding="utf8") as target_file_math, \
        open(TARGET_CHARS, "a", encoding="utf8") as target_file_chars, \
        open(TARGET_PARENTH, "a", encoding="utf8") as target_file_parentheses, \
        open(TARGET_LATEX, "a", encoding="utf8") as target_file_latex:

        for line in source_file:
            # Drop hatnotes, footer boilerplate, and URL debris
            line = '' if 'Articles détaillés :' in line else line.strip()
            line = line.replace("Un article de Wikipédia, l'encyclopédie libre.", "")
            if any(s in line for s in ('Ce document provient de', 'https://', 'wikipedia.', 'index.', 'php?', 'title=')):
                line = ''

            line = re.sub(r',\s,', ' ', line)
            line = re.sub(r'\.\s\.', '.', line)
            line = re.sub(r',\s\.', '.', line)

            sentences = split_into_sentences(re.sub(r"\s{2,}", " ", line))
            for sentence in sentences:
                # Blank out and log LaTeX-dominated fragments
                if remove_latex(text_standardize(sentence)) == '':
                    target_file_latex.write(f"{sentence.strip()} \n")
                    sentence = ''
                words = len(sentence.split())
                if len(line_add.split()) + words < MAX_LENGTH:
                    nb_words_line += words
                    line_add += f" {text_standardize(sentence)}" 
                else:
                    test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses)
                    nb_words_line = len(sentence.split())
                    line_add = f" {text_standardize(sentence)}" 
   
        if nb_words_line:
            test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses)


# Run both stages over every HTML file, reporting progress
for i, file in enumerate(FILES):
    html_path = HTML_PATH + file
    txt_path = html_path.replace('html_wiki', 'txt_wiki')
    txt_len = extract_wikipedia_text(html_path, txt_path)
    txt_lines_path = html_path.replace('html_wiki', 'txt_wiki_lines')
    test_line(txt_path, txt_lines_path)
    print(f"({i+1}/{len(FILES)}) {file} {txt_len}")