eckendoerffer committed
Commit: 8ae2c07
Parent(s): d94be47
Upload 6 files

extract_wiki/1_extract_link.py
ADDED
@@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-

"""
Wikipedia URLs Extractor:

Script to download the Wikipedia dataset from Hugging Face, extract URLs,
and save them to a text file for further processing.

Required:
    pip install datasets

Using MIN_LENGTH = 1400 results in approximately 1,100,000 URLs.
Using MIN_LENGTH = 1000 results in approximately 1,800,000 URLs.

Author : Guillaume Eckendoerffer
Date : 14-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""

import os
from datasets import load_dataset

# Constants
MIN_LENGTH = 1400  # Minimum text length in number of characters to be added to the URL list.
EXCLUDE_TITLES_START = ['Liste ', 'Abréviations ', '(homonymie)']

# File path configurations
PATH = os.path.dirname(os.path.abspath(__file__))
URL_FILEPATH = os.path.join(PATH, "wiki_link.txt")

# Resetting the output file
with open(URL_FILEPATH, 'w', encoding="utf8") as url_file:
    url_file.write("")

# Loading the dataset
dataset = load_dataset('wikipedia', '20220301.fr')
subset = dataset["train"]

add = 0
for i, row in enumerate(subset):
    text = row["text"]
    title = row["title"]
    url = row["url"]

    # Checking conditions to add the URL
    if not any(title.startswith(e) for e in EXCLUDE_TITLES_START) and len(text) >= MIN_LENGTH:
        add += 1
        print(f"{add} : {len(text)} : {url} : {title}")
        with open(URL_FILEPATH, 'a', encoding="utf8") as url_file:
            url_file.write(f"{url.strip()} \n")
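
Note: as committed, 1_extract_link.py downloads and caches the full 20220301.fr dump before the loop starts. If only the URL list is needed, a streamed pass could produce the same wiki_link.txt without materializing the dataset locally. The sketch below is a possible variant, not part of the commit, and assumes the preprocessed 20220301.fr config can be iterated with streaming=True; it reuses the MIN_LENGTH and title filters from the script.

    # Sketch: streaming variant of 1_extract_link.py (not part of the commit).
    from datasets import load_dataset

    MIN_LENGTH = 1400
    EXCLUDE_TITLES_START = ['Liste ', 'Abréviations ', '(homonymie)']

    # streaming=True iterates over the dump without caching it entirely on disk.
    subset = load_dataset('wikipedia', '20220301.fr', split='train', streaming=True)

    with open('wiki_link.txt', 'w', encoding='utf8') as url_file:
        for row in subset:
            keep = (len(row["text"]) >= MIN_LENGTH
                    and not any(row["title"].startswith(e) for e in EXCLUDE_TITLES_START))
            if keep:
                url_file.write(f'{row["url"].strip()} \n')
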
extract_wiki/2_extract_content.py
ADDED
@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-

"""
Wikipedia Source Extractor:

This script retrieves the source code of Wikipedia pages based on URLs found in a text file.
Instead of saving the entire HTML of the page, it trims the content, focusing on the main article
section, thereby limiting the size of each record.

Required:
    pip install aiohttp aiofiles

Usage:
- Ensure you have a file named "wiki_link.txt" in the same directory as the script.
- The file should contain one Wikipedia URL per line.
- Run the script.
- Extracted content will be saved under the "sources/html_wiki" directory with the name format "{index}.txt".

Author : Guillaume Eckendoerffer
Date : 14-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""

import os
import asyncio
import aiohttp
import aiofiles

START_INDEX = 0
path = os.path.dirname(os.path.abspath(__file__))

async def fetch_page_content(session, link):
    """Fetches the page content given a URL."""
    try:
        async with session.get(link) as response:
            return await response.text()
    except:
        print(f"Error fetching content from {link}")
        return None

def extract_content(source):
    """Extracts the main article section from the full page source."""
    start_idx = source.find('<div id="siteSub"')
    if start_idx == -1:
        return None

    source = source[start_idx:]
    end_markers = ['id="Notes_et_références"', 'id="Articles_connexes"']
    for marker in end_markers:
        end_idx = source.find(marker)
        if end_idx != -1:
            source = source[:end_idx] + '>'
            break
    return source

async def main():
    """Main async function to process each link."""
    async with aiohttp.ClientSession() as session:
        with open(os.path.join(path, "wiki_link.txt"), "r") as f:
            links = f.readlines()

        for i, link in enumerate(links[START_INDEX:], start=START_INDEX+1):
            print(f"Processing link {i}/{len(links)}")

            html_content = await fetch_page_content(session, link.strip())
            if not html_content:
                continue

            content = extract_content(html_content)
            if not content:
                continue

            output_file_path = os.path.join(path, f"sources/html_wiki/{i}.txt")
            async with aiofiles.open(output_file_path, "w", encoding="utf-8") as out_file:
                await out_file.write(content)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
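
Note: main() above awaits each page before requesting the next, so the ClientSession never has more than one request in flight. If throughput matters, a bounded-concurrency variant along the following lines could be used instead. This is a sketch, not part of the commit; it reuses fetch_page_content() and extract_content() from the diff above, and CONCURRENCY is an arbitrary limit chosen to stay polite to Wikipedia.

    # Sketch: bounded-concurrency variant of main() (not part of the commit).
    # Reuses fetch_page_content() and extract_content() from 2_extract_content.py.
    import os
    import asyncio
    import aiohttp
    import aiofiles

    CONCURRENCY = 8  # assumption: keep the request rate modest

    async def process_link(session, sem, index, link, path):
        async with sem:  # limit the number of simultaneous HTTP requests
            html = await fetch_page_content(session, link.strip())
        if not html:
            return
        content = extract_content(html)
        if not content:
            return
        out_path = os.path.join(path, f"sources/html_wiki/{index}.txt")
        async with aiofiles.open(out_path, "w", encoding="utf-8") as out_file:
            await out_file.write(content)

    async def main_concurrent(links, path):
        sem = asyncio.Semaphore(CONCURRENCY)
        async with aiohttp.ClientSession() as session:
            await asyncio.gather(*(process_link(session, sem, i, link, path)
                                   for i, link in enumerate(links, start=1)))

asyncio.run(main_concurrent(links, path)) would drive it; only the HTTP request is gated by the semaphore, so disk writes can still overlap.
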
extract_wiki/3_extract_txt.py
ADDED
@@ -0,0 +1,251 @@
# -*- coding: utf-8 -*-

"""
Wikipedia Text Extractor:

This script consists of two main stages: first transforming the raw HTML source code into plain text, then testing and filtering the lines to retain.

Required:
    pip install beautifulsoup4 langid

Step 1: /html_wiki/ -> /txt_wiki/
- Removal of undesired tags and classes.
- Extraction of text content from the HTML code.

Step 2: /txt_wiki/ -> /txt_wiki_lines/
- Sentence-by-sentence testing to determine elements for exclusion based on criteria like count of special characters, language detection, etc.
- Language testing is performed on each line and also for every element within parentheses.

The two stages have been deliberately separated. Additionally, numerous log files are generated to aid in debugging, testing, and refining exclusion parameters.
The goal here is to have a fruitful harvest for some quality text juice, not to hit the lottery jackpot numbers.

Author : Guillaume Eckendoerffer
Date : 22-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""

import os, re
from bs4 import BeautifulSoup
from langid.langid import LanguageIdentifier, model
identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)

MAX_LENGTH = int(330)           # nb words per line
MAX_SENTENCE_LENGTH = int(350)  # max words per sentence
# LANG EXCLUDE
ALLOWED_LANGUAGE = ['fr']
# MATH EXCLUDE
MAX_NUM_COUNT = 26
MAX_PLUS_SIGN_COUNT = 5
MAX_EQUALS_SIGN_COUNT = 5
# CHAR EXCLUDE
MAX_DOUBLE_QUOTE_COUNT = 18
MAX_PARENTHESIS_COUNT = 14
MAX_BRACKET_COUNT = 12
MAX_COMMA_COUNT = 40
MAX_DOLLAR_COUNT = 5
# LONG EXCLUDE
LONG_WORD_CHARS = 29    # LONG_WORD size in CHARS
MAXIMUM_LONG_WORDS = 3  # Remove line if LONG_WORDS count > MAXIMUM_LONG_WORDS
# CLASS EXCLUDE
CLASSES_TO_REMOVE = ['bandeau-container', 'bandeau-section', 'metadata', 'bandeau-niveau-information', 'gallery', 'infobox_v3']
# TAG EXCLUDE
TAG_TO_REMOVE = ['nav', 'menu', 'ul', 'ol', 'table', 'h1', 'h2', 'h3', 'h4', 'h5']
# PATH
PATH = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/')
HTML_PATH = PATH + '/sources/html_wiki/'
TARGET_LONG = PATH + "/excluded_long.txt"
TARGET_LANG = PATH + "/excluded_lang.txt"
TARGET_MATH = PATH + "/excluded_math.txt"
TARGET_CHARS = PATH + "/excluded_chars.txt"
TARGET_PARENTH = PATH + "/excluded_parentheses.txt"
TARGET_LATEX = PATH + "/excluded_latex.txt"
FILES = [f for f in os.listdir(HTML_PATH) if os.path.isfile(os.path.join(HTML_PATH, f))]

with open(TARGET_LONG, 'w', encoding="utf8") as f:
    f.write("")
with open(TARGET_LANG, 'w', encoding="utf8") as f:
    f.write("")
with open(TARGET_MATH, 'w', encoding="utf8") as f:
    f.write("")
with open(TARGET_CHARS, 'w', encoding="utf8") as f:
    f.write("")
with open(TARGET_PARENTH, 'w', encoding="utf8") as f:
    f.write("")
with open(TARGET_LATEX, 'w', encoding="utf8") as f:
    f.write("")

def extract_wikipedia_text(html_path, txt_path):
    """ Step 1: Extraction of text content from the HTML code """
    with open(html_path, "r", encoding="utf-8") as f:
        wiki_content = f.read()
    soup = BeautifulSoup(wiki_content, 'html.parser')

    # Remove menus, sub-menus, lists, and tables
    for tag in soup.find_all(TAG_TO_REMOVE):
        tag.decompose()

    # Find and remove the divs from classes to remove
    for class_name in CLASSES_TO_REMOVE:
        for div in soup.find_all("div", class_=class_name):
            div.decompose()

    # Retrieve only the text
    text = soup.get_text()
    text = text.replace(chr(8217), "'")
    text = text.replace("`", "'")
    text = text.replace("‘", "'")
    text = re.sub(r'\[\d+\]', ' ', text)
    text = re.sub(r'\{[^\}]*\}', ' ', text)
    text = re.sub(r'\[[^\}]*\]', ' ', text)

    with open(txt_path, "w", encoding="utf-8") as f:
        f.write(text)
    return len(text)

def split_into_sentences(text):
    sentences = re.split(r'([.;!?]\s*\u00BB|[.;!?\u00BB]\s*(?!\u00BB)|\s*--\s*)', text)
    sentences = ["".join(i) for i in zip(sentences[0::2], sentences[1::2])]
    return sentences

def text_standardize(text):
    text = text.replace('—', '-')
    text = text.replace('–', '-')
    text = text.replace('―', '-')
    text = text.replace('…', '...')
    text = re.sub('''(~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
    text = re.sub('\s*\n\s*', ' \n ', text)
    text = re.sub('[^\S\n]+', ' ', text)
    text = re.sub(r"\s{2,}", " ", text)
    return text.strip()

def has_repeated_uniq_char(text):
    pattern = r'([a-zA-Z0-9] ){5,}'
    return bool(re.search(pattern, text))

def countLongText(text):
    nb_long = 0
    t = text.split()
    for w in t:
        if len(w) > LONG_WORD_CHARS:
            if w.count('-') != 0:
                nb_long += 1
    return nb_long

def remove_latex(text):
    sp_chars = 0
    sp_chars += text.count('(')
    sp_chars += text.count(')')
    sp_chars += text.count('{')
    sp_chars += text.count('}')
    sp_chars += text.count('_')
    sp_chars += text.count('/')
    sp_chars += text.count(' ')
    if sp_chars > (len(text)/2) and len(text) > 4:
        text = ''
    return text

def extract_parentheses(text):
    stack = []
    results = []
    for i, char in enumerate(text):
        if char == '(':
            stack.append(i)
        elif char == ')' and stack:
            start = stack.pop()
            results.append((start, i))
    return results

def is_date_or_year_range(content):
    return bool(re.match(r'^\d{4}(-\d{4})?$', content.strip()))

def remove_language_in_parentheses(line, target_file_parentheses):
    for start, end in reversed(extract_parentheses(line)):
        match = line[start+1:end]
        if is_date_or_year_range(match):
            continue
        lang = identifier.classify(match)
        if lang[0] not in ALLOWED_LANGUAGE:
            line = line[:start] + line[end+1:]
            target_file_parentheses.write(f'({match})' + "\n")
    return line

def test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses):
    nb_words_line = len(line_add.split())
    if countLongText(line_add.strip()) > MAXIMUM_LONG_WORDS \
            or len(re.findall(r'\d+ ', line_add.strip())) > MAX_NUM_COUNT \
            or line_add.count('=') > MAX_EQUALS_SIGN_COUNT or line_add.count('+') > MAX_PLUS_SIGN_COUNT:
        target_file_math.write(f"{line_add.strip()} \n")
    elif line_add.count('"') > MAX_DOUBLE_QUOTE_COUNT or line_add.count('(') > MAX_PARENTHESIS_COUNT \
            or line_add.count('[') > MAX_BRACKET_COUNT \
            or line_add.count(',') > MAX_COMMA_COUNT \
            or line_add.count('$') > MAX_DOLLAR_COUNT:
        target_file_chars.write(f"{line_add.strip()} \n")
    else:
        lang = identifier.classify(line_add)
        if lang[0] not in ALLOWED_LANGUAGE:
            target_file_lang.write(f"[{lang[0]}] {line_add.strip()} \n")
        else:
            if len(line_add.split()) > MAX_SENTENCE_LENGTH or has_repeated_uniq_char(line_add.strip()):
                target_file_long.write(f"[{nb_words_line}] {line_add.strip()} \n")
            else:
                line_add = re.sub(r"\s{2,}", " ", remove_language_in_parentheses(line_add.strip(), target_file_parentheses))
                target_file.write(f"{line_add} \n")

def test_line(full_path, full_target_path):
    """ Step 2: Exclusion based on criteria """
    nb_words_line = 0
    line_add = ""

    with open(full_target_path, 'w', encoding="utf8") as f:
        f.write("")

    with open(full_path, "r", encoding="utf8", errors="ignore") as source_file, \
         open(full_target_path, "a", encoding="utf8") as target_file, \
         open(TARGET_LONG, "a", encoding="utf8") as target_file_long, \
         open(TARGET_LANG, "a", encoding="utf8") as target_file_lang, \
         open(TARGET_MATH, "a", encoding="utf8") as target_file_math, \
         open(TARGET_CHARS, "a", encoding="utf8") as target_file_chars, \
         open(TARGET_PARENTH, "a", encoding="utf8") as target_file_parentheses, \
         open(TARGET_LATEX, "a", encoding="utf8") as target_file_latex:

        for line in source_file:
            line = '' if line.count('Articles détaillés :') else line.strip()
            line = line.replace("Un article de Wikipédia, l'encyclopédie libre.", "")
            if line.count('Ce document provient de') \
                    or line.count('https://') \
                    or line.count('wikipedia.') \
                    or line.count('index.') \
                    or line.count('php?') \
                    or line.count('title='):
                line = ''

            line = re.sub(r',\s,', ' ', line)
            line = re.sub(r'\.\s\.', '.', line)
            line = re.sub(r',\s\.', '.', line)

            sentences = split_into_sentences(re.sub(r"\s{2,}", " ", line))
            for sentence in sentences:
                if remove_latex(text_standardize(sentence)) == '':
                    target_file_latex.write(f"{sentence.strip()} \n")
                    sentence = ''
                words = len(sentence.split())
                if len(line_add.split()) + words < MAX_LENGTH:
                    nb_words_line += words
                    line_add += f" {text_standardize(sentence)}"
                else:
                    test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses)
                    nb_words_line = len(sentence.split())
                    line_add = f" {text_standardize(sentence)}"

        if nb_words_line:
            test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses)


for i, file in enumerate(FILES):
    html_path = HTML_PATH + file
    txt_path = html_path.replace('html_wiki', 'txt_wiki')
    txt_len = extract_wikipedia_text(html_path, txt_path)
    txt_lines_path = html_path.replace('html_wiki', 'txt_wiki_lines')
    test_line(txt_path, txt_lines_path)
    print(f"({i+1}/{len(FILES)}) {file} {txt_len}")
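
Note: when tuning ALLOWED_LANGUAGE and the exclusion thresholds, it helps to see what identifier.classify() returns: a (language, probability) tuple, which is what both test_exclude() and remove_language_in_parentheses() inspect. A minimal standalone check (not part of the commit) could look like this:

    # Sketch: standalone check of the langid filter used in 3_extract_txt.py.
    from langid.langid import LanguageIdentifier, model

    identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)
    ALLOWED_LANGUAGE = ['fr']

    samples = [
        "Le château a été construit au douzième siècle.",  # expected to pass the filter
        "The castle was built in the twelfth century.",     # expected to be excluded
    ]
    for text in samples:
        lang, prob = identifier.classify(text)  # e.g. ('fr', 0.99)
        print(f"[{lang} {prob:.2f}] keep={lang in ALLOWED_LANGUAGE} : {text}")
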
extract_wiki/sources/html_wiki/.gitkeep
ADDED
File without changes
extract_wiki/sources/txt_wiki/.gitkeep
ADDED
File without changes
extract_wiki/sources/txt_wiki_lines/.gitkeep
ADDED
File without changes