KaraKaraWitch committed
Commit 981bf66 • 1 Parent(s): 580939d
This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. Scrape.py +414 -0
  2. BLiterature_00.7z → data/BLiterature_00.7z +0 -0
  3. BLiterature_01.7z → data/BLiterature_01.7z +0 -0
  4. BLiterature_02.7z → data/BLiterature_02.7z +0 -0
  5. BLiterature_03.7z → data/BLiterature_03.7z +0 -0
  6. BLiterature_04.7z → data/BLiterature_04.7z +0 -0
  7. BLiterature_05.7z → data/BLiterature_05.7z +0 -0
  8. BLiterature_06.7z → data/BLiterature_06.7z +0 -0
  9. BLiterature_07.7z → data/BLiterature_07.7z +0 -0
  10. BLiterature_08.7z → data/BLiterature_08.7z +0 -0
  11. BLiterature_09.7z → data/BLiterature_09.7z +0 -0
  12. BLiterature_10.7z → data/BLiterature_10.7z +0 -0
  13. BLiterature_100.7z → data/BLiterature_100.7z +0 -0
  14. BLiterature_101.7z → data/BLiterature_101.7z +0 -0
  15. BLiterature_102.7z → data/BLiterature_102.7z +0 -0
  16. BLiterature_103.7z → data/BLiterature_103.7z +0 -0
  17. BLiterature_104.7z → data/BLiterature_104.7z +0 -0
  18. BLiterature_11.7z → data/BLiterature_11.7z +0 -0
  19. BLiterature_12.7z → data/BLiterature_12.7z +0 -0
  20. BLiterature_13.7z → data/BLiterature_13.7z +0 -0
  21. BLiterature_14.7z → data/BLiterature_14.7z +0 -0
  22. BLiterature_15.7z → data/BLiterature_15.7z +0 -0
  23. BLiterature_16.7z → data/BLiterature_16.7z +0 -0
  24. BLiterature_17.7z → data/BLiterature_17.7z +0 -0
  25. BLiterature_18.7z → data/BLiterature_18.7z +0 -0
  26. BLiterature_19.7z → data/BLiterature_19.7z +0 -0
  27. BLiterature_20.7z → data/BLiterature_20.7z +0 -0
  28. BLiterature_21.7z → data/BLiterature_21.7z +0 -0
  29. BLiterature_22.7z → data/BLiterature_22.7z +0 -0
  30. BLiterature_23.7z → data/BLiterature_23.7z +0 -0
  31. BLiterature_24.7z → data/BLiterature_24.7z +0 -0
  32. BLiterature_25.7z → data/BLiterature_25.7z +0 -0
  33. BLiterature_26.7z → data/BLiterature_26.7z +0 -0
  34. BLiterature_27.7z → data/BLiterature_27.7z +0 -0
  35. BLiterature_28.7z → data/BLiterature_28.7z +0 -0
  36. BLiterature_29.7z → data/BLiterature_29.7z +0 -0
  37. BLiterature_30.7z → data/BLiterature_30.7z +0 -0
  38. BLiterature_31.7z → data/BLiterature_31.7z +0 -0
  39. BLiterature_32.7z → data/BLiterature_32.7z +0 -0
  40. BLiterature_33.7z → data/BLiterature_33.7z +0 -0
  41. BLiterature_34.7z → data/BLiterature_34.7z +0 -0
  42. BLiterature_35.7z → data/BLiterature_35.7z +0 -0
  43. BLiterature_36.7z → data/BLiterature_36.7z +0 -0
  44. BLiterature_37.7z → data/BLiterature_37.7z +0 -0
  45. BLiterature_38.7z → data/BLiterature_38.7z +0 -0
  46. BLiterature_39.7z → data/BLiterature_39.7z +0 -0
  47. BLiterature_40.7z → data/BLiterature_40.7z +0 -0
  48. BLiterature_41.7z → data/BLiterature_41.7z +0 -0
  49. BLiterature_42.7z → data/BLiterature_42.7z +0 -0
  50. BLiterature_43.7z → data/BLiterature_43.7z +0 -0
Scrape.py ADDED
@@ -0,0 +1,414 @@
+ import asyncio
+ import json
+ import random
+ import subprocess
+ import httpx
+ import tqdm
+ import pathlib
+ import backoff
+ import urllib.parse
+ import aiofiles
+ from concurrent.futures import ThreadPoolExecutor
+ from natsort import natsorted, ns
+ import orjson
+ from bs4 import BeautifulSoup
+ import idna
+ import inspect
+
+ ROOT = pathlib.Path.cwd()
+ GOOGLE = False
+ ASYNC_CALL = 250  # No. of requests at once.
+
+ def get_write_path(url, filename: pathlib.Path = None):
+     if filename is None:
+         path = urllib.parse.urlsplit(url).path
+         if path.startswith("/"):
+             path = path[1:]
+         filename = pathlib.Path(path)
+     filepath = ROOT.resolve() / filename
+     if filepath.suffix.lower() not in [".json", ".html", ".xml"]:
+         filepath = filepath.with_suffix(".html")
+     return filepath
+
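A quick illustration of the cache layout implied by get_write_path above (the example URLs are hypothetical): a URL's path is mirrored under the working directory, and anything without a .json/.html/.xml suffix is saved as .html.

    # Illustrative only; the example URLs are hypothetical.
    get_write_path("https://example.blog.fc2.com/sitemaps.xml")
    # -> <cwd>/sitemaps.xml       (recognised suffix is kept)
    get_write_path("https://example.blog.fc2.com/blog-entry-1")
    # -> <cwd>/blog-entry-1.html  (other paths are coerced to .html)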
+ def record_response(response: httpx.Response, filename: pathlib.Path = None):
+     filepath = get_write_path(str(response.url), filename=filename)
+     parent = filepath.parent
+     reserved = ["con", "prn", "aux", "clock", "nul",
+                 "com1", "com2", "com3", "com4", "com5", "com6", "com7", "com8", "com9",
+                 "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9",
+                 ]
+     if parent.stem in reserved:
+         # Avoid reserved Windows device names by prefixing the directory.
+         parent = parent.with_stem(f"!{parent.stem}")
+     parent.mkdir(parents=True, exist_ok=True)
+     (parent / filepath.name).write_text(response.text, encoding="utf-8")
+
+ def agent():
+     if GOOGLE:
+         return f"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; Googlebot/2.1; +http://www.google.com/bot.html) Chrome/{random.randint(100, 111)}.0.0.0 Safari/537.36"
+     return f"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{random.randint(100, 111)}.0.0.0 Safari/537.36"
+
+ limits = httpx.Limits(max_keepalive_connections=None, max_connections=None)
+
+ session = httpx.AsyncClient(limits=limits, verify=False)
+
+ # Scraping.. - Shinon
+
+ @backoff.on_exception(backoff.expo, httpx.HTTPError)
+ @backoff.on_predicate(backoff.expo)
+ async def get_url(url, record_filename=None, use_cached=True, no_read=False, noisy=0.0, record_func=record_response):
+     session.headers.update(
+         {
+             "User-Agent": agent(),
+             "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+             "accept-encoding": "gzip, deflate",
+             "accept-language": "jp",
+         }
+     )
+     if noisy:
+         await asyncio.sleep(random.uniform(0, noisy))
+     session.cookies.clear()
+     session.cookies.set("age_check", "1", ".blog.fc2.com")
+     session.cookies.set("blog_language", "ja", ".blog.fc2.com")
+     if use_cached:
+         if get_write_path(url, filename=record_filename).exists():
+             if no_read:
+                 return True
+             return get_write_path(url, filename=record_filename).read_text(encoding="utf-8")
+
+     try:
+         response = await session.get(url)
+     except httpx.TimeoutException:
+         return False  # Falsy return triggers a backoff retry.
+     except idna.core.InvalidCodepoint:
+         print(f"What: {url}")
+         return f'<?xml version="1.0" encoding="utf-8"?><error>idna.core.InvalidCodepoint: {url}</error>'
+     except idna.core.InvalidCodepointContext:
+         print(f"What: {url}")
+         return f'<?xml version="1.0" encoding="utf-8"?><error>idna.core.InvalidCodepointContext {url}</error>'
+     except idna.core.IDNAError:
+         print(f"What: {url}")
+         return f'<?xml version="1.0" encoding="utf-8"?><error>idna.core.IDNAError: {url}</error>'
+     except Exception as e:
+         print(f"What: {url}", e)
+         return f'<?xml version="1.0" encoding="utf-8"?><error>Uncaught Error: {url}</error>'
+     if response.status_code in [200, 404]:
+         if record_func:
+             if inspect.iscoroutinefunction(record_func):
+                 await record_func(response, filename=record_filename)
+             else:
+                 record_func(response, filename=record_filename)
+         return response.text
+     elif response.status_code in [301, 302]:
+         redirected = response.headers.get('location')
+         if "https://error.fc2.com/" in redirected:
+             if "https://error.fc2.com/blog/e/404/" in redirected:
+                 return '<?xml version="1.0" encoding="utf-8"?><error>404, does not exist.</error>'
+             if "https://error.fc2.com/blog/syntax_error/" in redirected:
+                 return '<?xml version="1.0" encoding="utf-8"?><error>syntax_error for requested page.</error>'
+             print(f"Error: {response.status_code} for {response}: {redirected} | {url}")
+             return False  # Falsy return triggers a backoff retry.
+         else:
+             if redirected.startswith("/"):
+                 redirected = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path=redirected))
+             if redirected.startswith("https://") or redirected.startswith("http://"):
+                 # Follow the redirect.
+                 return await get_url(
+                     redirected, record_filename=record_filename,
+                     use_cached=use_cached, no_read=no_read, record_func=record_func)
+             else:
+                 print(f"Error: {response.status_code} for {response}: {redirected} | {url}")
+                 return False
+     elif response.status_code in [502, 503]:
+         # Retry on 502/503.
+         return False
+     print(f"Error: {response.status_code} for {response}")
+     return False
+
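A minimal usage sketch of get_url (the blog subdomain is hypothetical), mirroring how blog_sitemaps calls it below: a falsy return value is retried by backoff.on_predicate, while a transport-level httpx.HTTPError is retried by backoff.on_exception.

    # Illustrative only; the subdomain is hypothetical.
    async def example():
        await get_url(
            "https://exampleuser.blog.fc2.com/sitemaps.xml",
            record_filename="blog/exampleuser/sitemap.xml",
            no_read=True,   # cache to disk only, do not return the body
            noisy=1,        # random 0-1 s delay before the request
        )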
+ shlk = asyncio.Queue(maxsize=10000)
+
+ from huggingface_hub import HfApi
+ api = HfApi()
+
+ def sync_upload(idx):
+     subprocess.call(["7zz", "a", f"/home/shinon/fc2Warui/BLiterature_{str(idx).zfill(2)}.7z", f"/home/shinon/fc2Warui/page_scrape_{idx}.jsonl"])
+     api.upload_file(
+         path_or_fileobj=f"/home/shinon/fc2Warui/BLiterature_{str(idx).zfill(2)}.7z",
+         path_in_repo=f"BLiterature_{str(idx).zfill(2)}.7z",
+         repo_id="RyokoAI-Internal/BLiterature",
+         repo_type="dataset",
+     )
+     print(f"Deleting parts: page_scrape_{idx}.jsonl | BLiterature_{str(idx).zfill(2)}.7z")
+     pathlib.Path(f"page_scrape_{idx}.jsonl").unlink()
+     pathlib.Path(f"BLiterature_{str(idx).zfill(2)}.7z").unlink()
+
+ hf_upload_executor = ThreadPoolExecutor(10)
+
+ async def hf_upload(idx):
+     loop = asyncio.get_running_loop()
+     await loop.run_in_executor(hf_upload_executor, sync_upload, idx)
+
+
+ async def scrape_compiled_pages():
+     f = [str(f.resolve()) for f in pathlib.Path("pp").iterdir() if (f.is_file() and f.suffix.endswith(".jsonl"))]
+     f = natsorted(f, alg=ns.PATH)
+     f = [pathlib.Path(filepath) for filepath in f]
+
+     shared_queue = asyncio.Queue(maxsize=5000)
+     write_queue = asyncio.Queue(maxsize=5000)
+
+     executor = ThreadPoolExecutor(max_workers=4)
+
+     up_tasks = []
+
+     write_resume = 0
+
+     async def write_thread():
+         rotation = 0
+         pbar = tqdm.tqdm(desc="Write Thread")
+         fs = await aiofiles.open(f"page_scrape_{rotation}.jsonl", "wb")
+         fs2 = await aiofiles.open("index.jsonl", "wb")
+         while True:
+             if write_queue.empty():
+                 await asyncio.sleep(0.5)
+                 continue
+             buffer = []
+             buffer2 = []
+             while not write_queue.empty():
+                 q = await write_queue.get()
+                 if q:
+                     r = await loop.run_in_executor(executor, orjson.dumps, q)
+                     buffer.append(r + b"\n")
+                     buffer2.append(q[0].encode() + b"\n")
+                 pbar.update(1)
+                 pbar.desc = f"Write Thread: {write_queue.qsize()}"
+             if buffer and buffer2:
+                 await fs.write(b"".join(buffer))
+                 await fs2.write(b"".join(buffer2))
+             n_rotation = pbar.n // 2500000
+             if n_rotation != rotation:
+                 await fs.close()
+                 if write_resume < n_rotation:
+                     up_tasks.append(loop.create_task(hf_upload(rotation)))
+                 else:
+                     print("Not writing", f"page_scrape_{rotation}.jsonl")
+                 rotation = n_rotation
+                 fs = await aiofiles.open(f"page_scrape_{rotation}.jsonl", "wb")
+
+     if pathlib.Path("resume").exists():
+         start_from = int(pathlib.Path("resume").read_text())
+         write_resume = start_from // 2500000
+     else:
+         start_from = 0
+     pbaru = tqdm.tqdm(desc="Task Threads")
+     async def url_find():
+         while True:
+             if shared_queue.empty():
+                 await asyncio.sleep(0.5)
+                 continue
+             url = await shared_queue.get()
+             if url is not None:
+                 await write_queue.put((url, await get_url(url, record_func=None, use_cached=False)))
+             else:
+                 await write_queue.put(None)
+             # pbar.desc = "Task Thread: " + url.split("/")[-1] + f" {shared_queue.qsize()}"
+             pbaru.update(1)
+     loop = asyncio.get_running_loop()
+     # Worker tasks run for the lifetime of the event loop; only the upload tasks are awaited below.
+     tasks = [loop.create_task(url_find()) for _ in range(6000)] + [loop.create_task(write_thread())]
+     print(start_from, "start_from")
+     if start_from > 0:
+         print("resuming from:", start_from)
+     for file in f:
+         async with aiofiles.open(file, "r") as fp:
+             print("Process:", str(file))
+             async for line in fp:
+                 # print(line)
+                 try:
+                     load = await loop.run_in_executor(executor, orjson.loads, line)
+                 except Exception as e:
+                     print("Error while loading json:", line, "error", e, "file", file)
+                     continue
+                 url = load[1]
+                 # print(url)
+                 if "/blog-entry-" in url:
+                     # print(f"put {url}")
+                     if start_from > 0:
+                         url = None
+                         start_from -= 1
+                     await shared_queue.put(url)
+
+     await asyncio.gather(*up_tasks)
+
+
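scrape_compiled_pages can be resumed: if a file named resume exists in the working directory, its integer contents are treated as the number of entry URLs already scraped (2,500,000 per output rotation, as above). A minimal sketch of preparing such a file; the count shown is hypothetical.

    # Illustrative only; the count is hypothetical.
    pathlib.Path("resume").write_text(str(2_500_000))  # skip the first rotation's worth of entry URLs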
+ async def compile_pages():
+
+     pbar = tqdm.tqdm(desc="Pages Parsed")
+     pbar2 = tqdm.tqdm(desc="Sites Parsed")
+     rotation = 0
+     fs = await aiofiles.open(f"pages_{rotation}.jsonl", "wb")
+     for blog_path in pathlib.Path("blog").iterdir():
+         lines = []
+         if blog_path.is_file():
+             continue
+         blog_path = blog_path / "sitemap.xml"
+         username = blog_path.parent.name
+         soup = BeautifulSoup(blog_path.read_text(encoding="utf-8", errors="ignore"), "lxml")
+         if soup.find("error"):
+             # This blog's sitemap probably does not exist; skip it.
+             continue
+         for route in soup.find_all("loc"):
+             url = route.text
+             lines.append(orjson.dumps([username, url]) + b"\n")
+             pbar.update(1)
+         await fs.write(b"".join(lines))
+         pbar2.update(1)
+         n_rotation = pbar.n // 2500000
+         if n_rotation != rotation:
+             await fs.close()
+             rotation = n_rotation
+             fs = await aiofiles.open(f"pages_{rotation}.jsonl", "wb")
+     pbar.close()
+
+
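Each line that compile_pages writes to pages_*.jsonl is a two-element JSON array of [username, url]; a sketch of the record shape, with hypothetical values.

    # Illustrative only; username and URL are hypothetical.
    orjson.dumps(["exampleuser", "https://exampleuser.blog.fc2.com/blog-entry-1.html"]) + b"\n"
    # b'["exampleuser","https://exampleuser.blog.fc2.com/blog-entry-1.html"]\n'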
+ async def blogs():
+     sem = asyncio.Semaphore(ASYNC_CALL)
+     links_t = set()
+     loop = asyncio.get_running_loop()
+     for community in range(6, 53):
+         html = list((ROOT / pathlib.Path(f"genre/{community}/ranking")).iterdir())
+         with tqdm.tqdm(total=len(html)) as pbar:
+             async def fetch_file(file: pathlib.Path):
+                 async with sem:
+                     async with aiofiles.open(file, encoding="utf-8") as f:
+                         pbar.update(1)
+                         return await f.read()
+             tasks = [loop.create_task(fetch_file(fs)) for fs in html]
+             contents = await asyncio.gather(*tasks, return_exceptions=True)
+         print("Parsing")
+         with tqdm.tqdm(total=len(contents)) as pbar:
+             for content in contents:
+                 soup = BeautifulSoup(content, "lxml")
+                 for links in soup.select(".blogranking_title > a"):
+                     links_t.add(links['href'])
+                 pbar.update(1)
+         del contents
+     (ROOT / "blogs.json").write_text(json.dumps(list(links_t), ensure_ascii=False, indent=2))
+
+ async def blog_sitemaps():
+     sem = asyncio.Semaphore(ASYNC_CALL)
+     blogs = json.loads((ROOT / "blogs.json").read_text(encoding="utf-8"))
+     maps = [f"{blog}sitemaps.xml" for blog in blogs]
+     pbar = tqdm.tqdm(total=len(maps), smoothing=0.8)
+     def cond(url):
+         par = urllib.parse.urlparse(url)
+         username = par.netloc.split(".")[0]
+         fs = f"blog/{username}/sitemap.xml"
+         pbar.update(1)
+         if (ROOT / pathlib.Path(fs)).exists():
+             return False
+         return True
+     maps = [sitemap for sitemap in maps if cond(sitemap)]
+     pbar.close()
+
+     with tqdm.tqdm(total=len(maps), smoothing=0.8) as pbar:
+         async def scrape_page(url):
+             async with sem:
+                 par = urllib.parse.urlparse(url)
+                 username = par.netloc.split(".")[0]
+                 await get_url(url, record_filename=f"blog/{username}/sitemap.xml", no_read=True, noisy=1)
+                 pbar.update(1)
+         loop = asyncio.get_running_loop()
+         print("Creating tasks")
+         tasks = [loop.create_task(scrape_page(sitemap)) for sitemap in maps]
+         print("Task creation done. Requesting...")
+         await asyncio.gather(*tasks, return_exceptions=True)
+
+
+ async def genre():
+     """Scrapes public blogs via the ranking category."""
+     # root = "https://blog.fc2.com"
+     # community_page = await get_url("https://blog.fc2.com/community/")
+     # soup = BeautifulSoup(community_page, "lxml")
+
+     # selects = soup.select("li.community_genre_item > a")
+     # https://blog.fc2.com/genre/52/ranking/
+     sem = asyncio.Semaphore(ASYNC_CALL)
+     for community in range(6, 53):
+         print(f"https://blog.fc2.com/genre/{community}/ranking/")
+         community_page = await get_url(f"https://blog.fc2.com/genre/{community}/ranking/")
+         if isinstance(community_page, bool):
+             raise Exception("Weird?")
+         soup = BeautifulSoup(community_page, "lxml")
+         pagers = soup.select("div.pager > div > a")
+         print(pagers)
+         last_ref = pagers[-1]['href']
+         if last_ref.startswith("/a/"):  # Adult / Community 23 is... weird.
+             max_pg = int(last_ref.replace("/a/genre/ranking/", "").split("/")[0])
+         else:
+             max_pg = int(last_ref.replace(f"/genre/{community}/ranking/", "").split("/")[0])
+         shuffled_page = list(range(1, max_pg))
+         random.shuffle(shuffled_page)
+
+         with tqdm.tqdm(total=len(shuffled_page)) as pbar:
+             async def scrape_page(idx):
+                 async with sem:
+                     url = f"https://blog.fc2.com/genre/{community}/ranking/{idx}"
+                     await get_url(url, no_read=True)
+                     pbar.update(1)
+             loop = asyncio.get_running_loop()
+             tasks = [loop.create_task(scrape_page(page)) for page in shuffled_page]
+             await asyncio.gather(*tasks)
+
+
+ async def communities():
+     root = "https://blog.fc2.com"
+     community_page = await get_url("https://blog.fc2.com/community/")
+     soup = BeautifulSoup(community_page, "lxml")
+     selects = soup.select("li.community_genre_item > a")
+     print(f"Found: {len(selects)} communities")
+     sem = asyncio.Semaphore(ASYNC_CALL)
+     for community in selects:
+         community_url = root + community['href']
+         print(f"comm_url: {community_url}")
+         community_page = await get_url(community_url)
+         soup = BeautifulSoup(community_page, "lxml")
+         pagers = soup.select("div.pager > div > a")
+         last_ref = pagers[-1]['href']
+         shuffled_page = list(range(1, int(last_ref.replace(community['href'], "").split("/")[1])))
+         random.shuffle(shuffled_page)
+         print(f"Max for shuffled_page: {max(shuffled_page)}")
+         with tqdm.tqdm(total=len(shuffled_page)) as pbar:
+             async def scrape_page(cat, idx):
+                 async with sem:
+                     url = f"{community_url}/page/{idx}/?&order_by=member"
+                     if not (ROOT.resolve() / f"community/category/{cat}/{idx}").with_suffix(".html").exists():
+                         await get_url(url, record_filename=f"community/category/{cat}/{idx}", no_read=True)
+                     pbar.update(1)
+
+             loop = asyncio.get_running_loop()
+             tasks = [loop.create_task(scrape_page(community['href'].split("/")[-2], page)) for page in shuffled_page]
+             await asyncio.gather(*tasks)
+
+
+ async def do():
+     # await communities()
+     # await genre()
+     await scrape_compiled_pages()
+     # await blog_sitemaps()
+     await session.aclose()
+
+
+ if __name__ == "__main__":
+     asyncio.run(do())
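The commented-out calls in do() suggest the intended stage order: communities()/genre() and blogs() collect blog URLs, blog_sitemaps() fetches each blog's sitemap, compile_pages() flattens the sitemaps into pages_*.jsonl, and scrape_compiled_pages() then reads those per-page records, downloads the entries, and rotates/uploads the output; only the final stage is enabled in this commit. A minimal sketch of running an earlier stage instead, assuming the same module:

    # Illustrative only: enable a different pipeline stage by editing do().
    async def do():
        await blog_sitemaps()   # fetch per-blog sitemaps instead of entry pages
        await session.aclose()

    if __name__ == "__main__":
        asyncio.run(do())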
BLiterature_00.7z → data/BLiterature_00.7z RENAMED (file without changes)
BLiterature_01.7z → data/BLiterature_01.7z RENAMED (file without changes)
BLiterature_02.7z → data/BLiterature_02.7z RENAMED (file without changes)
BLiterature_03.7z → data/BLiterature_03.7z RENAMED (file without changes)
BLiterature_04.7z → data/BLiterature_04.7z RENAMED (file without changes)
BLiterature_05.7z → data/BLiterature_05.7z RENAMED (file without changes)
BLiterature_06.7z → data/BLiterature_06.7z RENAMED (file without changes)
BLiterature_07.7z → data/BLiterature_07.7z RENAMED (file without changes)
BLiterature_08.7z → data/BLiterature_08.7z RENAMED (file without changes)
BLiterature_09.7z → data/BLiterature_09.7z RENAMED (file without changes)
BLiterature_10.7z → data/BLiterature_10.7z RENAMED (file without changes)
BLiterature_100.7z → data/BLiterature_100.7z RENAMED (file without changes)
BLiterature_101.7z → data/BLiterature_101.7z RENAMED (file without changes)
BLiterature_102.7z → data/BLiterature_102.7z RENAMED (file without changes)
BLiterature_103.7z → data/BLiterature_103.7z RENAMED (file without changes)
BLiterature_104.7z → data/BLiterature_104.7z RENAMED (file without changes)
BLiterature_11.7z → data/BLiterature_11.7z RENAMED (file without changes)
BLiterature_12.7z → data/BLiterature_12.7z RENAMED (file without changes)
BLiterature_13.7z → data/BLiterature_13.7z RENAMED (file without changes)
BLiterature_14.7z → data/BLiterature_14.7z RENAMED (file without changes)
BLiterature_15.7z → data/BLiterature_15.7z RENAMED (file without changes)
BLiterature_16.7z → data/BLiterature_16.7z RENAMED (file without changes)
BLiterature_17.7z → data/BLiterature_17.7z RENAMED (file without changes)
BLiterature_18.7z → data/BLiterature_18.7z RENAMED (file without changes)
BLiterature_19.7z → data/BLiterature_19.7z RENAMED (file without changes)
BLiterature_20.7z → data/BLiterature_20.7z RENAMED (file without changes)
BLiterature_21.7z → data/BLiterature_21.7z RENAMED (file without changes)
BLiterature_22.7z → data/BLiterature_22.7z RENAMED (file without changes)
BLiterature_23.7z → data/BLiterature_23.7z RENAMED (file without changes)
BLiterature_24.7z → data/BLiterature_24.7z RENAMED (file without changes)
BLiterature_25.7z → data/BLiterature_25.7z RENAMED (file without changes)
BLiterature_26.7z → data/BLiterature_26.7z RENAMED (file without changes)
BLiterature_27.7z → data/BLiterature_27.7z RENAMED (file without changes)
BLiterature_28.7z → data/BLiterature_28.7z RENAMED (file without changes)
BLiterature_29.7z → data/BLiterature_29.7z RENAMED (file without changes)
BLiterature_30.7z → data/BLiterature_30.7z RENAMED (file without changes)
BLiterature_31.7z → data/BLiterature_31.7z RENAMED (file without changes)
BLiterature_32.7z → data/BLiterature_32.7z RENAMED (file without changes)
BLiterature_33.7z → data/BLiterature_33.7z RENAMED (file without changes)
BLiterature_34.7z → data/BLiterature_34.7z RENAMED (file without changes)
BLiterature_35.7z → data/BLiterature_35.7z RENAMED (file without changes)
BLiterature_36.7z → data/BLiterature_36.7z RENAMED (file without changes)
BLiterature_37.7z → data/BLiterature_37.7z RENAMED (file without changes)
BLiterature_38.7z → data/BLiterature_38.7z RENAMED (file without changes)
BLiterature_39.7z → data/BLiterature_39.7z RENAMED (file without changes)
BLiterature_40.7z → data/BLiterature_40.7z RENAMED (file without changes)
BLiterature_41.7z → data/BLiterature_41.7z RENAMED (file without changes)
BLiterature_42.7z → data/BLiterature_42.7z RENAMED (file without changes)
BLiterature_43.7z → data/BLiterature_43.7z RENAMED (file without changes)