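# Danbooru2023 bucket sync: index local danbooru/<bucket> directories against
# posts.json, re-download files that are missing or have a mismatched size,
# then tar each completed bucket and upload it to the Hugging Face dataset repo.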
import concurrent.futures
import json
import os
import tarfile
import time
from pathlib import Path

from curl_cffi import requests
from tqdm import tqdm

# Must be set before huggingface_hub is imported: the flag is read once at import time.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
from huggingface_hub import HfApi


def main():
    danbooru_path = Path("danbooru")
    selected_range = set(['%03d' % i for i in [400]])
    notexists, total = 0, 0
    notexistsid = {}
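    # Pass 1: index the files already on disk for each selected bucket,
    # recording sizes so partially downloaded files can be detected.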
with open("posts.json", "r") as f:
bar = tqdm(desc="Indexing files", ascii=True, leave=False)
cache = {}
data_size = {}
for d in selected_range:
data = []
fp = ('0' + d)
for file_path in (danbooru_path / fp).iterdir():
if file_path.is_file():
data.append(file_path.stem)
data_size[file_path.stem] = file_path.stat().st_size
bar.update(1)
cache[fp] = set(data)
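        # Pass 2: stream posts.json (one JSON object per line; the fields
        # used here are "id", "file_size", and "file_url") and collect the
        # posts whose file is missing on disk or has the wrong size.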
bar = tqdm(desc="Checking files", total=6_857_737, ascii=True, leave=False)
for line in f:
post = json.loads(line)
file_id = post['id']
cutoff = str(file_id)[-3:]
if cutoff not in selected_range:
bar.update(1)
continue
cutoff = '0' + cutoff
            assert (danbooru_path / cutoff).exists(), f"{(danbooru_path / cutoff)} does not exist"
            # A file counts as present if it is on disk and either matches the
            # recorded file_size or belongs to an older post (id < 5_020_995),
            # for which sizes are not verified.
            exists = str(file_id) in cache[cutoff] and (data_size[str(file_id)] == post["file_size"] or int(file_id) < 5_020_995)
            total += 1
            if not exists and "file_url" in post:
                notexists += 1
                if cutoff not in notexistsid:
                    notexistsid[cutoff] = []
                notexistsid[cutoff].append((
                    file_id, cutoff, post["file_url"],
                ))
                # print(post["file_url"])
            bar.update(1)
            bar.set_postfix_str(f"not exists: {notexists}")
        bar.close()
    print(f"not exists: {notexists}, total: {total}")
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as tar_executor:
        for d in selected_range:
            cut = '0' + d
            if cut not in notexistsid:
                tar_executor.submit(archive_and_upload, Path("danbooru") / cut, cut)
        for key, group in notexistsid.items():
            keybar = tqdm(desc=f"Downloading files in key={key}", total=len(group), position=1, ascii=True, leave=False)
            ok = False
            while not ok:
                with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
                    for file_id, cutoff, file_url in group:
                        executor.submit(download, file_id, cutoff, file_url, keybar)
                ok = True
                # Verify that every file in the group landed on disk; if any
                # are still missing, retry the whole group (note: a retry
                # resubmits every file, including ones already downloaded).
                for file_id, cutoff, file_url in group:
                    suffix = Path(file_url).suffix
                    if file_url != "" and not Path(f"danbooru/{cutoff}/{file_id}{suffix}").is_file():
                        ok = False
            tar_executor.submit(archive_and_upload, Path(f"danbooru/{key}"), key)
            print(f"Finished download group {key}")
            keybar.close()
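

# Recursively delete a directory tree (equivalent in effect to shutil.rmtree).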
def rm_tree(pth: Path):
    for child in pth.iterdir():
        if child.is_file():
            child.unlink()
        else:
            rm_tree(child)
    pth.rmdir()
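

# Tar a finished bucket directory (files stored flat at the archive root),
# remove the source directory, upload the tar to the dataset repo, and delete
# the local tar afterwards.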
def archive_and_upload(dirname, name):
    # Assumption: create the output directory if missing; tarfile.open would
    # otherwise fail when danbooru-tars does not exist.
    Path("danbooru-tars").mkdir(exist_ok=True)
    tar_name = Path("danbooru-tars") / f"data-{name}.tar"
    # Check if the directory exists
    if not os.path.isdir(dirname):
        print("The specified directory does not exist.")
        return
    # Create the tar file
    print(f"Creating {tar_name}")
    with tarfile.open(tar_name, "w") as tar:
        # Sort and add files to the tar file
        for root, dirs, files in os.walk(dirname):
            # Sort files for consistent ordering
            for file in tqdm(sorted(files), desc=f"Creating {tar_name}", ascii=True):
                full_path = os.path.join(root, file)
                # Add the file to the tar archive, flattened to the archive root
                tar.add(full_path, arcname=file)
    # Remove the original directory after archiving
    rm_tree(dirname)
    print(f"The directory {dirname} has been removed.")
    api = HfApi()
    print(api.upload_file(
        path_or_fileobj=tar_name,
        path_in_repo=f"original/data-{name}.tar",
        repo_id="nyanko7/danbooru2023",
        repo_type="dataset",
    ))
    Path(tar_name).unlink()
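

# Download a single file with curl_cffi, impersonating a Chrome TLS
# fingerprint; retries up to max_attempts times and always advances the
# shared progress bar.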
def download(idx, cutoff, file_url, bar):
    suffix = Path(file_url).suffix
    max_attempts = 3  # specify the maximum number of attempts
    for attempt in range(max_attempts):
        try:
            r = requests.get(file_url, impersonate="chrome110", timeout=120)
            if r.status_code == 200:
                with open(f"danbooru/{cutoff}/{idx}{suffix}", "wb") as f:
                    f.write(r.content)
                break  # if the download is successful, break the loop
            else:
                print(f"Attempt {attempt+1} failed to download {file_url}: error {r.status_code}")
        except Exception as e:
            print(f"Attempt {attempt+1} failed to download {file_url}: error {e}")
        time.sleep(1)  # wait for 1 second before the next attempt
        if attempt + 1 == max_attempts:
            print(f"Failed to download {file_url} after {max_attempts} attempts.")
    bar.update(1)


if __name__ == "__main__":
    main()