"""Download the MIRACL corpus shards for selected languages from the
Hugging Face Hub, decompress them, and merge each language's shards
into a single ``miracl-<lang>.jsonl`` file."""
import os

from huggingface_hub import hf_hub_download

# All languages available in MIRACL, per split (kept for reference; only
# the subset in `languages2filesize` below is actually downloaded).
langs = {
    'train': ['ar', 'bn', 'en', 'es', 'fa', 'fi', 'fr', 'hi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th', 'zh'],
    'dev': ['ar', 'bn', 'en', 'es', 'fa', 'fi', 'fr', 'hi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th', 'zh'],
    'testA': ['ar', 'bn', 'en', 'fi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th'],
}

# The languages we used in ESSIR. The MIRACL corpus has been split into
# shards on the Hub; the value is the number of shards per language.
languages2filesize = {'en': 66, 'fa': 5, 'ru': 20, 'zh': 10}

# Hoisted path constants; the interpolated command strings below are
# byte-identical to the originals.
BASE_DIR = '/home/dju/datasets/essir-xlir/miracl'
ARCHIVE_DIR = f'{BASE_DIR}/archived'

for lang, n_shards in languages2filesize.items():
    for shard in range(n_shards):
        # NOTE(review): `force_filename` was removed from recent
        # huggingface_hub releases — this script presumably pins an old
        # version; confirm, or switch to `local_dir` + a rename.
        hf_hub_download(
            'miracl/miracl-corpus',
            filename=f'docs-{shard}.jsonl.gz',
            subfolder=f'miracl-corpus-v1.0-{lang}',
            repo_type='dataset',
            cache_dir=ARCHIVE_DIR,
            force_filename=f'miracl-{lang}-{shard}.jsonl.gz',
        )

# Decompress every downloaded shard, then drop the Hub's *.lock files.
os.system(f'gunzip {ARCHIVE_DIR}/*gz')
os.system(f'rm -rvf {ARCHIVE_DIR}/*lock')

for lang in languages2filesize:
    # Merge a language's shards into one big jsonl file.
    os.system(
        f'cat {ARCHIVE_DIR}/miracl-{lang}*.jsonl > {BASE_DIR}/miracl-{lang}.jsonl'
    )
    # Remove the shards. BUG FIX: the original built this command but
    # never passed it to os.system, so shards were never deleted.
    os.system(f'rm -rf {ARCHIVE_DIR}/miracl-{lang}*.jsonl')