#!/bin/bash
#SBATCH --job-name=xp3mt # job name
#SBATCH --ntasks=1                   # number of MP tasks
#SBATCH --nodes=1
#SBATCH --cpus-per-task=40           # number of cores per tasks
#SBATCH --hint=nomultithread         # we get physical cores not logical
#SBATCH --time=10:00:00             # maximum execution time (HH:MM:SS)
#SBATCH --output=%x-%j.out          # output file name
#SBATCH --account=ajs@cpu
#SBATCH --partition=cpu_p1
#SBATCH --qos=qos_cpu-t3

set -x -e

source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0  # activate the tr13f-6B3-ml-t0 training environment
export HF_DATASETS_OFFLINE=1   # use only locally cached datasets (no network access)
export TRANSFORMERS_OFFLINE=1  # use only locally cached tokenizers/models

MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed

TOKENIZER_PATH="bigscience/tokenizer"  # BLOOM tokenizer from the Hugging Face Hub

#ar  bn	en  es	fr  gu	hi  id	ig  mr	ne  pa	prep.py  pt  rn  sw  ta  te  to_meg.slurm  ur  vi  yo  zh
LANGS=(
ar
bn
es
fr
gu
hi
id
ig
mr
ne
pa
pt
sw
ta
te
ur
vi
yo
zh
)


DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/xP3mt

for LANG in "${LANGS[@]}"; do
    cd $DATA_PATH/$LANG
    # Merge
    cat *.jsonl > merged_dups_$LANG.jsonl
    # Drop duplicates (~1G / 37G for en) + Shuffle
    sort -u merged_dups_$LANG.jsonl | shuf > merged_$LANG.jsonl
    OUTPUT=/gpfswork/rech/six/commun/bigscience-training/xp3mt/xp3_$LANG
    cd $MEGATRON_DEEPSPEED_REPO
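    # Tokenization of the merged jsonl into Megatron's mmap/indexed binary format
    # (one pass for inputs, one for targets); both invocations are left commented
    # out here and can be uncommented to run the conversion.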
    #python tools/preprocess_data.py \
    #    --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
    #    --output-prefix $OUTPUT \
    #    --dataset-impl mmap \
    #    --json-key inputs \
    #    --tokenizer-type PretrainedFromHF \
    #    --tokenizer-name-or-path $TOKENIZER_PATH \
    #    --workers 35
    #python tools/preprocess_data.py \
    #    --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
    #    --output-prefix $OUTPUT \
    #    --dataset-impl mmap \
    #    --json-key targets \
    #    --tokenizer-type PretrainedFromHF \
    #    --tokenizer-name-or-path $TOKENIZER_PATH \
    #    --append-eod \
    #    --prepend-space \
    #    --workers 35
done