# Pythia 13B (deduplicated Pile) pretraining setup
{
   # parallelism settings (you will want to change these based on your cluster setup, ideally
   # scheduling pipeline stages across node boundaries)
   "pipe-parallel-size": 1,
   "model-parallel-size": 4,

   # model settings
   "num-layers": 36,
   "hidden-size": 5120,
   "num-attention-heads": 40,
   "seq-length": 2048,
   "max-position-embeddings": 2048,
   "norm": "layernorm",
   "pos-emb": "rotary",
   "rotary_pct": 0.25,
   "no-weight-tying": true,
   "gpt_j_residual": true,
   "output_layer_parallelism": "column",

   # these should provide some speedup but take a while to build the fused kernels; set to false if not desired
   "scaled-upper-triang-masked-softmax-fusion": true,
   "bias-gelu-fusion": true,

   # optimizer settings
   "optimizer": {
     "type": "Adam",
     "params": {
       "lr": 0.00012,
       "betas": [0.9, 0.95],
       "eps": 1.0e-8,
     }
   },
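   # 0.00012 is the peak learning rate (1.2e-4); the cosine schedule that decays it is configured
   # under the misc. training settings below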
   "zero_optimization": {
    "stage": 1,
    "allgather_partitions": True,
    "allgather_bucket_size": 1260000000,
    "overlap_comm": True,
    "reduce_scatter": True,
    "reduce_bucket_size": 1260000000,
    "contiguous_gradients": True,
    "cpu_offload": False
  },
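   # ZeRO stage 1 shards optimizer states across the data-parallel ranks while gradients and
   # parameters stay replicated; cpu_offload: false keeps those shards on GPU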

   # batch / data settings
   "train_micro_batch_size_per_gpu": 8,
   "gradient_accumulation_steps": 2,
   "data-impl": "mmap",

   # activation checkpointing
   "checkpoint-activations": true,
   "checkpoint-num-layers": 1,
   "partition-activations": true,
   "synchronize-each-layer": true,

   # regularization
   "gradient_clipping": 1.0,
   "weight-decay": 0.1,
   "hidden-dropout": 0,
   "attention-dropout": 0,

   # precision settings
   "fp16": {
     "fp16": true,
     "enabled": true,
     "loss_scale": 0,
     "loss_scale_window": 1000,
     "initial_scale_power": 12,
     "hysteresis": 2,
     "min_loss_scale": 1
   },
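   # loss_scale: 0 selects dynamic loss scaling: the scale starts at 2^12 = 4096 (initial_scale_power),
   # is lowered when gradients overflow, and doubles again after loss_scale_window (1000) consecutive
   # overflow-free steps, never dropping below min_loss_scale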

   # misc. training settings
   "train-iters": 143000,
   "lr-decay-iters": 143000,
   "distributed-backend": "nccl",
   "lr-decay-style": "cosine",
   "warmup": 0.01,
   "save-interval": 1000,
   "eval-interval": 1000,
   "eval-iters": 10,

   # logging
   "log-interval": 10,
   "steps_per_print": 10,
   "wall_clock_breakdown": true,

  "use_wandb": True,
  "wandb_host": "https://api.wandb.ai",

  # Data paths and options when using EleutherAI cluster
  "train-data-paths": ["/fsx/pile_deduped/pile_0.87_deduped_text_document"],
  "valid-data-paths": ["/fsx/pile_deduped/pile_0.87_deduped_text_document"],
  "test-data-paths": ["/fsx/pile_deduped/pile_0.87_deduped_text_document"],

  "tokenizer_type": "HFTokenizer",
  "vocab-file": "/fsx/pile/20B_tokenizer.json",

  "save": "/fsx/shiv/gpt-neox-old/runs/checkpoints/pythia/13B_deduped_new/",
  "load": "/fsx/shiv/gpt-neox-old/runs/checkpoints/pythia/13B_deduped_new/",
  "tensorboard-dir": "/fsx/shiv/gpt-neox-old/runs/tensorboard",
  "log-dir": "/fsx/shiv/gpt-neox-old/runs/logs",

  "wandb_team": "eleutherai",
  "wandb_project": "pythia",
  "wandb_group": "13B dedupe",
  "launcher": "openmpi",
  "deepspeed_mpi": true
}