tmnam20 committed on
Commit af34006
1 Parent(s): 60b7000

Training in progress, step 200, checkpoint

checkpoint-200/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4bc9380a831f85d3b9908a8ba41371e777acc0a767a283ea6eb76f2d2bd1f557
+oid sha256:6bafe9677d96eb63bae43314ebe31a30361b1347d590fab78e657d5093cda7e5
 size 1474893317
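Each tracked file above is stored through Git LFS, so the committed content is a small pointer (version, oid, size) rather than the binary itself. A minimal sketch, assuming a locally downloaded checkpoint-200/optimizer.pt (the path is illustrative, not part of this commit), of how such a pointer can be re-derived to verify a download:

```python
import hashlib
import os

# Minimal sketch: recompute the "oid sha256:..." and "size ..." fields of a
# Git LFS pointer from the actual file, to check that a downloaded object
# matches the pointer committed here. The path below is illustrative.
path = "checkpoint-200/optimizer.pt"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("version https://git-lfs.github.com/spec/v1")
print(f"oid sha256:{sha.hexdigest()}")
print(f"size {os.path.getsize(path)}")
```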
checkpoint-200/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b7511b52f20dd02d2703e7e44318cf5908bd01588cd51cce0f7fcdfeb37be13
+oid sha256:7b06790aa3e1d971e160c719ebdd5ee22a691c28aff1977c8a9e4b150aaeac7a
 size 737457141
checkpoint-200/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:84709f6d9c1533f7f23a098ed0b25c34f2bbc10611fbeb71c88a308a8846e0ed
+oid sha256:8f3284e82c30f90f720fbb01dfc9a4aa926d82378c51e59b01cb9aa4aa352e1b
 size 14575
checkpoint-200/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:28635ac4e12d078e632eeed3d7a0151748b7fe2eb5d64a612000c08dddfa0646
+oid sha256:f2099172cf643e0621a49c4fd812f032200315a0b3fd63d5198b626196954959
 size 627
checkpoint-200/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.5637454986572266,
+  "best_metric": 0.5658872723579407,
   "best_model_checkpoint": "/kaggle/output/checkpoint-200",
   "epoch": 0.4132231404958678,
   "eval_steps": 50,
@@ -10,44 +10,44 @@
   "log_history": [
     {
       "epoch": 0.0,
-      "learning_rate": 2.777777777777778e-07,
-      "loss": 0.664,
+      "learning_rate": 1.6666666666666667e-06,
+      "loss": 0.6979,
       "step": 1
     },
     {
       "epoch": 0.1,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.5625454783439636,
-      "eval_runtime": 28.0598,
-      "eval_samples_per_second": 42.196,
-      "eval_steps_per_second": 21.098,
+      "eval_loss": 0.6723740696907043,
+      "eval_runtime": 28.903,
+      "eval_samples_per_second": 40.965,
+      "eval_steps_per_second": 20.482,
       "step": 50
     },
     {
       "epoch": 0.21,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.5684965252876282,
-      "eval_runtime": 27.869,
-      "eval_samples_per_second": 42.485,
-      "eval_steps_per_second": 21.242,
+      "eval_loss": 0.6272028088569641,
+      "eval_runtime": 28.756,
+      "eval_samples_per_second": 41.174,
+      "eval_steps_per_second": 20.587,
       "step": 100
     },
     {
       "epoch": 0.31,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.5661544799804688,
-      "eval_runtime": 29.0027,
-      "eval_samples_per_second": 40.824,
-      "eval_steps_per_second": 20.412,
+      "eval_loss": 0.5726658701896667,
+      "eval_runtime": 28.7791,
+      "eval_samples_per_second": 41.141,
+      "eval_steps_per_second": 20.57,
       "step": 150
     },
     {
       "epoch": 0.41,
       "eval_accuracy": 0.75,
-      "eval_loss": 0.5637454986572266,
-      "eval_runtime": 27.8992,
-      "eval_samples_per_second": 42.439,
-      "eval_steps_per_second": 21.219,
+      "eval_loss": 0.5658872723579407,
+      "eval_runtime": 28.8005,
+      "eval_samples_per_second": 41.11,
+      "eval_steps_per_second": 20.555,
       "step": 200
     }
   ],
@@ -55,7 +55,7 @@
   "max_steps": 1000,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 634000068849168.0,
+  "total_flos": 648800331133968.0,
   "trial_name": null,
   "trial_params": null
 }
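The trainer_state.json above follows the layout written by the Hugging Face Trainer. A minimal sketch (not part of this commit) for reading back the fields that changed at this step, assuming the file sits at checkpoint-200/trainer_state.json:

```python
import json

# Minimal sketch: read the checkpoint's trainer state and print the fields
# updated in this commit (best_metric and the per-step evaluation entries).
with open("checkpoint-200/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# log_history mixes training logs and evaluation logs; keep the eval ones.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]}: eval_loss={entry["eval_loss"]}, '
              f'eval_accuracy={entry["eval_accuracy"]}')
```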
checkpoint-200/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:359ac16579635edbe192e2a03146d3aa8c5fab4b7a991e049fba5f7b90d4807f
+oid sha256:fbe5e88ca49040823bdb83dc035b8a4470c58ae105b0482f624b7d48f5e08cbe
 size 4027
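The binaries behind these pointers (optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, training_args.bin) are ordinary torch.save artifacts. A minimal sketch for inspecting the model weights once the real objects, not the pointer files, have been fetched (for example with git lfs pull); the path is illustrative:

```python
import torch

# Minimal sketch: load the checkpointed model weights on CPU and list a few
# tensors. This works on the actual LFS object, not the pointer shown above.
state_dict = torch.load("checkpoint-200/pytorch_model.bin", map_location="cpu")
print(f"{len(state_dict)} tensors, "
      f"{sum(t.numel() for t in state_dict.values()):,} parameters total")
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))
```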