Training in progress, step 50, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0847457627118644,
"eval_steps": 9,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001694915254237288,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.001694915254237288,
"eval_loss": NaN,
"eval_runtime": 55.7318,
"eval_samples_per_second": 8.918,
"eval_steps_per_second": 1.13,
"step": 1
},
{
"epoch": 0.003389830508474576,
"grad_norm": NaN,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 2
},
{
"epoch": 0.005084745762711864,
"grad_norm": NaN,
"learning_rate": 3e-05,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.006779661016949152,
"grad_norm": NaN,
"learning_rate": 4e-05,
"loss": 0.0,
"step": 4
},
{
"epoch": 0.00847457627118644,
"grad_norm": NaN,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.010169491525423728,
"grad_norm": NaN,
"learning_rate": 6e-05,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.011864406779661017,
"grad_norm": NaN,
"learning_rate": 7e-05,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.013559322033898305,
"grad_norm": NaN,
"learning_rate": 8e-05,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.015254237288135594,
"grad_norm": NaN,
"learning_rate": 9e-05,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.015254237288135594,
"eval_loss": NaN,
"eval_runtime": 55.4106,
"eval_samples_per_second": 8.969,
"eval_steps_per_second": 1.137,
"step": 9
},
{
"epoch": 0.01694915254237288,
"grad_norm": NaN,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.01864406779661017,
"grad_norm": NaN,
"learning_rate": 9.99695413509548e-05,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.020338983050847456,
"grad_norm": NaN,
"learning_rate": 9.987820251299122e-05,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.022033898305084745,
"grad_norm": NaN,
"learning_rate": 9.972609476841367e-05,
"loss": 0.0,
"step": 13
},
{
"epoch": 0.023728813559322035,
"grad_norm": NaN,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.025423728813559324,
"grad_norm": NaN,
"learning_rate": 9.924038765061042e-05,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.02711864406779661,
"grad_norm": NaN,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.0288135593220339,
"grad_norm": NaN,
"learning_rate": 9.851478631379982e-05,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.030508474576271188,
"grad_norm": NaN,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.030508474576271188,
"eval_loss": NaN,
"eval_runtime": 55.3991,
"eval_samples_per_second": 8.971,
"eval_steps_per_second": 1.137,
"step": 18
},
{
"epoch": 0.03220338983050847,
"grad_norm": NaN,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.03389830508474576,
"grad_norm": NaN,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.03559322033898305,
"grad_norm": NaN,
"learning_rate": 9.635919272833938e-05,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.03728813559322034,
"grad_norm": NaN,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.03898305084745763,
"grad_norm": NaN,
"learning_rate": 9.493970231495835e-05,
"loss": 0.0,
"step": 23
},
{
"epoch": 0.04067796610169491,
"grad_norm": NaN,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.0423728813559322,
"grad_norm": NaN,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.04406779661016949,
"grad_norm": NaN,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.04576271186440678,
"grad_norm": NaN,
"learning_rate": 9.145187862775209e-05,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.04576271186440678,
"eval_loss": NaN,
"eval_runtime": 55.5182,
"eval_samples_per_second": 8.952,
"eval_steps_per_second": 1.135,
"step": 27
},
{
"epoch": 0.04745762711864407,
"grad_norm": NaN,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0,
"step": 28
},
{
"epoch": 0.04915254237288136,
"grad_norm": NaN,
"learning_rate": 8.940053768033609e-05,
"loss": 0.0,
"step": 29
},
{
"epoch": 0.05084745762711865,
"grad_norm": NaN,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.05254237288135593,
"grad_norm": NaN,
"learning_rate": 8.715724127386972e-05,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.05423728813559322,
"grad_norm": NaN,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.05593220338983051,
"grad_norm": NaN,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.0576271186440678,
"grad_norm": NaN,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.059322033898305086,
"grad_norm": NaN,
"learning_rate": 8.213938048432697e-05,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.061016949152542375,
"grad_norm": NaN,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.061016949152542375,
"eval_loss": NaN,
"eval_runtime": 55.3687,
"eval_samples_per_second": 8.976,
"eval_steps_per_second": 1.138,
"step": 36
},
{
"epoch": 0.06271186440677966,
"grad_norm": NaN,
"learning_rate": 7.938926261462366e-05,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.06440677966101695,
"grad_norm": NaN,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.06610169491525424,
"grad_norm": NaN,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.06779661016949153,
"grad_norm": NaN,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.06949152542372881,
"grad_norm": NaN,
"learning_rate": 7.347357813929454e-05,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.0711864406779661,
"grad_norm": NaN,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.07288135593220339,
"grad_norm": NaN,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.07457627118644068,
"grad_norm": NaN,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.07627118644067797,
"grad_norm": NaN,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.07627118644067797,
"eval_loss": NaN,
"eval_runtime": 55.3207,
"eval_samples_per_second": 8.984,
"eval_steps_per_second": 1.139,
"step": 45
},
{
"epoch": 0.07796610169491526,
"grad_norm": NaN,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.07966101694915254,
"grad_norm": NaN,
"learning_rate": 6.378186779084995e-05,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.08135593220338982,
"grad_norm": NaN,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.08305084745762711,
"grad_norm": NaN,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.0847457627118644,
"grad_norm": NaN,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.528709079511859e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
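
The JSON above is the Trainer state saved alongside the step-50 checkpoint. A minimal sketch for inspecting it, assuming it is saved as trainer_state.json (the filename Hugging Face's Trainer uses for this state); the warmup/cosine schedule parameters in expected_lr are inferred from the logged learning-rate values, not stated anywhere in the file:

import json
import math

# Assumed filename: the Trainer writes this state as "trainer_state.json"
# inside each checkpoint directory.
with open("trainer_state.json") as fh:
    state = json.load(fh)  # Python's json module accepts the bare NaN literals

train_logs = [rec for rec in state["log_history"] if "loss" in rec]
eval_logs = [rec for rec in state["log_history"] if "eval_loss" in rec]

# Every logged step has grad_norm = NaN and loss = 0.0, and every evaluation
# reports eval_loss = NaN, i.e. the run produced non-finite values from step 1.
nan_steps = [rec["step"] for rec in train_logs if math.isnan(rec["grad_norm"])]
print(f"{len(nan_steps)} of {len(train_logs)} logged steps have NaN grad_norm")
print("eval_loss:", [rec["eval_loss"] for rec in eval_logs])

# The learning_rate column is consistent with linear warmup over the first
# 10 steps to a 1e-4 peak, followed by cosine decay toward max_steps = 100
# (an assumption inferred from the logged values).
def expected_lr(step, peak=1e-4, warmup=10, total=state["max_steps"]):
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return 0.5 * peak * (1.0 + math.cos(math.pi * progress))

for rec in train_logs[:2] + train_logs[-2:]:
    print(rec["step"], rec["learning_rate"], round(expected_lr(rec["step"]), 12))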