{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9933774834437086,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06622516556291391,
      "grad_norm": 13.524764060974121,
      "learning_rate": 5e-05,
      "loss": 4.4945,
      "step": 5
    },
    {
      "epoch": 0.13245033112582782,
      "grad_norm": 3.622286081314087,
      "learning_rate": 9.994504457428558e-05,
      "loss": 2.5797,
      "step": 10
    },
    {
      "epoch": 0.1986754966887417,
      "grad_norm": 5.005734920501709,
      "learning_rate": 9.8034259378842e-05,
      "loss": 2.1351,
      "step": 15
    },
    {
      "epoch": 0.26490066225165565,
      "grad_norm": 1.716973900794983,
      "learning_rate": 9.349531862043952e-05,
      "loss": 2.1166,
      "step": 20
    },
    {
      "epoch": 0.33112582781456956,
      "grad_norm": 1.4910792112350464,
      "learning_rate": 8.657656676318346e-05,
      "loss": 2.0585,
      "step": 25
    },
    {
      "epoch": 0.3973509933774834,
      "grad_norm": 3.35893177986145,
      "learning_rate": 7.765655770625997e-05,
      "loss": 1.9947,
      "step": 30
    },
    {
      "epoch": 0.46357615894039733,
      "grad_norm": 4.509174823760986,
      "learning_rate": 6.722334251421665e-05,
      "loss": 2.1419,
      "step": 35
    },
    {
      "epoch": 0.5298013245033113,
      "grad_norm": 2.4678330421447754,
      "learning_rate": 5.584776609860414e-05,
      "loss": 2.0142,
      "step": 40
    },
    {
      "epoch": 0.5960264900662252,
      "grad_norm": 2.499786138534546,
      "learning_rate": 4.415223390139588e-05,
      "loss": 1.8712,
      "step": 45
    },
    {
      "epoch": 0.6622516556291391,
      "grad_norm": 3.2667858600616455,
      "learning_rate": 3.277665748578336e-05,
      "loss": 1.89,
      "step": 50
    },
    {
      "epoch": 0.7284768211920529,
      "grad_norm": 1.6122734546661377,
      "learning_rate": 2.234344229374003e-05,
      "loss": 2.0688,
      "step": 55
    },
    {
      "epoch": 0.7947019867549668,
      "grad_norm": 2.721144437789917,
      "learning_rate": 1.3423433236816563e-05,
      "loss": 1.9595,
      "step": 60
    },
    {
      "epoch": 0.8609271523178808,
      "grad_norm": 3.3693900108337402,
      "learning_rate": 6.50468137956049e-06,
      "loss": 1.9349,
      "step": 65
    },
    {
      "epoch": 0.9271523178807947,
      "grad_norm": 3.006711483001709,
      "learning_rate": 1.9657406211579966e-06,
      "loss": 2.0043,
      "step": 70
    },
    {
      "epoch": 0.9933774834437086,
      "grad_norm": 2.028134346008301,
      "learning_rate": 5.4955425714431353e-08,
      "loss": 1.8812,
      "step": 75
    },
    {
      "epoch": 0.9933774834437086,
      "step": 75,
      "total_flos": 9123266204270592.0,
      "train_loss": 2.2096694183349608,
      "train_runtime": 100.1114,
      "train_samples_per_second": 3.017,
      "train_steps_per_second": 0.749
    }
  ],
  "logging_steps": 5,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9123266204270592.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}