{
  "best_metric": 1.8643771409988403,
  "best_model_checkpoint": "./lora_weights/checkpoint-600",
  "epoch": 2.496424392146665,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 2.7371,
      "step": 10
    },
    {
      "epoch": 0.08,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 2.713,
      "step": 20
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.999999999999999e-05,
      "loss": 2.6222,
      "step": 30
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00011999999999999999,
      "loss": 2.4152,
      "step": 40
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015,
      "loss": 2.2329,
      "step": 50
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00017999999999999998,
      "loss": 2.1497,
      "step": 60
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00020999999999999998,
      "loss": 2.0945,
      "step": 70
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.069,
      "step": 80
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00027,
      "loss": 2.0207,
      "step": 90
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0003,
      "loss": 2.0078,
      "step": 100
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00029516129032258065,
      "loss": 1.9622,
      "step": 110
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00029032258064516127,
      "loss": 1.9848,
      "step": 120
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00028548387096774194,
      "loss": 1.9573,
      "step": 130
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00028064516129032256,
      "loss": 1.9482,
      "step": 140
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0002758064516129032,
      "loss": 1.9576,
      "step": 150
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00027096774193548386,
      "loss": 1.9558,
      "step": 160
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0002661290322580645,
      "loss": 1.9453,
      "step": 170
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00026129032258064515,
      "loss": 1.9386,
      "step": 180
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00025645161290322577,
      "loss": 1.9421,
      "step": 190
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00025161290322580645,
      "loss": 1.9392,
      "step": 200
    },
    {
      "epoch": 0.83,
      "eval_loss": 1.9201855659484863,
      "eval_runtime": 141.1699,
      "eval_samples_per_second": 14.167,
      "eval_steps_per_second": 1.771,
      "step": 200
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00024677419354838707,
      "loss": 1.9406,
      "step": 210
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00024193548387096771,
      "loss": 1.9091,
      "step": 220
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00023709677419354836,
      "loss": 1.905,
      "step": 230
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.000232258064516129,
      "loss": 1.9165,
      "step": 240
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00022741935483870966,
      "loss": 1.9074,
      "step": 250
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0002225806451612903,
      "loss": 1.8788,
      "step": 260
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00021774193548387095,
      "loss": 1.9058,
      "step": 270
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0002129032258064516,
      "loss": 1.9167,
      "step": 280
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00020806451612903225,
      "loss": 1.9063,
      "step": 290
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00020322580645161287,
      "loss": 1.8964,
      "step": 300
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0001983870967741935,
      "loss": 1.8961,
      "step": 310
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00019354838709677416,
      "loss": 1.8911,
      "step": 320
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001887096774193548,
      "loss": 1.8947,
      "step": 330
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00018387096774193548,
      "loss": 1.8794,
      "step": 340
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00017903225806451613,
      "loss": 1.9045,
      "step": 350
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00017419354838709678,
      "loss": 1.8951,
      "step": 360
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00016935483870967742,
      "loss": 1.8777,
      "step": 370
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00016451612903225804,
      "loss": 1.8832,
      "step": 380
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.0001596774193548387,
      "loss": 1.8811,
      "step": 390
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00015483870967741934,
      "loss": 1.8763,
      "step": 400
    },
    {
      "epoch": 1.66,
      "eval_loss": 1.8789199590682983,
      "eval_runtime": 141.9625,
      "eval_samples_per_second": 14.088,
      "eval_steps_per_second": 1.761,
      "step": 400
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00015,
      "loss": 1.8746,
      "step": 410
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00014516129032258063,
      "loss": 1.8887,
      "step": 420
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00014032258064516128,
      "loss": 1.8878,
      "step": 430
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00013548387096774193,
      "loss": 1.8893,
      "step": 440
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00013064516129032258,
      "loss": 1.8852,
      "step": 450
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00012580645161290322,
      "loss": 1.8952,
      "step": 460
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00012096774193548386,
      "loss": 1.8755,
      "step": 470
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0001161290322580645,
      "loss": 1.8865,
      "step": 480
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.00011129032258064515,
      "loss": 1.8651,
      "step": 490
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.0001064516129032258,
      "loss": 1.8589,
      "step": 500
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.00010161290322580643,
      "loss": 1.8578,
      "step": 510
    },
    {
      "epoch": 2.16,
      "learning_rate": 9.677419354838708e-05,
      "loss": 1.8727,
      "step": 520
    },
    {
      "epoch": 2.21,
      "learning_rate": 9.193548387096774e-05,
      "loss": 1.8558,
      "step": 530
    },
    {
      "epoch": 2.25,
      "learning_rate": 8.709677419354839e-05,
      "loss": 1.8616,
      "step": 540
    },
    {
      "epoch": 2.29,
      "learning_rate": 8.225806451612902e-05,
      "loss": 1.8502,
      "step": 550
    },
    {
      "epoch": 2.33,
      "learning_rate": 7.741935483870967e-05,
      "loss": 1.861,
      "step": 560
    },
    {
      "epoch": 2.37,
      "learning_rate": 7.258064516129032e-05,
      "loss": 1.8473,
      "step": 570
    },
    {
      "epoch": 2.41,
      "learning_rate": 6.774193548387096e-05,
      "loss": 1.8586,
      "step": 580
    },
    {
      "epoch": 2.45,
      "learning_rate": 6.290322580645161e-05,
      "loss": 1.8621,
      "step": 590
    },
    {
      "epoch": 2.5,
      "learning_rate": 5.806451612903225e-05,
      "loss": 1.8692,
      "step": 600
    },
    {
      "epoch": 2.5,
      "eval_loss": 1.8643771409988403,
      "eval_runtime": 141.1773,
      "eval_samples_per_second": 14.167,
      "eval_steps_per_second": 1.771,
      "step": 600
    }
  ],
  "max_steps": 720,
  "num_train_epochs": 3,
  "total_flos": 7.128893746084577e+17,
  "trial_name": null,
  "trial_params": null
}
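
The log shows the eval loss improving from 1.920 at step 200 to 1.864 at step 600, which is why checkpoint-600 is recorded as the best checkpoint. Below is a minimal sketch for inspecting this state file, assuming it is saved as `trainer_state.json` inside the checkpoint directory named above (the usual Hugging Face Trainer layout) and that `matplotlib` is installed; adjust the path for your setup.

```python
import json
import matplotlib.pyplot as plt

# Path assumed from "best_model_checkpoint" above; change if your layout differs.
with open("./lora_weights/checkpoint-600/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries ("loss") and evaluation entries ("eval_loss").
train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train], [e["loss"] for e in train], label="train loss")
plt.plot([e["step"] for e in evals], [e["eval_loss"] for e in evals], "o-", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()

print("best eval loss:", state["best_metric"], "at", state["best_model_checkpoint"])
```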