{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.318627450980392,
  "global_step": 250000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 2.3999999999999997e-05,
      "loss": 0.8848,
      "step": 500
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.7999999999999994e-05,
      "loss": 0.7411,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "eval_loss": 0.9070383906364441,
      "eval_runtime": 1.3223,
      "eval_samples_per_second": 756.258,
      "eval_steps_per_second": 12.1,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "learning_rate": 7.199999999999999e-05,
      "loss": 0.7398,
      "step": 1500
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.599999999999999e-05,
      "loss": 0.7395,
      "step": 2000
    },
    {
      "epoch": 0.12,
      "eval_loss": 0.9064154028892517,
      "eval_runtime": 1.194,
      "eval_samples_per_second": 837.495,
      "eval_steps_per_second": 13.4,
      "step": 2000
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00011999999999999999,
      "loss": 0.7392,
      "step": 2500
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00014399999999999998,
      "loss": 0.7387,
      "step": 3000
    },
    {
      "epoch": 0.18,
      "eval_loss": 0.9047173857688904,
      "eval_runtime": 1.3077,
      "eval_samples_per_second": 764.675,
      "eval_steps_per_second": 12.235,
      "step": 3000
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.000168,
      "loss": 0.7384,
      "step": 3500
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00019199999999999998,
      "loss": 0.7382,
      "step": 4000
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.9014638662338257,
      "eval_runtime": 1.2645,
      "eval_samples_per_second": 790.849,
      "eval_steps_per_second": 12.654,
      "step": 4000
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00021599999999999996,
      "loss": 0.7382,
      "step": 4500
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.7381,
      "step": 5000
    },
    {
      "epoch": 0.31,
      "eval_loss": 0.9044438600540161,
      "eval_runtime": 1.2394,
      "eval_samples_per_second": 806.828,
      "eval_steps_per_second": 12.909,
      "step": 5000
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00026399999999999997,
      "loss": 0.7379,
      "step": 5500
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00028799999999999995,
      "loss": 0.7379,
      "step": 6000
    },
    {
      "epoch": 0.37,
      "eval_loss": 0.9041813015937805,
      "eval_runtime": 1.2368,
      "eval_samples_per_second": 808.566,
      "eval_steps_per_second": 12.937,
      "step": 6000
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.000312,
      "loss": 0.7381,
      "step": 6500
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.000336,
      "loss": 0.7379,
      "step": 7000
    },
    {
      "epoch": 0.43,
      "eval_loss": 0.9053561687469482,
      "eval_runtime": 1.2638,
      "eval_samples_per_second": 791.247,
      "eval_steps_per_second": 12.66,
      "step": 7000
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00035999999999999997,
      "loss": 0.7378,
      "step": 7500
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00038399999999999996,
      "loss": 0.7378,
      "step": 8000
    },
    {
      "epoch": 0.49,
      "eval_loss": 0.9035025835037231,
      "eval_runtime": 1.2079,
      "eval_samples_per_second": 827.911,
      "eval_steps_per_second": 13.247,
      "step": 8000
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.000408,
      "loss": 0.7378,
      "step": 8500
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00043199999999999993,
      "loss": 0.7378,
      "step": 9000
    },
    {
      "epoch": 0.55,
      "eval_loss": 0.9025757312774658,
      "eval_runtime": 1.2745,
      "eval_samples_per_second": 784.598,
      "eval_steps_per_second": 12.554,
      "step": 9000
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00045599999999999997,
      "loss": 0.7375,
      "step": 9500
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00047999999999999996,
      "loss": 0.7371,
      "step": 10000
    },
    {
      "epoch": 0.61,
      "eval_loss": 0.903812050819397,
      "eval_runtime": 1.2376,
      "eval_samples_per_second": 807.983,
      "eval_steps_per_second": 12.928,
      "step": 10000
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0005039999999999999,
      "loss": 0.7369,
      "step": 10500
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0005279999999999999,
      "loss": 0.7369,
      "step": 11000
    },
    {
      "epoch": 0.67,
      "eval_loss": 0.9027426838874817,
      "eval_runtime": 1.2459,
      "eval_samples_per_second": 802.657,
      "eval_steps_per_second": 12.843,
      "step": 11000
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.000552,
      "loss": 0.7369,
      "step": 11500
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0005759999999999999,
      "loss": 0.7368,
      "step": 12000
    },
    {
      "epoch": 0.74,
      "eval_loss": 0.9022310972213745,
      "eval_runtime": 1.2295,
      "eval_samples_per_second": 813.368,
      "eval_steps_per_second": 13.014,
      "step": 12000
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0006,
      "loss": 0.7367,
      "step": 12500
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0005999935478721662,
      "loss": 0.7368,
      "step": 13000
    },
    {
      "epoch": 0.8,
      "eval_loss": 0.8987133502960205,
      "eval_runtime": 1.2643,
      "eval_samples_per_second": 790.939,
      "eval_steps_per_second": 12.655,
      "step": 13000
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.000599974191770902,
      "loss": 0.7375,
      "step": 13500
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0005999419325429058,
      "loss": 0.7374,
      "step": 14000
    },
    {
      "epoch": 0.86,
      "eval_loss": 0.9013972282409668,
      "eval_runtime": 1.236,
      "eval_samples_per_second": 809.042,
      "eval_steps_per_second": 12.945,
      "step": 14000
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0005998967715993009,
      "loss": 0.7369,
      "step": 14500
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.0005998387109155732,
      "loss": 0.7369,
      "step": 15000
    },
    {
      "epoch": 0.92,
      "eval_loss": 0.9001522660255432,
      "eval_runtime": 1.235,
      "eval_samples_per_second": 809.697,
      "eval_steps_per_second": 12.955,
      "step": 15000
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.000599767753031485,
      "loss": 0.737,
      "step": 15500
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0005996839010509641,
      "loss": 0.7369,
      "step": 16000
    },
    {
      "epoch": 0.98,
      "eval_loss": 0.9001864790916443,
      "eval_runtime": 1.1685,
      "eval_samples_per_second": 855.784,
      "eval_steps_per_second": 13.693,
      "step": 16000
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0005995871586419678,
      "loss": 0.7369,
      "step": 16500
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0005994775300363225,
      "loss": 0.7372,
      "step": 17000
    },
    {
      "epoch": 1.04,
      "eval_loss": 0.9018534421920776,
      "eval_runtime": 1.1865,
      "eval_samples_per_second": 842.819,
      "eval_steps_per_second": 13.485,
      "step": 17000
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0005993550200295384,
      "loss": 0.7371,
      "step": 17500
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.0005992196339806002,
      "loss": 0.737,
      "step": 18000
    },
    {
      "epoch": 1.1,
      "eval_loss": 0.9001176953315735,
      "eval_runtime": 1.2219,
      "eval_samples_per_second": 818.375,
      "eval_steps_per_second": 13.094,
      "step": 18000
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0005990713778117324,
      "loss": 0.7369,
      "step": 18500
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0005989102580081398,
      "loss": 0.737,
      "step": 19000
    },
    {
      "epoch": 1.16,
      "eval_loss": 0.9006143808364868,
      "eval_runtime": 1.2544,
      "eval_samples_per_second": 797.163,
      "eval_steps_per_second": 12.755,
      "step": 19000
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0005987362816177249,
      "loss": 0.7369,
      "step": 19500
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0005985494562507783,
      "loss": 0.7369,
      "step": 20000
    },
    {
      "epoch": 1.23,
      "eval_loss": 0.9006676077842712,
      "eval_runtime": 1.2815,
      "eval_samples_per_second": 780.312,
      "eval_steps_per_second": 12.485,
      "step": 20000
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.000598349790079647,
      "loss": 0.737,
      "step": 20500
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.000598137291838376,
      "loss": 0.7365,
      "step": 21000
    },
    {
      "epoch": 1.29,
      "eval_loss": 0.8698467016220093,
      "eval_runtime": 1.2584,
      "eval_samples_per_second": 794.654,
      "eval_steps_per_second": 12.714,
      "step": 21000
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.000597911970822327,
      "loss": 0.7364,
      "step": 21500
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.000597673836887771,
      "loss": 0.7363,
      "step": 22000
    },
    {
      "epoch": 1.35,
      "eval_loss": 0.870046079158783,
      "eval_runtime": 1.2567,
      "eval_samples_per_second": 795.734,
      "eval_steps_per_second": 12.732,
      "step": 22000
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0005974229004514577,
      "loss": 0.7363,
      "step": 22500
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.0005971591724901598,
      "loss": 0.7366,
      "step": 23000
    },
    {
      "epoch": 1.41,
      "eval_loss": 0.9021453857421875,
      "eval_runtime": 1.277,
      "eval_samples_per_second": 783.107,
      "eval_steps_per_second": 12.53,
      "step": 23000
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.0005968826645401927,
      "loss": 0.7368,
      "step": 23500
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00059659338869691,
      "loss": 0.7362,
      "step": 24000
    },
    {
      "epoch": 1.47,
      "eval_loss": 0.8762597441673279,
      "eval_runtime": 1.2918,
      "eval_samples_per_second": 774.091,
      "eval_steps_per_second": 12.385,
      "step": 24000
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.0005962913576141742,
      "loss": 0.7354,
      "step": 24500
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.000595976584503803,
      "loss": 0.7082,
      "step": 25000
    },
    {
      "epoch": 1.53,
      "eval_loss": 0.8719052672386169,
      "eval_runtime": 1.2648,
      "eval_samples_per_second": 790.61,
      "eval_steps_per_second": 12.65,
      "step": 25000
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0005956490831349923,
      "loss": 0.6914,
      "step": 25500
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.0005953088678337129,
      "loss": 0.6774,
      "step": 26000
    },
    {
      "epoch": 1.59,
      "eval_loss": 0.8876385688781738,
      "eval_runtime": 1.3046,
      "eval_samples_per_second": 766.511,
      "eval_steps_per_second": 12.264,
      "step": 26000
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.0005949559534820841,
      "loss": 0.6675,
      "step": 26500
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.0005945903555177229,
      "loss": 0.6525,
      "step": 27000
    },
    {
      "epoch": 1.65,
      "eval_loss": 0.8904880285263062,
      "eval_runtime": 1.319,
      "eval_samples_per_second": 758.176,
      "eval_steps_per_second": 12.131,
      "step": 27000
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.0005942120899330687,
      "loss": 0.6186,
      "step": 27500
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.0005938211732746836,
      "loss": 0.6022,
      "step": 28000
    },
    {
      "epoch": 1.72,
      "eval_loss": 0.8856265544891357,
      "eval_runtime": 1.2877,
      "eval_samples_per_second": 776.587,
      "eval_steps_per_second": 12.425,
      "step": 28000
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0005934176226425286,
      "loss": 0.593,
      "step": 28500
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.0005930014556892158,
      "loss": 0.5874,
      "step": 29000
    },
    {
      "epoch": 1.78,
      "eval_loss": 0.8793675303459167,
      "eval_runtime": 1.2489,
      "eval_samples_per_second": 800.707,
      "eval_steps_per_second": 12.811,
      "step": 29000
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.0005925726906192357,
      "loss": 0.5837,
      "step": 29500
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.0005921313461881617,
      "loss": 0.5765,
      "step": 30000
    },
    {
      "epoch": 1.84,
      "eval_loss": 0.8805580735206604,
      "eval_runtime": 1.2758,
      "eval_samples_per_second": 783.816,
      "eval_steps_per_second": 12.541,
      "step": 30000
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0005916774417018287,
      "loss": 0.5714,
      "step": 30500
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0005912109970154897,
      "loss": 0.5685,
      "step": 31000
    },
    {
      "epoch": 1.9,
      "eval_loss": 0.8747313022613525,
      "eval_runtime": 1.3003,
      "eval_samples_per_second": 769.027,
      "eval_steps_per_second": 12.304,
      "step": 31000
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.0005907320325329461,
      "loss": 0.566,
      "step": 31500
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.0005902405692056561,
      "loss": 0.564,
      "step": 32000
    },
    {
      "epoch": 1.96,
      "eval_loss": 0.8779122233390808,
      "eval_runtime": 1.293,
      "eval_samples_per_second": 773.405,
      "eval_steps_per_second": 12.374,
      "step": 32000
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.0005897366285318178,
      "loss": 0.5617,
      "step": 32500
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.0005892202325554288,
      "loss": 0.5606,
      "step": 33000
    },
    {
      "epoch": 2.02,
      "eval_loss": 0.8761873245239258,
      "eval_runtime": 1.3816,
      "eval_samples_per_second": 723.824,
      "eval_steps_per_second": 11.581,
      "step": 33000
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.0005886914038653217,
      "loss": 0.5583,
      "step": 33500
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.0005881501655941771,
      "loss": 0.5574,
      "step": 34000
    },
    {
      "epoch": 2.08,
      "eval_loss": 0.8702684044837952,
      "eval_runtime": 1.3086,
      "eval_samples_per_second": 764.158,
      "eval_steps_per_second": 12.227,
      "step": 34000
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00058759654141751,
      "loss": 0.5548,
      "step": 34500
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.0005870305555526355,
      "loss": 0.5528,
      "step": 35000
    },
    {
      "epoch": 2.14,
      "eval_loss": 0.8663867115974426,
      "eval_runtime": 1.2619,
      "eval_samples_per_second": 792.469,
      "eval_steps_per_second": 12.679,
      "step": 35000
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.0005864522327576088,
      "loss": 0.5509,
      "step": 35500
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.0005858615983301424,
      "loss": 0.5494,
      "step": 36000
    },
    {
      "epoch": 2.21,
      "eval_loss": 0.8717171549797058,
      "eval_runtime": 1.2691,
      "eval_samples_per_second": 787.953,
      "eval_steps_per_second": 12.607,
      "step": 36000
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.0005852586781064997,
      "loss": 0.5471,
      "step": 36500
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.0005846434984603645,
      "loss": 0.5448,
      "step": 37000
    },
    {
      "epoch": 2.27,
      "eval_loss": 0.8672583103179932,
      "eval_runtime": 1.268,
      "eval_samples_per_second": 788.629,
      "eval_steps_per_second": 12.618,
      "step": 37000
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.0005840160863016872,
      "loss": 0.5433,
      "step": 37500
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0005833764690755083,
      "loss": 0.5419,
      "step": 38000
    },
    {
      "epoch": 2.33,
      "eval_loss": 0.8636866211891174,
      "eval_runtime": 1.3526,
      "eval_samples_per_second": 739.295,
      "eval_steps_per_second": 11.829,
      "step": 38000
    },
    {
      "epoch": 2.36,
      "learning_rate": 0.0005827246747607574,
      "loss": 0.5398,
      "step": 38500
    },
    {
      "epoch": 2.39,
      "learning_rate": 0.0005820607318690293,
      "loss": 0.5385,
      "step": 39000
    },
    {
      "epoch": 2.39,
      "eval_loss": 0.8634054064750671,
      "eval_runtime": 1.2705,
      "eval_samples_per_second": 787.122,
      "eval_steps_per_second": 12.594,
      "step": 39000
    },
    {
      "epoch": 2.42,
      "learning_rate": 0.0005813846694433368,
      "loss": 0.5374,
      "step": 39500
    },
    {
      "epoch": 2.45,
      "learning_rate": 0.0005806965170568409,
      "loss": 0.536,
      "step": 40000
    },
    {
      "epoch": 2.45,
      "eval_loss": 0.8661102652549744,
      "eval_runtime": 1.3401,
      "eval_samples_per_second": 746.204,
      "eval_steps_per_second": 11.939,
      "step": 40000
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.0005799963048115559,
      "loss": 0.5353,
      "step": 40500
    },
    {
      "epoch": 2.51,
      "learning_rate": 0.0005792840633370341,
      "loss": 0.5336,
      "step": 41000
    },
    {
      "epoch": 2.51,
      "eval_loss": 0.8631040453910828,
      "eval_runtime": 1.3088,
      "eval_samples_per_second": 764.037,
      "eval_steps_per_second": 12.225,
      "step": 41000
    },
    {
      "epoch": 2.54,
      "learning_rate": 0.0005785598237890247,
      "loss": 0.5327,
      "step": 41500
    },
    {
      "epoch": 2.57,
      "learning_rate": 0.0005778236178481119,
      "loss": 0.5316,
      "step": 42000
    },
    {
      "epoch": 2.57,
      "eval_loss": 0.8605585098266602,
      "eval_runtime": 1.2836,
      "eval_samples_per_second": 779.077,
      "eval_steps_per_second": 12.465,
      "step": 42000
    },
    {
      "epoch": 2.6,
      "learning_rate": 0.0005770754777183285,
      "loss": 0.5306,
      "step": 42500
    },
    {
      "epoch": 2.63,
      "learning_rate": 0.0005763154361257473,
      "loss": 0.5297,
      "step": 43000
    },
    {
      "epoch": 2.63,
      "eval_loss": 0.8589205145835876,
      "eval_runtime": 1.2763,
      "eval_samples_per_second": 783.485,
      "eval_steps_per_second": 12.536,
      "step": 43000
    },
    {
      "epoch": 2.67,
      "learning_rate": 0.0005755435263170498,
      "loss": 0.5287,
      "step": 43500
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.0005747597820580717,
      "loss": 0.5305,
      "step": 44000
    },
    {
      "epoch": 2.7,
      "eval_loss": 0.8569635152816772,
      "eval_runtime": 1.2713,
      "eval_samples_per_second": 786.624,
      "eval_steps_per_second": 12.586,
      "step": 44000
    },
    {
      "epoch": 2.73,
      "learning_rate": 0.000573964237632326,
      "loss": 0.527,
      "step": 44500
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.0005731569278395029,
      "loss": 0.5262,
      "step": 45000
    },
    {
      "epoch": 2.76,
      "eval_loss": 0.8558768033981323,
      "eval_runtime": 1.2738,
      "eval_samples_per_second": 785.051,
      "eval_steps_per_second": 12.561,
      "step": 45000
    },
    {
      "epoch": 2.79,
      "learning_rate": 0.0005723378879939481,
      "loss": 0.5254,
      "step": 45500
    },
    {
      "epoch": 2.82,
      "learning_rate": 0.0005715071539231178,
      "loss": 0.5247,
      "step": 46000
    },
    {
      "epoch": 2.82,
      "eval_loss": 0.8633874654769897,
      "eval_runtime": 1.2747,
      "eval_samples_per_second": 784.504,
      "eval_steps_per_second": 12.552,
      "step": 46000
    },
    {
      "epoch": 2.85,
      "learning_rate": 0.0005706647619660116,
      "loss": 0.5243,
      "step": 46500
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.0005698107489715823,
      "loss": 0.5235,
      "step": 47000
    },
    {
      "epoch": 2.88,
      "eval_loss": 0.8606237769126892,
      "eval_runtime": 1.3283,
      "eval_samples_per_second": 752.838,
      "eval_steps_per_second": 12.045,
      "step": 47000
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.0005689451522971252,
      "loss": 0.5228,
      "step": 47500
    },
    {
      "epoch": 2.94,
      "learning_rate": 0.0005680680098066429,
      "loss": 0.5227,
      "step": 48000
    },
    {
      "epoch": 2.94,
      "eval_loss": 0.8610469698905945,
      "eval_runtime": 1.2783,
      "eval_samples_per_second": 782.274,
      "eval_steps_per_second": 12.516,
      "step": 48000
    },
    {
      "epoch": 2.97,
      "learning_rate": 0.0005671793598691895,
      "loss": 0.5215,
      "step": 48500
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0005662792413571921,
      "loss": 0.5206,
      "step": 49000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.8610268235206604,
      "eval_runtime": 1.3555,
      "eval_samples_per_second": 737.72,
      "eval_steps_per_second": 11.804,
      "step": 49000
    },
    {
      "epoch": 3.03,
      "learning_rate": 0.0005653676936447504,
      "loss": 0.5201,
      "step": 49500
    },
    {
      "epoch": 3.06,
      "learning_rate": 0.0005644447566059142,
      "loss": 0.5194,
      "step": 50000
    },
    {
      "epoch": 3.06,
      "eval_loss": 0.8611247539520264,
      "eval_runtime": 1.2902,
      "eval_samples_per_second": 775.099,
      "eval_steps_per_second": 12.402,
      "step": 50000
    },
    {
      "epoch": 3.09,
      "learning_rate": 0.0005635104706129397,
      "loss": 0.5189,
      "step": 50500
    },
    {
      "epoch": 3.12,
      "learning_rate": 0.0005625648765345228,
      "loss": 0.5183,
      "step": 51000
    },
    {
      "epoch": 3.12,
      "eval_loss": 0.8579334616661072,
      "eval_runtime": 1.3546,
      "eval_samples_per_second": 738.246,
      "eval_steps_per_second": 11.812,
      "step": 51000
    },
    {
      "epoch": 3.16,
      "learning_rate": 0.0005616080157340118,
      "loss": 0.5178,
      "step": 51500
    },
    {
      "epoch": 3.19,
      "learning_rate": 0.0005606399300675978,
      "loss": 0.5175,
      "step": 52000
    },
    {
      "epoch": 3.19,
      "eval_loss": 0.8597957491874695,
      "eval_runtime": 1.3415,
      "eval_samples_per_second": 745.449,
      "eval_steps_per_second": 11.927,
      "step": 52000
    },
    {
      "epoch": 3.22,
      "learning_rate": 0.0005596606618824843,
      "loss": 0.5186,
      "step": 52500
    },
    {
      "epoch": 3.25,
      "learning_rate": 0.0005586702540150338,
      "loss": 0.5163,
      "step": 53000
    },
    {
      "epoch": 3.25,
      "eval_loss": 0.8520950078964233,
      "eval_runtime": 1.3505,
      "eval_samples_per_second": 740.484,
      "eval_steps_per_second": 11.848,
      "step": 53000
    },
    {
      "epoch": 3.28,
      "learning_rate": 0.000557668749788895,
      "loss": 0.5159,
      "step": 53500
    },
    {
      "epoch": 3.31,
      "learning_rate": 0.0005566561930131072,
      "loss": 0.5156,
      "step": 54000
    },
    {
      "epoch": 3.31,
      "eval_loss": 0.8550420999526978,
      "eval_runtime": 1.3983,
      "eval_samples_per_second": 715.143,
      "eval_steps_per_second": 11.442,
      "step": 54000
    },
    {
      "epoch": 3.34,
      "learning_rate": 0.000555632627980184,
      "loss": 0.5153,
      "step": 54500
    },
    {
      "epoch": 3.37,
      "learning_rate": 0.0005545980994641758,
      "loss": 0.5148,
      "step": 55000
    },
    {
      "epoch": 3.37,
      "eval_loss": 0.8503537178039551,
      "eval_runtime": 1.2794,
      "eval_samples_per_second": 781.636,
      "eval_steps_per_second": 12.506,
      "step": 55000
    },
    {
      "epoch": 3.4,
      "learning_rate": 0.0005535526527187115,
      "loss": 0.5144,
      "step": 55500
    },
    {
      "epoch": 3.43,
      "learning_rate": 0.0005524963334750183,
      "loss": 0.5139,
      "step": 56000
    },
    {
      "epoch": 3.43,
      "eval_loss": 0.8530069589614868,
      "eval_runtime": 1.2989,
      "eval_samples_per_second": 769.886,
      "eval_steps_per_second": 12.318,
      "step": 56000
    },
    {
      "epoch": 3.46,
      "learning_rate": 0.0005514291879399219,
      "loss": 0.5136,
      "step": 56500
    },
    {
      "epoch": 3.49,
      "learning_rate": 0.000550351262793825,
      "loss": 0.5133,
      "step": 57000
    },
    {
      "epoch": 3.49,
      "eval_loss": 0.8588612675666809,
      "eval_runtime": 1.2931,
      "eval_samples_per_second": 773.351,
      "eval_steps_per_second": 12.374,
      "step": 57000
    },
    {
      "epoch": 3.52,
      "learning_rate": 0.0005492626051886659,
      "loss": 0.5129,
      "step": 57500
    },
    {
      "epoch": 3.55,
      "learning_rate": 0.0005481632627458546,
      "loss": 0.5126,
      "step": 58000
    },
    {
      "epoch": 3.55,
      "eval_loss": 0.8561201095581055,
      "eval_runtime": 1.244,
      "eval_samples_per_second": 803.841,
      "eval_steps_per_second": 12.861,
      "step": 58000
    },
    {
      "epoch": 3.58,
      "learning_rate": 0.0005470532835541911,
      "loss": 0.5122,
      "step": 58500
    },
    {
      "epoch": 3.62,
      "learning_rate": 0.000545932716167761,
      "loss": 0.5119,
      "step": 59000
    },
    {
      "epoch": 3.62,
      "eval_loss": 0.8574157953262329,
      "eval_runtime": 1.3446,
      "eval_samples_per_second": 743.721,
      "eval_steps_per_second": 11.9,
      "step": 59000
    },
    {
      "epoch": 3.65,
      "learning_rate": 0.000544801609603812,
      "loss": 0.5115,
      "step": 59500
    },
    {
      "epoch": 3.68,
      "learning_rate": 0.0005436600133406095,
      "loss": 0.5127,
      "step": 60000
    },
    {
      "epoch": 3.68,
      "eval_loss": 0.8624024987220764,
      "eval_runtime": 1.324,
      "eval_samples_per_second": 755.286,
      "eval_steps_per_second": 12.085,
      "step": 60000
    },
    {
      "epoch": 3.71,
      "learning_rate": 0.0005425079773152721,
      "loss": 0.5113,
      "step": 60500
    },
    {
      "epoch": 3.74,
      "learning_rate": 0.0005413455519215879,
      "loss": 0.5105,
      "step": 61000
    },
    {
      "epoch": 3.74,
      "eval_loss": 0.8522316813468933,
      "eval_runtime": 1.2934,
      "eval_samples_per_second": 773.185,
      "eval_steps_per_second": 12.371,
      "step": 61000
    },
    {
      "epoch": 3.77,
      "learning_rate": 0.0005401727880078093,
      "loss": 0.5102,
      "step": 61500
    },
    {
      "epoch": 3.8,
      "learning_rate": 0.0005389897368744289,
      "loss": 0.5099,
      "step": 62000
    },
    {
      "epoch": 3.8,
      "eval_loss": 0.8549780249595642,
      "eval_runtime": 1.3356,
      "eval_samples_per_second": 748.732,
      "eval_steps_per_second": 11.98,
      "step": 62000
    },
    {
      "epoch": 3.83,
      "learning_rate": 0.0005377964502719361,
      "loss": 0.5099,
      "step": 62500
    },
    {
      "epoch": 3.86,
      "learning_rate": 0.0005365929803985524,
      "loss": 0.5094,
      "step": 63000
    },
    {
      "epoch": 3.86,
      "eval_loss": 0.8536927700042725,
      "eval_runtime": 1.2799,
      "eval_samples_per_second": 781.291,
      "eval_steps_per_second": 12.501,
      "step": 63000
    },
    {
      "epoch": 3.89,
      "learning_rate": 0.0005353793798979489,
      "loss": 0.5093,
      "step": 63500
    },
    {
      "epoch": 3.92,
      "learning_rate": 0.000534155701856943,
      "loss": 0.509,
      "step": 64000
    },
    {
      "epoch": 3.92,
      "eval_loss": 0.8535122871398926,
      "eval_runtime": 1.2782,
      "eval_samples_per_second": 782.329,
      "eval_steps_per_second": 12.517,
      "step": 64000
    },
    {
      "epoch": 3.95,
      "learning_rate": 0.0005329219998031763,
      "loss": 0.5087,
      "step": 64500
    },
    {
      "epoch": 3.98,
      "learning_rate": 0.0005316783277027734,
      "loss": 0.5091,
      "step": 65000
    },
    {
      "epoch": 3.98,
      "eval_loss": 0.8591586351394653,
      "eval_runtime": 1.3143,
      "eval_samples_per_second": 760.882,
      "eval_steps_per_second": 12.174,
      "step": 65000
    },
    {
      "epoch": 4.01,
      "learning_rate": 0.0005304247399579808,
      "loss": 0.508,
      "step": 65500
    },
    {
      "epoch": 4.04,
      "learning_rate": 0.0005291612914047876,
      "loss": 0.5079,
      "step": 66000
    },
    {
      "epoch": 4.04,
      "eval_loss": 0.8554427027702332,
      "eval_runtime": 1.3483,
      "eval_samples_per_second": 741.678,
      "eval_steps_per_second": 11.867,
      "step": 66000
    },
    {
      "epoch": 4.07,
      "learning_rate": 0.0005278880373105263,
      "loss": 0.5077,
      "step": 66500
    },
    {
      "epoch": 4.11,
      "learning_rate": 0.0005266050333714561,
      "loss": 0.5074,
      "step": 67000
    },
    {
      "epoch": 4.11,
      "eval_loss": 0.8515585660934448,
      "eval_runtime": 1.2858,
      "eval_samples_per_second": 777.712,
      "eval_steps_per_second": 12.443,
      "step": 67000
    },
    {
      "epoch": 4.14,
      "learning_rate": 0.0005253123357103253,
      "loss": 0.5074,
      "step": 67500
    },
    {
      "epoch": 4.17,
      "learning_rate": 0.0005240100008739177,
      "loss": 0.5069,
      "step": 68000
    },
    {
      "epoch": 4.17,
      "eval_loss": 0.8490995168685913,
      "eval_runtime": 1.2974,
      "eval_samples_per_second": 770.751,
      "eval_steps_per_second": 12.332,
      "step": 68000
    },
    {
      "epoch": 4.2,
      "learning_rate": 0.0005226980858305778,
      "loss": 0.5067,
      "step": 68500
    },
    {
      "epoch": 4.23,
      "learning_rate": 0.0005213766479677197,
      "loss": 0.5066,
      "step": 69000
    },
    {
      "epoch": 4.23,
      "eval_loss": 0.8570588231086731,
      "eval_runtime": 1.2957,
      "eval_samples_per_second": 771.773,
      "eval_steps_per_second": 12.348,
      "step": 69000
    },
    {
      "epoch": 4.26,
      "learning_rate": 0.0005200457450893163,
      "loss": 0.5063,
      "step": 69500
    },
    {
      "epoch": 4.29,
      "learning_rate": 0.0005187054354133712,
      "loss": 0.5068,
      "step": 70000
    },
    {
      "epoch": 4.29,
      "eval_loss": 0.8535866141319275,
      "eval_runtime": 1.3217,
      "eval_samples_per_second": 756.603,
      "eval_steps_per_second": 12.106,
      "step": 70000
    },
    {
      "epoch": 4.32,
      "learning_rate": 0.0005173557775693715,
      "loss": 0.5058,
      "step": 70500
    },
    {
      "epoch": 4.35,
      "learning_rate": 0.0005159968305957235,
      "loss": 0.5066,
      "step": 71000
    },
    {
      "epoch": 4.35,
      "eval_loss": 0.9288005232810974,
      "eval_runtime": 1.2769,
      "eval_samples_per_second": 783.148,
      "eval_steps_per_second": 12.53,
      "step": 71000
    },
    {
      "epoch": 4.38,
      "learning_rate": 0.0005146286539371703,
      "loss": 0.5084,
      "step": 71500
    },
    {
      "epoch": 4.41,
      "learning_rate": 0.0005132513074421913,
      "loss": 0.5051,
      "step": 72000
    },
    {
      "epoch": 4.41,
      "eval_loss": 0.8597245812416077,
      "eval_runtime": 1.3157,
      "eval_samples_per_second": 760.048,
      "eval_steps_per_second": 12.161,
      "step": 72000
    },
    {
      "epoch": 4.44,
      "learning_rate": 0.0005118648513603841,
      "loss": 0.5051,
      "step": 72500
    },
    {
      "epoch": 4.47,
      "learning_rate": 0.0005104693463398293,
      "loss": 0.5045,
      "step": 73000
    },
    {
      "epoch": 4.47,
      "eval_loss": 0.8554754257202148,
      "eval_runtime": 1.3269,
      "eval_samples_per_second": 753.64,
      "eval_steps_per_second": 12.058,
      "step": 73000
    },
    {
      "epoch": 4.5,
      "learning_rate": 0.0005090648534244371,
      "loss": 0.5045,
      "step": 73500
    },
    {
      "epoch": 4.53,
      "learning_rate": 0.0005076514340512776,
      "loss": 0.5043,
      "step": 74000
    },
    {
      "epoch": 4.53,
      "eval_loss": 0.8547331094741821,
| "eval_runtime": 1.2736, | |
| "eval_samples_per_second": 785.148, | |
| "eval_steps_per_second": 12.562, | |
| "step": 74000 | |
| }, | |
| { | |
| "epoch": 4.56, | |
| "learning_rate": 0.0005062291500478931, | |
| "loss": 0.5039, | |
| "step": 74500 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "learning_rate": 0.0005047980636295937, | |
| "loss": 0.5039, | |
| "step": 75000 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "eval_loss": 0.8560617566108704, | |
| "eval_runtime": 1.2429, | |
| "eval_samples_per_second": 804.574, | |
| "eval_steps_per_second": 12.873, | |
| "step": 75000 | |
| }, | |
| { | |
| "epoch": 4.63, | |
| "learning_rate": 0.0005033582373967348, | |
| "loss": 0.5036, | |
| "step": 75500 | |
| }, | |
| { | |
| "epoch": 4.66, | |
| "learning_rate": 0.0005019097343319809, | |
| "loss": 0.504, | |
| "step": 76000 | |
| }, | |
| { | |
| "epoch": 4.66, | |
| "eval_loss": 0.854142427444458, | |
| "eval_runtime": 1.3399, | |
| "eval_samples_per_second": 746.336, | |
| "eval_steps_per_second": 11.941, | |
| "step": 76000 | |
| }, | |
| { | |
| "epoch": 4.69, | |
| "learning_rate": 0.0005004526177975481, | |
| "loss": 0.503, | |
| "step": 76500 | |
| }, | |
| { | |
| "epoch": 4.72, | |
| "learning_rate": 0.0004989869515324342, | |
| "loss": 0.5026, | |
| "step": 77000 | |
| }, | |
| { | |
| "epoch": 4.72, | |
| "eval_loss": 0.8489631414413452, | |
| "eval_runtime": 1.2942, | |
| "eval_samples_per_second": 772.659, | |
| "eval_steps_per_second": 12.363, | |
| "step": 77000 | |
| }, | |
| { | |
| "epoch": 4.75, | |
| "learning_rate": 0.0004975127996496297, | |
| "loss": 0.5024, | |
| "step": 77500 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "learning_rate": 0.0004960302266333135, | |
| "loss": 0.5024, | |
| "step": 78000 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "eval_loss": 0.8499003648757935, | |
| "eval_runtime": 1.3145, | |
| "eval_samples_per_second": 760.727, | |
| "eval_steps_per_second": 12.172, | |
| "step": 78000 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "learning_rate": 0.0004945392973360323, | |
| "loss": 0.502, | |
| "step": 78500 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "learning_rate": 0.0004930400769758634, | |
| "loss": 0.5019, | |
| "step": 79000 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "eval_loss": 0.8521845936775208, | |
| "eval_runtime": 1.3201, | |
| "eval_samples_per_second": 757.527, | |
| "eval_steps_per_second": 12.12, | |
| "step": 79000 | |
| }, | |
| { | |
| "epoch": 4.87, | |
| "learning_rate": 0.0004915326311335622, | |
| "loss": 0.5017, | |
| "step": 79500 | |
| }, | |
| { | |
| "epoch": 4.9, | |
| "learning_rate": 0.0004900170257496933, | |
| "loss": 0.5014, | |
| "step": 80000 | |
| }, | |
| { | |
| "epoch": 4.9, | |
| "eval_loss": 0.8508275747299194, | |
| "eval_runtime": 1.3001, | |
| "eval_samples_per_second": 769.194, | |
| "eval_steps_per_second": 12.307, | |
| "step": 80000 | |
| }, | |
| { | |
| "epoch": 4.93, | |
| "learning_rate": 0.0004884933271217461, | |
| "loss": 0.501, | |
| "step": 80500 | |
| }, | |
| { | |
| "epoch": 4.96, | |
| "learning_rate": 0.0004869616019012347, | |
| "loss": 0.5008, | |
| "step": 81000 | |
| }, | |
| { | |
| "epoch": 4.96, | |
| "eval_loss": 0.8512425422668457, | |
| "eval_runtime": 1.2521, | |
| "eval_samples_per_second": 798.677, | |
| "eval_steps_per_second": 12.779, | |
| "step": 81000 | |
| }, | |
| { | |
| "epoch": 4.99, | |
| "learning_rate": 0.00048542191709078226, | |
| "loss": 0.5004, | |
| "step": 81500 | |
| }, | |
| { | |
| "epoch": 5.02, | |
| "learning_rate": 0.00048387434004119027, | |
| "loss": 0.5002, | |
| "step": 82000 | |
| }, | |
| { | |
| "epoch": 5.02, | |
| "eval_loss": 0.8470181822776794, | |
| "eval_runtime": 1.2894, | |
| "eval_samples_per_second": 775.545, | |
| "eval_steps_per_second": 12.409, | |
| "step": 82000 | |
| }, | |
| { | |
| "epoch": 5.06, | |
| "learning_rate": 0.0004823189384484924, | |
| "loss": 0.5007, | |
| "step": 82500 | |
| }, | |
| { | |
| "epoch": 5.09, | |
| "learning_rate": 0.000480755780350993, | |
| "loss": 0.4995, | |
| "step": 83000 | |
| }, | |
| { | |
| "epoch": 5.09, | |
| "eval_loss": 0.846192479133606, | |
| "eval_runtime": 1.2808, | |
| "eval_samples_per_second": 780.79, | |
| "eval_steps_per_second": 12.493, | |
| "step": 83000 | |
| }, | |
| { | |
| "epoch": 5.12, | |
| "learning_rate": 0.0004791849341262914, | |
| "loss": 0.4993, | |
| "step": 83500 | |
| }, | |
| { | |
| "epoch": 5.15, | |
| "learning_rate": 0.0004776064684882901, | |
| "loss": 0.4991, | |
| "step": 84000 | |
| }, | |
| { | |
| "epoch": 5.15, | |
| "eval_loss": 0.8454980254173279, | |
| "eval_runtime": 1.3356, | |
| "eval_samples_per_second": 748.729, | |
| "eval_steps_per_second": 11.98, | |
| "step": 84000 | |
| }, | |
| { | |
| "epoch": 5.18, | |
| "learning_rate": 0.0004760204524841897, | |
| "loss": 0.4985, | |
| "step": 84500 | |
| }, | |
| { | |
| "epoch": 5.21, | |
| "learning_rate": 0.0004744269554914683, | |
| "loss": 0.4982, | |
| "step": 85000 | |
| }, | |
| { | |
| "epoch": 5.21, | |
| "eval_loss": 0.8464594483375549, | |
| "eval_runtime": 1.368, | |
| "eval_samples_per_second": 730.977, | |
| "eval_steps_per_second": 11.696, | |
| "step": 85000 | |
| }, | |
| { | |
| "epoch": 5.24, | |
| "learning_rate": 0.00047282604721484643, | |
| "loss": 0.4979, | |
| "step": 85500 | |
| }, | |
| { | |
| "epoch": 5.27, | |
| "learning_rate": 0.0004712177976832385, | |
| "loss": 0.4978, | |
| "step": 86000 | |
| }, | |
| { | |
| "epoch": 5.27, | |
| "eval_loss": 0.8433752655982971, | |
| "eval_runtime": 1.3279, | |
| "eval_samples_per_second": 753.055, | |
| "eval_steps_per_second": 12.049, | |
| "step": 86000 | |
| }, | |
| { | |
| "epoch": 5.3, | |
| "learning_rate": 0.0004696022772466888, | |
| "loss": 0.4974, | |
| "step": 86500 | |
| }, | |
| { | |
| "epoch": 5.33, | |
| "learning_rate": 0.00046797955657329487, | |
| "loss": 0.4969, | |
| "step": 87000 | |
| }, | |
| { | |
| "epoch": 5.33, | |
| "eval_loss": 0.843237042427063, | |
| "eval_runtime": 1.3349, | |
| "eval_samples_per_second": 749.108, | |
| "eval_steps_per_second": 11.986, | |
| "step": 87000 | |
| }, | |
| { | |
| "epoch": 5.36, | |
| "learning_rate": 0.000466349706646116, | |
| "loss": 0.4967, | |
| "step": 87500 | |
| }, | |
| { | |
| "epoch": 5.39, | |
| "learning_rate": 0.00046471279876006763, | |
| "loss": 0.4964, | |
| "step": 88000 | |
| }, | |
| { | |
| "epoch": 5.39, | |
| "eval_loss": 0.8417282104492188, | |
| "eval_runtime": 1.3291, | |
| "eval_samples_per_second": 752.394, | |
| "eval_steps_per_second": 12.038, | |
| "step": 88000 | |
| }, | |
| { | |
| "epoch": 5.42, | |
| "learning_rate": 0.00046306890451880395, | |
| "loss": 0.4964, | |
| "step": 88500 | |
| }, | |
| { | |
| "epoch": 5.45, | |
| "learning_rate": 0.0004614180958315844, | |
| "loss": 0.4957, | |
| "step": 89000 | |
| }, | |
| { | |
| "epoch": 5.45, | |
| "eval_loss": 0.8362923860549927, | |
| "eval_runtime": 1.2708, | |
| "eval_samples_per_second": 786.921, | |
| "eval_steps_per_second": 12.591, | |
| "step": 89000 | |
| }, | |
| { | |
| "epoch": 5.48, | |
| "learning_rate": 0.00045976044491012884, | |
| "loss": 0.4954, | |
| "step": 89500 | |
| }, | |
| { | |
| "epoch": 5.51, | |
| "learning_rate": 0.00045809602426545847, | |
| "loss": 0.495, | |
| "step": 90000 | |
| }, | |
| { | |
| "epoch": 5.51, | |
| "eval_loss": 0.8392152190208435, | |
| "eval_runtime": 1.26, | |
| "eval_samples_per_second": 793.671, | |
| "eval_steps_per_second": 12.699, | |
| "step": 90000 | |
| }, | |
| { | |
| "epoch": 5.55, | |
| "learning_rate": 0.00045642490670472436, | |
| "loss": 0.4947, | |
| "step": 90500 | |
| }, | |
| { | |
| "epoch": 5.58, | |
| "learning_rate": 0.0004547471653280225, | |
| "loss": 0.4946, | |
| "step": 91000 | |
| }, | |
| { | |
| "epoch": 5.58, | |
| "eval_loss": 0.8400516510009766, | |
| "eval_runtime": 1.2358, | |
| "eval_samples_per_second": 809.182, | |
| "eval_steps_per_second": 12.947, | |
| "step": 91000 | |
| }, | |
| { | |
| "epoch": 5.61, | |
| "learning_rate": 0.00045306287352519543, | |
| "loss": 0.4939, | |
| "step": 91500 | |
| }, | |
| { | |
| "epoch": 5.64, | |
| "learning_rate": 0.00045137210497262333, | |
| "loss": 0.4935, | |
| "step": 92000 | |
| }, | |
| { | |
| "epoch": 5.64, | |
| "eval_loss": 0.8373098969459534, | |
| "eval_runtime": 1.3142, | |
| "eval_samples_per_second": 760.929, | |
| "eval_steps_per_second": 12.175, | |
| "step": 92000 | |
| }, | |
| { | |
| "epoch": 5.67, | |
| "learning_rate": 0.0004496749336299999, | |
| "loss": 0.4931, | |
| "step": 92500 | |
| }, | |
| { | |
| "epoch": 5.7, | |
| "learning_rate": 0.0004479714337370977, | |
| "loss": 0.4929, | |
| "step": 93000 | |
| }, | |
| { | |
| "epoch": 5.7, | |
| "eval_loss": 0.840123176574707, | |
| "eval_runtime": 1.2896, | |
| "eval_samples_per_second": 775.451, | |
| "eval_steps_per_second": 12.407, | |
| "step": 93000 | |
| }, | |
| { | |
| "epoch": 5.73, | |
| "learning_rate": 0.00044626167981052036, | |
| "loss": 0.4924, | |
| "step": 93500 | |
| }, | |
| { | |
| "epoch": 5.76, | |
| "learning_rate": 0.00044454574664044404, | |
| "loss": 0.492, | |
| "step": 94000 | |
| }, | |
| { | |
| "epoch": 5.76, | |
| "eval_loss": 0.8355880379676819, | |
| "eval_runtime": 1.2671, | |
| "eval_samples_per_second": 789.19, | |
| "eval_steps_per_second": 12.627, | |
| "step": 94000 | |
| }, | |
| { | |
| "epoch": 5.79, | |
| "learning_rate": 0.000442823709287344, | |
| "loss": 0.4916, | |
| "step": 94500 | |
| }, | |
| { | |
| "epoch": 5.82, | |
| "learning_rate": 0.0004410956430787129, | |
| "loss": 0.4912, | |
| "step": 95000 | |
| }, | |
| { | |
| "epoch": 5.82, | |
| "eval_loss": 0.8333644866943359, | |
| "eval_runtime": 1.299, | |
| "eval_samples_per_second": 769.834, | |
| "eval_steps_per_second": 12.317, | |
| "step": 95000 | |
| }, | |
| { | |
| "epoch": 5.85, | |
| "learning_rate": 0.0004393616236057647, | |
| "loss": 0.4912, | |
| "step": 95500 | |
| }, | |
| { | |
| "epoch": 5.88, | |
| "learning_rate": 0.00043762172672012875, | |
| "loss": 0.4904, | |
| "step": 96000 | |
| }, | |
| { | |
| "epoch": 5.88, | |
| "eval_loss": 0.8280515074729919, | |
| "eval_runtime": 1.2803, | |
| "eval_samples_per_second": 781.038, | |
| "eval_steps_per_second": 12.497, | |
| "step": 96000 | |
| }, | |
| { | |
| "epoch": 5.91, | |
| "learning_rate": 0.0004358760285305312, | |
| "loss": 0.4901, | |
| "step": 96500 | |
| }, | |
| { | |
| "epoch": 5.94, | |
| "learning_rate": 0.0004341246053994663, | |
| "loss": 0.4898, | |
| "step": 97000 | |
| }, | |
| { | |
| "epoch": 5.94, | |
| "eval_loss": 0.8338386416435242, | |
| "eval_runtime": 1.3216, | |
| "eval_samples_per_second": 756.685, | |
| "eval_steps_per_second": 12.107, | |
| "step": 97000 | |
| }, | |
| { | |
| "epoch": 5.97, | |
| "learning_rate": 0.00043236753393985534, | |
| "loss": 0.4892, | |
| "step": 97500 | |
| }, | |
| { | |
| "epoch": 6.0, | |
| "learning_rate": 0.0004306048910116964, | |
| "loss": 0.4891, | |
| "step": 98000 | |
| }, | |
| { | |
| "epoch": 6.0, | |
| "eval_loss": 0.8300430774688721, | |
| "eval_runtime": 1.316, | |
| "eval_samples_per_second": 759.879, | |
| "eval_steps_per_second": 12.158, | |
| "step": 98000 | |
| }, | |
| { | |
| "epoch": 6.04, | |
| "learning_rate": 0.0004288367537187012, | |
| "loss": 0.4887, | |
| "step": 98500 | |
| }, | |
| { | |
| "epoch": 6.07, | |
| "learning_rate": 0.00042706319940492284, | |
| "loss": 0.4882, | |
| "step": 99000 | |
| }, | |
| { | |
| "epoch": 6.07, | |
| "eval_loss": 0.8262238502502441, | |
| "eval_runtime": 1.3745, | |
| "eval_samples_per_second": 727.548, | |
| "eval_steps_per_second": 11.641, | |
| "step": 99000 | |
| }, | |
| { | |
| "epoch": 6.1, | |
| "learning_rate": 0.00042528430565137254, | |
| "loss": 0.488, | |
| "step": 99500 | |
| }, | |
| { | |
| "epoch": 6.13, | |
| "learning_rate": 0.00042350015027262593, | |
| "loss": 0.4876, | |
| "step": 100000 | |
| }, | |
| { | |
| "epoch": 6.13, | |
| "eval_loss": 0.8171582221984863, | |
| "eval_runtime": 1.3024, | |
| "eval_samples_per_second": 767.786, | |
| "eval_steps_per_second": 12.285, | |
| "step": 100000 | |
| }, | |
| { | |
| "epoch": 6.16, | |
| "learning_rate": 0.00042171081131341917, | |
| "loss": 0.4877, | |
| "step": 100500 | |
| }, | |
| { | |
| "epoch": 6.19, | |
| "learning_rate": 0.00041991636704523497, | |
| "loss": 0.4868, | |
| "step": 101000 | |
| }, | |
| { | |
| "epoch": 6.19, | |
| "eval_loss": 0.8239555954933167, | |
| "eval_runtime": 1.2567, | |
| "eval_samples_per_second": 795.735, | |
| "eval_steps_per_second": 12.732, | |
| "step": 101000 | |
| }, | |
| { | |
| "epoch": 6.22, | |
| "learning_rate": 0.00041811689596287893, | |
| "loss": 0.4864, | |
| "step": 101500 | |
| }, | |
| { | |
| "epoch": 6.25, | |
| "learning_rate": 0.0004163124767810454, | |
| "loss": 0.4861, | |
| "step": 102000 | |
| }, | |
| { | |
| "epoch": 6.25, | |
| "eval_loss": 0.8212010264396667, | |
| "eval_runtime": 1.2247, | |
| "eval_samples_per_second": 816.535, | |
| "eval_steps_per_second": 13.065, | |
| "step": 102000 | |
| }, | |
| { | |
| "epoch": 6.28, | |
| "learning_rate": 0.00041450318843087506, | |
| "loss": 0.4858, | |
| "step": 102500 | |
| }, | |
| { | |
| "epoch": 6.31, | |
| "learning_rate": 0.00041268911005650166, | |
| "loss": 0.4854, | |
| "step": 103000 | |
| }, | |
| { | |
| "epoch": 6.31, | |
| "eval_loss": 0.8242572546005249, | |
| "eval_runtime": 1.357, | |
| "eval_samples_per_second": 736.942, | |
| "eval_steps_per_second": 11.791, | |
| "step": 103000 | |
| }, | |
| { | |
| "epoch": 6.34, | |
| "learning_rate": 0.00041087032101159006, | |
| "loss": 0.4851, | |
| "step": 103500 | |
| }, | |
| { | |
| "epoch": 6.37, | |
| "learning_rate": 0.00040904690085586515, | |
| "loss": 0.4847, | |
| "step": 104000 | |
| }, | |
| { | |
| "epoch": 6.37, | |
| "eval_loss": 0.8227641582489014, | |
| "eval_runtime": 1.3326, | |
| "eval_samples_per_second": 750.437, | |
| "eval_steps_per_second": 12.007, | |
| "step": 104000 | |
| }, | |
| { | |
| "epoch": 6.4, | |
| "learning_rate": 0.0004072189293516316, | |
| "loss": 0.4844, | |
| "step": 104500 | |
| }, | |
| { | |
| "epoch": 6.43, | |
| "learning_rate": 0.0004053864864602847, | |
| "loss": 0.4841, | |
| "step": 105000 | |
| }, | |
| { | |
| "epoch": 6.43, | |
| "eval_loss": 0.8184976577758789, | |
| "eval_runtime": 1.3395, | |
| "eval_samples_per_second": 746.538, | |
| "eval_steps_per_second": 11.945, | |
| "step": 105000 | |
| }, | |
| { | |
| "epoch": 6.46, | |
| "learning_rate": 0.00040354965233881297, | |
| "loss": 0.4846, | |
| "step": 105500 | |
| }, | |
| { | |
| "epoch": 6.5, | |
| "learning_rate": 0.0004017085073362913, | |
| "loss": 0.4837, | |
| "step": 106000 | |
| }, | |
| { | |
| "epoch": 6.5, | |
| "eval_loss": 0.8177208304405212, | |
| "eval_runtime": 1.2838, | |
| "eval_samples_per_second": 778.932, | |
| "eval_steps_per_second": 12.463, | |
| "step": 106000 | |
| }, | |
| { | |
| "epoch": 6.53, | |
| "learning_rate": 0.00039986313199036664, | |
| "loss": 0.4828, | |
| "step": 106500 | |
| }, | |
| { | |
| "epoch": 6.56, | |
| "learning_rate": 0.00039801360702373484, | |
| "loss": 0.4827, | |
| "step": 107000 | |
| }, | |
| { | |
| "epoch": 6.56, | |
| "eval_loss": 0.8140051364898682, | |
| "eval_runtime": 1.3554, | |
| "eval_samples_per_second": 737.763, | |
| "eval_steps_per_second": 11.804, | |
| "step": 107000 | |
| }, | |
| { | |
| "epoch": 6.59, | |
| "learning_rate": 0.00039616001334060954, | |
| "loss": 0.4824, | |
| "step": 107500 | |
| }, | |
| { | |
| "epoch": 6.62, | |
| "learning_rate": 0.00039430243202318314, | |
| "loss": 0.4819, | |
| "step": 108000 | |
| }, | |
| { | |
| "epoch": 6.62, | |
| "eval_loss": 0.8147432208061218, | |
| "eval_runtime": 1.3502, | |
| "eval_samples_per_second": 740.632, | |
| "eval_steps_per_second": 11.85, | |
| "step": 108000 | |
| }, | |
| { | |
| "epoch": 6.65, | |
| "learning_rate": 0.00039244094432808034, | |
| "loss": 0.4816, | |
| "step": 108500 | |
| }, | |
| { | |
| "epoch": 6.68, | |
| "learning_rate": 0.0003905756316828033, | |
| "loss": 0.4813, | |
| "step": 109000 | |
| }, | |
| { | |
| "epoch": 6.68, | |
| "eval_loss": 0.8172094225883484, | |
| "eval_runtime": 1.4168, | |
| "eval_samples_per_second": 705.811, | |
| "eval_steps_per_second": 11.293, | |
| "step": 109000 | |
| }, | |
| { | |
| "epoch": 6.71, | |
| "learning_rate": 0.00038870657568216963, | |
| "loss": 0.4808, | |
| "step": 109500 | |
| }, | |
| { | |
| "epoch": 6.74, | |
| "learning_rate": 0.00038683385808474416, | |
| "loss": 0.4807, | |
| "step": 110000 | |
| }, | |
| { | |
| "epoch": 6.74, | |
| "eval_loss": 0.8148666024208069, | |
| "eval_runtime": 1.2865, | |
| "eval_samples_per_second": 777.303, | |
| "eval_steps_per_second": 12.437, | |
| "step": 110000 | |
| }, | |
| { | |
| "epoch": 6.77, | |
| "learning_rate": 0.00038495756080926107, | |
| "loss": 0.4802, | |
| "step": 110500 | |
| }, | |
| { | |
| "epoch": 6.8, | |
| "learning_rate": 0.0003830777659310416, | |
| "loss": 0.4801, | |
| "step": 111000 | |
| }, | |
| { | |
| "epoch": 6.8, | |
| "eval_loss": 0.8152140974998474, | |
| "eval_runtime": 1.3248, | |
| "eval_samples_per_second": 754.82, | |
| "eval_steps_per_second": 12.077, | |
| "step": 111000 | |
| }, | |
| { | |
| "epoch": 6.83, | |
| "learning_rate": 0.0003811945556784033, | |
| "loss": 0.4795, | |
| "step": 111500 | |
| }, | |
| { | |
| "epoch": 6.86, | |
| "learning_rate": 0.00037930801242906366, | |
| "loss": 0.4792, | |
| "step": 112000 | |
| }, | |
| { | |
| "epoch": 6.86, | |
| "eval_loss": 0.8089267015457153, | |
| "eval_runtime": 1.2928, | |
| "eval_samples_per_second": 773.538, | |
| "eval_steps_per_second": 12.377, | |
| "step": 112000 | |
| }, | |
| { | |
| "epoch": 6.89, | |
| "learning_rate": 0.00037741821870653576, | |
| "loss": 0.4789, | |
| "step": 112500 | |
| }, | |
| { | |
| "epoch": 6.92, | |
| "learning_rate": 0.00037552525717651905, | |
| "loss": 0.4785, | |
| "step": 113000 | |
| }, | |
| { | |
| "epoch": 6.92, | |
| "eval_loss": 0.808442234992981, | |
| "eval_runtime": 1.3221, | |
| "eval_samples_per_second": 756.389, | |
| "eval_steps_per_second": 12.102, | |
| "step": 113000 | |
| }, | |
| { | |
| "epoch": 6.95, | |
| "learning_rate": 0.0003736292106432834, | |
| "loss": 0.4782, | |
| "step": 113500 | |
| }, | |
| { | |
| "epoch": 6.99, | |
| "learning_rate": 0.00037173016204604647, | |
| "loss": 0.4777, | |
| "step": 114000 | |
| }, | |
| { | |
| "epoch": 6.99, | |
| "eval_loss": 0.8103413581848145, | |
| "eval_runtime": 1.3239, | |
| "eval_samples_per_second": 755.361, | |
| "eval_steps_per_second": 12.086, | |
| "step": 114000 | |
| }, | |
| { | |
| "epoch": 7.02, | |
| "learning_rate": 0.0003698281944553456, | |
| "loss": 0.4774, | |
| "step": 114500 | |
| }, | |
| { | |
| "epoch": 7.05, | |
| "learning_rate": 0.0003679233910694053, | |
| "loss": 0.477, | |
| "step": 115000 | |
| }, | |
| { | |
| "epoch": 7.05, | |
| "eval_loss": 0.8103800415992737, | |
| "eval_runtime": 1.3524, | |
| "eval_samples_per_second": 739.44, | |
| "eval_steps_per_second": 11.831, | |
| "step": 115000 | |
| }, | |
| { | |
| "epoch": 7.08, | |
| "learning_rate": 0.000366015835210496, | |
| "loss": 0.4767, | |
| "step": 115500 | |
| }, | |
| { | |
| "epoch": 7.11, | |
| "learning_rate": 0.0003641056103212908, | |
| "loss": 0.4772, | |
| "step": 116000 | |
| }, | |
| { | |
| "epoch": 7.11, | |
| "eval_loss": 0.8142406344413757, | |
| "eval_runtime": 1.3149, | |
| "eval_samples_per_second": 760.539, | |
| "eval_steps_per_second": 12.169, | |
| "step": 116000 | |
| }, | |
| { | |
| "epoch": 7.14, | |
| "learning_rate": 0.00036219279996121446, | |
| "loss": 0.4758, | |
| "step": 116500 | |
| }, | |
| { | |
| "epoch": 7.17, | |
| "learning_rate": 0.0003602774878027888, | |
| "loss": 0.4754, | |
| "step": 117000 | |
| }, | |
| { | |
| "epoch": 7.17, | |
| "eval_loss": 0.8158754706382751, | |
| "eval_runtime": 1.2794, | |
| "eval_samples_per_second": 781.632, | |
| "eval_steps_per_second": 12.506, | |
| "step": 117000 | |
| }, | |
| { | |
| "epoch": 7.2, | |
| "learning_rate": 0.00035835975762797245, | |
| "loss": 0.4749, | |
| "step": 117500 | |
| }, | |
| { | |
| "epoch": 7.23, | |
| "learning_rate": 0.0003564396933244957, | |
| "loss": 0.4748, | |
| "step": 118000 | |
| }, | |
| { | |
| "epoch": 7.23, | |
| "eval_loss": 0.8092423677444458, | |
| "eval_runtime": 1.319, | |
| "eval_samples_per_second": 758.146, | |
| "eval_steps_per_second": 12.13, | |
| "step": 118000 | |
| }, | |
| { | |
| "epoch": 7.26, | |
| "learning_rate": 0.0003545173788821915, | |
| "loss": 0.4742, | |
| "step": 118500 | |
| }, | |
| { | |
| "epoch": 7.29, | |
| "learning_rate": 0.00035259289838932104, | |
| "loss": 0.4738, | |
| "step": 119000 | |
| }, | |
| { | |
| "epoch": 7.29, | |
| "eval_loss": 0.8036469221115112, | |
| "eval_runtime": 1.3425, | |
| "eval_samples_per_second": 744.902, | |
| "eval_steps_per_second": 11.918, | |
| "step": 119000 | |
| }, | |
| { | |
| "epoch": 7.32, | |
| "learning_rate": 0.0003506663360288954, | |
| "loss": 0.4736, | |
| "step": 119500 | |
| }, | |
| { | |
| "epoch": 7.35, | |
| "learning_rate": 0.00034873777607499334, | |
| "loss": 0.473, | |
| "step": 120000 | |
| }, | |
| { | |
| "epoch": 7.35, | |
| "eval_loss": 0.8085124492645264, | |
| "eval_runtime": 1.3135, | |
| "eval_samples_per_second": 761.353, | |
| "eval_steps_per_second": 12.182, | |
| "step": 120000 | |
| }, | |
| { | |
| "epoch": 7.38, | |
| "learning_rate": 0.0003468073028890751, | |
| "loss": 0.4725, | |
| "step": 120500 | |
| }, | |
| { | |
| "epoch": 7.41, | |
| "learning_rate": 0.0003448750009162916, | |
| "loss": 0.4724, | |
| "step": 121000 | |
| }, | |
| { | |
| "epoch": 7.41, | |
| "eval_loss": 0.8083821535110474, | |
| "eval_runtime": 1.2822, | |
| "eval_samples_per_second": 779.917, | |
| "eval_steps_per_second": 12.479, | |
| "step": 121000 | |
| }, | |
| { | |
| "epoch": 7.44, | |
| "learning_rate": 0.00034294095468179094, | |
| "loss": 0.4721, | |
| "step": 121500 | |
| }, | |
| { | |
| "epoch": 7.48, | |
| "learning_rate": 0.00034100524878702073, | |
| "loss": 0.4714, | |
| "step": 122000 | |
| }, | |
| { | |
| "epoch": 7.48, | |
| "eval_loss": 0.8066253662109375, | |
| "eval_runtime": 1.3256, | |
| "eval_samples_per_second": 754.393, | |
| "eval_steps_per_second": 12.07, | |
| "step": 122000 | |
| }, | |
| { | |
| "epoch": 7.51, | |
| "learning_rate": 0.000339067967906028, | |
| "loss": 0.4711, | |
| "step": 122500 | |
| }, | |
| { | |
| "epoch": 7.54, | |
| "learning_rate": 0.0003371291967817539, | |
| "loss": 0.4705, | |
| "step": 123000 | |
| }, | |
| { | |
| "epoch": 7.54, | |
| "eval_loss": 0.8094301819801331, | |
| "eval_runtime": 1.2682, | |
| "eval_samples_per_second": 788.537, | |
| "eval_steps_per_second": 12.617, | |
| "step": 123000 | |
| }, | |
| { | |
| "epoch": 7.57, | |
| "learning_rate": 0.0003351890202223285, | |
| "loss": 0.4704, | |
| "step": 123500 | |
| }, | |
| { | |
| "epoch": 7.6, | |
| "learning_rate": 0.0003332475230973597, | |
| "loss": 0.4699, | |
| "step": 124000 | |
| }, | |
| { | |
| "epoch": 7.6, | |
| "eval_loss": 0.8094833493232727, | |
| "eval_runtime": 1.3119, | |
| "eval_samples_per_second": 762.242, | |
| "eval_steps_per_second": 12.196, | |
| "step": 124000 | |
| }, | |
| { | |
| "epoch": 7.63, | |
| "learning_rate": 0.00033130479033422134, | |
| "loss": 0.4697, | |
| "step": 124500 | |
| }, | |
| { | |
| "epoch": 7.66, | |
| "learning_rate": 0.0003293609069143381, | |
| "loss": 0.4693, | |
| "step": 125000 | |
| }, | |
| { | |
| "epoch": 7.66, | |
| "eval_loss": 0.8100947141647339, | |
| "eval_runtime": 1.3107, | |
| "eval_samples_per_second": 762.972, | |
| "eval_steps_per_second": 12.208, | |
| "step": 125000 | |
| }, | |
| { | |
| "epoch": 7.69, | |
| "learning_rate": 0.00032741595786946783, | |
| "loss": 0.4688, | |
| "step": 125500 | |
| }, | |
| { | |
| "epoch": 7.72, | |
| "learning_rate": 0.000325470028277983, | |
| "loss": 0.4685, | |
| "step": 126000 | |
| }, | |
| { | |
| "epoch": 7.72, | |
| "eval_loss": 0.8091694116592407, | |
| "eval_runtime": 1.2961, | |
| "eval_samples_per_second": 771.538, | |
| "eval_steps_per_second": 12.345, | |
| "step": 126000 | |
| }, | |
| { | |
| "epoch": 7.75, | |
| "learning_rate": 0.00032352320326114754, | |
| "loss": 0.4686, | |
| "step": 126500 | |
| }, | |
| { | |
| "epoch": 7.78, | |
| "learning_rate": 0.00032157556797939436, | |
| "loss": 0.4679, | |
| "step": 127000 | |
| }, | |
| { | |
| "epoch": 7.78, | |
| "eval_loss": 0.8025205135345459, | |
| "eval_runtime": 1.2855, | |
| "eval_samples_per_second": 777.911, | |
| "eval_steps_per_second": 12.447, | |
| "step": 127000 | |
| }, | |
| { | |
| "epoch": 7.81, | |
| "learning_rate": 0.00031962720762860057, | |
| "loss": 0.4676, | |
| "step": 127500 | |
| }, | |
| { | |
| "epoch": 7.84, | |
| "learning_rate": 0.0003176782074363595, | |
| "loss": 0.4672, | |
| "step": 128000 | |
| }, | |
| { | |
| "epoch": 7.84, | |
| "eval_loss": 0.800028920173645, | |
| "eval_runtime": 1.3752, | |
| "eval_samples_per_second": 727.176, | |
| "eval_steps_per_second": 11.635, | |
| "step": 128000 | |
| }, | |
| { | |
| "epoch": 7.87, | |
| "learning_rate": 0.0003157286526582535, | |
| "loss": 0.4669, | |
| "step": 128500 | |
| }, | |
| { | |
| "epoch": 7.9, | |
| "learning_rate": 0.0003137786285741241, | |
| "loss": 0.4665, | |
| "step": 129000 | |
| }, | |
| { | |
| "epoch": 7.9, | |
| "eval_loss": 0.802001416683197, | |
| "eval_runtime": 1.3448, | |
| "eval_samples_per_second": 743.623, | |
| "eval_steps_per_second": 11.898, | |
| "step": 129000 | |
| }, | |
| { | |
| "epoch": 7.94, | |
| "learning_rate": 0.0003118282204843421, | |
| "loss": 0.4663, | |
| "step": 129500 | |
| }, | |
| { | |
| "epoch": 7.97, | |
| "learning_rate": 0.0003098775137060758, | |
| "loss": 0.4659, | |
| "step": 130000 | |
| }, | |
| { | |
| "epoch": 7.97, | |
| "eval_loss": 0.8022414445877075, | |
| "eval_runtime": 1.3715, | |
| "eval_samples_per_second": 729.142, | |
| "eval_steps_per_second": 11.666, | |
| "step": 130000 | |
| }, | |
| { | |
| "epoch": 8.0, | |
| "learning_rate": 0.00030792659356955893, | |
| "loss": 0.4657, | |
| "step": 130500 | |
| }, | |
| { | |
| "epoch": 8.03, | |
| "learning_rate": 0.0003059755454143586, | |
| "loss": 0.4653, | |
| "step": 131000 | |
| }, | |
| { | |
| "epoch": 8.03, | |
| "eval_loss": 0.8070600628852844, | |
| "eval_runtime": 1.2854, | |
| "eval_samples_per_second": 777.995, | |
| "eval_steps_per_second": 12.448, | |
| "step": 131000 | |
| }, | |
| { | |
| "epoch": 8.06, | |
| "learning_rate": 0.00030402445458564144, | |
| "loss": 0.4649, | |
| "step": 131500 | |
| }, | |
| { | |
| "epoch": 8.09, | |
| "learning_rate": 0.0003020734064304411, | |
| "loss": 0.4647, | |
| "step": 132000 | |
| }, | |
| { | |
| "epoch": 8.09, | |
| "eval_loss": 0.799366295337677, | |
| "eval_runtime": 1.2985, | |
| "eval_samples_per_second": 770.128, | |
| "eval_steps_per_second": 12.322, | |
| "step": 132000 | |
| }, | |
| { | |
| "epoch": 8.12, | |
| "learning_rate": 0.00030012248629392423, | |
| "loss": 0.4647, | |
| "step": 132500 | |
| }, | |
| { | |
| "epoch": 8.15, | |
| "learning_rate": 0.00029817177951565793, | |
| "loss": 0.4639, | |
| "step": 133000 | |
| }, | |
| { | |
| "epoch": 8.15, | |
| "eval_loss": 0.8033633232116699, | |
| "eval_runtime": 1.2955, | |
| "eval_samples_per_second": 771.897, | |
| "eval_steps_per_second": 12.35, | |
| "step": 133000 | |
| }, | |
| { | |
| "epoch": 8.18, | |
| "learning_rate": 0.00029622137142587594, | |
| "loss": 0.4637, | |
| "step": 133500 | |
| }, | |
| { | |
| "epoch": 8.21, | |
| "learning_rate": 0.0002942713473417466, | |
| "loss": 0.4634, | |
| "step": 134000 | |
| }, | |
| { | |
| "epoch": 8.21, | |
| "eval_loss": 0.8022355437278748, | |
| "eval_runtime": 1.3019, | |
| "eval_samples_per_second": 768.12, | |
| "eval_steps_per_second": 12.29, | |
| "step": 134000 | |
| }, | |
| { | |
| "epoch": 8.24, | |
| "learning_rate": 0.00029232179256364054, | |
| "loss": 0.4631, | |
| "step": 134500 | |
| }, | |
| { | |
| "epoch": 8.27, | |
| "learning_rate": 0.0002903727923713994, | |
| "loss": 0.4656, | |
| "step": 135000 | |
| }, | |
| { | |
| "epoch": 8.27, | |
| "eval_loss": 0.8051571249961853, | |
| "eval_runtime": 1.3053, | |
| "eval_samples_per_second": 766.083, | |
| "eval_steps_per_second": 12.257, | |
| "step": 135000 | |
| }, | |
| { | |
| "epoch": 8.3, | |
| "learning_rate": 0.00028842443202060556, | |
| "loss": 0.4625, | |
| "step": 135500 | |
| }, | |
| { | |
| "epoch": 8.33, | |
| "learning_rate": 0.00028647679673885255, | |
| "loss": 0.4623, | |
| "step": 136000 | |
| }, | |
| { | |
| "epoch": 8.33, | |
| "eval_loss": 0.7988797426223755, | |
| "eval_runtime": 1.3231, | |
| "eval_samples_per_second": 755.795, | |
| "eval_steps_per_second": 12.093, | |
| "step": 136000 | |
| }, | |
| { | |
| "epoch": 8.36, | |
| "learning_rate": 0.000284529971722017, | |
| "loss": 0.462, | |
| "step": 136500 | |
| }, | |
| { | |
| "epoch": 8.39, | |
| "learning_rate": 0.0002825840421305321, | |
| "loss": 0.4617, | |
| "step": 137000 | |
| }, | |
| { | |
| "epoch": 8.39, | |
| "eval_loss": 0.7993477582931519, | |
| "eval_runtime": 1.2892, | |
| "eval_samples_per_second": 775.645, | |
| "eval_steps_per_second": 12.41, | |
| "step": 137000 | |
| }, | |
| { | |
| "epoch": 8.43, | |
| "learning_rate": 0.00028063909308566196, | |
| "loss": 0.4616, | |
| "step": 137500 | |
| }, | |
| { | |
| "epoch": 8.46, | |
| "learning_rate": 0.00027869520966577874, | |
| "loss": 0.4612, | |
| "step": 138000 | |
| }, | |
| { | |
| "epoch": 8.46, | |
| "eval_loss": 0.8003228902816772, | |
| "eval_runtime": 1.2968, | |
| "eval_samples_per_second": 771.107, | |
| "eval_steps_per_second": 12.338, | |
| "step": 138000 | |
| }, | |
| { | |
| "epoch": 8.49, | |
| "learning_rate": 0.00027675247690264027, | |
| "loss": 0.461, | |
| "step": 138500 | |
| }, | |
| { | |
| "epoch": 8.52, | |
| "learning_rate": 0.0002748109797776715, | |
| "loss": 0.4608, | |
| "step": 139000 | |
| }, | |
| { | |
| "epoch": 8.52, | |
| "eval_loss": 0.7989851236343384, | |
| "eval_runtime": 1.2801, | |
| "eval_samples_per_second": 781.164, | |
| "eval_steps_per_second": 12.499, | |
| "step": 139000 | |
| }, | |
| { | |
| "epoch": 8.55, | |
| "learning_rate": 0.0002728708032182461, | |
| "loss": 0.4603, | |
| "step": 139500 | |
| }, | |
| { | |
| "epoch": 8.58, | |
| "learning_rate": 0.0002709320320939721, | |
| "loss": 0.4603, | |
| "step": 140000 | |
| }, | |
| { | |
| "epoch": 8.58, | |
| "eval_loss": 0.8073873519897461, | |
| "eval_runtime": 1.2866, | |
| "eval_samples_per_second": 777.265, | |
| "eval_steps_per_second": 12.436, | |
| "step": 140000 | |
| }, | |
| { | |
| "epoch": 8.61, | |
| "learning_rate": 0.00026899475121297924, | |
| "loss": 0.46, | |
| "step": 140500 | |
| }, | |
| { | |
| "epoch": 8.64, | |
| "learning_rate": 0.00026705904531820914, | |
| "loss": 0.4597, | |
| "step": 141000 | |
| }, | |
| { | |
| "epoch": 8.64, | |
| "eval_loss": 0.8088939189910889, | |
| "eval_runtime": 1.2732, | |
| "eval_samples_per_second": 785.451, | |
| "eval_steps_per_second": 12.567, | |
| "step": 141000 | |
| }, | |
| { | |
| "epoch": 8.67, | |
| "learning_rate": 0.0002651249990837085, | |
| "loss": 0.4596, | |
| "step": 141500 | |
| }, | |
| { | |
| "epoch": 8.7, | |
| "learning_rate": 0.00026319269711092485, | |
| "loss": 0.4591, | |
| "step": 142000 | |
| }, | |
| { | |
| "epoch": 8.7, | |
| "eval_loss": 0.8040044903755188, | |
| "eval_runtime": 1.2887, | |
| "eval_samples_per_second": 775.97, | |
| "eval_steps_per_second": 12.416, | |
| "step": 142000 | |
| }, | |
| { | |
| "epoch": 8.73, | |
| "learning_rate": 0.0002612622239250066, | |
| "loss": 0.459, | |
| "step": 142500 | |
| }, | |
| { | |
| "epoch": 8.76, | |
| "learning_rate": 0.0002593336639711046, | |
| "loss": 0.4586, | |
| "step": 143000 | |
| }, | |
| { | |
| "epoch": 8.76, | |
| "eval_loss": 0.7993264198303223, | |
| "eval_runtime": 1.3493, | |
| "eval_samples_per_second": 741.132, | |
| "eval_steps_per_second": 11.858, | |
| "step": 143000 | |
| }, | |
| { | |
| "epoch": 8.79, | |
| "learning_rate": 0.000257407101610679, | |
| "loss": 0.4583, | |
| "step": 143500 | |
| }, | |
| { | |
| "epoch": 8.82, | |
| "learning_rate": 0.00025548262111780846, | |
| "loss": 0.4584, | |
| "step": 144000 | |
| }, | |
| { | |
| "epoch": 8.82, | |
| "eval_loss": 0.8003845810890198, | |
| "eval_runtime": 1.306, | |
| "eval_samples_per_second": 765.701, | |
| "eval_steps_per_second": 12.251, | |
| "step": 144000 | |
| }, | |
| { | |
| "epoch": 8.85, | |
| "learning_rate": 0.0002535603066755043, | |
| "loss": 0.4579, | |
| "step": 144500 | |
| }, | |
| { | |
| "epoch": 8.88, | |
| "learning_rate": 0.00025164024237202764, | |
| "loss": 0.4594, | |
| "step": 145000 | |
| }, | |
| { | |
| "epoch": 8.88, | |
| "eval_loss": 0.7990729808807373, | |
| "eval_runtime": 1.2616, | |
| "eval_samples_per_second": 792.645, | |
| "eval_steps_per_second": 12.682, | |
| "step": 145000 | |
| }, | |
| { | |
| "epoch": 8.92, | |
| "learning_rate": 0.00024972251219721115, | |
| "loss": 0.4573, | |
| "step": 145500 | |
| }, | |
| { | |
| "epoch": 8.95, | |
| "learning_rate": 0.00024780720003878557, | |
| "loss": 0.4574, | |
| "step": 146000 | |
| }, | |
| { | |
| "epoch": 8.95, | |
| "eval_loss": 0.7956343293190002, | |
| "eval_runtime": 1.2847, | |
| "eval_samples_per_second": 778.391, | |
| "eval_steps_per_second": 12.454, | |
| "step": 146000 | |
| }, | |
| { | |
| "epoch": 8.98, | |
| "learning_rate": 0.00024589438967870925, | |
| "loss": 0.4571, | |
| "step": 146500 | |
| }, | |
| { | |
| "epoch": 9.01, | |
| "learning_rate": 0.00024398416478950394, | |
| "loss": 0.4571, | |
| "step": 147000 | |
| }, | |
| { | |
| "epoch": 9.01, | |
| "eval_loss": 0.7948459386825562, | |
| "eval_runtime": 1.3733, | |
| "eval_samples_per_second": 728.166, | |
| "eval_steps_per_second": 11.651, | |
| "step": 147000 | |
| }, | |
| { | |
| "epoch": 9.04, | |
| "learning_rate": 0.00024207660893059467, | |
| "loss": 0.4565, | |
| "step": 147500 | |
| }, | |
| { | |
| "epoch": 9.07, | |
| "learning_rate": 0.0002401718055446543, | |
| "loss": 0.4565, | |
| "step": 148000 | |
| }, | |
| { | |
| "epoch": 9.07, | |
| "eval_loss": 0.7982079982757568, | |
| "eval_runtime": 1.249, | |
| "eval_samples_per_second": 800.631, | |
| "eval_steps_per_second": 12.81, | |
| "step": 148000 | |
| }, | |
| { | |
| "epoch": 9.1, | |
| "learning_rate": 0.00023826983795395364, | |
| "loss": 0.4561, | |
| "step": 148500 | |
| }, | |
| { | |
| "epoch": 9.13, | |
| "learning_rate": 0.00023637078935671656, | |
| "loss": 0.4563, | |
| "step": 149000 | |
| }, | |
| { | |
| "epoch": 9.13, | |
| "eval_loss": 0.7960088849067688, | |
| "eval_runtime": 1.2502, | |
| "eval_samples_per_second": 799.9, | |
| "eval_steps_per_second": 12.798, | |
| "step": 149000 | |
| }, | |
| { | |
| "epoch": 9.16, | |
| "learning_rate": 0.00023447474282348085, | |
| "loss": 0.4558, | |
| "step": 149500 | |
| }, | |
| { | |
| "epoch": 9.19, | |
| "learning_rate": 0.00023258178129346424, | |
| "loss": 0.4555, | |
| "step": 150000 | |
| }, | |
| { | |
| "epoch": 9.19, | |
| "eval_loss": 0.8043127655982971, | |
| "eval_runtime": 1.3326, | |
| "eval_samples_per_second": 750.39, | |
| "eval_steps_per_second": 12.006, | |
| "step": 150000 | |
| }, | |
| { | |
| "epoch": 9.22, | |
| "learning_rate": 0.00023069198757093631, | |
| "loss": 0.4555, | |
| "step": 150500 | |
| }, | |
| { | |
| "epoch": 9.25, | |
| "learning_rate": 0.00022880544432159663, | |
| "loss": 0.4551, | |
| "step": 151000 | |
| }, | |
| { | |
| "epoch": 9.25, | |
| "eval_loss": 0.8021363615989685, | |
| "eval_runtime": 1.3155, | |
| "eval_samples_per_second": 760.176, | |
| "eval_steps_per_second": 12.163, | |
| "step": 151000 | |
| }, | |
| { | |
| "epoch": 9.28, | |
| "learning_rate": 0.00022692223406895848, | |
| "loss": 0.4554, | |
| "step": 151500 | |
| }, | |
| { | |
| "epoch": 9.31, | |
| "learning_rate": 0.000225042439190739, | |
| "loss": 0.4549, | |
| "step": 152000 | |
| }, | |
| { | |
| "epoch": 9.31, | |
| "eval_loss": 0.7971563339233398, | |
| "eval_runtime": 1.2684, | |
| "eval_samples_per_second": 788.389, | |
| "eval_steps_per_second": 12.614, | |
| "step": 152000 | |
| }, | |
| { | |
| "epoch": 9.34, | |
| "learning_rate": 0.00022316614191525587, | |
| "loss": 0.4546, | |
| "step": 152500 | |
| }, | |
| { | |
| "epoch": 9.38, | |
| "learning_rate": 0.00022129342431783026, | |
| "loss": 0.4545, | |
| "step": 153000 | |
| }, | |
| { | |
| "epoch": 9.38, | |
| "eval_loss": 0.8002565503120422, | |
| "eval_runtime": 1.2924, | |
| "eval_samples_per_second": 773.739, | |
| "eval_steps_per_second": 12.38, | |
| "step": 153000 | |
| }, | |
| { | |
| "epoch": 9.41, | |
| "learning_rate": 0.00021942436831719677, | |
| "loss": 0.4542, | |
| "step": 153500 | |
| }, | |
| { | |
| "epoch": 9.44, | |
| "learning_rate": 0.00021755905567191967, | |
| "loss": 0.4542, | |
| "step": 154000 | |
| }, | |
| { | |
| "epoch": 9.44, | |
| "eval_loss": 0.7999687790870667, | |
| "eval_runtime": 1.2876, | |
| "eval_samples_per_second": 776.658, | |
| "eval_steps_per_second": 12.427, | |
| "step": 154000 | |
| }, | |
| { | |
| "epoch": 9.47, | |
| "learning_rate": 0.00021569756797681686, | |
| "loss": 0.4538, | |
| "step": 154500 | |
| }, | |
| { | |
| "epoch": 9.5, | |
| "learning_rate": 0.00021383998665939054, | |
| "loss": 0.4539, | |
| "step": 155000 | |
| }, | |
| { | |
| "epoch": 9.5, | |
| "eval_loss": 0.7960466146469116, | |
| "eval_runtime": 1.3325, | |
| "eval_samples_per_second": 750.454, | |
| "eval_steps_per_second": 12.007, | |
| "step": 155000 | |
| }, | |
| { | |
| "epoch": 9.53, | |
| "learning_rate": 0.00021198639297626516, | |
| "loss": 0.4537, | |
| "step": 155500 | |
| }, | |
| { | |
| "epoch": 9.56, | |
| "learning_rate": 0.0002101368680096334, | |
| "loss": 0.4533, | |
| "step": 156000 | |
| }, | |
| { | |
| "epoch": 9.56, | |
| "eval_loss": 0.8035251498222351, | |
| "eval_runtime": 1.3033, | |
| "eval_samples_per_second": 767.279, | |
| "eval_steps_per_second": 12.276, | |
| "step": 156000 | |
| }, | |
| { | |
| "epoch": 9.59, | |
| "learning_rate": 0.00020829149266370862, | |
| "loss": 0.4532, | |
| "step": 156500 | |
| }, | |
| { | |
| "epoch": 9.62, | |
| "learning_rate": 0.00020645034766118703, | |
| "loss": 0.453, | |
| "step": 157000 | |
| }, | |
| { | |
| "epoch": 9.62, | |
| "eval_loss": 0.7953096628189087, | |
| "eval_runtime": 1.3024, | |
| "eval_samples_per_second": 767.789, | |
| "eval_steps_per_second": 12.285, | |
| "step": 157000 | |
| }, | |
| { | |
| "epoch": 9.65, | |
| "learning_rate": 0.00020461351353971526, | |
| "loss": 0.4527, | |
| "step": 157500 | |
| }, | |
| { | |
| "epoch": 9.68, | |
| "learning_rate": 0.00020278107064836847, | |
| "loss": 0.4527, | |
| "step": 158000 | |
| }, | |
| { | |
| "epoch": 9.68, | |
| "eval_loss": 0.7937498688697815, | |
| "eval_runtime": 1.2999, | |
| "eval_samples_per_second": 769.267, | |
| "eval_steps_per_second": 12.308, | |
| "step": 158000 | |
| }, | |
| { | |
| "epoch": 9.71, | |
| "learning_rate": 0.00020095309914413485, | |
| "loss": 0.4526, | |
| "step": 158500 | |
| }, | |
| { | |
| "epoch": 9.74, | |
| "learning_rate": 0.00019912967898840997, | |
| "loss": 0.4524, | |
| "step": 159000 | |
| }, | |
| { | |
| "epoch": 9.74, | |
| "eval_loss": 0.8021422624588013, | |
| "eval_runtime": 1.3181, | |
| "eval_samples_per_second": 758.64, | |
| "eval_steps_per_second": 12.138, | |
| "step": 159000 | |
| }, | |
| { | |
| "epoch": 9.77, | |
| "learning_rate": 0.00019731088994349834, | |
| "loss": 0.4524, | |
| "step": 159500 | |
| }, | |
| { | |
| "epoch": 9.8, | |
| "learning_rate": 0.0001954968115691248, | |
| "loss": 0.4519, | |
| "step": 160000 | |
| }, | |
| { | |
| "epoch": 9.8, | |
| "eval_loss": 0.8028143644332886, | |
| "eval_runtime": 1.282, | |
| "eval_samples_per_second": 780.028, | |
| "eval_steps_per_second": 12.48, | |
| "step": 160000 | |
| }, | |
| { | |
| "epoch": 9.83, | |
| "learning_rate": 0.00019368752321895452, | |
| "loss": 0.4519, | |
| "step": 160500 | |
| }, | |
| { | |
| "epoch": 9.87, | |
| "learning_rate": 0.00019188310403712105, | |
| "loss": 0.4517, | |
| "step": 161000 | |
| }, | |
| { | |
| "epoch": 9.87, | |
| "eval_loss": 0.8005779385566711, | |
| "eval_runtime": 1.3237, | |
| "eval_samples_per_second": 755.47, | |
| "eval_steps_per_second": 12.088, | |
| "step": 161000 | |
| }, | |
| { | |
| "epoch": 9.9, | |
| "learning_rate": 0.00019008363295476495, | |
| "loss": 0.4516, | |
| "step": 161500 | |
| }, | |
| { | |
| "epoch": 9.93, | |
| "learning_rate": 0.0001882891886865808, | |
| "loss": 0.4514, | |
| "step": 162000 | |
| }, | |
| { | |
| "epoch": 9.93, | |
| "eval_loss": 0.8066567182540894, | |
| "eval_runtime": 1.307, | |
| "eval_samples_per_second": 765.087, | |
| "eval_steps_per_second": 12.241, | |
| "step": 162000 | |
| }, | |
| { | |
| "epoch": 9.96, | |
| "learning_rate": 0.00018649984972737404, | |
| "loss": 0.4513, | |
| "step": 162500 | |
| }, | |
| { | |
| "epoch": 9.99, | |
| "learning_rate": 0.00018471569434862749, | |
| "loss": 0.4512, | |
| "step": 163000 | |
| }, | |
| { | |
| "epoch": 9.99, | |
| "eval_loss": 0.7989670038223267, | |
| "eval_runtime": 1.3388, | |
| "eval_samples_per_second": 746.958, | |
| "eval_steps_per_second": 11.951, | |
| "step": 163000 | |
| }, | |
| { | |
| "epoch": 10.02, | |
| "learning_rate": 0.00018293680059507713, | |
| "loss": 0.4512, | |
| "step": 163500 | |
| }, | |
| { | |
| "epoch": 10.05, | |
| "learning_rate": 0.00018116324628129882, | |
| "loss": 0.4508, | |
| "step": 164000 | |
| }, | |
| { | |
| "epoch": 10.05, | |
| "eval_loss": 0.804061770439148, | |
| "eval_runtime": 1.3751, | |
| "eval_samples_per_second": 727.204, | |
| "eval_steps_per_second": 11.635, | |
| "step": 164000 | |
| }, | |
| { | |
| "epoch": 10.08, | |
| "learning_rate": 0.00017939510898830357, | |
| "loss": 0.4505, | |
| "step": 164500 | |
| }, | |
| { | |
| "epoch": 10.11, | |
| "learning_rate": 0.0001776324660601446, | |
| "loss": 0.4504, | |
| "step": 165000 | |
| }, | |
| { | |
| "epoch": 10.11, | |
| "eval_loss": 0.7995474934577942, | |
| "eval_runtime": 1.3093, | |
| "eval_samples_per_second": 763.796, | |
| "eval_steps_per_second": 12.221, | |
| "step": 165000 | |
| }, | |
| { | |
| "epoch": 10.14, | |
| "learning_rate": 0.00017587539460053368, | |
| "loss": 0.4505, | |
| "step": 165500 | |
| }, | |
| { | |
| "epoch": 10.17, | |
| "learning_rate": 0.0001741239714694688, | |
| "loss": 0.4501, | |
| "step": 166000 | |
| }, | |
| { | |
| "epoch": 10.17, | |
| "eval_loss": 0.7978888154029846, | |
| "eval_runtime": 1.2913, | |
| "eval_samples_per_second": 774.39, | |
| "eval_steps_per_second": 12.39, | |
| "step": 166000 | |
| }, | |
| { | |
| "epoch": 10.2, | |
| "learning_rate": 0.0001723782732798713, | |
| "loss": 0.4501, | |
| "step": 166500 | |
| }, | |
| { | |
| "epoch": 10.23, | |
| "learning_rate": 0.00017063837639423517, | |
| "loss": 0.4499, | |
| "step": 167000 | |
| }, | |
| { | |
| "epoch": 10.23, | |
| "eval_loss": 0.796922504901886, | |
| "eval_runtime": 1.3094, | |
| "eval_samples_per_second": 763.7, | |
| "eval_steps_per_second": 12.219, | |
| "step": 167000 | |
| }, | |
| { | |
| "epoch": 10.26, | |
| "learning_rate": 0.00016890435692128712, | |
| "loss": 0.4498, | |
| "step": 167500 | |
| }, | |
| { | |
| "epoch": 10.29, | |
| "learning_rate": 0.0001671762907126559, | |
| "loss": 0.4497, | |
| "step": 168000 | |
| }, | |
| { | |
| "epoch": 10.29, | |
| "eval_loss": 0.8040737509727478, | |
| "eval_runtime": 1.3701, | |
| "eval_samples_per_second": 729.885, | |
| "eval_steps_per_second": 11.678, | |
| "step": 168000 | |
| }, | |
| { | |
| "epoch": 10.32, | |
| "learning_rate": 0.00016545425335955596, | |
| "loss": 0.4498, | |
| "step": 168500 | |
| }, | |
| { | |
| "epoch": 10.36, | |
| "learning_rate": 0.00016373832018947945, | |
| "loss": 0.4495, | |
| "step": 169000 | |
| }, | |
| { | |
| "epoch": 10.36, | |
| "eval_loss": 0.8050036430358887, | |
| "eval_runtime": 1.3107, | |
| "eval_samples_per_second": 762.923, | |
| "eval_steps_per_second": 12.207, | |
| "step": 169000 | |
| }, | |
| { | |
| "epoch": 10.39, | |
| "learning_rate": 0.0001620285662629024, | |
| "loss": 0.4492, | |
| "step": 169500 | |
| }, | |
| { | |
| "epoch": 10.42, | |
| "learning_rate": 0.0001603250663700002, | |
| "loss": 0.4492, | |
| "step": 170000 | |
| }, | |
| { | |
| "epoch": 10.42, | |
| "eval_loss": 0.7998891472816467, | |
| "eval_runtime": 1.3642, | |
| "eval_samples_per_second": 733.005, | |
| "eval_steps_per_second": 11.728, | |
| "step": 170000 | |
| }, | |
| { | |
| "epoch": 10.45, | |
| "learning_rate": 0.00015862789502737648, | |
| "loss": 0.4491, | |
| "step": 170500 | |
| }, | |
| { | |
| "epoch": 10.48, | |
| "learning_rate": 0.00015693712647480446, | |
| "loss": 0.4494, | |
| "step": 171000 | |
| }, | |
| { | |
| "epoch": 10.48, | |
| "eval_loss": 0.7991927266120911, | |
| "eval_runtime": 1.3282, | |
| "eval_samples_per_second": 752.902, | |
| "eval_steps_per_second": 12.046, | |
| "step": 171000 | |
| }, | |
| { | |
| "epoch": 10.51, | |
| "learning_rate": 0.00015525283467197743, | |
| "loss": 0.4487, | |
| "step": 171500 | |
| }, | |
| { | |
| "epoch": 10.54, | |
| "learning_rate": 0.00015357509329527556, | |
| "loss": 0.4486, | |
| "step": 172000 | |
| }, | |
| { | |
| "epoch": 10.54, | |
| "eval_loss": 0.8018996715545654, | |
| "eval_runtime": 1.3294, | |
| "eval_samples_per_second": 752.227, | |
| "eval_steps_per_second": 12.036, | |
| "step": 172000 | |
| }, | |
| { | |
| "epoch": 10.57, | |
| "learning_rate": 0.00015190397573454158, | |
| "loss": 0.4488, | |
| "step": 172500 | |
| }, | |
| { | |
| "epoch": 10.6, | |
| "learning_rate": 0.00015023955508987127, | |
| "loss": 0.4485, | |
| "step": 173000 | |
| }, | |
| { | |
| "epoch": 10.6, | |
| "eval_loss": 0.8025578260421753, | |
| "eval_runtime": 1.3279, | |
| "eval_samples_per_second": 753.043, | |
| "eval_steps_per_second": 12.049, | |
| "step": 173000 | |
| }, | |
| { | |
| "epoch": 10.63, | |
| "learning_rate": 0.00014858190416841565, | |
| "loss": 0.4483, | |
| "step": 173500 | |
| }, | |
| { | |
| "epoch": 10.66, | |
| "learning_rate": 0.00014693109548119591, | |
| "loss": 0.4483, | |
| "step": 174000 | |
| }, | |
| { | |
| "epoch": 10.66, | |
| "eval_loss": 0.8008602261543274, | |
| "eval_runtime": 1.2568, | |
| "eval_samples_per_second": 795.696, | |
| "eval_steps_per_second": 12.731, | |
| "step": 174000 | |
| }, | |
| { | |
| "epoch": 10.69, | |
| "learning_rate": 0.00014528720123993226, | |
| "loss": 0.448, | |
| "step": 174500 | |
| }, | |
| { | |
| "epoch": 10.72, | |
| "learning_rate": 0.0001436502933538841, | |
| "loss": 0.448, | |
| "step": 175000 | |
| }, | |
| { | |
| "epoch": 10.72, | |
| "eval_loss": 0.8021511435508728, | |
| "eval_runtime": 1.2829, | |
| "eval_samples_per_second": 779.473, | |
| "eval_steps_per_second": 12.472, | |
| "step": 175000 | |
| }, | |
| { | |
| "epoch": 10.75, | |
| "learning_rate": 0.00014202044342670508, | |
| "loss": 0.448, | |
| "step": 175500 | |
| }, | |
| { | |
| "epoch": 10.78, | |
| "learning_rate": 0.00014039772275331125, | |
| "loss": 0.4479, | |
| "step": 176000 | |
| }, | |
| { | |
| "epoch": 10.78, | |
| "eval_loss": 0.8016372323036194, | |
| "eval_runtime": 1.3768, | |
| "eval_samples_per_second": 726.333, | |
| "eval_steps_per_second": 11.621, | |
| "step": 176000 | |
| }, | |
| { | |
| "epoch": 10.81, | |
| "learning_rate": 0.00013878220231676152, | |
| "loss": 0.4475, | |
| "step": 176500 | |
| }, | |
| { | |
| "epoch": 10.85, | |
| "learning_rate": 0.00013717395278515355, | |
| "loss": 0.4476, | |
| "step": 177000 | |
| }, | |
| { | |
| "epoch": 10.85, | |
| "eval_loss": 0.7988106608390808, | |
| "eval_runtime": 1.2962, | |
| "eval_samples_per_second": 771.516, | |
| "eval_steps_per_second": 12.344, | |
| "step": 177000 | |
| }, | |
| { | |
| "epoch": 10.88, | |
| "learning_rate": 0.00013557304450853162, | |
| "loss": 0.4472, | |
| "step": 177500 | |
| }, | |
| { | |
| "epoch": 10.91, | |
| "learning_rate": 0.00013397954751581014, | |
| "loss": 0.4474, | |
| "step": 178000 | |
| }, | |
| { | |
| "epoch": 10.91, | |
| "eval_loss": 0.8025058507919312, | |
| "eval_runtime": 1.3447, | |
| "eval_samples_per_second": 743.664, | |
| "eval_steps_per_second": 11.899, | |
| "step": 178000 | |
| }, | |
| { | |
| "epoch": 10.94, | |
| "learning_rate": 0.00013239353151170983, | |
| "loss": 0.4471, | |
| "step": 178500 | |
| }, | |
| { | |
| "epoch": 10.97, | |
| "learning_rate": 0.00013081506587370853, | |
| "loss": 0.4471, | |
| "step": 179000 | |
| }, | |
| { | |
| "epoch": 10.97, | |
| "eval_loss": 0.8035358190536499, | |
| "eval_runtime": 1.362, | |
| "eval_samples_per_second": 734.202, | |
| "eval_steps_per_second": 11.747, | |
| "step": 179000 | |
| }, | |
| { | |
| "epoch": 11.0, | |
| "learning_rate": 0.00012924421964900695, | |
| "loss": 0.447, | |
| "step": 179500 | |
| }, | |
| { | |
| "epoch": 11.03, | |
| "learning_rate": 0.00012768106155150758, | |
| "loss": 0.4471, | |
| "step": 180000 | |
| }, | |
| { | |
| "epoch": 11.03, | |
| "eval_loss": 0.7982646226882935, | |
| "eval_runtime": 1.3411, | |
| "eval_samples_per_second": 745.664, | |
| "eval_steps_per_second": 11.931, | |
| "step": 180000 | |
| }, | |
| { | |
| "epoch": 11.06, | |
| "learning_rate": 0.00012612565995880976, | |
| "loss": 0.4467, | |
| "step": 180500 | |
| }, | |
| { | |
| "epoch": 11.09, | |
| "learning_rate": 0.00012457808290921774, | |
| "loss": 0.4467, | |
| "step": 181000 | |
| }, | |
| { | |
| "epoch": 11.09, | |
| "eval_loss": 0.8010080456733704, | |
| "eval_runtime": 1.2919, | |
| "eval_samples_per_second": 774.08, | |
| "eval_steps_per_second": 12.385, | |
| "step": 181000 | |
| }, | |
| { | |
| "epoch": 11.12, | |
| "learning_rate": 0.00012303839809876525, | |
| "loss": 0.4466, | |
| "step": 181500 | |
| }, | |
| { | |
| "epoch": 11.15, | |
| "learning_rate": 0.00012150667287825382, | |
| "loss": 0.4463, | |
| "step": 182000 | |
| }, | |
| { | |
| "epoch": 11.15, | |
| "eval_loss": 0.8034773468971252, | |
| "eval_runtime": 1.2819, | |
| "eval_samples_per_second": 780.103, | |
| "eval_steps_per_second": 12.482, | |
| "step": 182000 | |
| }, | |
| { | |
| "epoch": 11.18, | |
| "learning_rate": 0.00011998297425030656, | |
| "loss": 0.4464, | |
| "step": 182500 | |
| }, | |
| { | |
| "epoch": 11.21, | |
| "learning_rate": 0.00011846736886643775, | |
| "loss": 0.4463, | |
| "step": 183000 | |
| }, | |
| { | |
| "epoch": 11.21, | |
| "eval_loss": 0.8048831820487976, | |
| "eval_runtime": 1.3614, | |
| "eval_samples_per_second": 734.528, | |
| "eval_steps_per_second": 11.752, | |
| "step": 183000 | |
| }, | |
| { | |
| "epoch": 11.24, | |
| "learning_rate": 0.00011695992302413651, | |
| "loss": 0.4462, | |
| "step": 183500 | |
| }, | |
| { | |
| "epoch": 11.27, | |
| "learning_rate": 0.00011546070266396771, | |
| "loss": 0.4462, | |
| "step": 184000 | |
| }, | |
| { | |
| "epoch": 11.27, | |
| "eval_loss": 0.7998443841934204, | |
| "eval_runtime": 1.3341, | |
| "eval_samples_per_second": 749.594, | |
| "eval_steps_per_second": 11.994, | |
| "step": 184000 | |
| }, | |
| { | |
| "epoch": 11.31, | |
| "learning_rate": 0.00011396977336668645, | |
| "loss": 0.4459, | |
| "step": 184500 | |
| }, | |
| { | |
| "epoch": 11.34, | |
| "learning_rate": 0.00011248720035037021, | |
| "loss": 0.4459, | |
| "step": 185000 | |
| }, | |
| { | |
| "epoch": 11.34, | |
| "eval_loss": 0.7987710237503052, | |
| "eval_runtime": 1.3437, | |
| "eval_samples_per_second": 744.203, | |
| "eval_steps_per_second": 11.907, | |
| "step": 185000 | |
| }, | |
| { | |
| "epoch": 11.37, | |
| "learning_rate": 0.00011101304846756577, | |
| "loss": 0.4458, | |
| "step": 185500 | |
| }, | |
| { | |
| "epoch": 11.4, | |
| "learning_rate": 0.00010954738220245183, | |
| "loss": 0.4457, | |
| "step": 186000 | |
| }, | |
| { | |
| "epoch": 11.4, | |
| "eval_loss": 0.8063639402389526, | |
| "eval_runtime": 1.2501, | |
| "eval_samples_per_second": 799.917, | |
| "eval_steps_per_second": 12.799, | |
| "step": 186000 | |
| }, | |
| { | |
| "epoch": 11.43, | |
| "learning_rate": 0.00010809026566801912, | |
| "loss": 0.4457, | |
| "step": 186500 | |
| }, | |
| { | |
| "epoch": 11.46, | |
| "learning_rate": 0.00010664176260326507, | |
| "loss": 0.4456, | |
| "step": 187000 | |
| }, | |
| { | |
| "epoch": 11.46, | |
| "eval_loss": 0.8042049407958984, | |
| "eval_runtime": 1.3155, | |
| "eval_samples_per_second": 760.186, | |
| "eval_steps_per_second": 12.163, | |
| "step": 187000 | |
| }, | |
| { | |
| "epoch": 11.49, | |
| "learning_rate": 0.00010520193637040641, | |
| "loss": 0.4454, | |
| "step": 187500 | |
| }, | |
| { | |
| "epoch": 11.52, | |
| "learning_rate": 0.00010377084995210682, | |
| "loss": 0.4454, | |
| "step": 188000 | |
| }, | |
| { | |
| "epoch": 11.52, | |
| "eval_loss": 0.7998358607292175, | |
| "eval_runtime": 1.3257, | |
| "eval_samples_per_second": 754.322, | |
| "eval_steps_per_second": 12.069, | |
| "step": 188000 | |
| }, | |
| { | |
| "epoch": 11.55, | |
| "learning_rate": 0.00010234856594872234, | |
| "loss": 0.4452, | |
| "step": 188500 | |
| }, | |
| { | |
| "epoch": 11.58, | |
| "learning_rate": 0.00010093514657556295, | |
| "loss": 0.4453, | |
| "step": 189000 | |
| }, | |
| { | |
| "epoch": 11.58, | |
| "eval_loss": 0.8026143908500671, | |
| "eval_runtime": 1.2929, | |
| "eval_samples_per_second": 773.468, | |
| "eval_steps_per_second": 12.375, | |
| "step": 189000 | |
| }, | |
| { | |
| "epoch": 11.61, | |
| "learning_rate": 9.953065366017073e-05, | |
| "loss": 0.4451, | |
| "step": 189500 | |
| }, | |
| { | |
| "epoch": 11.64, | |
| "learning_rate": 9.813514863961586e-05, | |
| "loss": 0.4449, | |
| "step": 190000 | |
| }, | |
| { | |
| "epoch": 11.64, | |
| "eval_loss": 0.7992528080940247, | |
| "eval_runtime": 1.2891, | |
| "eval_samples_per_second": 775.752, | |
| "eval_steps_per_second": 12.412, | |
| "step": 190000 | |
| }, | |
| { | |
| "epoch": 11.67, | |
| "learning_rate": 9.67486925578087e-05, | |
| "loss": 0.4449, | |
| "step": 190500 | |
| }, | |
| { | |
| "epoch": 11.7, | |
| "learning_rate": 9.537134606282964e-05, | |
| "loss": 0.4448, | |
| "step": 191000 | |
| }, | |
| { | |
| "epoch": 11.7, | |
| "eval_loss": 0.8036643266677856, | |
| "eval_runtime": 1.3143, | |
| "eval_samples_per_second": 760.834, | |
| "eval_steps_per_second": 12.173, | |
| "step": 191000 | |
| }, | |
| { | |
| "epoch": 11.73, | |
| "learning_rate": 9.400316940427652e-05, | |
| "loss": 0.4447, | |
| "step": 191500 | |
| }, | |
| { | |
| "epoch": 11.76, | |
| "learning_rate": 9.264422243062844e-05, | |
| "loss": 0.4448, | |
| "step": 192000 | |
| }, | |
| { | |
| "epoch": 11.76, | |
| "eval_loss": 0.8037863969802856, | |
| "eval_runtime": 1.3425, | |
| "eval_samples_per_second": 744.863, | |
| "eval_steps_per_second": 11.918, | |
| "step": 192000 | |
| }, | |
| { | |
| "epoch": 11.8, | |
| "learning_rate": 9.129456458662876e-05, | |
| "loss": 0.4445, | |
| "step": 192500 | |
| }, | |
| { | |
| "epoch": 11.83, | |
| "learning_rate": 8.995425491068365e-05, | |
| "loss": 0.4445, | |
| "step": 193000 | |
| }, | |
| { | |
| "epoch": 11.83, | |
| "eval_loss": 0.8010460734367371, | |
| "eval_runtime": 1.3665, | |
| "eval_samples_per_second": 731.803, | |
| "eval_steps_per_second": 11.709, | |
| "step": 193000 | |
| }, | |
| { | |
| "epoch": 11.86, | |
| "learning_rate": 8.862335203228025e-05, | |
| "loss": 0.4444, | |
| "step": 193500 | |
| }, | |
| { | |
| "epoch": 11.89, | |
| "learning_rate": 8.73019141694222e-05, | |
| "loss": 0.4442, | |
| "step": 194000 | |
| }, | |
| { | |
| "epoch": 11.89, | |
| "eval_loss": 0.7977059483528137, | |
| "eval_runtime": 1.3628, | |
| "eval_samples_per_second": 733.81, | |
| "eval_steps_per_second": 11.741, | |
| "step": 194000 | |
| }, | |
| { | |
| "epoch": 11.92, | |
| "learning_rate": 8.598999912608229e-05, | |
| "loss": 0.4442, | |
| "step": 194500 | |
| }, | |
| { | |
| "epoch": 11.95, | |
| "learning_rate": 8.468766428967468e-05, | |
| "loss": 0.4443, | |
| "step": 195000 | |
| }, | |
| { | |
| "epoch": 11.95, | |
| "eval_loss": 0.80078125, | |
| "eval_runtime": 1.316, | |
| "eval_samples_per_second": 759.905, | |
| "eval_steps_per_second": 12.158, | |
| "step": 195000 | |
| }, | |
| { | |
| "epoch": 11.98, | |
| "learning_rate": 8.339496662854397e-05, | |
| "loss": 0.444, | |
| "step": 195500 | |
| }, | |
| { | |
| "epoch": 12.01, | |
| "learning_rate": 8.211196268947367e-05, | |
| "loss": 0.4441, | |
| "step": 196000 | |
| }, | |
| { | |
| "epoch": 12.01, | |
| "eval_loss": 0.8048492670059204, | |
| "eval_runtime": 1.3312, | |
| "eval_samples_per_second": 751.193, | |
| "eval_steps_per_second": 12.019, | |
| "step": 196000 | |
| }, | |
| { | |
| "epoch": 12.04, | |
| "learning_rate": 8.083870859521251e-05, | |
| "loss": 0.4441, | |
| "step": 196500 | |
| }, | |
| { | |
| "epoch": 12.07, | |
| "learning_rate": 7.95752600420192e-05, | |
| "loss": 0.4439, | |
| "step": 197000 | |
| }, | |
| { | |
| "epoch": 12.07, | |
| "eval_loss": 0.8033810257911682, | |
| "eval_runtime": 1.3277, | |
| "eval_samples_per_second": 753.203, | |
| "eval_steps_per_second": 12.051, | |
| "step": 197000 | |
| }, | |
| { | |
| "epoch": 12.1, | |
| "learning_rate": 7.832167229722666e-05, | |
| "loss": 0.4438, | |
| "step": 197500 | |
| }, | |
| { | |
| "epoch": 12.13, | |
| "learning_rate": 7.707800019682362e-05, | |
| "loss": 0.4438, | |
| "step": 198000 | |
| }, | |
| { | |
| "epoch": 12.13, | |
| "eval_loss": 0.8051833510398865, | |
| "eval_runtime": 1.2928, | |
| "eval_samples_per_second": 773.488, | |
| "eval_steps_per_second": 12.376, | |
| "step": 198000 | |
| }, | |
| { | |
| "epoch": 12.16, | |
| "learning_rate": 7.5844298143057e-05, | |
| "loss": 0.4437, | |
| "step": 198500 | |
| }, | |
| { | |
| "epoch": 12.19, | |
| "learning_rate": 7.462062010205106e-05, | |
| "loss": 0.4437, | |
| "step": 199000 | |
| }, | |
| { | |
| "epoch": 12.19, | |
| "eval_loss": 0.8041102290153503, | |
| "eval_runtime": 1.3104, | |
| "eval_samples_per_second": 763.133, | |
| "eval_steps_per_second": 12.21, | |
| "step": 199000 | |
| }, | |
| { | |
| "epoch": 12.22, | |
| "learning_rate": 7.340701960144751e-05, | |
| "loss": 0.4437, | |
| "step": 199500 | |
| }, | |
| { | |
| "epoch": 12.25, | |
| "learning_rate": 7.220354972806392e-05, | |
| "loss": 0.4434, | |
| "step": 200000 | |
| }, | |
| { | |
| "epoch": 12.25, | |
| "eval_loss": 0.8000948429107666, | |
| "eval_runtime": 1.2692, | |
| "eval_samples_per_second": 787.882, | |
| "eval_steps_per_second": 12.606, | |
| "step": 200000 | |
| }, | |
| { | |
| "epoch": 12.29, | |
| "learning_rate": 7.101026312557103e-05, | |
| "loss": 0.4433, | |
| "step": 200500 | |
| }, | |
| { | |
| "epoch": 12.32, | |
| "learning_rate": 6.982721199219075e-05, | |
| "loss": 0.4434, | |
| "step": 201000 | |
| }, | |
| { | |
| "epoch": 12.32, | |
| "eval_loss": 0.8013458251953125, | |
| "eval_runtime": 1.4369, | |
| "eval_samples_per_second": 695.945, | |
| "eval_steps_per_second": 11.135, | |
| "step": 201000 | |
| }, | |
| { | |
| "epoch": 12.35, | |
| "learning_rate": 6.865444807841203e-05, | |
| "loss": 0.4434, | |
| "step": 201500 | |
| }, | |
| { | |
| "epoch": 12.38, | |
| "learning_rate": 6.749202268472787e-05, | |
| "loss": 0.4432, | |
| "step": 202000 | |
| }, | |
| { | |
| "epoch": 12.38, | |
| "eval_loss": 0.7986797094345093, | |
| "eval_runtime": 1.2992, | |
| "eval_samples_per_second": 769.697, | |
| "eval_steps_per_second": 12.315, | |
| "step": 202000 | |
| }, | |
| { | |
| "epoch": 12.41, | |
| "learning_rate": 6.633998665939053e-05, | |
| "loss": 0.4433, | |
| "step": 202500 | |
| }, | |
| { | |
| "epoch": 12.44, | |
| "learning_rate": 6.519839039618793e-05, | |
| "loss": 0.443, | |
| "step": 203000 | |
| }, | |
| { | |
| "epoch": 12.44, | |
| "eval_loss": 0.7961978316307068, | |
| "eval_runtime": 1.3561, | |
| "eval_samples_per_second": 737.396, | |
| "eval_steps_per_second": 11.798, | |
| "step": 203000 | |
| }, | |
| { | |
| "epoch": 12.47, | |
| "learning_rate": 6.406728383223897e-05, | |
| "loss": 0.4431, | |
| "step": 203500 | |
| }, | |
| { | |
| "epoch": 12.5, | |
| "learning_rate": 6.294671644580888e-05, | |
| "loss": 0.443, | |
| "step": 204000 | |
| }, | |
| { | |
| "epoch": 12.5, | |
| "eval_loss": 0.8016523122787476, | |
| "eval_runtime": 1.3833, | |
| "eval_samples_per_second": 722.928, | |
| "eval_steps_per_second": 11.567, | |
| "step": 204000 | |
| }, | |
| { | |
| "epoch": 12.53, | |
| "learning_rate": 6.18367372541454e-05, | |
| "loss": 0.443, | |
| "step": 204500 | |
| }, | |
| { | |
| "epoch": 12.56, | |
| "learning_rate": 6.073739481133408e-05, | |
| "loss": 0.4429, | |
| "step": 205000 | |
| }, | |
| { | |
| "epoch": 12.56, | |
| "eval_loss": 0.7996025681495667, | |
| "eval_runtime": 1.274, | |
| "eval_samples_per_second": 784.9, | |
| "eval_steps_per_second": 12.558, | |
| "step": 205000 | |
| }, | |
| { | |
| "epoch": 12.59, | |
| "learning_rate": 5.96487372061749e-05, | |
| "loss": 0.4428, | |
| "step": 205500 | |
| }, | |
| { | |
| "epoch": 12.62, | |
| "learning_rate": 5.857081206007811e-05, | |
| "loss": 0.4428, | |
| "step": 206000 | |
| }, | |
| { | |
| "epoch": 12.62, | |
| "eval_loss": 0.7997360229492188, | |
| "eval_runtime": 1.3318, | |
| "eval_samples_per_second": 750.873, | |
| "eval_steps_per_second": 12.014, | |
| "step": 206000 | |
| }, | |
| { | |
| "epoch": 12.65, | |
| "learning_rate": 5.750366652498169e-05, | |
| "loss": 0.4426, | |
| "step": 206500 | |
| }, | |
| { | |
| "epoch": 12.68, | |
| "learning_rate": 5.6447347281288474e-05, | |
| "loss": 0.4425, | |
| "step": 207000 | |
| }, | |
| { | |
| "epoch": 12.68, | |
| "eval_loss": 0.801701009273529, | |
| "eval_runtime": 1.3022, | |
| "eval_samples_per_second": 767.911, | |
| "eval_steps_per_second": 12.287, | |
| "step": 207000 | |
| }, | |
| { | |
| "epoch": 12.71, | |
| "learning_rate": 5.540190053582401e-05, | |
| "loss": 0.4426, | |
| "step": 207500 | |
| }, | |
| { | |
| "epoch": 12.75, | |
| "learning_rate": 5.43673720198159e-05, | |
| "loss": 0.4424, | |
| "step": 208000 | |
| }, | |
| { | |
| "epoch": 12.75, | |
| "eval_loss": 0.8007999062538147, | |
| "eval_runtime": 1.3254, | |
| "eval_samples_per_second": 754.474, | |
| "eval_steps_per_second": 12.072, | |
| "step": 208000 | |
| }, | |
| { | |
| "epoch": 12.78, | |
| "learning_rate": 5.33438069868928e-05, | |
| "loss": 0.4425, | |
| "step": 208500 | |
| }, | |
| { | |
| "epoch": 12.81, | |
| "learning_rate": 5.2331250211105e-05, | |
| "loss": 0.4424, | |
| "step": 209000 | |
| }, | |
| { | |
| "epoch": 12.81, | |
| "eval_loss": 0.8051698207855225, | |
| "eval_runtime": 1.3459, | |
| "eval_samples_per_second": 742.979, | |
| "eval_steps_per_second": 11.888, | |
| "step": 209000 | |
| }, | |
| { | |
| "epoch": 12.84, | |
| "learning_rate": 5.13297459849662e-05, | |
| "loss": 0.4424, | |
| "step": 209500 | |
| }, | |
| { | |
| "epoch": 12.87, | |
| "learning_rate": 5.0339338117515696e-05, | |
| "loss": 0.4422, | |
| "step": 210000 | |
| }, | |
| { | |
| "epoch": 12.87, | |
| "eval_loss": 0.8003759980201721, | |
| "eval_runtime": 1.333, | |
| "eval_samples_per_second": 750.181, | |
| "eval_steps_per_second": 12.003, | |
| "step": 210000 | |
| }, | |
| { | |
| "epoch": 12.9, | |
| "learning_rate": 4.9360069932402115e-05, | |
| "loss": 0.4423, | |
| "step": 210500 | |
| }, | |
| { | |
| "epoch": 12.93, | |
| "learning_rate": 4.839198426598824e-05, | |
| "loss": 0.4421, | |
| "step": 211000 | |
| }, | |
| { | |
| "epoch": 12.93, | |
| "eval_loss": 0.8022798895835876, | |
| "eval_runtime": 1.2766, | |
| "eval_samples_per_second": 783.354, | |
| "eval_steps_per_second": 12.534, | |
| "step": 211000 | |
| }, | |
| { | |
| "epoch": 12.96, | |
| "learning_rate": 4.7435123465477156e-05, | |
| "loss": 0.4422, | |
| "step": 211500 | |
| }, | |
| { | |
| "epoch": 12.99, | |
| "learning_rate": 4.6489529387060306e-05, | |
| "loss": 0.4421, | |
| "step": 212000 | |
| }, | |
| { | |
| "epoch": 12.99, | |
| "eval_loss": 0.8013637661933899, | |
| "eval_runtime": 1.3245, | |
| "eval_samples_per_second": 754.989, | |
| "eval_steps_per_second": 12.08, | |
| "step": 212000 | |
| }, | |
| { | |
| "epoch": 13.02, | |
| "learning_rate": 4.555524339408575e-05, | |
| "loss": 0.4421, | |
| "step": 212500 | |
| }, | |
| { | |
| "epoch": 13.05, | |
| "learning_rate": 4.46323063552496e-05, | |
| "loss": 0.442, | |
| "step": 213000 | |
| }, | |
| { | |
| "epoch": 13.05, | |
| "eval_loss": 0.7998749017715454, | |
| "eval_runtime": 1.3047, | |
| "eval_samples_per_second": 766.488, | |
| "eval_steps_per_second": 12.264, | |
| "step": 213000 | |
| }, | |
| { | |
| "epoch": 13.08, | |
| "learning_rate": 4.3720758642807817e-05, | |
| "loss": 0.4419, | |
| "step": 213500 | |
| }, | |
| { | |
| "epoch": 13.11, | |
| "learning_rate": 4.2820640130810446e-05, | |
| "loss": 0.4418, | |
| "step": 214000 | |
| }, | |
| { | |
| "epoch": 13.11, | |
| "eval_loss": 0.8018962144851685, | |
| "eval_runtime": 1.2527, | |
| "eval_samples_per_second": 798.295, | |
| "eval_steps_per_second": 12.773, | |
| "step": 214000 | |
| }, | |
| { | |
| "epoch": 13.14, | |
| "learning_rate": 4.193199019335709e-05, | |
| "loss": 0.4418, | |
| "step": 214500 | |
| }, | |
| { | |
| "epoch": 13.17, | |
| "learning_rate": 4.105484770287477e-05, | |
| "loss": 0.4417, | |
| "step": 215000 | |
| }, | |
| { | |
| "epoch": 13.17, | |
| "eval_loss": 0.7995709180831909, | |
| "eval_runtime": 1.3104, | |
| "eval_samples_per_second": 763.154, | |
| "eval_steps_per_second": 12.21, | |
| "step": 215000 | |
| }, | |
| { | |
| "epoch": 13.2, | |
| "learning_rate": 4.018925102841773e-05, | |
| "loss": 0.4416, | |
| "step": 215500 | |
| }, | |
| { | |
| "epoch": 13.24, | |
| "learning_rate": 3.9335238033988484e-05, | |
| "loss": 0.4416, | |
| "step": 216000 | |
| }, | |
| { | |
| "epoch": 13.24, | |
| "eval_loss": 0.8006547689437866, | |
| "eval_runtime": 1.3204, | |
| "eval_samples_per_second": 757.326, | |
| "eval_steps_per_second": 12.117, | |
| "step": 216000 | |
| }, | |
| { | |
| "epoch": 13.27, | |
| "learning_rate": 3.8492846076882115e-05, | |
| "loss": 0.4414, | |
| "step": 216500 | |
| }, | |
| { | |
| "epoch": 13.3, | |
| "learning_rate": 3.766211200605186e-05, | |
| "loss": 0.4414, | |
| "step": 217000 | |
| }, | |
| { | |
| "epoch": 13.3, | |
| "eval_loss": 0.8029482960700989, | |
| "eval_runtime": 1.3397, | |
| "eval_samples_per_second": 746.459, | |
| "eval_steps_per_second": 11.943, | |
| "step": 217000 | |
| }, | |
| { | |
| "epoch": 13.33, | |
| "learning_rate": 3.684307216049706e-05, | |
| "loss": 0.4415, | |
| "step": 217500 | |
| }, | |
| { | |
| "epoch": 13.36, | |
| "learning_rate": 3.6035762367673984e-05, | |
| "loss": 0.4415, | |
| "step": 218000 | |
| }, | |
| { | |
| "epoch": 13.36, | |
| "eval_loss": 0.7989561557769775, | |
| "eval_runtime": 1.2937, | |
| "eval_samples_per_second": 773.007, | |
| "eval_steps_per_second": 12.368, | |
| "step": 218000 | |
| }, | |
| { | |
| "epoch": 13.39, | |
| "learning_rate": 3.52402179419282e-05, | |
| "loss": 0.4414, | |
| "step": 218500 | |
| }, | |
| { | |
| "epoch": 13.42, | |
| "learning_rate": 3.4456473682950194e-05, | |
| "loss": 0.4413, | |
| "step": 219000 | |
| }, | |
| { | |
| "epoch": 13.42, | |
| "eval_loss": 0.7997331619262695, | |
| "eval_runtime": 1.2967, | |
| "eval_samples_per_second": 771.217, | |
| "eval_steps_per_second": 12.339, | |
| "step": 219000 | |
| }, | |
| { | |
| "epoch": 13.45, | |
| "learning_rate": 3.3684563874252695e-05, | |
| "loss": 0.4412, | |
| "step": 219500 | |
| }, | |
| { | |
| "epoch": 13.48, | |
| "learning_rate": 3.2924522281671496e-05, | |
| "loss": 0.4413, | |
| "step": 220000 | |
| }, | |
| { | |
| "epoch": 13.48, | |
| "eval_loss": 0.7997393608093262, | |
| "eval_runtime": 1.3064, | |
| "eval_samples_per_second": 765.469, | |
| "eval_steps_per_second": 12.247, | |
| "step": 220000 | |
| }, | |
| { | |
| "epoch": 13.51, | |
| "learning_rate": 3.2176382151888054e-05, | |
| "loss": 0.4414, | |
| "step": 220500 | |
| }, | |
| { | |
| "epoch": 13.54, | |
| "learning_rate": 3.1440176210975204e-05, | |
| "loss": 0.4412, | |
| "step": 221000 | |
| }, | |
| { | |
| "epoch": 13.54, | |
| "eval_loss": 0.7996479272842407, | |
| "eval_runtime": 1.3112, | |
| "eval_samples_per_second": 762.651, | |
| "eval_steps_per_second": 12.202, | |
| "step": 221000 | |
| }, | |
| { | |
| "epoch": 13.57, | |
| "learning_rate": 3.071593666296585e-05, | |
| "loss": 0.4411, | |
| "step": 221500 | |
| }, | |
| { | |
| "epoch": 13.6, | |
| "learning_rate": 3.000369518844396e-05, | |
| "loss": 0.4411, | |
| "step": 222000 | |
| }, | |
| { | |
| "epoch": 13.6, | |
| "eval_loss": 0.8002747297286987, | |
| "eval_runtime": 1.2963, | |
| "eval_samples_per_second": 771.412, | |
| "eval_steps_per_second": 12.343, | |
| "step": 222000 | |
| }, | |
| { | |
| "epoch": 13.63, | |
| "learning_rate": 2.9303482943159077e-05, | |
| "loss": 0.4411, | |
| "step": 222500 | |
| }, | |
| { | |
| "epoch": 13.66, | |
| "learning_rate": 2.861533055666306e-05, | |
| "loss": 0.4411, | |
| "step": 223000 | |
| }, | |
| { | |
| "epoch": 13.66, | |
| "eval_loss": 0.7992754578590393, | |
| "eval_runtime": 1.2911, | |
| "eval_samples_per_second": 774.504, | |
| "eval_steps_per_second": 12.392, | |
| "step": 223000 | |
| }, | |
| { | |
| "epoch": 13.69, | |
| "learning_rate": 2.793926813097066e-05, | |
| "loss": 0.4411, | |
| "step": 223500 | |
| }, | |
| { | |
| "epoch": 13.73, | |
| "learning_rate": 2.7275325239242546e-05, | |
| "loss": 0.4411, | |
| "step": 224000 | |
| }, | |
| { | |
| "epoch": 13.73, | |
| "eval_loss": 0.800546407699585, | |
| "eval_runtime": 6.2685, | |
| "eval_samples_per_second": 159.527, | |
| "eval_steps_per_second": 2.552, | |
| "step": 224000 | |
| }, | |
| { | |
| "epoch": 13.76, | |
| "learning_rate": 2.6623530924491626e-05, | |
| "loss": 0.4409, | |
| "step": 224500 | |
| }, | |
| { | |
| "epoch": 13.79, | |
| "learning_rate": 2.5983913698312782e-05, | |
| "loss": 0.4409, | |
| "step": 225000 | |
| }, | |
| { | |
| "epoch": 13.79, | |
| "eval_loss": 0.8013263940811157, | |
| "eval_runtime": 1.3128, | |
| "eval_samples_per_second": 761.734, | |
| "eval_steps_per_second": 12.188, | |
| "step": 225000 | |
| }, | |
| { | |
| "epoch": 13.82, | |
| "learning_rate": 2.5356501539635512e-05, | |
| "loss": 0.441, | |
| "step": 225500 | |
| }, | |
| { | |
| "epoch": 13.85, | |
| "learning_rate": 2.4741321893500244e-05, | |
| "loss": 0.4409, | |
| "step": 226000 | |
| }, | |
| { | |
| "epoch": 13.85, | |
| "eval_loss": 0.8015850782394409, | |
| "eval_runtime": 1.3378, | |
| "eval_samples_per_second": 747.504, | |
| "eval_steps_per_second": 11.96, | |
| "step": 226000 | |
| }, | |
| { | |
| "epoch": 13.88, | |
| "learning_rate": 2.4138401669857587e-05, | |
| "loss": 0.4408, | |
| "step": 226500 | |
| }, | |
| { | |
| "epoch": 13.91, | |
| "learning_rate": 2.3547767242391212e-05, | |
| "loss": 0.4409, | |
| "step": 227000 | |
| }, | |
| { | |
| "epoch": 13.91, | |
| "eval_loss": 0.7994450926780701, | |
| "eval_runtime": 1.2846, | |
| "eval_samples_per_second": 778.448, | |
| "eval_steps_per_second": 12.455, | |
| "step": 227000 | |
| }, | |
| { | |
| "epoch": 13.94, | |
| "learning_rate": 2.2969444447364498e-05, | |
| "loss": 0.4409, | |
| "step": 227500 | |
| }, | |
| { | |
| "epoch": 13.97, | |
| "learning_rate": 2.240345858248992e-05, | |
| "loss": 0.4408, | |
| "step": 228000 | |
| }, | |
| { | |
| "epoch": 13.97, | |
| "eval_loss": 0.8022862672805786, | |
| "eval_runtime": 1.3268, | |
| "eval_samples_per_second": 753.683, | |
| "eval_steps_per_second": 12.059, | |
| "step": 228000 | |
| }, | |
| { | |
| "epoch": 14.0, | |
| "learning_rate": 2.184983440582284e-05, | |
| "loss": 0.4408, | |
| "step": 228500 | |
| }, | |
| { | |
| "epoch": 14.03, | |
| "learning_rate": 2.1308596134678134e-05, | |
| "loss": 0.4407, | |
| "step": 229000 | |
| }, | |
| { | |
| "epoch": 14.03, | |
| "eval_loss": 0.8013246059417725, | |
| "eval_runtime": 1.2694, | |
| "eval_samples_per_second": 787.747, | |
| "eval_steps_per_second": 12.604, | |
| "step": 229000 | |
| }, | |
| { | |
| "epoch": 14.06, | |
| "learning_rate": 2.0779767444571236e-05, | |
| "loss": 0.4406, | |
| "step": 229500 | |
| }, | |
| { | |
| "epoch": 14.09, | |
| "learning_rate": 2.0263371468182175e-05, | |
| "loss": 0.4406, | |
| "step": 230000 | |
| }, | |
| { | |
| "epoch": 14.09, | |
| "eval_loss": 0.8037849068641663, | |
| "eval_runtime": 1.356, | |
| "eval_samples_per_second": 737.479, | |
| "eval_steps_per_second": 11.8, | |
| "step": 230000 | |
| }, | |
| { | |
| "epoch": 14.12, | |
| "learning_rate": 1.975943079434381e-05, | |
| "loss": 0.4406, | |
| "step": 230500 | |
| }, | |
| { | |
| "epoch": 14.15, | |
| "learning_rate": 1.9267967467053834e-05, | |
| "loss": 0.4408, | |
| "step": 231000 | |
| }, | |
| { | |
| "epoch": 14.15, | |
| "eval_loss": 0.799385130405426, | |
| "eval_runtime": 1.2054, | |
| "eval_samples_per_second": 829.62, | |
| "eval_steps_per_second": 13.274, | |
| "step": 231000 | |
| }, | |
| { | |
| "epoch": 14.19, | |
| "learning_rate": 1.878900298451024e-05, | |
| "loss": 0.4406, | |
| "step": 231500 | |
| }, | |
| { | |
| "epoch": 14.22, | |
| "learning_rate": 1.83225582981712e-05, | |
| "loss": 0.4406, | |
| "step": 232000 | |
| }, | |
| { | |
| "epoch": 14.22, | |
| "eval_loss": 0.800686776638031, | |
| "eval_runtime": 1.3097, | |
| "eval_samples_per_second": 763.545, | |
| "eval_steps_per_second": 12.217, | |
| "step": 232000 | |
| }, | |
| { | |
| "epoch": 14.25, | |
| "learning_rate": 1.7868653811838307e-05, | |
| "loss": 0.4406, | |
| "step": 232500 | |
| }, | |
| { | |
| "epoch": 14.28, | |
| "learning_rate": 1.7427309380764227e-05, | |
| "loss": 0.4404, | |
| "step": 233000 | |
| }, | |
| { | |
| "epoch": 14.28, | |
| "eval_loss": 0.8005698323249817, | |
| "eval_runtime": 1.2953, | |
| "eval_samples_per_second": 772.049, | |
| "eval_steps_per_second": 12.353, | |
| "step": 233000 | |
| }, | |
| { | |
| "epoch": 14.31, | |
| "learning_rate": 1.6998544310784175e-05, | |
| "loss": 0.4406, | |
| "step": 233500 | |
| }, | |
| { | |
| "epoch": 14.34, | |
| "learning_rate": 1.6582377357471285e-05, | |
| "loss": 0.4403, | |
| "step": 234000 | |
| }, | |
| { | |
| "epoch": 14.34, | |
| "eval_loss": 0.7986977696418762, | |
| "eval_runtime": 1.3526, | |
| "eval_samples_per_second": 739.295, | |
| "eval_steps_per_second": 11.829, | |
| "step": 234000 | |
| }, | |
| { | |
| "epoch": 14.37, | |
| "learning_rate": 1.617882672531633e-05, | |
| "loss": 0.4405, | |
| "step": 234500 | |
| }, | |
| { | |
| "epoch": 14.4, | |
| "learning_rate": 1.578791006693124e-05, | |
| "loss": 0.4405, | |
| "step": 235000 | |
| }, | |
| { | |
| "epoch": 14.4, | |
| "eval_loss": 0.8009754419326782, | |
| "eval_runtime": 1.2995, | |
| "eval_samples_per_second": 769.543, | |
| "eval_steps_per_second": 12.313, | |
| "step": 235000 | |
| }, | |
| { | |
| "epoch": 14.43, | |
| "learning_rate": 1.5409644482277073e-05, | |
| "loss": 0.4403, | |
| "step": 235500 | |
| }, | |
| { | |
| "epoch": 14.46, | |
| "learning_rate": 1.504404651791591e-05, | |
| "loss": 0.4404, | |
| "step": 236000 | |
| }, | |
| { | |
| "epoch": 14.46, | |
| "eval_loss": 0.798170268535614, | |
| "eval_runtime": 1.2848, | |
| "eval_samples_per_second": 778.322, | |
| "eval_steps_per_second": 12.453, | |
| "step": 236000 | |
| }, | |
| { | |
| "epoch": 14.49, | |
| "learning_rate": 1.4691132166287069e-05, | |
| "loss": 0.4403, | |
| "step": 236500 | |
| }, | |
| { | |
| "epoch": 14.52, | |
| "learning_rate": 1.4350916865007609e-05, | |
| "loss": 0.4404, | |
| "step": 237000 | |
| }, | |
| { | |
| "epoch": 14.52, | |
| "eval_loss": 0.7984534502029419, | |
| "eval_runtime": 1.2843, | |
| "eval_samples_per_second": 778.661, | |
| "eval_steps_per_second": 12.459, | |
| "step": 237000 | |
| }, | |
| { | |
| "epoch": 14.55, | |
| "learning_rate": 1.4023415496196915e-05, | |
| "loss": 0.4402, | |
| "step": 237500 | |
| }, | |
| { | |
| "epoch": 14.58, | |
| "learning_rate": 1.3708642385825806e-05, | |
| "loss": 0.4403, | |
| "step": 238000 | |
| }, | |
| { | |
| "epoch": 14.58, | |
| "eval_loss": 0.8015879392623901, | |
| "eval_runtime": 1.3525, | |
| "eval_samples_per_second": 739.353, | |
| "eval_steps_per_second": 11.83, | |
| "step": 238000 | |
| }, | |
| { | |
| "epoch": 14.61, | |
| "learning_rate": 1.3406611303089918e-05, | |
| "loss": 0.4404, | |
| "step": 238500 | |
| }, | |
| { | |
| "epoch": 14.64, | |
| "learning_rate": 1.311733545980718e-05, | |
| "loss": 0.4402, | |
| "step": 239000 | |
| }, | |
| { | |
| "epoch": 14.64, | |
| "eval_loss": 0.8024547696113586, | |
| "eval_runtime": 1.3287, | |
| "eval_samples_per_second": 752.597, | |
| "eval_steps_per_second": 12.042, | |
| "step": 239000 | |
| }, | |
| { | |
| "epoch": 14.68, | |
| "learning_rate": 1.284082750984018e-05, | |
| "loss": 0.4401, | |
| "step": 239500 | |
| }, | |
| { | |
| "epoch": 14.71, | |
| "learning_rate": 1.2577099548542319e-05, | |
| "loss": 0.4402, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 14.71, | |
| "eval_loss": 0.8019887804985046, | |
| "eval_runtime": 1.3351, | |
| "eval_samples_per_second": 748.984, | |
| "eval_steps_per_second": 11.984, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 14.74, | |
| "learning_rate": 1.232616311222906e-05, | |
| "loss": 0.4402, | |
| "step": 240500 | |
| }, | |
| { | |
| "epoch": 14.77, | |
| "learning_rate": 1.2088029177672978e-05, | |
| "loss": 0.4401, | |
| "step": 241000 | |
| }, | |
| { | |
| "epoch": 14.77, | |
| "eval_loss": 0.8008501529693604, | |
| "eval_runtime": 1.2829, | |
| "eval_samples_per_second": 779.48, | |
| "eval_steps_per_second": 12.472, | |
| "step": 241000 | |
| }, | |
| { | |
| "epoch": 14.8, | |
| "learning_rate": 1.1862708161623889e-05, | |
| "loss": 0.4402, | |
| "step": 241500 | |
| }, | |
| { | |
| "epoch": 14.83, | |
| "learning_rate": 1.165020992035296e-05, | |
| "loss": 0.4401, | |
| "step": 242000 | |
| }, | |
| { | |
| "epoch": 14.83, | |
| "eval_loss": 0.8015211224555969, | |
| "eval_runtime": 1.2647, | |
| "eval_samples_per_second": 790.696, | |
| "eval_steps_per_second": 12.651, | |
| "step": 242000 | |
| }, | |
| { | |
| "epoch": 14.86, | |
| "learning_rate": 1.1450543749221632e-05, | |
| "loss": 0.4401, | |
| "step": 242500 | |
| }, | |
| { | |
| "epoch": 14.89, | |
| "learning_rate": 1.126371838227509e-05, | |
| "loss": 0.4401, | |
| "step": 243000 | |
| }, | |
| { | |
| "epoch": 14.89, | |
| "eval_loss": 0.8009896278381348, | |
| "eval_runtime": 1.3003, | |
| "eval_samples_per_second": 769.043, | |
| "eval_steps_per_second": 12.305, | |
| "step": 243000 | |
| }, | |
| { | |
| "epoch": 14.92, | |
| "learning_rate": 1.1089741991860081e-05, | |
| "loss": 0.44, | |
| "step": 243500 | |
| }, | |
| { | |
| "epoch": 14.95, | |
| "learning_rate": 1.0928622188267536e-05, | |
| "loss": 0.44, | |
| "step": 244000 | |
| }, | |
| { | |
| "epoch": 14.95, | |
| "eval_loss": 0.799572765827179, | |
| "eval_runtime": 1.3134, | |
| "eval_samples_per_second": 761.387, | |
| "eval_steps_per_second": 12.182, | |
| "step": 244000 | |
| }, | |
| { | |
| "epoch": 14.98, | |
| "learning_rate": 1.0780366019399665e-05, | |
| "loss": 0.4401, | |
| "step": 244500 | |
| }, | |
| { | |
| "epoch": 15.01, | |
| "learning_rate": 1.0644979970461512e-05, | |
| "loss": 0.4402, | |
| "step": 245000 | |
| }, | |
| { | |
| "epoch": 15.01, | |
| "eval_loss": 0.8014461398124695, | |
| "eval_runtime": 1.3153, | |
| "eval_samples_per_second": 760.258, | |
| "eval_steps_per_second": 12.164, | |
| "step": 245000 | |
| }, | |
| { | |
| "epoch": 15.04, | |
| "learning_rate": 1.0522469963677483e-05, | |
| "loss": 0.4401, | |
| "step": 245500 | |
| }, | |
| { | |
| "epoch": 15.07, | |
| "learning_rate": 1.0412841358032126e-05, | |
| "loss": 0.44, | |
| "step": 246000 | |
| }, | |
| { | |
| "epoch": 15.07, | |
| "eval_loss": 0.800745964050293, | |
| "eval_runtime": 1.3042, | |
| "eval_samples_per_second": 766.776, | |
| "eval_steps_per_second": 12.268, | |
| "step": 246000 | |
| }, | |
| { | |
| "epoch": 15.1, | |
| "learning_rate": 1.0316098949035847e-05, | |
| "loss": 0.44, | |
| "step": 246500 | |
| }, | |
| { | |
| "epoch": 15.13, | |
| "learning_rate": 1.0232246968514984e-05, | |
| "loss": 0.44, | |
| "step": 247000 | |
| }, | |
| { | |
| "epoch": 15.13, | |
| "eval_loss": 0.7984172701835632, | |
| "eval_runtime": 1.3184, | |
| "eval_samples_per_second": 758.509, | |
| "eval_steps_per_second": 12.136, | |
| "step": 247000 | |
| }, | |
| { | |
| "epoch": 15.17, | |
| "learning_rate": 1.0161289084426815e-05, | |
| "loss": 0.44, | |
| "step": 247500 | |
| }, | |
| { | |
| "epoch": 15.2, | |
| "learning_rate": 1.0103228400699063e-05, | |
| "loss": 0.44, | |
| "step": 248000 | |
| }, | |
| { | |
| "epoch": 15.2, | |
| "eval_loss": 0.8008962273597717, | |
| "eval_runtime": 1.3201, | |
| "eval_samples_per_second": 757.502, | |
| "eval_steps_per_second": 12.12, | |
| "step": 248000 | |
| }, | |
| { | |
| "epoch": 15.23, | |
| "learning_rate": 1.0058067457094136e-05, | |
| "loss": 0.44, | |
| "step": 248500 | |
| }, | |
| { | |
| "epoch": 15.26, | |
| "learning_rate": 1.0025808229097982e-05, | |
| "loss": 0.4399, | |
| "step": 249000 | |
| }, | |
| { | |
| "epoch": 15.26, | |
| "eval_loss": 0.8005866408348083, | |
| "eval_runtime": 1.3752, | |
| "eval_samples_per_second": 727.163, | |
| "eval_steps_per_second": 11.635, | |
| "step": 249000 | |
| }, | |
| { | |
| "epoch": 15.29, | |
| "learning_rate": 1.0006452127833747e-05, | |
| "loss": 0.4399, | |
| "step": 249500 | |
| }, | |
| { | |
| "epoch": 15.32, | |
| "learning_rate": 1e-05, | |
| "loss": 0.4399, | |
| "step": 250000 | |
| }, | |
| { | |
| "epoch": 15.32, | |
| "eval_loss": 0.8015783429145813, | |
| "eval_runtime": 1.3631, | |
| "eval_samples_per_second": 733.637, | |
| "eval_steps_per_second": 11.738, | |
| "step": 250000 | |
| } | |
| ], | |
| "max_steps": 250000, | |
| "num_train_epochs": 16, | |
| "total_flos": 4.004049914090876e+21, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |