{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999829030603522,
  "eval_steps": 100,
  "global_step": 731,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006838775859121217,
      "grad_norm": 4.283134460449219,
      "learning_rate": 1.3513513513513515e-06,
      "loss": 1.3383,
      "mean_token_accuracy": 0.6803730711340904,
      "step": 5
    },
    {
      "epoch": 0.013677551718242434,
      "grad_norm": 2.8879575729370117,
      "learning_rate": 2.702702702702703e-06,
      "loss": 1.2973,
      "mean_token_accuracy": 0.6889803171157837,
      "step": 10
    },
    {
      "epoch": 0.02051632757736365,
      "grad_norm": 1.4510750770568848,
      "learning_rate": 4.0540540540540545e-06,
      "loss": 1.2647,
      "mean_token_accuracy": 0.6867526054382325,
      "step": 15
    },
    {
      "epoch": 0.027355103436484868,
      "grad_norm": 2.0351901054382324,
      "learning_rate": 5.405405405405406e-06,
      "loss": 1.1765,
      "mean_token_accuracy": 0.7007667943835258,
      "step": 20
    },
    {
      "epoch": 0.034193879295606085,
      "grad_norm": 1.0107135772705078,
      "learning_rate": 6.7567567567567575e-06,
      "loss": 1.1098,
      "mean_token_accuracy": 0.7095919325947762,
      "step": 25
    },
    {
      "epoch": 0.0410326551547273,
      "grad_norm": 0.7789012789726257,
      "learning_rate": 8.108108108108109e-06,
      "loss": 1.0612,
      "mean_token_accuracy": 0.717575378715992,
      "step": 30
    },
    {
      "epoch": 0.04787143101384852,
      "grad_norm": 0.6472299098968506,
      "learning_rate": 9.45945945945946e-06,
      "loss": 1.0272,
      "mean_token_accuracy": 0.7225469693541526,
      "step": 35
    },
    {
      "epoch": 0.054710206872969736,
      "grad_norm": 0.5901947021484375,
      "learning_rate": 1.0810810810810812e-05,
      "loss": 0.9874,
      "mean_token_accuracy": 0.7313357755541802,
      "step": 40
    },
    {
      "epoch": 0.06154898273209095,
      "grad_norm": 0.6146127581596375,
      "learning_rate": 1.2162162162162164e-05,
      "loss": 0.9738,
      "mean_token_accuracy": 0.7330265626311302,
      "step": 45
    },
    {
      "epoch": 0.06838775859121217,
      "grad_norm": 0.6039953827857971,
      "learning_rate": 1.3513513513513515e-05,
      "loss": 0.9604,
      "mean_token_accuracy": 0.7354672074317932,
      "step": 50
    },
    {
      "epoch": 0.0752265344503334,
      "grad_norm": 0.5497832894325256,
      "learning_rate": 1.4864864864864865e-05,
      "loss": 0.9501,
      "mean_token_accuracy": 0.7374408826231956,
      "step": 55
    },
    {
      "epoch": 0.0820653103094546,
      "grad_norm": 0.5113657712936401,
      "learning_rate": 1.6216216216216218e-05,
      "loss": 0.9237,
      "mean_token_accuracy": 0.7434996381402016,
      "step": 60
    },
    {
      "epoch": 0.08890408616857583,
      "grad_norm": 0.5408641695976257,
      "learning_rate": 1.756756756756757e-05,
      "loss": 0.9378,
      "mean_token_accuracy": 0.7394065439701081,
      "step": 65
    },
    {
      "epoch": 0.09574286202769704,
      "grad_norm": 0.5544885396957397,
      "learning_rate": 1.891891891891892e-05,
      "loss": 0.9347,
      "mean_token_accuracy": 0.739613801240921,
      "step": 70
    },
    {
      "epoch": 0.10258163788681826,
      "grad_norm": 0.5568746328353882,
      "learning_rate": 1.9999885675796825e-05,
      "loss": 0.9144,
      "mean_token_accuracy": 0.7448009416460991,
      "step": 75
    },
    {
      "epoch": 0.10942041374593947,
      "grad_norm": 0.5509134531021118,
      "learning_rate": 1.9995884603149403e-05,
      "loss": 0.9181,
      "mean_token_accuracy": 0.7419906392693519,
      "step": 80
    },
    {
      "epoch": 0.1162591896050607,
      "grad_norm": 0.548611044883728,
      "learning_rate": 1.9986169934079135e-05,
      "loss": 0.9106,
      "mean_token_accuracy": 0.7441679835319519,
      "step": 85
    },
    {
      "epoch": 0.1230979654641819,
      "grad_norm": 0.5514124631881714,
      "learning_rate": 1.9970747221441084e-05,
      "loss": 0.9151,
      "mean_token_accuracy": 0.7427051544189454,
      "step": 90
    },
    {
      "epoch": 0.12993674132330313,
      "grad_norm": 0.549062967300415,
      "learning_rate": 1.994962528077878e-05,
      "loss": 0.9029,
      "mean_token_accuracy": 0.7451761096715928,
      "step": 95
    },
    {
      "epoch": 0.13677551718242434,
      "grad_norm": 0.573813796043396,
      "learning_rate": 1.9922816185285264e-05,
      "loss": 0.8884,
      "mean_token_accuracy": 0.7493397817015648,
      "step": 100
    },
    {
      "epoch": 0.14361429304154558,
      "grad_norm": 0.5562133193016052,
      "learning_rate": 1.9890335258902177e-05,
      "loss": 0.8855,
      "mean_token_accuracy": 0.7493105083703995,
      "step": 105
    },
    {
      "epoch": 0.1504530689006668,
      "grad_norm": 0.5802770853042603,
      "learning_rate": 1.9852201067560607e-05,
      "loss": 0.9027,
      "mean_token_accuracy": 0.7449040159583091,
      "step": 110
    },
    {
      "epoch": 0.157291844759788,
      "grad_norm": 0.5636809468269348,
      "learning_rate": 1.9808435408568938e-05,
      "loss": 0.8876,
      "mean_token_accuracy": 0.7495187863707542,
      "step": 115
    },
    {
      "epoch": 0.1641306206189092,
      "grad_norm": 0.5830625891685486,
      "learning_rate": 1.97590632981536e-05,
      "loss": 0.8871,
      "mean_token_accuracy": 0.7475770503282547,
      "step": 120
    },
    {
      "epoch": 0.17096939647803044,
      "grad_norm": 0.5543267726898193,
      "learning_rate": 1.970411295715994e-05,
      "loss": 0.8918,
      "mean_token_accuracy": 0.7472289979457856,
      "step": 125
    },
    {
      "epoch": 0.17780817233715165,
      "grad_norm": 0.5292794704437256,
      "learning_rate": 1.964361579492132e-05,
      "loss": 0.8764,
      "mean_token_accuracy": 0.7520371958613395,
      "step": 130
    },
    {
      "epoch": 0.18464694819627286,
      "grad_norm": 0.5680862069129944,
      "learning_rate": 1.9577606391305705e-05,
      "loss": 0.8934,
      "mean_token_accuracy": 0.7469688639044761,
      "step": 135
    },
    {
      "epoch": 0.19148572405539407,
      "grad_norm": 0.5672624111175537,
      "learning_rate": 1.950612247694998e-05,
      "loss": 0.889,
      "mean_token_accuracy": 0.7479311898350716,
      "step": 140
    },
    {
      "epoch": 0.1983244999145153,
      "grad_norm": 0.5509658455848694,
      "learning_rate": 1.9429204911693333e-05,
      "loss": 0.8708,
      "mean_token_accuracy": 0.7525908648967743,
      "step": 145
    },
    {
      "epoch": 0.20516327577363652,
      "grad_norm": 0.573887825012207,
      "learning_rate": 1.9346897661221957e-05,
      "loss": 0.8748,
      "mean_token_accuracy": 0.7516056269407272,
      "step": 150
    },
    {
      "epoch": 0.21200205163275773,
      "grad_norm": 0.5755806565284729,
      "learning_rate": 1.92592477719385e-05,
      "loss": 0.88,
      "mean_token_accuracy": 0.7498792737722397,
      "step": 155
    },
    {
      "epoch": 0.21884082749187894,
      "grad_norm": 0.5494788885116577,
      "learning_rate": 1.916630534407058e-05,
      "loss": 0.8739,
      "mean_token_accuracy": 0.7512936532497406,
      "step": 160
    },
    {
      "epoch": 0.22567960335100018,
      "grad_norm": 0.543487548828125,
      "learning_rate": 1.9068123503033752e-05,
      "loss": 0.8587,
      "mean_token_accuracy": 0.7555688112974167,
      "step": 165
    },
    {
      "epoch": 0.2325183792101214,
      "grad_norm": 0.5982221364974976,
      "learning_rate": 1.8964758369065303e-05,
      "loss": 0.8923,
      "mean_token_accuracy": 0.7462443545460701,
      "step": 170
    },
    {
      "epoch": 0.2393571550692426,
      "grad_norm": 0.5857303142547607,
      "learning_rate": 1.8856269025146182e-05,
      "loss": 0.8738,
      "mean_token_accuracy": 0.750712414085865,
      "step": 175
    },
    {
      "epoch": 0.2461959309283638,
      "grad_norm": 0.5462734699249268,
      "learning_rate": 1.874271748322951e-05,
      "loss": 0.8523,
      "mean_token_accuracy": 0.7573387727141381,
      "step": 180
    },
    {
      "epoch": 0.253034706787485,
      "grad_norm": 0.5329501032829285,
      "learning_rate": 1.8624168648794833e-05,
      "loss": 0.8671,
      "mean_token_accuracy": 0.7528049916028976,
      "step": 185
    },
    {
      "epoch": 0.25987348264660626,
      "grad_norm": 0.516778290271759,
      "learning_rate": 1.8500690283748502e-05,
      "loss": 0.8363,
      "mean_token_accuracy": 0.7607189759612083,
      "step": 190
    },
    {
      "epoch": 0.2667122585057275,
      "grad_norm": 0.5503281354904175,
      "learning_rate": 1.837235296769131e-05,
      "loss": 0.8646,
      "mean_token_accuracy": 0.7525520265102387,
      "step": 195
    },
    {
      "epoch": 0.2735510343648487,
      "grad_norm": 0.5709648728370667,
      "learning_rate": 1.8239230057575542e-05,
      "loss": 0.8547,
      "mean_token_accuracy": 0.7552181035280228,
      "step": 200
    },
    {
      "epoch": 0.2803898102239699,
      "grad_norm": 0.5268268585205078,
      "learning_rate": 1.810139764577454e-05,
      "loss": 0.8569,
      "mean_token_accuracy": 0.7549711287021637,
      "step": 205
    },
    {
      "epoch": 0.28722858608309115,
      "grad_norm": 0.5442251563072205,
      "learning_rate": 1.7958934516588665e-05,
      "loss": 0.8596,
      "mean_token_accuracy": 0.7541473567485809,
      "step": 210
    },
    {
      "epoch": 0.29406736194221234,
      "grad_norm": 0.5293693542480469,
      "learning_rate": 1.7811922101212622e-05,
      "loss": 0.8692,
      "mean_token_accuracy": 0.7512467160820961,
      "step": 215
    },
    {
      "epoch": 0.3009061378013336,
      "grad_norm": 0.5479506850242615,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.8619,
      "mean_token_accuracy": 0.7537136003375053,
      "step": 220
    },
    {
      "epoch": 0.30774491366045476,
      "grad_norm": 0.5542939901351929,
      "learning_rate": 1.75045880903802e-05,
      "loss": 0.8419,
      "mean_token_accuracy": 0.7578989654779434,
      "step": 225
    },
    {
      "epoch": 0.314583689519576,
      "grad_norm": 0.5070068836212158,
      "learning_rate": 1.7344442165469714e-05,
      "loss": 0.8448,
      "mean_token_accuracy": 0.7581931978464127,
      "step": 230
    },
    {
      "epoch": 0.32142246537869723,
      "grad_norm": 0.5048102140426636,
      "learning_rate": 1.7180098195048458e-05,
      "loss": 0.8317,
      "mean_token_accuracy": 0.7602518483996391,
      "step": 235
    },
    {
      "epoch": 0.3282612412378184,
      "grad_norm": 0.5216901302337646,
      "learning_rate": 1.7011650117287868e-05,
      "loss": 0.8427,
      "mean_token_accuracy": 0.7580358654260635,
      "step": 240
    },
    {
      "epoch": 0.33510001709693965,
      "grad_norm": 0.5171140432357788,
      "learning_rate": 1.683919421624611e-05,
      "loss": 0.8286,
      "mean_token_accuracy": 0.7620488360524178,
      "step": 245
    },
    {
      "epoch": 0.3419387929560609,
      "grad_norm": 0.5429340600967407,
      "learning_rate": 1.6662829066832595e-05,
      "loss": 0.8452,
      "mean_token_accuracy": 0.7569812595844269,
      "step": 250
    },
    {
      "epoch": 0.34877756881518207,
      "grad_norm": 0.5294564962387085,
      "learning_rate": 1.648265547846308e-05,
      "loss": 0.8478,
      "mean_token_accuracy": 0.756881557404995,
      "step": 255
    },
    {
      "epoch": 0.3556163446743033,
      "grad_norm": 0.530605137348175,
      "learning_rate": 1.6298776437437526e-05,
      "loss": 0.8495,
      "mean_token_accuracy": 0.7560626447200776,
      "step": 260
    },
    {
      "epoch": 0.3624551205334245,
      "grad_norm": 0.5185920000076294,
      "learning_rate": 1.611129704807362e-05,
      "loss": 0.8438,
      "mean_token_accuracy": 0.7576473146677017,
      "step": 265
    },
    {
      "epoch": 0.36929389639254573,
      "grad_norm": 0.5438268184661865,
      "learning_rate": 1.592032447262973e-05,
      "loss": 0.8481,
      "mean_token_accuracy": 0.7563759610056877,
      "step": 270
    },
    {
      "epoch": 0.37613267225166697,
      "grad_norm": 0.5615527033805847,
      "learning_rate": 1.572596787005149e-05,
      "loss": 0.8418,
      "mean_token_accuracy": 0.757819227874279,
      "step": 275
    },
    {
      "epoch": 0.38297144811078815,
      "grad_norm": 0.48962655663490295,
      "learning_rate": 1.55283383335771e-05,
      "loss": 0.8406,
      "mean_token_accuracy": 0.7577673494815826,
      "step": 280
    },
    {
      "epoch": 0.3898102239699094,
      "grad_norm": 0.4873958230018616,
      "learning_rate": 1.5327548827237008e-05,
      "loss": 0.8389,
      "mean_token_accuracy": 0.7587939977645874,
      "step": 285
    },
    {
      "epoch": 0.3966489998290306,
      "grad_norm": 0.49630922079086304,
      "learning_rate": 1.512371412128424e-05,
      "loss": 0.8501,
      "mean_token_accuracy": 0.7546382084488868,
      "step": 290
    },
    {
      "epoch": 0.4034877756881518,
      "grad_norm": 0.4898976683616638,
      "learning_rate": 1.4916950726592322e-05,
      "loss": 0.8335,
      "mean_token_accuracy": 0.7595216274261475,
      "step": 295
    },
    {
      "epoch": 0.41032655154727304,
      "grad_norm": 0.486189603805542,
      "learning_rate": 1.4707376828058264e-05,
      "loss": 0.8334,
      "mean_token_accuracy": 0.7587142795324325,
      "step": 300
    },
    {
      "epoch": 0.4171653274063943,
      "grad_norm": 0.4933895170688629,
      "learning_rate": 1.449511221704866e-05,
      "loss": 0.8543,
      "mean_token_accuracy": 0.7549725160002708,
      "step": 305
    },
    {
      "epoch": 0.42400410326551546,
      "grad_norm": 0.4862573444843292,
      "learning_rate": 1.428027822292758e-05,
      "loss": 0.8328,
      "mean_token_accuracy": 0.7595344498753548,
      "step": 310
    },
    {
      "epoch": 0.4308428791246367,
      "grad_norm": 0.48415231704711914,
      "learning_rate": 1.4062997643705308e-05,
      "loss": 0.8282,
      "mean_token_accuracy": 0.7610621720552444,
      "step": 315
    },
    {
      "epoch": 0.4376816549837579,
      "grad_norm": 0.5168640613555908,
      "learning_rate": 1.3843394675847635e-05,
      "loss": 0.8472,
      "mean_token_accuracy": 0.7552296608686447,
      "step": 320
    },
    {
      "epoch": 0.4445204308428791,
      "grad_norm": 0.5019128918647766,
      "learning_rate": 1.3621594843285801e-05,
      "loss": 0.8287,
      "mean_token_accuracy": 0.7620115980505944,
      "step": 325
    },
    {
      "epoch": 0.45135920670200036,
      "grad_norm": 0.5697439312934875,
      "learning_rate": 1.3397724925667657e-05,
      "loss": 0.8361,
      "mean_token_accuracy": 0.7582670867443084,
      "step": 330
    },
    {
      "epoch": 0.45819798256112154,
      "grad_norm": 0.5009385943412781,
      "learning_rate": 1.3171912885891063e-05,
      "loss": 0.8402,
      "mean_token_accuracy": 0.756958456337452,
      "step": 335
    },
    {
      "epoch": 0.4650367584202428,
      "grad_norm": 0.5170038938522339,
      "learning_rate": 1.2944287796960949e-05,
      "loss": 0.8277,
      "mean_token_accuracy": 0.7602688521146774,
      "step": 340
    },
    {
      "epoch": 0.471875534279364,
      "grad_norm": 0.500216007232666,
      "learning_rate": 1.2714979768211854e-05,
      "loss": 0.8267,
      "mean_token_accuracy": 0.760731266438961,
      "step": 345
    },
    {
      "epoch": 0.4787143101384852,
      "grad_norm": 0.5052452683448792,
      "learning_rate": 1.2484119870938102e-05,
      "loss": 0.8289,
      "mean_token_accuracy": 0.760765828192234,
      "step": 350
    },
    {
      "epoch": 0.48555308599760644,
      "grad_norm": 0.47336429357528687,
      "learning_rate": 1.2251840063474108e-05,
      "loss": 0.8225,
      "mean_token_accuracy": 0.7622879460453987,
      "step": 355
    },
    {
      "epoch": 0.4923918618567276,
      "grad_norm": 0.4850134551525116,
      "learning_rate": 1.2018273115767673e-05,
      "loss": 0.8289,
      "mean_token_accuracy": 0.7606528863310814,
      "step": 360
    },
    {
      "epoch": 0.49923063771584886,
      "grad_norm": 0.49699971079826355,
      "learning_rate": 1.1783552533489372e-05,
      "loss": 0.8221,
      "mean_token_accuracy": 0.762169836461544,
      "step": 365
    },
    {
      "epoch": 0.50606941357497,
      "grad_norm": 0.5126599073410034,
      "learning_rate": 1.1547812481721387e-05,
      "loss": 0.8363,
      "mean_token_accuracy": 0.757779236137867,
      "step": 370
    },
    {
      "epoch": 0.5129081894340913,
      "grad_norm": 0.5152341723442078,
      "learning_rate": 1.1311187708269442e-05,
      "loss": 0.8414,
      "mean_token_accuracy": 0.756208673119545,
      "step": 375
    },
    {
      "epoch": 0.5197469652932125,
      "grad_norm": 0.47980859875679016,
      "learning_rate": 1.1073813466641633e-05,
      "loss": 0.8262,
      "mean_token_accuracy": 0.7605276107788086,
      "step": 380
    },
    {
      "epoch": 0.5265857411523337,
      "grad_norm": 0.5068893432617188,
      "learning_rate": 1.0835825438738232e-05,
      "loss": 0.8334,
      "mean_token_accuracy": 0.7591528907418251,
      "step": 385
    },
    {
      "epoch": 0.533424517011455,
      "grad_norm": 0.4852905571460724,
      "learning_rate": 1.0597359657296602e-05,
      "loss": 0.8242,
      "mean_token_accuracy": 0.7600131437182427,
      "step": 390
    },
    {
      "epoch": 0.5402632928705762,
      "grad_norm": 0.4789692461490631,
      "learning_rate": 1.0358552428135576e-05,
      "loss": 0.8224,
      "mean_token_accuracy": 0.7617440149188042,
      "step": 395
    },
    {
      "epoch": 0.5471020687296974,
      "grad_norm": 0.5026122331619263,
      "learning_rate": 1.0119540252243755e-05,
      "loss": 0.8374,
      "mean_token_accuracy": 0.7577085196971893,
      "step": 400
    },
    {
      "epoch": 0.5539408445888186,
      "grad_norm": 0.5124115347862244,
      "learning_rate": 9.880459747756247e-06,
      "loss": 0.8158,
      "mean_token_accuracy": 0.7633502900600433,
      "step": 405
    },
    {
      "epoch": 0.5607796204479398,
      "grad_norm": 0.46995291113853455,
      "learning_rate": 9.641447571864429e-06,
      "loss": 0.8309,
      "mean_token_accuracy": 0.7596037566661835,
      "step": 410
    },
    {
      "epoch": 0.567618396307061,
      "grad_norm": 0.4657708704471588,
      "learning_rate": 9.402640342703401e-06,
      "loss": 0.8258,
      "mean_token_accuracy": 0.7604287952184677,
      "step": 415
    },
    {
      "epoch": 0.5744571721661823,
      "grad_norm": 0.4966000020503998,
      "learning_rate": 9.164174561261771e-06,
      "loss": 0.8191,
      "mean_token_accuracy": 0.7622967541217804,
      "step": 420
    },
    {
      "epoch": 0.5812959480253035,
      "grad_norm": 0.5118598937988281,
      "learning_rate": 8.92618653335837e-06,
      "loss": 0.8145,
      "mean_token_accuracy": 0.7636432871222496,
      "step": 425
    },
    {
      "epoch": 0.5881347238844247,
      "grad_norm": 0.47218063473701477,
      "learning_rate": 8.688812291730565e-06,
      "loss": 0.8175,
      "mean_token_accuracy": 0.7625760570168495,
      "step": 430
    },
    {
      "epoch": 0.5949734997435459,
      "grad_norm": 0.4943382441997528,
      "learning_rate": 8.452187518278615e-06,
      "loss": 0.8282,
      "mean_token_accuracy": 0.7598957225680352,
      "step": 435
    },
    {
      "epoch": 0.6018122756026671,
      "grad_norm": 0.4632488489151001,
      "learning_rate": 8.216447466510633e-06,
      "loss": 0.8166,
      "mean_token_accuracy": 0.7627444177865982,
      "step": 440
    },
    {
      "epoch": 0.6086510514617883,
      "grad_norm": 0.46436184644699097,
      "learning_rate": 7.981726884232328e-06,
      "loss": 0.8141,
      "mean_token_accuracy": 0.7631665468215942,
      "step": 445
    },
    {
      "epoch": 0.6154898273209095,
      "grad_norm": 0.4806179702281952,
      "learning_rate": 7.748159936525896e-06,
      "loss": 0.8158,
      "mean_token_accuracy": 0.762781310081482,
      "step": 450
    },
    {
      "epoch": 0.6223286031800308,
      "grad_norm": 0.4713290333747864,
      "learning_rate": 7.5158801290619e-06,
      "loss": 0.8197,
      "mean_token_accuracy": 0.7615951552987099,
      "step": 455
    },
    {
      "epoch": 0.629167379039152,
      "grad_norm": 0.46885767579078674,
      "learning_rate": 7.285020231788149e-06,
      "loss": 0.8073,
      "mean_token_accuracy": 0.7656721040606499,
      "step": 460
    },
    {
      "epoch": 0.6360061548982732,
      "grad_norm": 0.495272159576416,
      "learning_rate": 7.0557122030390545e-06,
      "loss": 0.8063,
      "mean_token_accuracy": 0.765167286992073,
      "step": 465
    },
    {
      "epoch": 0.6428449307573945,
      "grad_norm": 0.47429198026657104,
      "learning_rate": 6.8280871141089415e-06,
      "loss": 0.8026,
      "mean_token_accuracy": 0.7673456132411957,
      "step": 470
    },
    {
      "epoch": 0.6496837066165156,
      "grad_norm": 0.48846620321273804,
      "learning_rate": 6.602275074332345e-06,
      "loss": 0.8201,
      "mean_token_accuracy": 0.7618905574083328,
      "step": 475
    },
    {
      "epoch": 0.6565224824756368,
      "grad_norm": 0.5203046798706055,
      "learning_rate": 6.378405156714202e-06,
      "loss": 0.8054,
      "mean_token_accuracy": 0.7656926274299621,
      "step": 480
    },
    {
      "epoch": 0.6633612583347581,
      "grad_norm": 0.481134295463562,
      "learning_rate": 6.156605324152369e-06,
      "loss": 0.7985,
      "mean_token_accuracy": 0.7675068721175193,
      "step": 485
    },
    {
      "epoch": 0.6702000341938793,
      "grad_norm": 0.465944766998291,
      "learning_rate": 5.937002356294699e-06,
      "loss": 0.8082,
      "mean_token_accuracy": 0.7647843450307846,
      "step": 490
    },
    {
      "epoch": 0.6770388100530005,
      "grad_norm": 0.4643535614013672,
      "learning_rate": 5.719721777072425e-06,
      "loss": 0.8081,
      "mean_token_accuracy": 0.7649053990840912,
      "step": 495
    },
    {
      "epoch": 0.6838775859121218,
      "grad_norm": 0.48063191771507263,
      "learning_rate": 5.504887782951343e-06,
      "loss": 0.8067,
      "mean_token_accuracy": 0.765461964905262,
      "step": 500
    },
    {
      "epoch": 0.690716361771243,
      "grad_norm": 0.47620853781700134,
      "learning_rate": 5.29262317194174e-06,
      "loss": 0.8017,
      "mean_token_accuracy": 0.7668887257575989,
      "step": 505
    },
    {
      "epoch": 0.6975551376303641,
      "grad_norm": 0.46232712268829346,
      "learning_rate": 5.083049273407681e-06,
      "loss": 0.8034,
      "mean_token_accuracy": 0.7657584965229034,
      "step": 510
    },
    {
      "epoch": 0.7043939134894854,
      "grad_norm": 0.45751717686653137,
      "learning_rate": 4.876285878715764e-06,
      "loss": 0.8035,
      "mean_token_accuracy": 0.766502857208252,
      "step": 515
    },
    {
      "epoch": 0.7112326893486066,
      "grad_norm": 0.4558106064796448,
      "learning_rate": 4.672451172762998e-06,
      "loss": 0.8103,
      "mean_token_accuracy": 0.7645265579223632,
      "step": 520
    },
    {
      "epoch": 0.7180714652077278,
      "grad_norm": 0.4815528988838196,
      "learning_rate": 4.471661666422899e-06,
      "loss": 0.814,
      "mean_token_accuracy": 0.7626894161105156,
      "step": 525
    },
    {
      "epoch": 0.724910241066849,
      "grad_norm": 0.4826851189136505,
      "learning_rate": 4.274032129948512e-06,
      "loss": 0.8078,
      "mean_token_accuracy": 0.7646446511149406,
      "step": 530
    },
    {
      "epoch": 0.7317490169259703,
      "grad_norm": 0.4646717309951782,
      "learning_rate": 4.079675527370273e-06,
      "loss": 0.8139,
      "mean_token_accuracy": 0.7622727945446968,
      "step": 535
    },
    {
      "epoch": 0.7385877927850915,
      "grad_norm": 0.48112139105796814,
      "learning_rate": 3.888702951926384e-06,
      "loss": 0.7982,
      "mean_token_accuracy": 0.7667050585150719,
      "step": 540
    },
    {
      "epoch": 0.7454265686442126,
      "grad_norm": 0.47457870841026306,
      "learning_rate": 3.701223562562478e-06,
      "loss": 0.8082,
      "mean_token_accuracy": 0.7652096658945083,
      "step": 545
    },
    {
      "epoch": 0.7522653445033339,
      "grad_norm": 0.48485058546066284,
      "learning_rate": 3.5173445215369183e-06,
      "loss": 0.8027,
      "mean_token_accuracy": 0.7657500252127647,
      "step": 550
    },
    {
      "epoch": 0.7591041203624551,
      "grad_norm": 0.49953824281692505,
      "learning_rate": 3.3371709331674075e-06,
      "loss": 0.8036,
      "mean_token_accuracy": 0.7657319813966751,
      "step": 555
    },
    {
      "epoch": 0.7659428962215763,
      "grad_norm": 0.45230066776275635,
      "learning_rate": 3.1608057837538976e-06,
      "loss": 0.8029,
      "mean_token_accuracy": 0.7664909601211548,
      "step": 560
    },
    {
      "epoch": 0.7727816720806976,
      "grad_norm": 0.4862216114997864,
      "learning_rate": 2.988349882712135e-06,
      "loss": 0.82,
      "mean_token_accuracy": 0.7606521427631379,
      "step": 565
    },
    {
      "epoch": 0.7796204479398188,
      "grad_norm": 0.4636688232421875,
      "learning_rate": 2.819901804951547e-06,
      "loss": 0.7885,
      "mean_token_accuracy": 0.7696284845471382,
      "step": 570
    },
    {
      "epoch": 0.78645922379894,
      "grad_norm": 0.46034541726112366,
      "learning_rate": 2.655557834530288e-06,
      "loss": 0.8148,
      "mean_token_accuracy": 0.7626620322465897,
      "step": 575
    },
    {
      "epoch": 0.7932979996580612,
      "grad_norm": 0.46980300545692444,
      "learning_rate": 2.495411909619804e-06,
      "loss": 0.8209,
      "mean_token_accuracy": 0.760961389541626,
      "step": 580
    },
    {
      "epoch": 0.8001367755171824,
      "grad_norm": 0.4673324227333069,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.8131,
      "mean_token_accuracy": 0.7633547097444534,
      "step": 585
    },
    {
      "epoch": 0.8069755513763036,
      "grad_norm": 0.46234130859375,
      "learning_rate": 2.1880778987873806e-06,
      "loss": 0.7929,
      "mean_token_accuracy": 0.7690023958683014,
      "step": 590
    },
    {
      "epoch": 0.8138143272354249,
      "grad_norm": 0.4616515338420868,
      "learning_rate": 2.0410654834113362e-06,
      "loss": 0.7883,
      "mean_token_accuracy": 0.7698821023106575,
      "step": 595
    },
    {
      "epoch": 0.8206531030945461,
      "grad_norm": 0.4555530548095703,
      "learning_rate": 1.8986023542254617e-06,
      "loss": 0.7933,
      "mean_token_accuracy": 0.7684802070260048,
      "step": 600
    },
    {
      "epoch": 0.8274918789536673,
      "grad_norm": 0.45201539993286133,
      "learning_rate": 1.7607699424244583e-06,
      "loss": 0.8116,
      "mean_token_accuracy": 0.7643455028533935,
      "step": 605
    },
    {
      "epoch": 0.8343306548127886,
      "grad_norm": 0.47511157393455505,
      "learning_rate": 1.6276470323086936e-06,
      "loss": 0.798,
      "mean_token_accuracy": 0.7679879561066627,
      "step": 610
    },
    {
      "epoch": 0.8411694306719097,
      "grad_norm": 0.443029522895813,
      "learning_rate": 1.499309716251498e-06,
      "loss": 0.8061,
      "mean_token_accuracy": 0.7647668555378914,
      "step": 615
    },
    {
      "epoch": 0.8480082065310309,
      "grad_norm": 0.4504885673522949,
      "learning_rate": 1.3758313512051702e-06,
      "loss": 0.7991,
      "mean_token_accuracy": 0.7665026307106018,
      "step": 620
    },
    {
      "epoch": 0.8548469823901521,
      "grad_norm": 0.4775781035423279,
      "learning_rate": 1.257282516770494e-06,
      "loss": 0.8099,
      "mean_token_accuracy": 0.7643145814538002,
      "step": 625
    },
    {
      "epoch": 0.8616857582492734,
      "grad_norm": 0.4560346305370331,
      "learning_rate": 1.1437309748538205e-06,
      "loss": 0.788,
      "mean_token_accuracy": 0.7699791714549065,
      "step": 630
    },
    {
      "epoch": 0.8685245341083946,
      "grad_norm": 0.46563640236854553,
      "learning_rate": 1.0352416309347003e-06,
      "loss": 0.7882,
      "mean_token_accuracy": 0.7693348750472069,
      "step": 635
    },
    {
      "epoch": 0.8753633099675158,
      "grad_norm": 0.4712006449699402,
      "learning_rate": 9.318764969662475e-07,
      "loss": 0.7948,
      "mean_token_accuracy": 0.7693790286779404,
      "step": 640
    },
    {
      "epoch": 0.8822020858266371,
      "grad_norm": 0.4499385952949524,
      "learning_rate": 8.336946559294223e-07,
      "loss": 0.7904,
      "mean_token_accuracy": 0.7691716372966766,
      "step": 645
    },
    {
      "epoch": 0.8890408616857582,
      "grad_norm": 0.46574312448501587,
      "learning_rate": 7.40752228061502e-07,
      "loss": 0.8119,
      "mean_token_accuracy": 0.7633272022008896,
      "step": 650
    },
    {
      "epoch": 0.8958796375448794,
      "grad_norm": 0.4548204243183136,
      "learning_rate": 6.531023387780433e-07,
      "loss": 0.7967,
      "mean_token_accuracy": 0.7682895168662072,
      "step": 655
    },
    {
      "epoch": 0.9027184134040007,
      "grad_norm": 0.475398987531662,
      "learning_rate": 5.707950883066681e-07,
      "loss": 0.8046,
      "mean_token_accuracy": 0.7655658751726151,
      "step": 660
    },
    {
      "epoch": 0.9095571892631219,
      "grad_norm": 0.4481120705604553,
      "learning_rate": 4.938775230500192e-07,
      "loss": 0.7998,
      "mean_token_accuracy": 0.7667894646525383,
      "step": 665
    },
    {
      "epoch": 0.9163959651222431,
      "grad_norm": 0.46256303787231445,
      "learning_rate": 4.223936086942981e-07,
      "loss": 0.7981,
      "mean_token_accuracy": 0.7671072691679001,
      "step": 670
    },
    {
      "epoch": 0.9232347409813644,
      "grad_norm": 0.45697101950645447,
      "learning_rate": 3.5638420507868145e-07,
      "loss": 0.7947,
      "mean_token_accuracy": 0.7680518299341201,
      "step": 675
    },
    {
      "epoch": 0.9300735168404856,
      "grad_norm": 0.44780802726745605,
      "learning_rate": 2.9588704284006176e-07,
      "loss": 0.7935,
      "mean_token_accuracy": 0.7683573782444,
      "step": 680
    },
    {
      "epoch": 0.9369122926996067,
      "grad_norm": 0.4538120925426483,
      "learning_rate": 2.4093670184640263e-07,
      "loss": 0.8087,
      "mean_token_accuracy": 0.7649404585361481,
      "step": 685
    },
    {
      "epoch": 0.943751068558728,
      "grad_norm": 0.44170472025871277,
      "learning_rate": 1.9156459143106598e-07,
      "loss": 0.8082,
      "mean_token_accuracy": 0.764902551472187,
      "step": 690
    },
    {
      "epoch": 0.9505898444178492,
      "grad_norm": 0.450748473405838,
      "learning_rate": 1.4779893243939358e-07,
      "loss": 0.7977,
      "mean_token_accuracy": 0.7670714050531388,
      "step": 695
    },
    {
      "epoch": 0.9574286202769704,
      "grad_norm": 0.4557766020298004,
      "learning_rate": 1.0966474109782354e-07,
      "loss": 0.794,
      "mean_token_accuracy": 0.7679675281047821,
      "step": 700
    },
    {
      "epoch": 0.9642673961360917,
      "grad_norm": 0.4443546533584595,
      "learning_rate": 7.718381471473524e-08,
      "loss": 0.7979,
      "mean_token_accuracy": 0.7666744440793991,
      "step": 705
    },
    {
      "epoch": 0.9711061719952129,
      "grad_norm": 0.4413938522338867,
      "learning_rate": 5.037471922122561e-08,
      "loss": 0.8083,
      "mean_token_accuracy": 0.7643239527940751,
      "step": 710
    },
    {
      "epoch": 0.9779449478543341,
      "grad_norm": 0.4516150653362274,
      "learning_rate": 2.925277855891695e-08,
      "loss": 0.8064,
      "mean_token_accuracy": 0.7641888409852982,
      "step": 715
    },
    {
      "epoch": 0.9847837237134552,
      "grad_norm": 0.4401575028896332,
      "learning_rate": 1.3830065920867886e-08,
      "loss": 0.8108,
      "mean_token_accuracy": 0.7633798211812973,
      "step": 720
    },
    {
      "epoch": 0.9916224995725765,
      "grad_norm": 0.4507163465023041,
      "learning_rate": 4.11539685059914e-09,
      "loss": 0.7984,
      "mean_token_accuracy": 0.7672414898872375,
      "step": 725
    },
    {
      "epoch": 0.9984612754316977,
      "grad_norm": 0.4516432285308838,
      "learning_rate": 1.1432420317758486e-10,
      "loss": 0.7992,
      "mean_token_accuracy": 0.7661866798996926,
      "step": 730
    },
    {
      "epoch": 0.999829030603522,
      "mean_token_accuracy": 0.7705116346478462,
      "step": 731,
      "total_flos": 72689419026432.0,
      "train_loss": 0.8537281411617138,
      "train_runtime": 90647.457,
      "train_samples_per_second": 1.032,
      "train_steps_per_second": 0.008
    }
  ],
  "logging_steps": 5,
  "max_steps": 731,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 72689419026432.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}