{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.057279236276849645,
"eval_steps": 500,
"global_step": 48,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12863.0,
"completions/max_terminated_length": 12863.0,
"completions/mean_length": 10745.375,
"completions/mean_terminated_length": 10745.375,
"completions/min_length": 8883.0,
"completions/min_terminated_length": 8883.0,
"epoch": 0.0011933174224343676,
"grad_norm": 0.6463546752929688,
"is_mask": 0.84375,
"is_sample": 1.0,
"learning_rate": 9.999964864089826e-07,
"loss": -0.0221,
"num_tokens": 285255.0,
"reward": 0.75,
"reward_std": 0.8457233309745789,
"rewards/accuracy_reward/mean": 0.09375,
"rewards/accuracy_reward/std": 0.2961445748806,
"rewards/format_reward/mean": 0.5,
"rewards/format_reward/std": 0.5080004930496216,
"step": 1
},
{
"epoch": 0.002386634844868735,
"grad_norm": 0.8406618237495422,
"learning_rate": 9.999859456853114e-07,
"loss": 0.0001,
"step": 2
},
{
"epoch": 0.003579952267303103,
"grad_norm": 0.760502815246582,
"learning_rate": 9.9996837797713e-07,
"loss": -0.0221,
"step": 3
},
{
"epoch": 0.00477326968973747,
"grad_norm": 0.9957342743873596,
"learning_rate": 9.999437835313409e-07,
"loss": -0.0221,
"step": 4
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12883.0,
"completions/max_terminated_length": 12883.0,
"completions/mean_length": 10608.40625,
"completions/mean_terminated_length": 10608.40625,
"completions/min_length": 8903.0,
"completions/min_terminated_length": 8903.0,
"epoch": 0.0059665871121718375,
"grad_norm": 0.0,
"is_mask": 0.875,
"is_sample": 1.0,
"learning_rate": 9.999121626936037e-07,
"loss": 0.0,
"num_tokens": 571454.0,
"reward": 0.53125,
"reward_std": 0.5971629619598389,
"rewards/accuracy_reward/mean": 0.0,
"rewards/accuracy_reward/std": 0.0,
"rewards/format_reward/mean": 0.46875,
"rewards/format_reward/std": 0.507007360458374,
"step": 5
},
{
"epoch": 0.007159904534606206,
"grad_norm": 0.0,
"learning_rate": 9.998735159083292e-07,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.008353221957040573,
"grad_norm": 0.0,
"learning_rate": 9.998278437186732e-07,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.00954653937947494,
"grad_norm": 0.0,
"learning_rate": 9.997751467665294e-07,
"loss": 0.0,
"step": 8
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 13108.0,
"completions/max_terminated_length": 13108.0,
"completions/mean_length": 11084.09375,
"completions/mean_terminated_length": 11084.09375,
"completions/min_length": 8894.0,
"completions/min_terminated_length": 8894.0,
"epoch": 0.010739856801909307,
"grad_norm": 0.934722363948822,
"is_mask": 0.8125,
"is_sample": 0.96875,
"learning_rate": 9.997154257925197e-07,
"loss": -0.0,
"num_tokens": 858170.0,
"reward": 1.09375,
"reward_std": 0.6487956643104553,
"rewards/accuracy_reward/mean": 0.09375,
"rewards/accuracy_reward/std": 0.2961445748806,
"rewards/format_reward/mean": 0.6875,
"rewards/format_reward/std": 0.4709290862083435,
"step": 9
},
{
"epoch": 0.011933174224343675,
"grad_norm": 0.9142937064170837,
"learning_rate": 9.99648681635985e-07,
"loss": 0.0002,
"step": 10
},
{
"epoch": 0.013126491646778043,
"grad_norm": 0.8181187510490417,
"learning_rate": 9.995749152349713e-07,
"loss": 0.0001,
"step": 11
},
{
"epoch": 0.014319809069212411,
"grad_norm": 1.1284211874008179,
"learning_rate": 9.994941276262188e-07,
"loss": -0.0002,
"step": 12
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12847.0,
"completions/max_terminated_length": 12847.0,
"completions/mean_length": 10575.90625,
"completions/mean_terminated_length": 10575.90625,
"completions/min_length": 8888.0,
"completions/min_terminated_length": 8888.0,
"epoch": 0.015513126491646777,
"grad_norm": 0.8638948202133179,
"is_mask": 0.71875,
"is_sample": 1.0,
"learning_rate": 9.99406319945146e-07,
"loss": 0.0,
"num_tokens": 1144312.0,
"reward": 1.03125,
"reward_std": 0.6127826571464539,
"rewards/accuracy_reward/mean": 0.1875,
"rewards/accuracy_reward/std": 0.3965577781200409,
"rewards/format_reward/mean": 0.5625,
"rewards/format_reward/std": 0.504016101360321,
"step": 13
},
{
"epoch": 0.016706443914081145,
"grad_norm": 0.9017719626426697,
"learning_rate": 9.99311493425834e-07,
"loss": -0.0001,
"step": 14
},
{
"epoch": 0.017899761336515514,
"grad_norm": 0.8770928978919983,
"learning_rate": 9.99209649401009e-07,
"loss": 0.0002,
"step": 15
},
{
"epoch": 0.01909307875894988,
"grad_norm": 1.0520905256271362,
"learning_rate": 9.99100789302024e-07,
"loss": 0.0002,
"step": 16
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12937.0,
"completions/max_terminated_length": 12937.0,
"completions/mean_length": 10770.40625,
"completions/mean_terminated_length": 10770.40625,
"completions/min_length": 8897.0,
"completions/min_terminated_length": 8897.0,
"epoch": 0.02028639618138425,
"grad_norm": 0.0,
"is_mask": 0.84375,
"is_sample": 1.0,
"learning_rate": 9.98984914658839e-07,
"loss": 0.0,
"num_tokens": 1430298.0,
"reward": 0.75,
"reward_std": 0.7129100561141968,
"rewards/accuracy_reward/mean": 0.09375,
"rewards/accuracy_reward/std": 0.2961445748806,
"rewards/format_reward/mean": 0.5625,
"rewards/format_reward/std": 0.504016101360321,
"step": 17
},
{
"epoch": 0.021479713603818614,
"grad_norm": 0.0,
"learning_rate": 9.988620270999978e-07,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.022673031026252982,
"grad_norm": 0.0,
"learning_rate": 9.98732128352607e-07,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.02386634844868735,
"grad_norm": 0.0,
"learning_rate": 9.985952202423114e-07,
"loss": 0.0,
"step": 20
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12980.0,
"completions/max_terminated_length": 12980.0,
"completions/mean_length": 10494.15625,
"completions/mean_terminated_length": 10494.15625,
"completions/min_length": 8879.0,
"completions/min_terminated_length": 8879.0,
"epoch": 0.025059665871121718,
"grad_norm": 0.7287543416023254,
"is_mask": 0.75,
"is_sample": 1.0,
"learning_rate": 9.98451304693267e-07,
"loss": -0.0361,
"num_tokens": 1716335.0,
"reward": 0.84375,
"reward_std": 0.7163528203964233,
"rewards/accuracy_reward/mean": 0.09375,
"rewards/accuracy_reward/std": 0.2961445748806,
"rewards/format_reward/mean": 0.59375,
"rewards/format_reward/std": 0.49899089336395264,
"step": 21
},
{
"epoch": 0.026252983293556086,
"grad_norm": 1.313971996307373,
"learning_rate": 9.983003837281153e-07,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.027446300715990454,
"grad_norm": 0.7719064950942993,
"learning_rate": 9.981424594679544e-07,
"loss": -0.0364,
"step": 23
},
{
"epoch": 0.028639618138424822,
"grad_norm": 1.057080864906311,
"learning_rate": 9.979775341323098e-07,
"loss": -0.0365,
"step": 24
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12923.0,
"completions/max_terminated_length": 12923.0,
"completions/mean_length": 10913.5625,
"completions/mean_terminated_length": 10913.5625,
"completions/min_length": 8888.0,
"completions/min_terminated_length": 8888.0,
"epoch": 0.029832935560859187,
"grad_norm": 0.0,
"is_mask": 0.75,
"is_sample": 1.0,
"learning_rate": 9.978056100391016e-07,
"loss": 0.0,
"num_tokens": 2003376.0,
"reward": 0.78125,
"reward_std": 0.5361451506614685,
"rewards/accuracy_reward/mean": 0.0625,
"rewards/accuracy_reward/std": 0.24593468010425568,
"rewards/format_reward/mean": 0.71875,
"rewards/format_reward/std": 0.45680341124534607,
"step": 25
},
{
"epoch": 0.031026252983293555,
"grad_norm": 0.0,
"learning_rate": 9.976266896046142e-07,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.032219570405727926,
"grad_norm": 0.0,
"learning_rate": 9.974407753434602e-07,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.03341288782816229,
"grad_norm": 0.0,
"learning_rate": 9.972478698685462e-07,
"loss": 0.0,
"step": 28
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12984.0,
"completions/max_terminated_length": 12984.0,
"completions/mean_length": 10578.90625,
"completions/mean_terminated_length": 10578.90625,
"completions/min_length": 8891.0,
"completions/min_terminated_length": 8891.0,
"epoch": 0.034606205250596656,
"grad_norm": 0.0,
"is_mask": 0.875,
"is_sample": 1.0,
"learning_rate": 9.970479758910363e-07,
"loss": 0.0,
"num_tokens": 2289897.0,
"reward": 0.6875,
"reward_std": 0.604997992515564,
"rewards/accuracy_reward/mean": 0.09375,
"rewards/accuracy_reward/std": 0.2961445748806,
"rewards/format_reward/mean": 0.59375,
"rewards/format_reward/std": 0.49899089336395264,
"step": 29
},
{
"epoch": 0.03579952267303103,
"grad_norm": 0.0,
"learning_rate": 9.96841096220313e-07,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.03699284009546539,
"grad_norm": 0.0,
"learning_rate": 9.966272337639385e-07,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.03818615751789976,
"grad_norm": 0.0,
"learning_rate": 9.96406391527614e-07,
"loss": 0.0,
"step": 32
},
{
"completions/clipped_ratio": 0.03125,
"completions/max_length": 12975.0,
"completions/max_terminated_length": 12975.0,
"completions/mean_length": 10250.5,
"completions/mean_terminated_length": 10291.2255859375,
"completions/min_length": 8905.0,
"completions/min_terminated_length": 8905.0,
"epoch": 0.03937947494033413,
"grad_norm": 0.44783833622932434,
"is_mask": 0.8125,
"is_sample": 1.0,
"learning_rate": 9.961785726151362e-07,
"loss": -0.0361,
"num_tokens": 2577231.0,
"reward": 0.6145833730697632,
"reward_std": 0.667420506477356,
"rewards/accuracy_reward/mean": 0.0625,
"rewards/accuracy_reward/std": 0.24593468010425568,
"rewards/format_reward/mean": 0.40625,
"rewards/format_reward/std": 0.49899089336395264,
"step": 33
},
{
"epoch": 0.0405727923627685,
"grad_norm": 0.9860613942146301,
"learning_rate": 9.95943780228355e-07,
"loss": -0.0001,
"step": 34
},
{
"epoch": 0.041766109785202864,
"grad_norm": 0.440227746963501,
"learning_rate": 9.957020176671287e-07,
"loss": -0.0361,
"step": 35
},
{
"epoch": 0.04295942720763723,
"grad_norm": 0.5511691570281982,
"learning_rate": 9.954532883292758e-07,
"loss": -0.036,
"step": 36
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12907.0,
"completions/max_terminated_length": 12907.0,
"completions/mean_length": 10436.34375,
"completions/mean_terminated_length": 10436.34375,
"completions/min_length": 8886.0,
"completions/min_terminated_length": 8886.0,
"epoch": 0.0441527446300716,
"grad_norm": 1.3848559856414795,
"is_mask": 0.6875,
"is_sample": 0.96875,
"learning_rate": 9.951975957105292e-07,
"loss": -0.0221,
"num_tokens": 2862659.0,
"reward": 0.65625,
"reward_std": 0.5863522887229919,
"rewards/accuracy_reward/mean": 0.0625,
"rewards/accuracy_reward/std": 0.24593468010425568,
"rewards/format_reward/mean": 0.53125,
"rewards/format_reward/std": 0.507007360458374,
"step": 37
},
{
"epoch": 0.045346062052505964,
"grad_norm": 1.3644936084747314,
"learning_rate": 9.94934943404486e-07,
"loss": -0.0,
"step": 38
},
{
"epoch": 0.046539379474940336,
"grad_norm": 1.1380318403244019,
"learning_rate": 9.946653351025574e-07,
"loss": -0.022,
"step": 39
},
{
"epoch": 0.0477326968973747,
"grad_norm": 1.575966715812683,
"learning_rate": 9.943887745939163e-07,
"loss": -0.0223,
"step": 40
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 12842.0,
"completions/max_terminated_length": 12842.0,
"completions/mean_length": 10952.875,
"completions/mean_terminated_length": 10952.875,
"completions/min_length": 8886.0,
"completions/min_terminated_length": 8886.0,
"epoch": 0.04892601431980907,
"grad_norm": 0.5767437815666199,
"is_mask": 0.6875,
"is_sample": 1.0,
"learning_rate": 9.941052657654451e-07,
"loss": -0.0,
"num_tokens": 3148683.0,
"reward": 1.0625,
"reward_std": 0.4053322970867157,
"rewards/accuracy_reward/mean": 0.21875,
"rewards/accuracy_reward/std": 0.420013427734375,
"rewards/format_reward/mean": 0.75,
"rewards/format_reward/std": 0.4399413466453552,
"step": 41
},
{
"epoch": 0.050119331742243436,
"grad_norm": 0.5937145352363586,
"learning_rate": 9.938148126016804e-07,
"loss": -0.0002,
"step": 42
},
{
"epoch": 0.0513126491646778,
"grad_norm": 0.6468254327774048,
"learning_rate": 9.93517419184756e-07,
"loss": 0.0002,
"step": 43
},
{
"epoch": 0.05250596658711217,
"grad_norm": 0.6882703304290771,
"learning_rate": 9.932130896943477e-07,
"loss": 0.0003,
"step": 44
},
{
"completions/clipped_ratio": 0.0,
"completions/max_length": 13055.0,
"completions/max_terminated_length": 13055.0,
"completions/mean_length": 10370.34375,
"completions/mean_terminated_length": 10370.34375,
"completions/min_length": 8880.0,
"completions/min_terminated_length": 8880.0,
"epoch": 0.05369928400954654,
"grad_norm": 0.926037073135376,
"is_mask": 0.65625,
"is_sample": 1.0,
"learning_rate": 9.929018284076127e-07,
"loss": -0.085,
"num_tokens": 3434802.0,
"reward": 0.8854166865348816,
"reward_std": 0.8571243286132812,
"rewards/accuracy_reward/mean": 0.21875,
"rewards/accuracy_reward/std": 0.420013427734375,
"rewards/format_reward/mean": 0.6875,
"rewards/format_reward/std": 0.4709290862083435,
"step": 45
},
{
"epoch": 0.05489260143198091,
"grad_norm": 1.330098271369934,
"learning_rate": 9.925836396991307e-07,
"loss": 0.0001,
"step": 46
},
{
"epoch": 0.05608591885441527,
"grad_norm": 0.9060226082801819,
"learning_rate": 9.922585280408415e-07,
"loss": -0.0855,
"step": 47
},
{
"epoch": 0.057279236276849645,
"grad_norm": 1.2443565130233765,
"learning_rate": 9.919264980019829e-07,
"loss": -0.085,
"step": 48
}
],
"logging_steps": 1.0,
"max_steps": 838,
"num_input_tokens_seen": 3434802,
"num_train_epochs": 1,
"save_steps": 48,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}