{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38326001167297363,
"min": 0.3646904528141022,
"max": 1.3517982959747314,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11528.4609375,
"min": 10818.177734375,
"max": 41008.15234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5532466769218445,
"min": -0.07840621471405029,
"max": 0.5565416812896729,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 153.80258178710938,
"min": -18.97430419921875,
"max": 153.80258178710938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01744486391544342,
"min": -0.026949116960167885,
"max": 0.54945969581604,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.849672317504883,
"min": -7.276261329650879,
"max": 130.22195434570312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0671634659485703,
"min": 0.06508727559481658,
"max": 0.07482523151362935,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9402885232799841,
"min": 0.5064664202211719,
"max": 1.1223784727044404,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01649443761700587,
"min": 0.0007308875295051967,
"max": 0.01649443761700587,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23092212663808218,
"min": 0.009501537883567557,
"max": 0.23092212663808218,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.67943315450714e-06,
"min": 7.67943315450714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010751206416309996,
"min": 0.00010751206416309996,
"max": 0.0037587856470715003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255977857142855,
"min": 0.10255977857142855,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358368999999997,
"min": 1.3886848,
"max": 2.6529285000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026572187928571424,
"min": 0.00026572187928571424,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037201063099999996,
"min": 0.0037201063099999996,
"max": 0.12530755715,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01760769449174404,
"min": 0.017308497801423073,
"max": 0.7073035836219788,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.24650773406028748,
"min": 0.24231895804405212,
"max": 4.951125144958496,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 321.39772727272725,
"min": 321.39772727272725,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28283.0,
"min": 15984.0,
"max": 33803.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6786022513088854,
"min": -1.0000000521540642,
"max": 1.6786022513088854,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.71699811518192,
"min": -29.64100157469511,
"max": 147.71699811518192,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6786022513088854,
"min": -1.0000000521540642,
"max": 1.6786022513088854,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.71699811518192,
"min": -29.64100157469511,
"max": 147.71699811518192,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.058175617599441794,
"min": 0.058175617599441794,
"max": 14.939263613894582,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.119454348750878,
"min": 5.119454348750878,
"max": 239.0282178223133,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1664325614",
"python_version": "3.8.13 (default, Mar 28 2022, 06:59:08) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\erich\\miniconda3\\envs\\hf-ml-agents\\Scripts\\mlagents-learn .\\config\\ppo\\PyramidsRND.yaml --env .\\trained-envs-executables\\windows\\Pyramids\\ --run-id first training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.12.1",
"numpy_version": "1.23.1",
"end_time_seconds": "1664327737"
},
"total": 2123.5308058,
"count": 1,
"self": 0.7082409999998163,
"children": {
"run_training.setup": {
"total": 0.13103909999999974,
"count": 1,
"self": 0.13103909999999974
},
"TrainerController.start_learning": {
"total": 2122.6915257,
"count": 1,
"self": 1.52914479999572,
"children": {
"TrainerController._reset_env": {
"total": 8.6770876,
"count": 1,
"self": 8.6770876
},
"TrainerController.advance": {
"total": 2112.352217500005,
"count": 63875,
"self": 1.3369779000145172,
"children": {
"env_step": {
"total": 1023.3772189999993,
"count": 63875,
"self": 830.224870199926,
"children": {
"SubprocessEnvManager._take_step": {
"total": 192.16534390005074,
"count": 63875,
"self": 5.35020100007813,
"children": {
"TorchPolicy.evaluate": {
"total": 186.8151428999726,
"count": 62577,
"self": 80.90848829997962,
"children": {
"TorchPolicy.sample_actions": {
"total": 105.90665459999299,
"count": 62577,
"self": 105.90665459999299
}
}
}
}
},
"workers": {
"total": 0.9870049000225123,
"count": 63875,
"self": 0.0,
"children": {
"worker_root": {
"total": 2112.9039863999556,
"count": 63875,
"is_parallel": true,
"self": 1392.6909349999332,
"children": {
"steps_from_proto": {
"total": 0.001105100000000192,
"count": 1,
"is_parallel": true,
"self": 0.0002996999999993477,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008054000000008443,
"count": 8,
"is_parallel": true,
"self": 0.0008054000000008443
}
}
},
"UnityEnvironment.step": {
"total": 720.2119463000224,
"count": 63875,
"is_parallel": true,
"self": 19.97030120003717,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 17.104151000009637,
"count": 63875,
"is_parallel": true,
"self": 17.104151000009637
},
"communicator.exchange": {
"total": 626.5370090999984,
"count": 63875,
"is_parallel": true,
"self": 626.5370090999984
},
"steps_from_proto": {
"total": 56.60048499997715,
"count": 63875,
"is_parallel": true,
"self": 14.52378619997161,
"children": {
"_process_rank_one_or_two_observation": {
"total": 42.07669880000554,
"count": 511000,
"is_parallel": true,
"self": 42.07669880000554
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1087.6380205999913,
"count": 63875,
"self": 3.020272999999179,
"children": {
"process_trajectory": {
"total": 209.0864455999906,
"count": 63875,
"self": 208.77805729999045,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3083883000001606,
"count": 2,
"self": 0.3083883000001606
}
}
},
"_update_policy": {
"total": 875.5313020000015,
"count": 458,
"self": 230.43345849998923,
"children": {
"TorchPPOOptimizer.update": {
"total": 645.0978435000122,
"count": 22767,
"self": 645.0978435000122
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.999997251317836e-07,
"count": 1,
"self": 7.999997251317836e-07
},
"TrainerController._save_models": {
"total": 0.13307499999973516,
"count": 1,
"self": 0.006068600000162405,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12700639999957275,
"count": 1,
"self": 0.12700639999957275
}
}
}
}
}
}
}