{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7714424729347229,
"min": 0.7714424729347229,
"max": 2.864002227783203,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7357.24658203125,
"min": 7357.24658203125,
"max": 29330.24609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.001533508300781,
"min": 0.37358927726745605,
"max": 13.001533508300781,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2535.299072265625,
"min": 72.476318359375,
"max": 2620.111328125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06310037083526635,
"min": 0.06310037083526635,
"max": 0.07278948185657831,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2524014833410654,
"min": 0.2524014833410654,
"max": 0.3571343759175244,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20431435013226434,
"min": 0.12397251939903214,
"max": 0.29946552713712055,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8172574005290574,
"min": 0.4958900775961286,
"max": 1.4568299835219103,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.65909090909091,
"min": 3.727272727272727,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1129.0,
"min": 164.0,
"max": 1398.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.65909090909091,
"min": 3.727272727272727,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1129.0,
"min": 164.0,
"max": 1398.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674306955",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674307380"
},
"total": 424.78033899900004,
"count": 1,
"self": 0.5662534810001034,
"children": {
"run_training.setup": {
"total": 0.1118649830000038,
"count": 1,
"self": 0.1118649830000038
},
"TrainerController.start_learning": {
"total": 424.10222053499996,
"count": 1,
"self": 0.557742935994554,
"children": {
"TrainerController._reset_env": {
"total": 9.697719118000009,
"count": 1,
"self": 9.697719118000009
},
"TrainerController.advance": {
"total": 413.7325065680054,
"count": 18201,
"self": 0.26897888600177566,
"children": {
"env_step": {
"total": 413.46352768200364,
"count": 18201,
"self": 271.8077081090003,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.38111366400423,
"count": 18201,
"self": 1.4108565189987132,
"children": {
"TorchPolicy.evaluate": {
"total": 139.97025714500552,
"count": 18201,
"self": 32.3085324339996,
"children": {
"TorchPolicy.sample_actions": {
"total": 107.66172471100592,
"count": 18201,
"self": 107.66172471100592
}
}
}
}
},
"workers": {
"total": 0.2747059089990955,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 422.8349134300019,
"count": 18201,
"is_parallel": true,
"self": 200.7698799529972,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.01091956699997354,
"count": 1,
"is_parallel": true,
"self": 0.006076662999987548,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004842903999985992,
"count": 10,
"is_parallel": true,
"self": 0.004842903999985992
}
}
},
"UnityEnvironment.step": {
"total": 0.03795020699999441,
"count": 1,
"is_parallel": true,
"self": 0.0003324320000501757,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003563889999895764,
"count": 1,
"is_parallel": true,
"self": 0.0003563889999895764
},
"communicator.exchange": {
"total": 0.03621890699997721,
"count": 1,
"is_parallel": true,
"self": 0.03621890699997721
},
"steps_from_proto": {
"total": 0.0010424789999774475,
"count": 1,
"is_parallel": true,
"self": 0.00026391500003342117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007785639999440264,
"count": 10,
"is_parallel": true,
"self": 0.0007785639999440264
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 222.0650334770047,
"count": 18200,
"is_parallel": true,
"self": 8.18113887500067,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.795634956996452,
"count": 18200,
"is_parallel": true,
"self": 4.795634956996452
},
"communicator.exchange": {
"total": 177.57237945400473,
"count": 18200,
"is_parallel": true,
"self": 177.57237945400473
},
"steps_from_proto": {
"total": 31.51588019100285,
"count": 18200,
"is_parallel": true,
"self": 6.4810426740008324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.034837517002018,
"count": 182000,
"is_parallel": true,
"self": 25.034837517002018
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.253699996250361e-05,
"count": 1,
"self": 6.253699996250361e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 411.0266416509798,
"count": 341952,
"is_parallel": true,
"self": 8.83277317796393,
"children": {
"process_trajectory": {
"total": 237.44195398901604,
"count": 341952,
"is_parallel": true,
"self": 236.7433664850161,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6985875039999314,
"count": 4,
"is_parallel": true,
"self": 0.6985875039999314
}
}
},
"_update_policy": {
"total": 164.7519144839998,
"count": 90,
"is_parallel": true,
"self": 41.63945072800027,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.11246375599953,
"count": 4587,
"is_parallel": true,
"self": 123.11246375599953
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.114189376000013,
"count": 1,
"self": 0.0008465380000188816,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11334283799999412,
"count": 1,
"self": 0.11334283799999412
}
}
}
}
}
}
}