{
"name": "root",
"gauges": {
"Worm.Policy.Entropy.mean": {
"value": 0.8221731185913086,
"min": 0.8221731185913086,
"max": 1.418938398361206,
"count": 233
},
"Worm.Policy.Entropy.sum": {
"value": 24665.193359375,
"min": 24665.193359375,
"max": 42568.15234375,
"count": 233
},
"Worm.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 233
},
"Worm.Environment.EpisodeLength.sum": {
"value": 29970.0,
"min": 29970.0,
"max": 29970.0,
"count": 233
},
"Worm.Step.mean": {
"value": 6989000.0,
"min": 29000.0,
"max": 6989000.0,
"count": 233
},
"Worm.Step.sum": {
"value": 6989000.0,
"min": 29000.0,
"max": 6989000.0,
"count": 233
},
"Worm.Policy.ExtrinsicValueEstimate.mean": {
"value": 184.09725952148438,
"min": -0.0652509480714798,
"max": 186.07147216796875,
"count": 233
},
"Worm.Policy.ExtrinsicValueEstimate.sum": {
"value": 5522.91796875,
"min": -1.8922775983810425,
"max": 5582.14404296875,
"count": 233
},
"Worm.Environment.CumulativeReward.mean": {
"value": 906.8447224934896,
"min": 0.24607567513982456,
"max": 927.7546081542969,
"count": 233
},
"Worm.Environment.CumulativeReward.sum": {
"value": 27205.341674804688,
"min": 7.3822702541947365,
"max": 27832.638244628906,
"count": 233
},
"Worm.Policy.ExtrinsicReward.mean": {
"value": 906.8447224934896,
"min": 0.24607567513982456,
"max": 927.7546081542969,
"count": 233
},
"Worm.Policy.ExtrinsicReward.sum": {
"value": 27205.341674804688,
"min": 7.3822702541947365,
"max": 27832.638244628906,
"count": 233
},
"Worm.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 233
},
"Worm.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 233
},
"Worm.Losses.PolicyLoss.mean": {
"value": 0.019184567398735367,
"min": 0.012249553687849715,
"max": 0.02299641739643578,
"count": 232
},
"Worm.Losses.PolicyLoss.sum": {
"value": 0.019184567398735367,
"min": 0.012249553687849715,
"max": 0.02299641739643578,
"count": 232
},
"Worm.Losses.ValueLoss.mean": {
"value": 12.055086635407948,
"min": 0.0013937619791942694,
"max": 13.641971610841297,
"count": 232
},
"Worm.Losses.ValueLoss.sum": {
"value": 12.055086635407948,
"min": 0.0013937619791942694,
"max": 13.641971610841297,
"count": 232
},
"Worm.Policy.LearningRate.mean": {
"value": 1.7143851428571302e-06,
"min": 1.7143851428571302e-06,
"max": 0.00029871428614285713,
"count": 232
},
"Worm.Policy.LearningRate.sum": {
"value": 1.7143851428571302e-06,
"min": 1.7143851428571302e-06,
"max": 0.00029871428614285713,
"count": 232
},
"Worm.Policy.Epsilon.mean": {
"value": 0.10057142857142858,
"min": 0.10057142857142858,
"max": 0.1995714285714285,
"count": 232
},
"Worm.Policy.Epsilon.sum": {
"value": 0.10057142857142858,
"min": 0.10057142857142858,
"max": 0.1995714285714285,
"count": 232
},
"Worm.Policy.Beta.mean": {
"value": 3.851428571428551e-05,
"min": 3.851428571428551e-05,
"max": 0.004978614285714285,
"count": 232
},
"Worm.Policy.Beta.sum": {
"value": 3.851428571428551e-05,
"min": 3.851428571428551e-05,
"max": 0.004978614285714285,
"count": 232
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677339145",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Worm.yaml --env=./training-envs-executables/linux/Worm/Worm --run-id=Worm1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677348017"
},
"total": 8871.884378606,
"count": 1,
"self": 0.7404704019991186,
"children": {
"run_training.setup": {
"total": 0.11830611400000635,
"count": 1,
"self": 0.11830611400000635
},
"TrainerController.start_learning": {
"total": 8871.025602090001,
"count": 1,
"self": 12.78901379904164,
"children": {
"TrainerController._reset_env": {
"total": 9.554177722999952,
"count": 1,
"self": 9.554177722999952
},
"TrainerController.advance": {
"total": 8848.513144629962,
"count": 701000,
"self": 13.301491338304913,
"children": {
"env_step": {
"total": 7211.55251523357,
"count": 701000,
"self": 6295.088516940831,
"children": {
"SubprocessEnvManager._take_step": {
"total": 908.3159043414037,
"count": 701000,
"self": 58.44413287145119,
"children": {
"TorchPolicy.evaluate": {
"total": 849.8717714699525,
"count": 701000,
"self": 215.4783726915581,
"children": {
"TorchPolicy.sample_actions": {
"total": 634.3933987783944,
"count": 701000,
"self": 634.3933987783944
}
}
}
}
},
"workers": {
"total": 8.148093951335227,
"count": 701000,
"self": 0.0,
"children": {
"worker_root": {
"total": 8847.38566314735,
"count": 701000,
"is_parallel": true,
"self": 3420.5015540230297,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033676790000072288,
"count": 1,
"is_parallel": true,
"self": 0.0003364730000612326,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003031205999945996,
"count": 2,
"is_parallel": true,
"self": 0.003031205999945996
}
}
},
"UnityEnvironment.step": {
"total": 0.05471578399999544,
"count": 1,
"is_parallel": true,
"self": 0.0004163020000760298,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041888900000230933,
"count": 1,
"is_parallel": true,
"self": 0.00041888900000230933
},
"communicator.exchange": {
"total": 0.05328967199989165,
"count": 1,
"is_parallel": true,
"self": 0.05328967199989165
},
"steps_from_proto": {
"total": 0.0005909210000254461,
"count": 1,
"is_parallel": true,
"self": 0.00022206899996035645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00036885200006508967,
"count": 2,
"is_parallel": true,
"self": 0.00036885200006508967
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5426.88410912432,
"count": 700999,
"is_parallel": true,
"self": 125.59259609108722,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 213.36215607184738,
"count": 700999,
"is_parallel": true,
"self": 213.36215607184738
},
"communicator.exchange": {
"total": 4792.88967906307,
"count": 700999,
"is_parallel": true,
"self": 4792.88967906307
},
"steps_from_proto": {
"total": 295.03967789831574,
"count": 700999,
"is_parallel": true,
"self": 111.48677653683853,
"children": {
"_process_rank_one_or_two_observation": {
"total": 183.5529013614772,
"count": 1401998,
"is_parallel": true,
"self": 183.5529013614772
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1623.6591380580862,
"count": 701000,
"self": 14.372375751952404,
"children": {
"process_trajectory": {
"total": 426.2992628431318,
"count": 701000,
"self": 424.64527483613347,
"children": {
"RLTrainer._checkpoint": {
"total": 1.653988006998361,
"count": 14,
"self": 1.653988006998361
}
}
},
"_update_policy": {
"total": 1182.987499463002,
"count": 233,
"self": 997.1830883320675,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.80441113093445,
"count": 9786,
"self": 185.80441113093445
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2519994925241917e-06,
"count": 1,
"self": 1.2519994925241917e-06
},
"TrainerController._save_models": {
"total": 0.16926468599922373,
"count": 1,
"self": 0.0028902989979542326,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1663743870012695,
"count": 1,
"self": 0.1663743870012695
}
}
}
}
}
}
}