{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.796678066253662,
"min": 1.6508715152740479,
"max": 3.2956461906433105,
"count": 639
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35818.57421875,
"min": 30144.603515625,
"max": 113694.0,
"count": 639
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 58.654761904761905,
"min": 37.40769230769231,
"max": 999.0,
"count": 639
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19708.0,
"min": 16716.0,
"max": 23924.0,
"count": 639
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1455.7522662230756,
"min": 1185.6997976068483,
"max": 1503.2913772759614,
"count": 631
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 244566.38072547672,
"min": 2372.8602712855063,
"max": 362195.66942273127,
"count": 631
},
"SoccerTwos.Step.mean": {
"value": 6389884.0,
"min": 9982.0,
"max": 6389884.0,
"count": 639
},
"SoccerTwos.Step.sum": {
"value": 6389884.0,
"min": 9982.0,
"max": 6389884.0,
"count": 639
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.003226757748052478,
"min": -0.12121902406215668,
"max": 0.1877116858959198,
"count": 639
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.5420953035354614,
"min": -28.00159454345703,
"max": 36.97920227050781,
"count": 639
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0032982449047267437,
"min": -0.12614789605140686,
"max": 0.1865699142217636,
"count": 639
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.5541051626205444,
"min": -29.14016342163086,
"max": 36.7542724609375,
"count": 639
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 639
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 639
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.17651428637050448,
"min": -0.7869333326816559,
"max": 0.6341866610778703,
"count": 639
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 29.65440011024475,
"min": -54.91200006008148,
"max": 55.27120041847229,
"count": 639
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.17651428637050448,
"min": -0.7869333326816559,
"max": 0.6341866610778703,
"count": 639
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 29.65440011024475,
"min": -54.91200006008148,
"max": 55.27120041847229,
"count": 639
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 639
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 639
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012226999139723678,
"min": 0.010700129181107817,
"max": 0.023625865237166484,
"count": 307
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012226999139723678,
"min": 0.010700129181107817,
"max": 0.023625865237166484,
"count": 307
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10100149686137835,
"min": 0.00040658007201272995,
"max": 0.12078153441349665,
"count": 307
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10100149686137835,
"min": 0.00040658007201272995,
"max": 0.12078153441349665,
"count": 307
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10279191260536512,
"min": 0.00039623022894375026,
"max": 0.1227528507510821,
"count": 307
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10279191260536512,
"min": 0.00039623022894375026,
"max": 0.1227528507510821,
"count": 307
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 307
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 307
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 307
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 307
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 307
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 307
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684353327",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/kent/conda3/envs/rl-soccer2/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684362960"
},
"total": 9633.589431040004,
"count": 1,
"self": 0.0038056609919294715,
"children": {
"run_training.setup": {
"total": 0.007691809005336836,
"count": 1,
"self": 0.007691809005336836
},
"TrainerController.start_learning": {
"total": 9633.577933570006,
"count": 1,
"self": 6.980726071502431,
"children": {
"TrainerController._reset_env": {
"total": 2.5145168869494228,
"count": 32,
"self": 2.5145168869494228
},
"TrainerController.advance": {
"total": 9623.713544371552,
"count": 437536,
"self": 6.1688790749176405,
"children": {
"env_step": {
"total": 7378.757593222574,
"count": 437536,
"self": 6198.510448100147,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1175.7724007374345,
"count": 437536,
"self": 32.29881483585632,
"children": {
"TorchPolicy.evaluate": {
"total": 1143.4735859015782,
"count": 807176,
"self": 1143.4735859015782
}
}
},
"workers": {
"total": 4.474744384991936,
"count": 437536,
"self": 0.0,
"children": {
"worker_root": {
"total": 9618.226115238416,
"count": 437536,
"is_parallel": true,
"self": 4240.423115636455,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0034081870107911527,
"count": 2,
"is_parallel": true,
"self": 0.0005932310014031827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00281495600938797,
"count": 8,
"is_parallel": true,
"self": 0.00281495600938797
}
}
},
"UnityEnvironment.step": {
"total": 0.026518134996877052,
"count": 1,
"is_parallel": true,
"self": 0.0009760199900483713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006545910000568256,
"count": 1,
"is_parallel": true,
"self": 0.0006545910000568256
},
"communicator.exchange": {
"total": 0.022079807007685304,
"count": 1,
"is_parallel": true,
"self": 0.022079807007685304
},
"steps_from_proto": {
"total": 0.0028077169990865514,
"count": 2,
"is_parallel": true,
"self": 0.00042655297147575766,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023811640276107937,
"count": 8,
"is_parallel": true,
"self": 0.0023811640276107937
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5377.716560485875,
"count": 437535,
"is_parallel": true,
"self": 389.5137392156612,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 233.52408491376264,
"count": 437535,
"is_parallel": true,
"self": 233.52408491376264
},
"communicator.exchange": {
"total": 3662.8266292031767,
"count": 437535,
"is_parallel": true,
"self": 3662.8266292031767
},
"steps_from_proto": {
"total": 1091.8521071532741,
"count": 875070,
"is_parallel": true,
"self": 158.82564080308657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 933.0264663501875,
"count": 3500280,
"is_parallel": true,
"self": 933.0264663501875
}
}
}
}
},
"steps_from_proto": {
"total": 0.08643911608669441,
"count": 62,
"is_parallel": true,
"self": 0.012416926969308406,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.074022189117386,
"count": 248,
"is_parallel": true,
"self": 0.074022189117386
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2238.787072074061,
"count": 437536,
"self": 57.899110328624374,
"children": {
"process_trajectory": {
"total": 657.2002860603534,
"count": 437536,
"self": 654.284158256356,
"children": {
"RLTrainer._checkpoint": {
"total": 2.9161278039973695,
"count": 12,
"self": 2.9161278039973695
}
}
},
"_update_policy": {
"total": 1523.6876756850834,
"count": 308,
"self": 765.2689602988976,
"children": {
"TorchPOCAOptimizer.update": {
"total": 758.4187153861858,
"count": 9210,
"self": 758.4187153861858
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.899943739175797e-07,
"count": 1,
"self": 7.899943739175797e-07
},
"TrainerController._save_models": {
"total": 0.3691454500076361,
"count": 1,
"self": 0.0015013960073702037,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3676440540002659,
"count": 1,
"self": 0.3676440540002659
}
}
}
}
}
}
}