{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.657888412475586,
"min": 2.657888412475586,
"max": 3.2957236766815186,
"count": 175
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 55539.23828125,
"min": 14741.529296875,
"max": 105463.15625,
"count": 175
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 75.73846153846154,
"min": 65.14864864864865,
"max": 999.0,
"count": 175
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19692.0,
"min": 4548.0,
"max": 30124.0,
"count": 175
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1386.2681286181073,
"min": 1200.2449734819195,
"max": 1396.1038562938586,
"count": 170
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 180214.85672035394,
"min": 2400.489946963839,
"max": 205106.69011488685,
"count": 170
},
"SoccerTwos.Step.mean": {
"value": 1749992.0,
"min": 9078.0,
"max": 1749992.0,
"count": 175
},
"SoccerTwos.Step.sum": {
"value": 1749992.0,
"min": 9078.0,
"max": 1749992.0,
"count": 175
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.015134832821786404,
"min": -0.03400610014796257,
"max": 0.22032798826694489,
"count": 175
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.967528223991394,
"min": -2.22676157951355,
"max": 25.89499855041504,
"count": 175
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.016398712992668152,
"min": -0.0341365747153759,
"max": 0.22337739169597626,
"count": 175
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 2.1318325996398926,
"min": -1.9799213409423828,
"max": 26.016578674316406,
"count": 175
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 175
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 175
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1394676923751831,
"min": -0.6666666666666666,
"max": 0.4782557370232754,
"count": 175
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -18.130800008773804,
"min": -22.25000011920929,
"max": 46.45100021362305,
"count": 175
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1394676923751831,
"min": -0.6666666666666666,
"max": 0.4782557370232754,
"count": 175
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -18.130800008773804,
"min": -22.25000011920929,
"max": 46.45100021362305,
"count": 175
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 175
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 175
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014425198807536314,
"min": 0.011495149041244683,
"max": 0.022163717014094194,
"count": 82
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014425198807536314,
"min": 0.011495149041244683,
"max": 0.022163717014094194,
"count": 82
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06796828570465246,
"min": 0.0005441805726150051,
"max": 0.07227968672911327,
"count": 82
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06796828570465246,
"min": 0.0005441805726150051,
"max": 0.07227968672911327,
"count": 82
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06986609995365142,
"min": 0.0005505990595944847,
"max": 0.07365259279807408,
"count": 82
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06986609995365142,
"min": 0.0005505990595944847,
"max": 0.07365259279807408,
"count": 82
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 82
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 82
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 82
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 82
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 82
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 82
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675496556",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675500123"
},
"total": 3567.1157116850004,
"count": 1,
"self": 0.38995706100013194,
"children": {
"run_training.setup": {
"total": 0.12406606999991254,
"count": 1,
"self": 0.12406606999991254
},
"TrainerController.start_learning": {
"total": 3566.6016885540002,
"count": 1,
"self": 2.5552655759602203,
"children": {
"TrainerController._reset_env": {
"total": 11.077153099000952,
"count": 7,
"self": 11.077153099000952
},
"TrainerController.advance": {
"total": 3552.968809056039,
"count": 114262,
"self": 2.863754190096188,
"children": {
"env_step": {
"total": 2787.9899902270085,
"count": 114262,
"self": 2176.454435612988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 609.9606745129976,
"count": 114262,
"self": 18.856439643956037,
"children": {
"TorchPolicy.evaluate": {
"total": 591.1042348690415,
"count": 222602,
"self": 117.86353533415661,
"children": {
"TorchPolicy.sample_actions": {
"total": 473.2406995348849,
"count": 222602,
"self": 473.2406995348849
}
}
}
}
},
"workers": {
"total": 1.5748801010225861,
"count": 114261,
"self": 0.0,
"children": {
"worker_root": {
"total": 3559.806630536064,
"count": 114261,
"is_parallel": true,
"self": 1714.8426980490776,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006528958999979295,
"count": 2,
"is_parallel": true,
"self": 0.0034178479999127376,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0031111110000665576,
"count": 8,
"is_parallel": true,
"self": 0.0031111110000665576
}
}
},
"UnityEnvironment.step": {
"total": 0.07459809600004519,
"count": 1,
"is_parallel": true,
"self": 0.0010330710000516774,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006686719999606794,
"count": 1,
"is_parallel": true,
"self": 0.0006686719999606794
},
"communicator.exchange": {
"total": 0.05843402600009995,
"count": 1,
"is_parallel": true,
"self": 0.05843402600009995
},
"steps_from_proto": {
"total": 0.014462326999932884,
"count": 2,
"is_parallel": true,
"self": 0.008763741999928243,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005698585000004641,
"count": 8,
"is_parallel": true,
"self": 0.005698585000004641
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1844.9486983879863,
"count": 114260,
"is_parallel": true,
"self": 101.10968987900787,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.27898529695847,
"count": 114260,
"is_parallel": true,
"self": 72.27898529695847
},
"communicator.exchange": {
"total": 1329.5433840110036,
"count": 114260,
"is_parallel": true,
"self": 1329.5433840110036
},
"steps_from_proto": {
"total": 342.01663920101623,
"count": 228520,
"is_parallel": true,
"self": 68.58797477892244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 273.4286644220938,
"count": 914080,
"is_parallel": true,
"self": 273.4286644220938
}
}
}
}
},
"steps_from_proto": {
"total": 0.015234099000053902,
"count": 12,
"is_parallel": true,
"self": 0.0030179560010310524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01221614299902285,
"count": 48,
"is_parallel": true,
"self": 0.01221614299902285
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 762.1150646389342,
"count": 114261,
"self": 19.26499103793367,
"children": {
"process_trajectory": {
"total": 257.3978115079997,
"count": 114261,
"self": 256.740726689,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6570848189996923,
"count": 3,
"self": 0.6570848189996923
}
}
},
"_update_policy": {
"total": 485.45226209300085,
"count": 82,
"self": 297.09873524500824,
"children": {
"TorchPOCAOptimizer.update": {
"total": 188.3535268479926,
"count": 2472,
"self": 188.3535268479926
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.168999915535096e-06,
"count": 1,
"self": 1.168999915535096e-06
},
"TrainerController._save_models": {
"total": 0.0004596540002239635,
"count": 1,
"self": 3.9323000237345695e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0004203309999866178,
"count": 1,
"self": 0.0004203309999866178
}
}
}
}
}
}