poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.1111496686935425,
"min": 1.1111496686935425,
"max": 3.2737064361572266,
"count": 140
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 111612.765625,
"min": 110508.125,
"max": 380168.96875,
"count": 140
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 69.36974789915966,
"min": 67.05177111716621,
"max": 939.1538461538462,
"count": 140
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 99060.0,
"min": 95652.0,
"max": 103312.0,
"count": 140
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1620.0112989184006,
"min": 1198.1827098176377,
"max": 1624.1894708270681,
"count": 140
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 1156688.067427738,
"min": 4793.2610647804995,
"max": 1175263.0834165935,
"count": 140
},
"SoccerTwos.Step.mean": {
"value": 6999958.0,
"min": 49368.0,
"max": 6999958.0,
"count": 140
},
"SoccerTwos.Step.sum": {
"value": 6999958.0,
"min": 49368.0,
"max": 6999958.0,
"count": 140
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01854032836854458,
"min": -0.0850742906332016,
"max": 0.18131530284881592,
"count": 140
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -13.219253540039062,
"min": -54.193031311035156,
"max": 81.95451354980469,
"count": 140
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01944023370742798,
"min": -0.09013748168945312,
"max": 0.18171048164367676,
"count": 140
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -13.860886573791504,
"min": -56.44042205810547,
"max": 82.13314056396484,
"count": 140
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 140
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 140
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04991304422996489,
"min": -0.14181917824157297,
"max": 0.3555840008854866,
"count": 140
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 35.588000535964966,
"min": -85.3135998249054,
"max": 88.89600022137165,
"count": 140
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04991304422996489,
"min": -0.14181917824157297,
"max": 0.3555840008854866,
"count": 140
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 35.588000535964966,
"min": -85.3135998249054,
"max": 88.89600022137165,
"count": 140
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017009093132801353,
"min": 0.016676689825956905,
"max": 0.038463993404099416,
"count": 140
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.034018186265602705,
"min": 0.03381389796035364,
"max": 0.11539198021229824,
"count": 140
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09284576013684273,
"min": 0.0006058299935345228,
"max": 0.09538967862725257,
"count": 140
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.18569152027368546,
"min": 0.0017943214339902625,
"max": 0.28615084499120713,
"count": 140
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09270931206643582,
"min": 0.0007139226215076632,
"max": 0.0956115986406803,
"count": 140
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.18541862413287163,
"min": 0.001950476081110537,
"max": 0.2868347959220409,
"count": 140
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 1.8214567785857097e-06,
"min": 1.8214567785857097e-06,
"max": 0.0004977067147443714,
"count": 140
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 3.6429135571714194e-06,
"min": 3.6429135571714194e-06,
"max": 0.0014725147197827714,
"count": 140
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10036427142857145,
"min": 0.10036427142857145,
"max": 0.19954134285714287,
"count": 140
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2007285428571429,
"min": 0.2007285428571429,
"max": 0.5945029428571429,
"count": 140
},
"SoccerTwos.Policy.Beta.mean": {
"value": 2.8177144285714247e-05,
"min": 2.8177144285714247e-05,
"max": 0.004977113008571428,
"count": 140
},
"SoccerTwos.Policy.Beta.sum": {
"value": 5.6354288571428495e-05,
"min": 5.6354288571428495e-05,
"max": 0.014725696848571428,
"count": 140
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 140
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 140
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710029953",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\ThinkPad\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn .\\config\\poca\\SoccerTwos.yaml --env .\\training-envs-executables\\SoccerTwos.exe --run-id SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cpu",
"numpy_version": "1.22.3",
"end_time_seconds": "1710092643"
},
"total": 62690.205816400005,
"count": 1,
"self": 1.5007168000156526,
"children": {
"run_training.setup": {
"total": 0.2303831999997783,
"count": 1,
"self": 0.2303831999997783
},
"TrainerController.start_learning": {
"total": 62688.47471639999,
"count": 1,
"self": 15.482313503540354,
"children": {
"TrainerController._reset_env": {
"total": 14.328854400015189,
"count": 35,
"self": 14.328854400015189
},
"TrainerController.advance": {
"total": 62658.390361796424,
"count": 470579,
"self": 16.80483680150064,
"children": {
"env_step": {
"total": 13982.906283896229,
"count": 470579,
"self": 10639.312883600187,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3333.282808997774,
"count": 470579,
"self": 111.35021650398812,
"children": {
"TorchPolicy.evaluate": {
"total": 3221.9325924937857,
"count": 883156,
"self": 3221.9325924937857
}
}
},
"workers": {
"total": 10.310591298268264,
"count": 470579,
"self": 0.0,
"children": {
"worker_root": {
"total": 62650.755524501685,
"count": 470579,
"is_parallel": true,
"self": 54088.76279749896,
"children": {
"steps_from_proto": {
"total": 0.11634059997959412,
"count": 70,
"is_parallel": true,
"self": 0.03372089993354166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08261970004605246,
"count": 280,
"is_parallel": true,
"self": 0.08261970004605246
}
}
},
"UnityEnvironment.step": {
"total": 8561.87638640275,
"count": 470579,
"is_parallel": true,
"self": 467.88636530699114,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 491.09489839930757,
"count": 470579,
"is_parallel": true,
"self": 491.09489839930757
},
"communicator.exchange": {
"total": 5946.11686529972,
"count": 470579,
"is_parallel": true,
"self": 5946.11686529972
},
"steps_from_proto": {
"total": 1656.7782573967306,
"count": 941158,
"is_parallel": true,
"self": 328.72728599630864,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1328.050971400422,
"count": 3764632,
"is_parallel": true,
"self": 1328.050971400422
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 48658.6792410987,
"count": 470579,
"self": 115.30534269938653,
"children": {
"process_trajectory": {
"total": 3572.8538944993197,
"count": 470579,
"self": 3569.30090909932,
"children": {
"RLTrainer._checkpoint": {
"total": 3.5529853999996703,
"count": 14,
"self": 3.5529853999996703
}
}
},
"_update_policy": {
"total": 44970.52000389999,
"count": 337,
"self": 2890.5338558003423,
"children": {
"TorchPOCAOptimizer.update": {
"total": 42079.98614809965,
"count": 16850,
"self": 42079.98614809965
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.300009898841381e-06,
"count": 1,
"self": 1.300009898841381e-06
},
"TrainerController._save_models": {
"total": 0.27318540000123903,
"count": 1,
"self": 0.02383410000766162,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2493512999935774,
"count": 1,
"self": 0.2493512999935774
}
}
}
}
}
}
}
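
Note: the JSON above is the complete timers.json produced by ML-Agents for this run. As a minimal sketch (not part of the original log), the snippet below shows one way to inspect it with Python's standard library; the relative path "run_logs/timers.json" is assumed from the repository layout shown above.

    import json

    # Load the raw timer/gauge log written by mlagents-learn (path is an assumption).
    with open("run_logs/timers.json") as f:
        timers = json.load(f)

    # Each gauge stores the last reported value plus its min/max and the number
    # of summary writes ("count", 140 in this run).
    for name, gauge in timers["gauges"].items():
        print(f'{name}: value={gauge["value"]:.4f} '
              f'(min={gauge["min"]:.4f}, max={gauge["max"]:.4f}, count={gauge["count"]})')

    # The timer tree nests wall-clock seconds per call site under "children";
    # "total" includes children, "self" excludes them.
    def walk(node, name="root", depth=0):
        print(f'{"  " * depth}{name}: total={node.get("total", 0.0):.1f}s '
              f'(self={node.get("self", 0.0):.1f}s, count={node.get("count", 1)})')
        for child_name, child in node.get("children", {}).items():
            walk(child, child_name, depth + 1)

    walk(timers)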