ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4020122289657593,
"min": 1.4020122289657593,
"max": 1.425384283065796,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69008.4453125,
"min": 68651.15625,
"max": 77172.5625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.1839863713799,
"min": 77.4006309148265,
"max": 439.9122807017544,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49416.0,
"min": 49072.0,
"max": 50150.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999973.0,
"min": 49872.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999973.0,
"min": 49872.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.431661367416382,
"min": -0.04455912113189697,
"max": 2.5106396675109863,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1427.38525390625,
"min": -5.035180568695068,
"max": 1536.5849609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8262577784934684,
"min": 1.6998885949101068,
"max": 4.009516522931538,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2246.013315975666,
"min": 192.08741122484207,
"max": 2451.2090931534767,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8262577784934684,
"min": 1.6998885949101068,
"max": 4.009516522931538,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2246.013315975666,
"min": 192.08741122484207,
"max": 2451.2090931534767,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01708588835244882,
"min": 0.013998973861028692,
"max": 0.02028737307021705,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05125766505734646,
"min": 0.027997947722057385,
"max": 0.05775148950973137,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06018526628613472,
"min": 0.02427160210079617,
"max": 0.061733924349149066,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18055579885840417,
"min": 0.048608883967002234,
"max": 0.18055579885840417,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7034487655500058e-06,
"min": 3.7034487655500058e-06,
"max": 0.000295272526575825,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1110346296650017e-05,
"min": 1.1110346296650017e-05,
"max": 0.0008436513187828998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123444999999999,
"min": 0.10123444999999999,
"max": 0.19842417499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30370335,
"min": 0.20760619999999996,
"max": 0.5812171,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.159905500000008e-05,
"min": 7.159905500000008e-05,
"max": 0.0049213663325,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021479716500000026,
"min": 0.00021479716500000026,
"max": 0.014062733290000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678218072",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678220682"
},
"total": 2610.2354277249997,
"count": 1,
"self": 0.7141889609993086,
"children": {
"run_training.setup": {
"total": 0.18243856799995228,
"count": 1,
"self": 0.18243856799995228
},
"TrainerController.start_learning": {
"total": 2609.338800196,
"count": 1,
"self": 4.6517087569673095,
"children": {
"TrainerController._reset_env": {
"total": 10.449346287999958,
"count": 1,
"self": 10.449346287999958
},
"TrainerController.advance": {
"total": 2594.058951229033,
"count": 232592,
"self": 4.997317096955612,
"children": {
"env_step": {
"total": 2025.0050671790843,
"count": 232592,
"self": 1698.3696464842064,
"children": {
"SubprocessEnvManager._take_step": {
"total": 323.5420270740044,
"count": 232592,
"self": 17.50086417807745,
"children": {
"TorchPolicy.evaluate": {
"total": 306.04116289592696,
"count": 222946,
"self": 75.55325684098665,
"children": {
"TorchPolicy.sample_actions": {
"total": 230.4879060549403,
"count": 222946,
"self": 230.4879060549403
}
}
}
}
},
"workers": {
"total": 3.093393620873428,
"count": 232592,
"self": 0.0,
"children": {
"worker_root": {
"total": 2599.9792436989646,
"count": 232592,
"is_parallel": true,
"self": 1223.3000844579558,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001209076999998615,
"count": 1,
"is_parallel": true,
"self": 0.00047536800002490054,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007337089999737145,
"count": 2,
"is_parallel": true,
"self": 0.0007337089999737145
}
}
},
"UnityEnvironment.step": {
"total": 0.033507318999966174,
"count": 1,
"is_parallel": true,
"self": 0.00032381399989844795,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024077800003396987,
"count": 1,
"is_parallel": true,
"self": 0.00024077800003396987
},
"communicator.exchange": {
"total": 0.03122050299998591,
"count": 1,
"is_parallel": true,
"self": 0.03122050299998591
},
"steps_from_proto": {
"total": 0.001722224000047845,
"count": 1,
"is_parallel": true,
"self": 0.0003252840000413926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013969400000064525,
"count": 2,
"is_parallel": true,
"self": 0.0013969400000064525
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1376.6791592410088,
"count": 232591,
"is_parallel": true,
"self": 40.58929485600083,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.70012888296361,
"count": 232591,
"is_parallel": true,
"self": 86.70012888296361
},
"communicator.exchange": {
"total": 1149.4463667370132,
"count": 232591,
"is_parallel": true,
"self": 1149.4463667370132
},
"steps_from_proto": {
"total": 99.94336876503127,
"count": 232591,
"is_parallel": true,
"self": 43.24792374302916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.695445022002104,
"count": 465182,
"is_parallel": true,
"self": 56.695445022002104
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 564.0565669529931,
"count": 232592,
"self": 7.123361813032716,
"children": {
"process_trajectory": {
"total": 180.21473560396186,
"count": 232592,
"self": 178.86360881996148,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3511267840003711,
"count": 10,
"self": 1.3511267840003711
}
}
},
"_update_policy": {
"total": 376.7184695359985,
"count": 97,
"self": 317.5616824509966,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.156787085001895,
"count": 2910,
"self": 59.156787085001895
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3429998944047838e-06,
"count": 1,
"self": 1.3429998944047838e-06
},
"TrainerController._save_models": {
"total": 0.17879257899994627,
"count": 1,
"self": 0.002652647999639157,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17613993100030712,
"count": 1,
"self": 0.17613993100030712
}
}
}
}
}
}
}
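The "gauges" block above stores, for each tracked statistic, the latest value plus its min/max over the 40 summary periods of the run, while the nested timer tree records wall-clock totals, call counts, and self-time for each ML-Agents component. As a minimal sketch of how this file can be inspected (assuming it sits at run_logs/timers.json relative to the working directory; the path is taken from the repo layout, not from the file itself), the standard json module is enough:

import json

# Load the ML-Agents timer/gauge dump produced by this training run.
# Path is an assumption based on the repository layout shown above.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge holds the latest value plus min/max over "count" summary periods.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:g} "
          f"(min={gauge['min']:g}, max={gauge['max']:g}, n={gauge['count']})")

# The timer tree is recursive: every node has total/count/self and optional children.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node['total']:.2f}s, count={node['count']}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)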