{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7669917941093445,
"min": 0.7669917941093445,
"max": 1.4810545444488525,
"count": 8
},
"Pyramids.Policy.Entropy.sum": {
"value": 23500.62890625,
"min": 23500.62890625,
"max": 44929.26953125,
"count": 8
},
"Pyramids.Step.mean": {
"value": 239958.0,
"min": 29974.0,
"max": 239958.0,
"count": 8
},
"Pyramids.Step.sum": {
"value": 239958.0,
"min": 29974.0,
"max": 239958.0,
"count": 8
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08021598309278488,
"min": -0.10526439547538757,
"max": 0.006860638968646526,
"count": 8
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -19.57270050048828,
"min": -25.473983764648438,
"max": 1.6328320503234863,
"count": 8
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03885841742157936,
"min": 0.03885841742157936,
"max": 0.28153514862060547,
"count": 8
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.481453895568848,
"min": 9.481453895568848,
"max": 67.00536346435547,
"count": 8
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07220267972760229,
"min": 0.0672382620456064,
"max": 0.07328388441462232,
"count": 8
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9386348364588297,
"min": 0.5129871909023562,
"max": 0.9386348364588297,
"count": 8
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0023970643957495386,
"min": 0.00013133329527695973,
"max": 0.004100341063228366,
"count": 8
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.031161837144744003,
"min": 0.0014446662480465569,
"max": 0.031161837144744003,
"count": 8
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.94312901896e-05,
"min": 2.94312901896e-05,
"max": 0.00028060252075154277,
"count": 8
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0003826067724648,
"min": 0.0003826067724648,
"max": 0.0027170185943272006,
"count": 8
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1098104,
"min": 0.1098104,
"max": 0.19353417142857146,
"count": 8
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4275352000000001,
"min": 1.3547392000000003,
"max": 2.0056728,
"count": 8
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0009900589599999999,
"min": 0.0009900589599999999,
"max": 0.009354063725714288,
"count": 8
},
"Pyramids.Policy.Beta.sum": {
"value": 0.012870766479999999,
"min": 0.012870766479999999,
"max": 0.09058671272000002,
"count": 8
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.03127488121390343,
"min": 0.03127488121390343,
"max": 0.33825868368148804,
"count": 8
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.40657344460487366,
"min": 0.40657344460487366,
"max": 2.3678107261657715,
"count": 8
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 960.8787878787879,
"min": 955.4545454545455,
"max": 999.0,
"count": 8
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31709.0,
"min": 16773.0,
"max": 31968.0,
"count": 8
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.6585818630727854,
"min": -0.9999375520274043,
"max": -0.6585818630727854,
"count": 8
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -21.73320148140192,
"min": -31.998001664876938,
"max": -14.789000898599625,
"count": 8
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.6585818630727854,
"min": -0.9999375520274043,
"max": -0.6585818630727854,
"count": 8
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -21.73320148140192,
"min": -31.998001664876938,
"max": -14.789000898599625,
"count": 8
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.3107151434347598,
"min": 0.3107151434347598,
"max": 6.8928938650471325,
"count": 8
},
"Pyramids.Policy.RndReward.sum": {
"value": 10.253599733347073,
"min": 10.253599733347073,
"max": 117.17919570580125,
"count": 8
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1659354850",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1659355350"
},
"total": 499.16679812100006,
"count": 1,
"self": 0.48353962600003797,
"children": {
"run_training.setup": {
"total": 0.046288968999988356,
"count": 1,
"self": 0.046288968999988356
},
"TrainerController.start_learning": {
"total": 498.63696952600003,
"count": 1,
"self": 0.40598806899595274,
"children": {
"TrainerController._reset_env": {
"total": 10.23266580500001,
"count": 1,
"self": 10.23266580500001
},
"TrainerController.advance": {
"total": 487.88847962400405,
"count": 15708,
"self": 0.432629593003071,
"children": {
"env_step": {
"total": 304.30467526898906,
"count": 15708,
"self": 272.7768432099872,
"children": {
"SubprocessEnvManager._take_step": {
"total": 31.306041705999064,
"count": 15708,
"self": 1.2695325499835235,
"children": {
"TorchPolicy.evaluate": {
"total": 30.03650915601554,
"count": 15674,
"self": 9.969247708017974,
"children": {
"TorchPolicy.sample_actions": {
"total": 20.067261447997566,
"count": 15674,
"self": 20.067261447997566
}
}
}
}
},
"workers": {
"total": 0.22179035300280248,
"count": 15708,
"self": 0.0,
"children": {
"worker_root": {
"total": 497.3854300300019,
"count": 15708,
"is_parallel": true,
"self": 252.28970552599753,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005047757999932401,
"count": 1,
"is_parallel": true,
"self": 0.0037834449998399577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001264313000092443,
"count": 8,
"is_parallel": true,
"self": 0.001264313000092443
}
}
},
"UnityEnvironment.step": {
"total": 0.050933747000044605,
"count": 1,
"is_parallel": true,
"self": 0.0005918800000017654,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005456559999856836,
"count": 1,
"is_parallel": true,
"self": 0.0005456559999856836
},
"communicator.exchange": {
"total": 0.04787152100004732,
"count": 1,
"is_parallel": true,
"self": 0.04787152100004732
},
"steps_from_proto": {
"total": 0.0019246900000098321,
"count": 1,
"is_parallel": true,
"self": 0.0005081189998463742,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001416571000163458,
"count": 8,
"is_parallel": true,
"self": 0.001416571000163458
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 245.09572450400435,
"count": 15707,
"is_parallel": true,
"self": 7.156004362004182,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.069965124005989,
"count": 15707,
"is_parallel": true,
"self": 6.069965124005989
},
"communicator.exchange": {
"total": 207.88473008700532,
"count": 15707,
"is_parallel": true,
"self": 207.88473008700532
},
"steps_from_proto": {
"total": 23.985024930988857,
"count": 15707,
"is_parallel": true,
"self": 6.12303310196387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 17.861991829024987,
"count": 125656,
"is_parallel": true,
"self": 17.861991829024987
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 183.15117476201192,
"count": 15708,
"self": 0.592759306008702,
"children": {
"process_trajectory": {
"total": 41.06375867500287,
"count": 15708,
"self": 41.06375867500287
},
"_update_policy": {
"total": 141.49465678100034,
"count": 92,
"self": 54.37541224799304,
"children": {
"TorchPPOOptimizer.update": {
"total": 87.1192445330073,
"count": 5745,
"self": 87.1192445330073
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1739998626580928e-06,
"count": 1,
"self": 1.1739998626580928e-06
},
"TrainerController._save_models": {
"total": 0.10983485400015525,
"count": 1,
"self": 0.0016721680001410277,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10816268600001422,
"count": 1,
"self": 0.10816268600001422
}
}
}
}
}
}
}
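
A minimal sketch, assuming the JSON above is saved locally as timers.json (the filename is an assumption for illustration), of how the gauge statistics and run metadata could be read back with Python's standard json module:

import json

# Load the ML-Agents timer/gauge dump (the local path is an assumption).
with open("timers.json") as f:
    timers = json.load(f)

# Each gauge stores the most recent value plus the min/max/count observed
# over the 8 summary writes recorded in this run.
for name, stats in timers["gauges"].items():
    print(f"{name}: value={stats['value']:.4f} "
          f"min={stats['min']:.4f} max={stats['max']:.4f} count={stats['count']}")

# The metadata block records the launch command and library versions.
print(timers["metadata"]["command_line_arguments"])
print(timers["metadata"]["mlagents_version"])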