{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8127557039260864,
"min": 0.810983419418335,
"max": 2.8731558322906494,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7760.19140625,
"min": 7760.19140625,
"max": 29423.98828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.055801391601562,
"min": 0.26122593879699707,
"max": 13.077880859375,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2545.88134765625,
"min": 50.67782974243164,
"max": 2667.8876953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06433301769121659,
"min": 0.0625133138284368,
"max": 0.07504908876948962,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25733207076486636,
"min": 0.256538518928174,
"max": 0.3752454438474481,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19114929978169648,
"min": 0.10991448852776861,
"max": 0.3110489137470722,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7645971991267859,
"min": 0.43965795411107444,
"max": 1.385029620399662,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.863636363636363,
"min": 2.977272727272727,
"max": 25.863636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1138.0,
"min": 131.0,
"max": 1413.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.863636363636363,
"min": 2.977272727272727,
"max": 25.863636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1138.0,
"min": 131.0,
"max": 1413.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674323017",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674323506"
},
"total": 488.53702098199994,
"count": 1,
"self": 0.3953393150000011,
"children": {
"run_training.setup": {
"total": 0.1115964680000161,
"count": 1,
"self": 0.1115964680000161
},
"TrainerController.start_learning": {
"total": 488.0300851989999,
"count": 1,
"self": 0.6894928569820422,
"children": {
"TrainerController._reset_env": {
"total": 9.932165766000026,
"count": 1,
"self": 9.932165766000026
},
"TrainerController.advance": {
"total": 477.27202520501794,
"count": 18201,
"self": 0.3606337270346103,
"children": {
"env_step": {
"total": 476.91139147798333,
"count": 18201,
"self": 313.4284189489831,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.1445530619883,
"count": 18201,
"self": 1.7959171119982784,
"children": {
"TorchPolicy.evaluate": {
"total": 161.34863594999,
"count": 18201,
"self": 37.58109736498591,
"children": {
"TorchPolicy.sample_actions": {
"total": 123.7675385850041,
"count": 18201,
"self": 123.7675385850041
}
}
}
}
},
"workers": {
"total": 0.3384194670119314,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 486.3592254169911,
"count": 18201,
"is_parallel": true,
"self": 226.60297920400944,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0059990140000536485,
"count": 1,
"is_parallel": true,
"self": 0.0035038109998595246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002495203000194124,
"count": 10,
"is_parallel": true,
"self": 0.002495203000194124
}
}
},
"UnityEnvironment.step": {
"total": 0.03698322499997175,
"count": 1,
"is_parallel": true,
"self": 0.00035926299995026056,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00033937900002456445,
"count": 1,
"is_parallel": true,
"self": 0.00033937900002456445
},
"communicator.exchange": {
"total": 0.03445413899999039,
"count": 1,
"is_parallel": true,
"self": 0.03445413899999039
},
"steps_from_proto": {
"total": 0.001830444000006537,
"count": 1,
"is_parallel": true,
"self": 0.0004557790001626927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013746649998438443,
"count": 10,
"is_parallel": true,
"self": 0.0013746649998438443
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 259.75624621298164,
"count": 18200,
"is_parallel": true,
"self": 9.69179004697105,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.761454752990062,
"count": 18200,
"is_parallel": true,
"self": 5.761454752990062
},
"communicator.exchange": {
"total": 206.56625469300081,
"count": 18200,
"is_parallel": true,
"self": 206.56625469300081
},
"steps_from_proto": {
"total": 37.73674672001971,
"count": 18200,
"is_parallel": true,
"self": 8.180710088004844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.556036632014866,
"count": 182000,
"is_parallel": true,
"self": 29.556036632014866
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.936999994242797e-05,
"count": 1,
"self": 4.936999994242797e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 473.6166840919137,
"count": 425787,
"is_parallel": true,
"self": 11.76156093883742,
"children": {
"process_trajectory": {
"total": 277.06716441707556,
"count": 425787,
"is_parallel": true,
"self": 276.31926125207576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7479031649997978,
"count": 4,
"is_parallel": true,
"self": 0.7479031649997978
}
}
},
"_update_policy": {
"total": 184.78795873600075,
"count": 90,
"is_parallel": true,
"self": 43.0425393450048,
"children": {
"TorchPPOOptimizer.update": {
"total": 141.74541939099595,
"count": 4587,
"is_parallel": true,
"self": 141.74541939099595
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13635200099997746,
"count": 1,
"self": 0.0009308799999416806,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13542112100003578,
"count": 1,
"self": 0.13542112100003578
}
}
}
}
}
}
}