IlyasMoutawwakil committed
Commit 9a1d154 · verified · 1 parent: 34bcfac

Upload cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json with huggingface_hub

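The commit message above refers to a push made through the huggingface_hub client. A minimal sketch of what such an upload typically looks like; the repo_id below is a hypothetical placeholder, since the target repository is not shown on this page:

# Sketch: pushing a benchmark result file with huggingface_hub.
# The repo_id is a placeholder, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` or HF_TOKEN
api.upload_file(
    path_or_fileobj="benchmark.json",  # local file produced by the benchmark run
    path_in_repo="cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json",
    repo_id="<user-or-org>/<benchmark-repo>",  # hypothetical placeholder
    repo_type="dataset",
    commit_message="Upload cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json with huggingface_hub",
)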
cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json CHANGED
@@ -3,7 +3,7 @@
   "name": "cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4",
   "backend": {
     "name": "pytorch",
-    "version": "2.3.0+cu121",
+    "version": "2.3.1+cu121",
     "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
     "task": "stable-diffusion",
     "library": "diffusers",
@@ -73,10 +73,10 @@
   "environment": {
     "cpu": " AMD EPYC 7R32",
     "cpu_count": 16,
-    "cpu_ram_mb": 66697.29792,
+    "cpu_ram_mb": 66697.293824,
     "system": "Linux",
     "machine": "x86_64",
-    "platform": "Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35",
+    "platform": "Linux-5.10.219-208.866.amzn2.x86_64-x86_64-with-glibc2.35",
     "processor": "x86_64",
     "python_version": "3.10.12",
     "gpu": [
@@ -86,15 +86,15 @@
     "gpu_vram_mb": 24146608128,
     "optimum_benchmark_version": "0.2.1",
     "optimum_benchmark_commit": null,
-    "transformers_version": "4.41.1",
+    "transformers_version": "4.42.3",
     "transformers_commit": null,
-    "accelerate_version": "0.30.1",
+    "accelerate_version": "0.31.0",
     "accelerate_commit": null,
-    "diffusers_version": "0.27.2",
+    "diffusers_version": "0.29.2",
     "diffusers_commit": null,
     "optimum_version": null,
     "optimum_commit": null,
-    "timm_version": "1.0.3",
+    "timm_version": "1.0.7",
     "timm_commit": null,
     "peft_version": null,
     "peft_commit": null
@@ -104,41 +104,41 @@
   "call": {
     "memory": {
       "unit": "MB",
-      "max_ram": 1304.956928,
-      "max_global_vram": 8143.765504,
+      "max_ram": 1306.042368,
+      "max_global_vram": 8162.639872,
       "max_process_vram": 0.0,
-      "max_reserved": 7488.929792,
-      "max_allocated": 6528.907264
+      "max_reserved": 7507.80416,
+      "max_allocated": 6526.59712
     },
     "latency": {
       "unit": "s",
       "count": 2,
-      "total": 1.271260986328125,
-      "mean": 0.6356304931640625,
-      "stdev": 0.0016174926757812624,
-      "p50": 0.6356304931640625,
-      "p90": 0.6369244873046875,
-      "p95": 0.6370862365722656,
-      "p99": 0.6372156359863281,
+      "total": 1.2634773559570314,
+      "mean": 0.6317386779785157,
+      "stdev": 0.0014798278808593746,
+      "p50": 0.6317386779785157,
+      "p90": 0.6329225402832032,
+      "p95": 0.6330705230712891,
+      "p99": 0.6331889093017579,
       "values": [
-        0.6372479858398438,
-        0.6340130004882812
+        0.633218505859375,
+        0.6302588500976563
       ]
     },
     "throughput": {
       "unit": "images/s",
-      "value": 1.5732410744207173
+      "value": 1.5829329988150704
     },
     "energy": {
       "unit": "kWh",
-      "cpu": 7.507175538274977e-06,
-      "ram": 4.106399692773266e-06,
-      "gpu": 4.3452256983999866e-05,
-      "total": 5.506583221504811e-05
+      "cpu": 7.459994736644957e-06,
+      "ram": 4.08063814680934e-06,
+      "gpu": 4.202170028400022e-05,
+      "total": 5.356233316745451e-05
     },
     "efficiency": {
       "unit": "images/kWh",
-      "value": 18160.081483826645
+      "value": 18669.836447819624
     }
   }
 }
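For reference, the derived metrics in this file are numerically consistent with simple functions of the raw measurements: the reported throughput matches count / total latency, and the efficiency matches 1 / total energy (to rounding). A small sketch recomputing them from the new values in the diff; the formulas are inferred from the numbers themselves, not quoted from the optimum-benchmark source:

# Recompute the derived metrics from the raw values in the updated benchmark.json.
# The relationships below are inferred from the reported numbers (an assumption),
# not taken from optimum-benchmark itself.
latencies_s = [0.633218505859375, 0.6302588500976563]  # per-call latencies, seconds
total_energy_kwh = 5.356233316745451e-05               # cpu + ram + gpu, kWh

throughput = len(latencies_s) / sum(latencies_s)  # ~1.5829 images/s
efficiency = 1.0 / total_energy_kwh               # ~18669.8 images/kWh

print(f"throughput ~ {throughput:.6f} images/s")
print(f"efficiency ~ {efficiency:.3f} images/kWh")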