IlyasMoutawwakil HF staff committed on
Commit
3fbbc80
·
verified ·
1 Parent(s): 5a7bffb

Upload cuda_inference_diffusers_text-to-image_CompVis/stable-diffusion-v1-4/benchmark.json with huggingface_hub

Browse files
cuda_inference_diffusers_text-to-image_CompVis/stable-diffusion-v1-4/benchmark.json CHANGED
@@ -3,7 +3,7 @@
3
  "name": "cuda_inference_diffusers_text-to-image_CompVis/stable-diffusion-v1-4",
4
  "backend": {
5
  "name": "pytorch",
6
- "version": "2.4.1+cu124",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-to-image",
9
  "library": "diffusers",
@@ -44,9 +44,9 @@
44
  "duration": 1,
45
  "warmup_runs": 1,
46
  "input_shapes": {
47
- "batch_size": 1,
48
- "num_choices": 2,
49
- "sequence_length": 2
50
  },
51
  "new_tokens": null,
52
  "memory": true,
@@ -73,10 +73,10 @@
73
  "environment": {
74
  "cpu": " AMD EPYC 7R32",
75
  "cpu_count": 16,
76
- "cpu_ram_mb": 66697.261056,
77
  "system": "Linux",
78
  "machine": "x86_64",
79
- "platform": "Linux-5.10.225-213.878.amzn2.x86_64-x86_64-with-glibc2.35",
80
  "processor": "x86_64",
81
  "python_version": "3.10.12",
82
  "gpu": [
@@ -86,15 +86,15 @@
86
  "gpu_vram_mb": 24146608128,
87
  "optimum_benchmark_version": "0.5.0.dev0",
88
  "optimum_benchmark_commit": null,
89
- "transformers_version": "4.45.2",
90
  "transformers_commit": null,
91
- "accelerate_version": "1.0.1",
92
  "accelerate_commit": null,
93
- "diffusers_version": "0.30.3",
94
  "diffusers_commit": null,
95
  "optimum_version": null,
96
  "optimum_commit": null,
97
- "timm_version": "1.0.9",
98
  "timm_commit": null,
99
  "peft_version": "0.13.2",
100
  "peft_commit": null
@@ -106,24 +106,24 @@
106
  "load": {
107
  "memory": {
108
  "unit": "MB",
109
- "max_ram": 4283.82208,
110
- "max_global_vram": 6219.628544,
111
  "max_process_vram": 0.0,
112
- "max_reserved": 5626.658816,
113
- "max_allocated": 5501.886464
114
  },
115
  "latency": {
116
  "unit": "s",
117
  "values": [
118
- 49.230109375
119
  ],
120
  "count": 1,
121
- "total": 49.230109375,
122
- "mean": 49.230109375,
123
- "p50": 49.230109375,
124
- "p90": 49.230109375,
125
- "p95": 49.230109375,
126
- "p99": 49.230109375,
127
  "stdev": 0,
128
  "stdev_": 0
129
  },
@@ -134,42 +134,41 @@
134
  "call": {
135
  "memory": {
136
  "unit": "MB",
137
- "max_ram": 1506.287616,
138
- "max_global_vram": 8585.216,
139
  "max_process_vram": 0.0,
140
- "max_reserved": 7933.526016,
141
- "max_allocated": 6587.545088
142
  },
143
  "latency": {
144
  "unit": "s",
145
  "values": [
146
- 0.5973391723632813,
147
- 0.5979462280273438
148
  ],
149
- "count": 2,
150
- "total": 1.195285400390625,
151
- "mean": 0.5976427001953125,
152
- "p50": 0.5976427001953125,
153
- "p90": 0.5978855224609375,
154
- "p95": 0.5979158752441406,
155
- "p99": 0.5979401574707032,
156
- "stdev": 0.00030352783203124734,
157
- "stdev_": 0.05078750764161479
158
  },
159
  "throughput": {
160
  "unit": "images/s",
161
- "value": 1.6732405493670301
162
  },
163
  "energy": {
164
  "unit": "kWh",
165
- "cpu": 7.084126890971337e-06,
166
- "ram": 3.872776825554681e-06,
167
- "gpu": 3.882919773000055e-05,
168
- "total": 4.978610144652657e-05
169
  },
170
  "efficiency": {
171
  "unit": "images/kWh",
172
- "value": 20085.927014672227
173
  }
174
  }
175
  }
 
3
  "name": "cuda_inference_diffusers_text-to-image_CompVis/stable-diffusion-v1-4",
4
  "backend": {
5
  "name": "pytorch",
6
+ "version": "2.5.1+cu124",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-to-image",
9
  "library": "diffusers",
 
44
  "duration": 1,
45
  "warmup_runs": 1,
46
  "input_shapes": {
47
+ "batch_size": 2,
48
+ "sequence_length": 16,
49
+ "num_choices": 2
50
  },
51
  "new_tokens": null,
52
  "memory": true,
 
73
  "environment": {
74
  "cpu": " AMD EPYC 7R32",
75
  "cpu_count": 16,
76
+ "cpu_ram_mb": 66697.248768,
77
  "system": "Linux",
78
  "machine": "x86_64",
79
+ "platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
80
  "processor": "x86_64",
81
  "python_version": "3.10.12",
82
  "gpu": [
 
86
  "gpu_vram_mb": 24146608128,
87
  "optimum_benchmark_version": "0.5.0.dev0",
88
  "optimum_benchmark_commit": null,
89
+ "transformers_version": "4.46.3",
90
  "transformers_commit": null,
91
+ "accelerate_version": "1.1.1",
92
  "accelerate_commit": null,
93
+ "diffusers_version": "0.31.0",
94
  "diffusers_commit": null,
95
  "optimum_version": null,
96
  "optimum_commit": null,
97
+ "timm_version": "1.0.11",
98
  "timm_commit": null,
99
  "peft_version": "0.13.2",
100
  "peft_commit": null
 
106
  "load": {
107
  "memory": {
108
  "unit": "MB",
109
+ "max_ram": 4251.5456,
110
+ "max_global_vram": 6194.46272,
111
  "max_process_vram": 0.0,
112
+ "max_reserved": 5601.492992,
113
+ "max_allocated": 5506.998272
114
  },
115
  "latency": {
116
  "unit": "s",
117
  "values": [
118
+ 87.3890625
119
  ],
120
  "count": 1,
121
+ "total": 87.3890625,
122
+ "mean": 87.3890625,
123
+ "p50": 87.3890625,
124
+ "p90": 87.3890625,
125
+ "p95": 87.3890625,
126
+ "p99": 87.3890625,
127
  "stdev": 0,
128
  "stdev_": 0
129
  },
 
134
  "call": {
135
  "memory": {
136
  "unit": "MB",
137
+ "max_ram": 1477.369856,
138
+ "max_global_vram": 9335.996416,
139
  "max_process_vram": 0.0,
140
+ "max_reserved": 8684.306432,
141
+ "max_allocated": 7536.1024
142
  },
143
  "latency": {
144
  "unit": "s",
145
  "values": [
146
+ 1.1839676513671875
 
147
  ],
148
+ "count": 1,
149
+ "total": 1.1839676513671875,
150
+ "mean": 1.1839676513671875,
151
+ "p50": 1.1839676513671875,
152
+ "p90": 1.1839676513671875,
153
+ "p95": 1.1839676513671875,
154
+ "p99": 1.1839676513671875,
155
+ "stdev": 0,
156
+ "stdev_": 0
157
  },
158
  "throughput": {
159
  "unit": "images/s",
160
+ "value": 1.6892353415994927
161
  },
162
  "energy": {
163
  "unit": "kWh",
164
+ "cpu": 1.4046913342361242e-05,
165
+ "ram": 7.678935756169944e-06,
166
+ "gpu": 7.709533945400061e-05,
167
+ "total": 9.882118855253179e-05
168
  },
169
  "efficiency": {
170
  "unit": "images/kWh",
171
+ "value": 20238.574634597026
172
  }
173
  }
174
  }