IlyasMoutawwakil (HF staff) committed · verified
Commit e61a4d4 · 1 Parent(s): 0a85e99

Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub

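The commit message refers to the huggingface_hub upload path. As a rough sketch (not the exact command used for this commit; the repo_id, repo_type, and local file path below are assumptions), a result file like this one can be pushed with HfApi.upload_file:

# Hedged sketch: pushing a benchmark.json to a Hub repo with huggingface_hub.
# repo_id, repo_type, and the local path are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file (assumed name)
    path_in_repo="cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json",
    repo_id="IlyasMoutawwakil/benchmarks",  # hypothetical target repo
    repo_type="dataset",                    # assumed; could also be a model repo
    commit_message="Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub",
)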
cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base",
  "backend": {
  "name": "pytorch",
- "version": "2.4.1+cu124",
+ "version": "2.5.1+cu124",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "multiple-choice",
  "library": "transformers",
@@ -79,10 +79,10 @@
  "environment": {
  "cpu": " AMD EPYC 7R32",
  "cpu_count": 16,
- "cpu_ram_mb": 66697.261056,
+ "cpu_ram_mb": 66697.248768,
  "system": "Linux",
  "machine": "x86_64",
- "platform": "Linux-5.10.225-213.878.amzn2.x86_64-x86_64-with-glibc2.35",
+ "platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
  "processor": "x86_64",
  "python_version": "3.10.12",
  "gpu": [
@@ -92,15 +92,15 @@
  "gpu_vram_mb": 24146608128,
  "optimum_benchmark_version": "0.5.0.dev0",
  "optimum_benchmark_commit": null,
- "transformers_version": "4.45.2",
+ "transformers_version": "4.46.3",
  "transformers_commit": null,
- "accelerate_version": "1.0.1",
+ "accelerate_version": "1.1.1",
  "accelerate_commit": null,
- "diffusers_version": "0.30.3",
+ "diffusers_version": "0.31.0",
  "diffusers_commit": null,
  "optimum_version": null,
  "optimum_commit": null,
- "timm_version": "1.0.9",
+ "timm_version": "1.0.11",
  "timm_commit": null,
  "peft_version": "0.13.2",
  "peft_commit": null
@@ -112,7 +112,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1337.192448,
+ "max_ram": 1345.72032,
  "max_global_vram": 3384.27904,
  "max_process_vram": 0.0,
  "max_reserved": 2728.394752,
@@ -121,42 +121,42 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.35576217651367187,
- 0.04703129577636719,
- 0.04605235290527344,
- 0.046074878692626955,
- 0.04605542373657227
+ 0.35756851196289063,
+ 0.047387649536132816,
+ 0.04659695816040039,
+ 0.04753299331665039,
+ 0.046721023559570314
  ],
  "count": 5,
- "total": 0.5409761276245116,
- "mean": 0.10819522552490232,
- "p50": 0.046074878692626955,
- "p90": 0.23226982421875003,
- "p95": 0.2940160003662109,
- "p99": 0.34341294128417965,
- "stdev": 0.12378404630519566,
- "stdev_": 114.40804869592456
+ "total": 0.5458071365356446,
+ "mean": 0.10916142730712892,
+ "p50": 0.047387649536132816,
+ "p90": 0.23355430450439457,
+ "p95": 0.2955614082336425,
+ "p99": 0.34516709121704103,
+ "stdev": 0.12420407402590627,
+ "stdev_": 113.78018507989496
  },
  "throughput": {
  "unit": "samples/s",
- "value": 92.42552017878451
+ "value": 91.60745005527184
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.055118112430383e-05,
- "ram": 5.7624615054751955e-06,
- "gpu": 1.7052791419999887e-05,
- "total": 3.336643404977891e-05
+ "cpu": 9.039191384721572e-06,
+ "ram": 4.909473005839129e-06,
+ "gpu": 1.5694734778000288e-05,
+ "total": 2.9643399168560992e-05
  },
  "efficiency": {
  "unit": "samples/kWh",
- "value": 299702.389086024
+ "value": 337343.2292004399
  }
  },
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1337.192448,
+ "max_ram": 1345.72032,
  "max_global_vram": 3384.27904,
  "max_process_vram": 0.0,
  "max_reserved": 2728.394752,
@@ -165,22 +165,22 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.35576217651367187,
- 0.04703129577636719
+ 0.35756851196289063,
+ 0.047387649536132816
  ],
  "count": 2,
- "total": 0.40279347229003903,
- "mean": 0.20139673614501952,
- "p50": 0.20139673614501952,
- "p90": 0.3248890884399414,
- "p95": 0.34032563247680664,
- "p99": 0.3526748677062988,
- "stdev": 0.15436544036865232,
- "stdev_": 76.64743894235635
+ "total": 0.40495616149902347,
+ "mean": 0.20247808074951174,
+ "p50": 0.20247808074951174,
+ "p90": 0.32655042572021487,
+ "p95": 0.34205946884155275,
+ "p99": 0.35446670333862307,
+ "stdev": 0.15509043121337893,
+ "stdev_": 76.59615828008727
  },
  "throughput": {
  "unit": "samples/s",
- "value": 19.86129505653818
+ "value": 19.755224788743686
  },
  "energy": null,
  "efficiency": null
@@ -188,7 +188,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1337.192448,
+ "max_ram": 1345.72032,
  "max_global_vram": 3384.27904,
  "max_process_vram": 0.0,
  "max_reserved": 2728.394752,
@@ -197,23 +197,23 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.04605235290527344,
- 0.046074878692626955,
- 0.04605542373657227
+ 0.04659695816040039,
+ 0.04753299331665039,
+ 0.046721023559570314
  ],
  "count": 3,
- "total": 0.13818265533447266,
- "mean": 0.04606088511149089,
- "p50": 0.04605542373657227,
- "p90": 0.046070987701416016,
- "p95": 0.046072933197021486,
- "p99": 0.04607448959350586,
- "stdev": 9.974057548636764e-06,
- "stdev_": 0.021654072700718726
+ "total": 0.14085097503662108,
+ "mean": 0.04695032501220703,
+ "p50": 0.046721023559570314,
+ "p90": 0.04737059936523438,
+ "p95": 0.04745179634094238,
+ "p99": 0.04751675392150879,
+ "stdev": 0.0004151102829931439,
+ "stdev_": 0.8841478368578188
  },
  "throughput": {
  "unit": "samples/s",
- "value": 130.2623687208123
+ "value": 127.79464249587213
  },
  "energy": null,
  "efficiency": null