IlyasMoutawwakil committed
Commit d5552e9 · verified · 1 parent: adff6ec

Upload cuda_training_transformers_text-classification_FacebookAI/roberta-base/benchmark.json with huggingface_hub

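The commit message indicates the file was pushed with the huggingface_hub client. A minimal sketch of such an upload is shown below; the repo_id, repo_type, and local file path are placeholders (assumptions, not taken from this commit), and only path_in_repo and the commit message mirror what is shown here.

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

api.upload_file(
    # Placeholder local path; assumed to be the benchmark output on disk.
    path_or_fileobj="benchmark.json",
    # Destination path inside the repo, matching this commit.
    path_in_repo="cuda_training_transformers_text-classification_FacebookAI/roberta-base/benchmark.json",
    # Placeholder repo; the actual target repo is not named in this commit view.
    repo_id="<namespace>/<benchmark-repo>",
    repo_type="dataset",  # assumption: benchmark results are typically stored in a dataset repo
    commit_message="Upload cuda_training_transformers_text-classification_FacebookAI/roberta-base/benchmark.json with huggingface_hub",
)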
cuda_training_transformers_text-classification_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cuda_training_transformers_text-classification_FacebookAI/roberta-base",
  "backend": {
  "name": "pytorch",
- "version": "2.4.1+cu124",
+ "version": "2.5.1+cu124",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-classification",
  "library": "transformers",
@@ -79,10 +79,10 @@
  "environment": {
  "cpu": " AMD EPYC 7R32",
  "cpu_count": 16,
- "cpu_ram_mb": 66697.261056,
+ "cpu_ram_mb": 66697.248768,
  "system": "Linux",
  "machine": "x86_64",
- "platform": "Linux-5.10.225-213.878.amzn2.x86_64-x86_64-with-glibc2.35",
+ "platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
  "processor": "x86_64",
  "python_version": "3.10.12",
  "gpu": [
@@ -92,15 +92,15 @@
  "gpu_vram_mb": 24146608128,
  "optimum_benchmark_version": "0.5.0.dev0",
  "optimum_benchmark_commit": null,
- "transformers_version": "4.45.2",
+ "transformers_version": "4.46.3",
  "transformers_commit": null,
- "accelerate_version": "1.0.1",
+ "accelerate_version": "1.1.1",
  "accelerate_commit": null,
- "diffusers_version": "0.30.3",
+ "diffusers_version": "0.31.0",
  "diffusers_commit": null,
  "optimum_version": null,
  "optimum_commit": null,
- "timm_version": "1.0.9",
+ "timm_version": "1.0.11",
  "timm_commit": null,
  "peft_version": "0.13.2",
  "peft_commit": null
@@ -112,7 +112,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1323.085824,
+ "max_ram": 1332.40832,
  "max_global_vram": 3384.27904,
  "max_process_vram": 0.0,
  "max_reserved": 2728.394752,
@@ -121,42 +121,42 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.3465441284179688,
- 0.04596223831176758,
- 0.045655040740966796,
- 0.04518291091918945,
- 0.045499393463134766
+ 0.35358001708984377,
+ 0.046873600006103515,
+ 0.048377857208251954,
+ 0.04718991851806641,
+ 0.04760780715942383
  ],
  "count": 5,
- "total": 0.5288437118530275,
- "mean": 0.10576874237060549,
- "p50": 0.045655040740966796,
- "p90": 0.22631137237548832,
- "p95": 0.2864277503967285,
- "p99": 0.33452085281372074,
- "stdev": 0.12038795535102645,
- "stdev_": 113.82186518697213
+ "total": 0.5436291999816895,
+ "mean": 0.10872583999633789,
+ "p50": 0.04760780715942383,
+ "p90": 0.23149915313720706,
+ "p95": 0.29253958511352535,
+ "p99": 0.3413719306945801,
+ "stdev": 0.12242812604019773,
+ "stdev_": 112.6026030650316
  },
  "throughput": {
  "unit": "samples/s",
- "value": 94.54589111933254
+ "value": 91.97445612134908
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.0490307170139897e-05,
- "ram": 5.714536570311505e-06,
- "gpu": 1.8112236711999644e-05,
- "total": 3.431708045245105e-05
+ "cpu": 9.134229152777431e-06,
+ "ram": 4.986785702676617e-06,
+ "gpu": 1.595223498400064e-05,
+ "total": 3.007324983945469e-05
  },
  "efficiency": {
  "unit": "samples/kWh",
- "value": 291400.08031440113
+ "value": 332521.4286246002
  }
  },
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1323.085824,
+ "max_ram": 1332.40832,
  "max_global_vram": 3384.27904,
  "max_process_vram": 0.0,
  "max_reserved": 2728.394752,
@@ -165,22 +165,22 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.3465441284179688,
- 0.04596223831176758
+ 0.35358001708984377,
+ 0.046873600006103515
  ],
  "count": 2,
- "total": 0.3925063667297364,
- "mean": 0.1962531833648682,
- "p50": 0.1962531833648682,
- "p90": 0.31648593940734865,
- "p95": 0.3315150339126587,
- "p99": 0.3435383095169068,
- "stdev": 0.1502909450531006,
- "stdev_": 76.58013107164945
+ "total": 0.4004536170959473,
+ "mean": 0.20022680854797364,
+ "p50": 0.20022680854797364,
+ "p90": 0.32290937538146974,
+ "p95": 0.3382446962356567,
+ "p99": 0.35051295291900636,
+ "stdev": 0.15335320854187012,
+ "stdev_": 76.58974822301442
  },
  "throughput": {
  "unit": "samples/s",
- "value": 20.38183499201293
+ "value": 19.977344837125614
  },
  "energy": null,
  "efficiency": null
@@ -188,7 +188,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1323.085824,
+ "max_ram": 1332.40832,
  "max_global_vram": 3384.27904,
  "max_process_vram": 0.0,
  "max_reserved": 2728.394752,
@@ -197,23 +197,23 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.045655040740966796,
- 0.04518291091918945,
- 0.045499393463134766
+ 0.048377857208251954,
+ 0.04718991851806641,
+ 0.04760780715942383
  ],
  "count": 3,
- "total": 0.13633734512329101,
- "mean": 0.04544578170776367,
- "p50": 0.045499393463134766,
- "p90": 0.04562391128540039,
- "p95": 0.04563947601318359,
- "p99": 0.045651927795410156,
- "stdev": 0.00019643880707632385,
- "stdev_": 0.4322487141700227
+ "total": 0.1431755828857422,
+ "mean": 0.0477251942952474,
+ "p50": 0.04760780715942383,
+ "p90": 0.04822384719848633,
+ "p95": 0.04830085220336914,
+ "p99": 0.04836245620727539,
+ "stdev": 0.0004920260072073315,
+ "stdev_": 1.030956530346339
  },
  "throughput": {
  "unit": "samples/s",
- "value": 132.02545482840705
+ "value": 125.719760570938
  },
  "energy": null,
  "efficiency": null