IlyasMoutawwakil committed
Commit 3f484cd · verified · 1 parent: 70b8e89

Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub
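The commit message states that the file was pushed with the `huggingface_hub` client. A minimal sketch of how such an upload could look; the repo id and local path below are assumptions for illustration, not taken from the commit:

```python
# Sketch only: repo_id and the local file path are hypothetical.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file produced by the benchmark run
    path_in_repo="cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json",
    repo_id="user-or-org/benchmark-results",  # hypothetical target repository
    repo_type="dataset",
    commit_message="Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub",
)
```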

cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
  "backend": {
  "name": "pytorch",
- "version": "2.4.0+cu124",
+ "version": "2.4.1+cu124",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "fill-mask",
  "library": "transformers",
@@ -110,7 +110,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1282.228224,
+ "max_ram": 1280.892928,
  "max_global_vram": 3169.32096,
  "max_process_vram": 0.0,
  "max_reserved": 2520.776704,
@@ -119,24 +119,24 @@
  "latency": {
  "unit": "s",
  "count": 5,
- "total": 0.5040353355407715,
- "mean": 0.1008070671081543,
- "stdev": 0.11561416168642699,
- "p50": 0.042821632385253904,
- "p90": 0.21672100372314457,
- "p95": 0.27437753067016596,
- "p99": 0.3205027522277832,
+ "total": 0.5081374588012695,
+ "mean": 0.1016274917602539,
+ "stdev": 0.11492488009987316,
+ "p50": 0.044593151092529294,
+ "p90": 0.21703618011474612,
+ "p95": 0.27425402145385736,
+ "p99": 0.3200282945251465,
  "values": [
- 0.3320340576171875,
- 0.04375142288208008,
- 0.042821632385253904,
- 0.04278681564331055,
- 0.042641407012939454
+ 0.33147186279296875,
+ 0.044593151092529294,
+ 0.043551742553710936,
+ 0.04538265609741211,
+ 0.043138046264648434
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 99.19939431697937
+ "value": 98.39857135892592
  },
  "energy": null,
  "efficiency": null
@@ -144,7 +144,7 @@
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1282.228224,
+ "max_ram": 1280.892928,
  "max_global_vram": 3169.32096,
  "max_process_vram": 0.0,
  "max_reserved": 2520.776704,
@@ -153,21 +153,21 @@
  "latency": {
  "unit": "s",
  "count": 2,
- "total": 0.3757854804992676,
- "mean": 0.1878927402496338,
- "stdev": 0.1441413173675537,
- "p50": 0.1878927402496338,
- "p90": 0.30320579414367677,
- "p95": 0.31761992588043214,
- "p99": 0.3291512312698364,
+ "total": 0.37606501388549807,
+ "mean": 0.18803250694274903,
+ "stdev": 0.14343935585021972,
+ "p50": 0.18803250694274903,
+ "p90": 0.3027839916229248,
+ "p95": 0.31712792720794675,
+ "p99": 0.32860307567596436,
  "values": [
- 0.3320340576171875,
- 0.04375142288208008
+ 0.33147186279296875,
+ 0.044593151092529294
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 21.288741622936634
+ "value": 21.272917460053304
  },
  "energy": null,
  "efficiency": null
@@ -175,7 +175,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1282.228224,
+ "max_ram": 1280.892928,
  "max_global_vram": 3169.32096,
  "max_process_vram": 0.0,
  "max_reserved": 2520.776704,
@@ -184,22 +184,22 @@
  "latency": {
  "unit": "s",
  "count": 3,
- "total": 0.1282498550415039,
- "mean": 0.0427499516805013,
- "stdev": 7.805771378482985e-05,
- "p50": 0.04278681564331055,
- "p90": 0.04281466903686523,
- "p95": 0.042818150711059565,
- "p99": 0.04282093605041504,
+ "total": 0.13207244491577147,
+ "mean": 0.04402414830525716,
+ "stdev": 0.0009753439464808654,
+ "p50": 0.043551742553710936,
+ "p90": 0.045016473388671874,
+ "p95": 0.04519956474304199,
+ "p99": 0.04534603782653808,
  "values": [
- 0.042821632385253904,
- 0.04278681564331055,
- 0.042641407012939454
+ 0.043551742553710936,
+ 0.04538265609741211,
+ 0.043138046264648434
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 140.35103582904546
+ "value": 136.28883762604235
  },
  "energy": null,
  "efficiency": null