Commit 33dfddf (verified) by IlyasMoutawwakil · 1 parent: 8ae218e

Upload cuda_training_transformers_token-classification_microsoft/deberta-v3-base/benchmark.json with huggingface_hub

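The commit message notes the report was pushed with huggingface_hub. A minimal sketch of such an upload follows; the repo_id and local file path below are hypothetical placeholders, not values taken from this commit.

```python
# Sketch: push a benchmark report to the Hub with huggingface_hub.
# repo_id and the local path are assumptions for illustration only.
from huggingface_hub import HfApi

api = HfApi()  # uses the token configured via `huggingface-cli login`
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file (assumed path)
    path_in_repo="cuda_training_transformers_token-classification_microsoft/deberta-v3-base/benchmark.json",
    repo_id="some-org/benchmark-results",  # hypothetical dataset repo
    repo_type="dataset",
    commit_message="Upload cuda_training_transformers_token-classification_microsoft/deberta-v3-base/benchmark.json with huggingface_hub",
)
```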
cuda_training_transformers_token-classification_microsoft/deberta-v3-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
     "name": "cuda_training_transformers_token-classification_microsoft/deberta-v3-base",
     "backend": {
         "name": "pytorch",
-        "version": "2.4.0+cu124",
+        "version": "2.4.1+cu124",
         "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
         "task": "token-classification",
         "library": "transformers",
@@ -110,7 +110,7 @@
     "overall": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1358.06976,
+            "max_ram": 1357.651968,
             "max_global_vram": 4597.481472,
             "max_process_vram": 0.0,
             "max_reserved": 3948.937216,
@@ -119,24 +119,24 @@
         "latency": {
             "unit": "s",
             "count": 5,
-            "total": 0.7560836791992188,
-            "mean": 0.15121673583984377,
-            "stdev": 0.1303638076136866,
-            "p50": 0.08636109161376954,
-            "p90": 0.2821431091308594,
-            "p95": 0.34704094543457026,
-            "p99": 0.39895921447753907,
+            "total": 0.7563120727539061,
+            "mean": 0.1512624145507812,
+            "stdev": 0.13025976035996578,
+            "p50": 0.08672767639160156,
+            "p90": 0.2818945098876954,
+            "p95": 0.34683577423095696,
+            "p99": 0.3987887857055664,
             "values": [
-                0.41193878173828125,
-                0.08744960021972656,
-                0.08636109161376954,
-                0.08507698822021484,
-                0.08525721740722657
+                0.41177703857421877,
+                0.08707071685791015,
+                0.08599552154541015,
+                0.08474111938476563,
+                0.08672767639160156
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 66.13024639409736
+            "value": 66.11027616938404
         },
         "energy": null,
         "efficiency": null
@@ -144,7 +144,7 @@
     "warmup": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1358.06976,
+            "max_ram": 1357.651968,
             "max_global_vram": 4597.481472,
             "max_process_vram": 0.0,
             "max_reserved": 3948.937216,
@@ -153,21 +153,21 @@
         "latency": {
             "unit": "s",
             "count": 2,
-            "total": 0.49938838195800783,
-            "mean": 0.24969419097900392,
-            "stdev": 0.16224459075927733,
-            "p50": 0.24969419097900392,
-            "p90": 0.3794898635864258,
-            "p95": 0.3957143226623535,
-            "p99": 0.4086938899230957,
+            "total": 0.4988477554321289,
+            "mean": 0.24942387771606445,
+            "stdev": 0.16235316085815432,
+            "p50": 0.24942387771606445,
+            "p90": 0.3793064064025879,
+            "p95": 0.3955417224884033,
+            "p99": 0.40852997535705565,
             "values": [
-                0.41193878173828125,
-                0.08744960021972656
+                0.41177703857421877,
+                0.08707071685791015
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 16.01959574756927
+            "value": 16.036956993161105
         },
         "energy": null,
         "efficiency": null
@@ -175,7 +175,7 @@
     "train": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1358.06976,
+            "max_ram": 1357.651968,
             "max_global_vram": 4597.481472,
             "max_process_vram": 0.0,
             "max_reserved": 3948.937216,
@@ -184,22 +184,22 @@
         "latency": {
             "unit": "s",
             "count": 3,
-            "total": 0.25669529724121093,
-            "mean": 0.08556509908040365,
-            "stdev": 0.0005676405698699433,
-            "p50": 0.08525721740722657,
-            "p90": 0.08614031677246094,
-            "p95": 0.08625070419311524,
-            "p99": 0.08633901412963868,
+            "total": 0.2574643173217773,
+            "mean": 0.0858214391072591,
+            "stdev": 0.0008202969815079962,
+            "p50": 0.08599552154541015,
+            "p90": 0.08658124542236328,
+            "p95": 0.08665446090698242,
+            "p99": 0.08671303329467774,
             "values": [
-                0.08636109161376954,
-                0.08507698822021484,
-                0.08525721740722657
+                0.08599552154541015,
+                0.08474111938476563,
+                0.08672767639160156
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 70.12204817716545
+            "value": 69.91260065566178
         },
         "energy": null,
         "efficiency": null