IlyasMoutawwakil committed (verified)
Commit eee2c7f
1 Parent(s): d908c33

Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub

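The commit message says the file was pushed with huggingface_hub. As a minimal sketch of how such an upload can be done with the library's upload_file API (the repo_id, repo_type and local path below are placeholders/assumptions, not taken from this commit):

# Sketch: pushing a benchmark.json to a Hub repo with huggingface_hub.
# repo_id, repo_type and the local path are placeholders, not values from this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="benchmark.json",  # local file produced by the benchmark run
    path_in_repo="cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json",
    repo_id="<user>/<benchmark-repo>",  # placeholder
    repo_type="dataset",                # assumption: benchmark results are typically stored in a dataset repo
    commit_message="Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub",
)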
cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json CHANGED
@@ -3,7 +3,7 @@
   "name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
   "backend": {
   "name": "pytorch",
-  "version": "2.4.1+cu124",
+  "version": "2.6.0.dev20240917+cu124",
   "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
   "task": "fill-mask",
   "library": "transformers",
@@ -82,7 +82,7 @@
   "cpu_ram_mb": 66697.261056,
   "system": "Linux",
   "machine": "x86_64",
-  "platform": "Linux-5.10.223-212.873.amzn2.x86_64-x86_64-with-glibc2.35",
+  "platform": "Linux-5.10.224-212.876.amzn2.x86_64-x86_64-with-glibc2.35",
   "processor": "x86_64",
   "python_version": "3.10.12",
   "gpu": [
@@ -94,9 +94,9 @@
   "optimum_benchmark_commit": null,
   "transformers_version": "4.44.2",
   "transformers_commit": null,
-  "accelerate_version": "0.34.0",
+  "accelerate_version": "0.34.2",
   "accelerate_commit": null,
-  "diffusers_version": "0.30.2",
+  "diffusers_version": "0.30.3",
   "diffusers_commit": null,
   "optimum_version": null,
   "optimum_commit": null,
@@ -110,8 +110,8 @@
   "overall": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1292.582912,
-  "max_global_vram": 3169.32096,
+  "max_ram": 1297.772544,
+  "max_global_vram": 3176.660992,
   "max_process_vram": 0.0,
   "max_reserved": 2520.776704,
   "max_allocated": 2211.86048
@@ -119,24 +119,24 @@
   "latency": {
   "unit": "s",
   "count": 5,
-  "total": 0.5003356170654296,
-  "mean": 0.10006712341308592,
-  "stdev": 0.11484821405786928,
-  "p50": 0.04248473739624024,
-  "p90": 0.21513953399658203,
-  "p95": 0.27245117568969723,
-  "p99": 0.31830048904418945,
+  "total": 0.5019750442504882,
+  "mean": 0.10039500885009764,
+  "stdev": 0.11377259029865176,
+  "p50": 0.04337254333496094,
+  "p90": 0.21454110870361331,
+  "p95": 0.2712395790100097,
+  "p99": 0.3165983552551269,
   "values": [
-  0.3297628173828125,
-  0.04320460891723633,
-  0.042434558868408204,
-  0.04244889450073242,
-  0.04248473739624024
+  0.32793804931640624,
+  0.04444569778442383,
+  0.04314112091064453,
+  0.04337254333496094,
+  0.04307763290405273
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 99.9329216122174
+  "value": 99.60654533066734
   },
   "energy": null,
   "efficiency": null
@@ -144,8 +144,8 @@
   "warmup": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1292.582912,
-  "max_global_vram": 3169.32096,
+  "max_ram": 1297.772544,
+  "max_global_vram": 3176.660992,
   "max_process_vram": 0.0,
   "max_reserved": 2520.776704,
   "max_allocated": 2211.86048
@@ -153,21 +153,21 @@
   "latency": {
   "unit": "s",
   "count": 2,
-  "total": 0.3729674263000488,
-  "mean": 0.1864837131500244,
-  "stdev": 0.14327910423278808,
-  "p50": 0.1864837131500244,
-  "p90": 0.3011069965362549,
-  "p95": 0.3154349069595337,
-  "p99": 0.32689723529815673,
+  "total": 0.37238374710083005,
+  "mean": 0.18619187355041503,
+  "stdev": 0.14174617576599122,
+  "p50": 0.18619187355041503,
+  "p90": 0.299588814163208,
+  "p95": 0.3137634317398071,
+  "p99": 0.3251031258010864,
   "values": [
-  0.3297628173828125,
-  0.04320460891723633
+  0.32793804931640624,
+  0.04444569778442383
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 21.449594350269276
+  "value": 21.483214727504865
   },
   "energy": null,
   "efficiency": null
@@ -175,8 +175,8 @@
   "train": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1292.582912,
-  "max_global_vram": 3169.32096,
+  "max_ram": 1297.772544,
+  "max_global_vram": 3176.660992,
   "max_process_vram": 0.0,
   "max_reserved": 2520.776704,
   "max_allocated": 2211.86048
@@ -184,22 +184,22 @@
   "latency": {
   "unit": "s",
   "count": 3,
-  "total": 0.12736819076538086,
-  "mean": 0.04245606358846029,
-  "stdev": 2.110320714938691e-05,
-  "p50": 0.04244889450073242,
-  "p90": 0.042477568817138676,
-  "p95": 0.04248115310668946,
-  "p99": 0.04248402053833008,
+  "total": 0.1295912971496582,
+  "mean": 0.04319709904988606,
+  "stdev": 0.00012673648414281995,
+  "p50": 0.04314112091064453,
+  "p90": 0.04332625885009766,
+  "p95": 0.0433494010925293,
+  "p99": 0.04336791488647461,
   "values": [
-  0.042434558868408204,
-  0.04244889450073242,
-  0.04248473739624024
+  0.04314112091064453,
+  0.04337254333496094,
+  0.04307763290405273
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 141.32256956650173
+  "value": 138.8982161295349
   },
   "energy": null,
   "efficiency": null