IlyasMoutawwakil committed (verified) · commit c5b4de4 · parent 60a93e2

Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub

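For context on the commit message: a single-file upload like this one is typically produced with the huggingface_hub client. The sketch below is illustrative only; the repo_id, repo_type, and token handling are assumptions, not taken from this commit.

```python
# Minimal sketch of uploading one benchmark result file with huggingface_hub.
# NOTE: repo_id and repo_type are hypothetical placeholders; substitute the
# actual target repository.
from huggingface_hub import HfApi

api = HfApi()  # reads the token from the local HF cache or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="benchmark.json",  # local file produced by the benchmark run
    path_in_repo="cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json",
    repo_id="<user-or-org>/<benchmark-repo>",  # placeholder
    repo_type="dataset",  # assumption: results are stored in a dataset repo
    commit_message="Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub",
)
```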
cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json CHANGED
@@ -3,7 +3,7 @@
 "name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
 "backend": {
 "name": "pytorch",
- "version": "2.3.0+cu121",
+ "version": "2.3.1+cu121",
 "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
 "task": "fill-mask",
 "library": "transformers",
@@ -76,10 +76,10 @@
 "environment": {
 "cpu": " AMD EPYC 7R32",
 "cpu_count": 16,
- "cpu_ram_mb": 66697.29792,
+ "cpu_ram_mb": 66697.293824,
 "system": "Linux",
 "machine": "x86_64",
- "platform": "Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35",
+ "platform": "Linux-5.10.219-208.866.amzn2.x86_64-x86_64-with-glibc2.35",
 "processor": "x86_64",
 "python_version": "3.10.12",
 "gpu": [
@@ -89,15 +89,15 @@
 "gpu_vram_mb": 24146608128,
 "optimum_benchmark_version": "0.2.1",
 "optimum_benchmark_commit": null,
- "transformers_version": "4.41.1",
+ "transformers_version": "4.42.3",
 "transformers_commit": null,
- "accelerate_version": "0.30.1",
+ "accelerate_version": "0.31.0",
 "accelerate_commit": null,
- "diffusers_version": "0.27.2",
+ "diffusers_version": "0.29.2",
 "diffusers_commit": null,
 "optimum_version": null,
 "optimum_commit": null,
- "timm_version": "1.0.3",
+ "timm_version": "1.0.7",
 "timm_commit": null,
 "peft_version": null,
 "peft_commit": null
@@ -107,33 +107,33 @@
 "overall": {
 "memory": {
 "unit": "MB",
- "max_ram": 1091.817472,
- "max_global_vram": 3072.851968,
+ "max_ram": 1654.919168,
+ "max_global_vram": 3169.32096,
 "max_process_vram": 0.0,
- "max_reserved": 2426.404864,
+ "max_reserved": 2520.776704,
 "max_allocated": 2211.86048
 },
 "latency": {
 "unit": "s",
 "count": 5,
- "total": 0.9614978790283203,
- "mean": 0.19229957580566406,
- "stdev": 0.24877770459224804,
- "p50": 0.06784095764160156,
- "p90": 0.4417623626708985,
- "p95": 0.5658068405151366,
- "p99": 0.6650424227905273,
+ "total": 0.8833986511230468,
+ "mean": 0.17667973022460937,
+ "stdev": 0.2680315316413682,
+ "p50": 0.04294041442871094,
+ "p90": 0.44497039031982427,
+ "p95": 0.5788561363220214,
+ "p99": 0.6859647331237793,
 "values": [
- 0.689851318359375,
- 0.0696289291381836,
- 0.06744268798828125,
- 0.06784095764160156,
- 0.06673398590087891
+ 0.7127418823242188,
+ 0.04331315231323242,
+ 0.04294041442871094,
+ 0.04181708908081055,
+ 0.04258611297607422
 ]
 },
 "throughput": {
 "unit": "samples/s",
- "value": 52.00219479478153
+ "value": 56.59958834715902
 },
 "energy": null,
 "efficiency": null
@@ -141,30 +141,30 @@
 "warmup": {
 "memory": {
 "unit": "MB",
- "max_ram": 1091.817472,
- "max_global_vram": 3072.851968,
+ "max_ram": 1654.919168,
+ "max_global_vram": 3169.32096,
 "max_process_vram": 0.0,
- "max_reserved": 2426.404864,
+ "max_reserved": 2520.776704,
 "max_allocated": 2211.86048
 },
 "latency": {
 "unit": "s",
 "count": 2,
- "total": 0.7594802474975586,
- "mean": 0.3797401237487793,
- "stdev": 0.3101111946105957,
- "p50": 0.3797401237487793,
- "p90": 0.6278290794372559,
- "p95": 0.6588401988983154,
- "p99": 0.683649094467163,
+ "total": 0.7560550346374512,
+ "mean": 0.3780275173187256,
+ "stdev": 0.3347143650054932,
+ "p50": 0.3780275173187256,
+ "p90": 0.6457990093231202,
+ "p95": 0.6792704458236694,
+ "p99": 0.7060475950241089,
 "values": [
- 0.689851318359375,
- 0.0696289291381836
+ 0.7127418823242188,
+ 0.04331315231323242
 ]
 },
 "throughput": {
 "unit": "samples/s",
- "value": 10.533519504107598
+ "value": 10.581240297984678
 },
 "energy": null,
 "efficiency": null
@@ -172,31 +172,31 @@
 "train": {
 "memory": {
 "unit": "MB",
- "max_ram": 1091.817472,
- "max_global_vram": 3072.851968,
+ "max_ram": 1654.919168,
+ "max_global_vram": 3169.32096,
 "max_process_vram": 0.0,
- "max_reserved": 2426.404864,
+ "max_reserved": 2520.776704,
 "max_allocated": 2211.86048
 },
 "latency": {
 "unit": "s",
 "count": 3,
- "total": 0.20201763153076174,
- "mean": 0.06733921051025392,
- "stdev": 0.00045780439784825653,
- "p50": 0.06744268798828125,
- "p90": 0.0677613037109375,
- "p95": 0.06780113067626953,
- "p99": 0.06783299224853515,
+ "total": 0.12734361648559572,
+ "mean": 0.04244787216186524,
+ "stdev": 0.0004688978931805624,
+ "p50": 0.04258611297607422,
+ "p90": 0.042869554138183594,
+ "p95": 0.042904984283447266,
+ "p99": 0.0429333283996582,
 "values": [
- 0.06744268798828125,
- 0.06784095764160156,
- 0.06673398590087891
+ 0.04294041442871094,
+ 0.04181708908081055,
+ 0.04258611297607422
 ]
 },
 "throughput": {
 "unit": "samples/s",
- "value": 89.10113371594049
+ "value": 141.34984145071803
 },
 "energy": null,
 "efficiency": null