IlyasMoutawwakil HF staff committed on
Commit
10a245f
·
verified ·
1 Parent(s): 623b6eb

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

Browse files
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -3,7 +3,7 @@
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
- "version": "2.4.1+cu124",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
@@ -79,10 +79,10 @@
79
  "environment": {
80
  "cpu": " AMD EPYC 7R32",
81
  "cpu_count": 16,
82
- "cpu_ram_mb": 66697.261056,
83
  "system": "Linux",
84
  "machine": "x86_64",
85
- "platform": "Linux-5.10.225-213.878.amzn2.x86_64-x86_64-with-glibc2.35",
86
  "processor": "x86_64",
87
  "python_version": "3.10.12",
88
  "gpu": [
@@ -92,15 +92,15 @@
92
  "gpu_vram_mb": 24146608128,
93
  "optimum_benchmark_version": "0.5.0.dev0",
94
  "optimum_benchmark_commit": null,
95
- "transformers_version": "4.45.2",
96
  "transformers_commit": null,
97
- "accelerate_version": "1.0.1",
98
  "accelerate_commit": null,
99
- "diffusers_version": "0.30.3",
100
  "diffusers_commit": null,
101
  "optimum_version": null,
102
  "optimum_commit": null,
103
- "timm_version": "1.0.9",
104
  "timm_commit": null,
105
  "peft_version": "0.13.2",
106
  "peft_commit": null
@@ -112,7 +112,7 @@
112
  "overall": {
113
  "memory": {
114
  "unit": "MB",
115
- "max_ram": 1350.053888,
116
  "max_global_vram": 3566.731264,
117
  "max_process_vram": 0.0,
118
  "max_reserved": 2910.846976,
@@ -121,42 +121,42 @@
121
  "latency": {
122
  "unit": "s",
123
  "values": [
124
- 0.35609701538085936,
125
- 0.044365825653076174,
126
- 0.04398284912109375,
127
- 0.043514881134033206,
128
- 0.04412108612060547
129
  ],
130
  "count": 5,
131
- "total": 0.532081657409668,
132
- "mean": 0.1064163314819336,
133
- "p50": 0.04412108612060547,
134
- "p90": 0.2314045394897461,
135
- "p95": 0.29375077743530265,
136
- "p99": 0.34362776779174803,
137
- "stdev": 0.12484064959463413,
138
- "stdev_": 117.3134309895173
139
  },
140
  "throughput": {
141
  "unit": "samples/s",
142
- "value": 93.97053873913808
143
  },
144
  "energy": {
145
  "unit": "kWh",
146
- "cpu": 1.046066729236246e-05,
147
- "ram": 5.713810620613953e-06,
148
- "gpu": 1.5273067774000063e-05,
149
- "total": 3.144754568697648e-05
150
  },
151
  "efficiency": {
152
  "unit": "samples/kWh",
153
- "value": 317989.8393196817
154
  }
155
  },
156
  "warmup": {
157
  "memory": {
158
  "unit": "MB",
159
- "max_ram": 1350.053888,
160
  "max_global_vram": 3566.731264,
161
  "max_process_vram": 0.0,
162
  "max_reserved": 2910.846976,
@@ -165,22 +165,22 @@
165
  "latency": {
166
  "unit": "s",
167
  "values": [
168
- 0.35609701538085936,
169
- 0.044365825653076174
170
  ],
171
  "count": 2,
172
- "total": 0.40046284103393553,
173
- "mean": 0.20023142051696777,
174
- "p50": 0.20023142051696777,
175
- "p90": 0.32492389640808106,
176
- "p95": 0.3405104558944702,
177
- "p99": 0.35297970348358154,
178
- "stdev": 0.1558655948638916,
179
- "stdev_": 77.84272541315933
180
  },
181
  "throughput": {
182
  "unit": "samples/s",
183
- "value": 19.976884695082294
184
  },
185
  "energy": null,
186
  "efficiency": null
@@ -188,7 +188,7 @@
188
  "train": {
189
  "memory": {
190
  "unit": "MB",
191
- "max_ram": 1350.053888,
192
  "max_global_vram": 3566.731264,
193
  "max_process_vram": 0.0,
194
  "max_reserved": 2910.846976,
@@ -197,23 +197,23 @@
197
  "latency": {
198
  "unit": "s",
199
  "values": [
200
- 0.04398284912109375,
201
- 0.043514881134033206,
202
- 0.04412108612060547
203
  ],
204
  "count": 3,
205
- "total": 0.13161881637573242,
206
- "mean": 0.04387293879191081,
207
- "p50": 0.04398284912109375,
208
- "p90": 0.04409343872070312,
209
- "p95": 0.0441072624206543,
210
- "p99": 0.044118321380615236,
211
- "stdev": 0.0002593984474151631,
212
- "stdev_": 0.5912493089316151
213
  },
214
  "throughput": {
215
  "unit": "samples/s",
216
- "value": 136.75856154651456
217
  },
218
  "energy": null,
219
  "efficiency": null
 
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
+ "version": "2.5.1+cu124",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
 
79
  "environment": {
80
  "cpu": " AMD EPYC 7R32",
81
  "cpu_count": 16,
82
+ "cpu_ram_mb": 66697.248768,
83
  "system": "Linux",
84
  "machine": "x86_64",
85
+ "platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
86
  "processor": "x86_64",
87
  "python_version": "3.10.12",
88
  "gpu": [
 
92
  "gpu_vram_mb": 24146608128,
93
  "optimum_benchmark_version": "0.5.0.dev0",
94
  "optimum_benchmark_commit": null,
95
+ "transformers_version": "4.46.3",
96
  "transformers_commit": null,
97
+ "accelerate_version": "1.1.1",
98
  "accelerate_commit": null,
99
+ "diffusers_version": "0.31.0",
100
  "diffusers_commit": null,
101
  "optimum_version": null,
102
  "optimum_commit": null,
103
+ "timm_version": "1.0.11",
104
  "timm_commit": null,
105
  "peft_version": "0.13.2",
106
  "peft_commit": null
 
112
  "overall": {
113
  "memory": {
114
  "unit": "MB",
115
+ "max_ram": 1359.392768,
116
  "max_global_vram": 3566.731264,
117
  "max_process_vram": 0.0,
118
  "max_reserved": 2910.846976,
 
121
  "latency": {
122
  "unit": "s",
123
  "values": [
124
+ 0.36362753295898437,
125
+ 0.045039615631103515,
126
+ 0.04426649475097656,
127
+ 0.04447641754150391,
128
+ 0.04424300765991211
129
  ],
130
  "count": 5,
131
+ "total": 0.5416530685424804,
132
+ "mean": 0.10833061370849609,
133
+ "p50": 0.04447641754150391,
134
+ "p90": 0.23619236602783206,
135
+ "p95": 0.29990994949340816,
136
+ "p99": 0.3508840162658691,
137
+ "stdev": 0.12764878250214318,
138
+ "stdev_": 117.83260348328666
139
  },
140
  "throughput": {
141
  "unit": "samples/s",
142
+ "value": 92.31000967933893
143
  },
144
  "energy": {
145
  "unit": "kWh",
146
+ "cpu": 9.03662806805588e-06,
147
+ "ram": 4.827364832180661e-06,
148
+ "gpu": 1.4324733682000157e-05,
149
+ "total": 2.8188726582236697e-05
150
  },
151
  "efficiency": {
152
  "unit": "samples/kWh",
153
+ "value": 354751.74697325856
154
  }
155
  },
156
  "warmup": {
157
  "memory": {
158
  "unit": "MB",
159
+ "max_ram": 1359.392768,
160
  "max_global_vram": 3566.731264,
161
  "max_process_vram": 0.0,
162
  "max_reserved": 2910.846976,
 
165
  "latency": {
166
  "unit": "s",
167
  "values": [
168
+ 0.36362753295898437,
169
+ 0.045039615631103515
170
  ],
171
  "count": 2,
172
+ "total": 0.4086671485900879,
173
+ "mean": 0.20433357429504395,
174
+ "p50": 0.20433357429504395,
175
+ "p90": 0.33176874122619626,
176
+ "p95": 0.3476981370925903,
177
+ "p99": 0.36044165378570553,
178
+ "stdev": 0.15929395866394044,
179
+ "stdev_": 77.95779974657061
180
  },
181
  "throughput": {
182
  "unit": "samples/s",
183
+ "value": 19.575833358762026
184
  },
185
  "energy": null,
186
  "efficiency": null
 
188
  "train": {
189
  "memory": {
190
  "unit": "MB",
191
+ "max_ram": 1359.392768,
192
  "max_global_vram": 3566.731264,
193
  "max_process_vram": 0.0,
194
  "max_reserved": 2910.846976,
 
197
  "latency": {
198
  "unit": "s",
199
  "values": [
200
+ 0.04426649475097656,
201
+ 0.04447641754150391,
202
+ 0.04424300765991211
203
  ],
204
  "count": 3,
205
+ "total": 0.13298591995239256,
206
+ "mean": 0.044328639984130856,
207
+ "p50": 0.04426649475097656,
208
+ "p90": 0.04443443298339844,
209
+ "p95": 0.04445542526245117,
210
+ "p99": 0.04447221908569336,
211
+ "stdev": 0.00010493352088830884,
212
+ "stdev_": 0.23671721245198102
213
  },
214
  "throughput": {
215
  "unit": "samples/s",
216
+ "value": 135.35267497825177
217
  },
218
  "energy": null,
219
  "efficiency": null