{
    "name": "cuda_inference_transformers_image-classification_google/vit-base-patch16-224",
    "backend": {
        "name": "pytorch",
        "version": "2.3.1+cu121",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "image-classification",
        "library": "transformers",
        "model_type": "vit",
        "model": "google/vit-base-patch16-224",
        "processor": "google/vit-base-patch16-224",
        "device": "cuda",
        "device_ids": "0",
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": true,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7R32",
        "cpu_count": 16,
        "cpu_ram_mb": 66697.293824,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-5.10.219-208.866.amzn2.x86_64-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.12",
        "gpu": [
            "NVIDIA A10G"
        ],
        "gpu_count": 1,
        "gpu_vram_mb": 24146608128,
        "optimum_benchmark_version": "0.3.1",
        "optimum_benchmark_commit": null,
        "transformers_version": "4.43.3",
        "transformers_commit": null,
        "accelerate_version": "0.33.0",
        "accelerate_commit": null,
        "diffusers_version": "0.29.2",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.8",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    }
}
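
The JSON above is the benchmark_config.json emitted by optimum-benchmark 0.3.1 for a CUDA inference run of google/vit-base-patch16-224. The snippet below is a minimal sketch of how an equivalent configuration could be assembled and launched from Python; it assumes optimum-benchmark's Python API (BenchmarkConfig, PyTorchConfig, InferenceConfig, ProcessConfig, Benchmark.launch) and is not the exact invocation that produced this file. Field names mirror the "backend", "scenario" and "launcher" sections of the JSON.

# Sketch only: assumes optimum-benchmark 0.3.1's Python API; defaults cover the
# remaining fields shown in the JSON config above.
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    ProcessConfig,
    PyTorchConfig,
)
from optimum_benchmark.logging_utils import setup_logging

setup_logging(level="INFO")

if __name__ == "__main__":
    # "backend" section: PyTorch on CUDA device 0, randomly initialized weights
    backend_config = PyTorchConfig(
        model="google/vit-base-patch16-224",
        task="image-classification",
        device="cuda",
        device_ids="0",
        no_weights=True,
    )
    # "scenario" section: measure latency, memory and energy at batch_size=1
    scenario_config = InferenceConfig(
        memory=True,
        latency=True,
        energy=True,
        input_shapes={"batch_size": 1, "num_choices": 2, "sequence_length": 2},
    )
    # "launcher" section: isolated spawned process, error on device contention
    launcher_config = ProcessConfig(
        device_isolation=True,
        device_isolation_action="error",
    )

    benchmark_config = BenchmarkConfig(
        name="cuda_inference_transformers_image-classification_google/vit-base-patch16-224",
        backend=backend_config,
        scenario=scenario_config,
        launcher=launcher_config,
    )
    benchmark_report = Benchmark.launch(benchmark_config)
    benchmark_report.log()  # print the collected latency/memory/energy report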