{
    "name": "cuda_inference_transformers_text-generation_openai-community/gpt2",
    "backend": {
        "name": "pytorch",
        "version": "2.6.0.dev20240917+cu124",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "text-generation",
        "library": "transformers",
        "model_type": "gpt2",
        "model": "openai-community/gpt2",
        "processor": "openai-community/gpt2",
        "device": "cuda",
        "device_ids": "0",
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": true,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7R32",
        "cpu_count": 16,
        "cpu_ram_mb": 66697.261056,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-5.10.224-212.876.amzn2.x86_64-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.12",
        "gpu": [
            "NVIDIA A10G"
        ],
        "gpu_count": 1,
        "gpu_vram_mb": 24146608128,
        "optimum_benchmark_version": "0.4.0",
        "optimum_benchmark_commit": null,
        "transformers_version": "4.44.2",
        "transformers_commit": null,
        "accelerate_version": "0.34.2",
        "accelerate_commit": null,
        "diffusers_version": "0.30.3",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.9",
        "timm_commit": null,
        "peft_version": "0.12.0",
        "peft_commit": null
    }
}