Column schema for the failure records below:

| column | type | values |
| --- | --- | --- |
| machine | string | 4 distinct values |
| hardware | string | 2 distinct values |
| subsets | string | 2 distinct values |
| backends | string | 1 distinct value |
| model | string | 47 distinct values |
| success | bool | 2 classes |
| traceback | string | 0 to 7.23k characters |
| last_updated | string | 26 characters |
| run_id | string | 12 distinct values |
| run_start_time | string | 12 distinct values |
Two representative records follow in full; the remaining records are tabulated further below and refer back to these tracebacks.

Record (run_id bd27598f-6f94-4e3d-9747-85c8ce835edd, run_start_time 2025-02-06T10:09:56.868153):

- machine: 1xT4
- hardware: cuda
- subsets: torchao
- backends: pytorch
- model: deepseek-ai/DeepSeek-V2-Lite
- success: false
- last_updated: 2025-02-06T10:20:32.454062
- traceback (Traceback A):

Traceback (most recent call last):
  File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
    benchmark_report = Benchmark.launch(benchmark_config)
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
    report = launcher.launch(worker=Benchmark.run, worker_args=[config])
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
    raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
    report = worker(*worker_args)
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
    report = scenario.run(backend)
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
    self.run_model_loading_tracking()
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
    self.backend.load()
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
    self.load_transformers_model()
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
    self.process_quantization_config()
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
    self.quantization_config = AutoQuantizationConfig.from_dict(
  File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
    return target_cls.from_dict(quantization_config_dict)
  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
    config = cls(**config_dict)
  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
    self.post_init()
  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
    raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fe7d803e830>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
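The ValueError in Traceback A arises when the serialized quantization config is re-hydrated: `AutoQuantizationConfig.from_dict` passes the whole dict to the torchao config class, whose `post_init` validates every extra keyword against the selected torchao function, and `int4_weight_only` only accepts `group_size`, `layout`, `use_hqq`, and `zero_point_domain`. A minimal sketch of that failure mode follows; it assumes a transformers/torchao install matching the traceback, and the config dict is illustrative, not the exact one built by the benchmark runner.

```python
# Illustrative only: reproduces the post_init() validation error seen in Traceback A,
# assuming a transformers build that forwards unknown config keys to the torchao API.
from transformers.quantizers.auto import AutoQuantizationConfig

config_dict = {
    "quant_method": "torchao",         # dispatch key; ends up among the extra kwargs
    "quant_type": "int4_weight_only",
    "group_size": 128,                 # accepted by int4_weight_only
}

try:
    AutoQuantizationConfig.from_dict(config_dict)
except ValueError as err:
    # Expected: "Unexpected keyword arg: quant_method for API: <function int4_weight_only ...>"
    print(err)
```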
Record (run_id 9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5, run_start_time 2025-02-06T10:09:24.419768):

- machine: 32vCPU-C7i
- hardware: cpu
- subsets: unquantized
- backends: pytorch
- model: deepseek-ai/DeepSeek-V2-Lite
- success: false
- last_updated: 2025-02-06T10:20:42.720544
- traceback (Traceback B):

Traceback (most recent call last):
  File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
    benchmark_report = Benchmark.launch(benchmark_config)
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
    report = launcher.launch(worker=Benchmark.run, worker_args=[config])
  File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch
    raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}")
RuntimeError: Isolated process exited with non-zero code -6
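Traceback B carries less information: the launcher only sees that the isolated worker process died. With Python's multiprocessing, a negative `exitcode` means the child was killed by a signal, so -6 corresponds to SIGABRT (an abort() raised in native code). The helper below is a small sketch for decoding such codes; it is not part of optimum-benchmark.

```python
# Illustrative helper for interpreting the launcher's exit code (not optimum-benchmark code).
import signal

def describe_exitcode(exitcode: int) -> str:
    """multiprocessing reports -N when the child process was killed by signal N."""
    if exitcode < 0:
        return f"killed by {signal.Signals(-exitcode).name} (signal {-exitcode})"
    return f"exited with status {exitcode}"

print(describe_exitcode(-6))  # killed by SIGABRT (signal 6)
```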
1xA10
cuda
torchao
pytorch
Qwen/Qwen2-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f092c6eeb90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:20:48.348796
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
deepseek-ai/DeepSeek-V2-Lite
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:21:01.711256
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
Qwen/Qwen2-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f76283b51b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:21:16.433139
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
deepseek-ai/DeepSeek-V2-Lite
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:21:19.653086
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
HuggingFaceH4/zephyr-7b-beta
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fbc79b2e7a0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:21:28.002236
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
deepseek-ai/DeepSeek-V2-Lite
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:21:37.479031
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
deepseek-ai/DeepSeek-V2-Lite
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f1e346f44c0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:21:43.711594
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
trl-internal-testing/tiny-random-LlamaForCausalLM
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f710483d120>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:21:56.472495
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:21:58.319845
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
deepseek-ai/DeepSeek-V2-Lite
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fc5d6b82830>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:22:11.773378
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:22:15.693615
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
trl-internal-testing/tiny-random-LlamaForCausalLM
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f9e60b23490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:22:24.194685
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:22:32.618839
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
HuggingFaceH4/zephyr-7b-beta
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fd786270430>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:22:40.764264
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
openai-community/gpt2-large
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f6c49eea950>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:22:51.212664
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:23:08.314528
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
openai-community/gpt2-large
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fdc2f824dc0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:23:18.211473
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:23:28.766093
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
trl-internal-testing/tiny-random-LlamaForCausalLM
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f7e80c71120>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:23:36.549609
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f3fa8d94550>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:23:45.909608
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:23:48.996367
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
trl-internal-testing/tiny-random-LlamaForCausalLM
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f1e63e3f490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:24:01.671297
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:24:07.681859
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f8c6837e830>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:24:22.365183
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:24:32.672938
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7eff71935000>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:24:50.829049
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:24:55.879913
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
openai-community/gpt2-large
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fdb2e5dcdc0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:25:01.972445
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fabb6383490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:25:17.540430
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7feb1816c550>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:25:28.319605
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:25:34.181754
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
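The CPU `unquantized` rows fail differently: the isolated worker dies before it can send back a traceback, so the launcher only sees exit code -6. In Python's `multiprocessing`, a negative `Process.exitcode` is the negated signal number, so -6 means the child was killed by SIGABRT. A small self-contained sketch (Linux only, unrelated to the benchmark runner's code) reproduces that mapping:

```python
# Minimal sketch (Linux), unrelated to the benchmark runner: shows why a child killed
# by SIGABRT surfaces as Process.exitcode == -6, which is what the launcher reports.
import multiprocessing as mp
import os
import signal

def crash() -> None:
    os.abort()  # send SIGABRT to this child process

if __name__ == "__main__":
    p = mp.Process(target=crash)
    p.start()
    p.join()
    print(p.exitcode)                        # -6
    print(signal.Signals(-p.exitcode).name)  # SIGABRT
```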
1xT4
cuda
torchao
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7ff39cce6ef0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:25:46.625377
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:25:51.940672
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
mistralai/Mistral-7B-Instruct-v0.2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f7623f2e830>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:25:53.750067
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:26:09.767691
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fec9d0b9240>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:26:13.711938
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f2199805000>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:26:20.876917
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:26:29.630561
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
meta-llama/Llama-3.1-70B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7faa98577d90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:26:42.801253
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
meta-llama/Llama-2-7b-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fe2aaac3490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:26:46.605318
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:26:48.717537
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
32vCPU-C7i
cpu
unquantized
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:27:11.980251
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f9840046ef0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:27:14.242146
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:27:32.826732
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
peft-internal-testing/tiny-dummy-qwen2
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7ff101df9240>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:27:40.367458
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
Qwen/Qwen1.5-1.8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fb3bc0feb90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:27:43.755894
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-3.1-70B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:27:53.037098
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
meta-llama/Llama-3.1-70B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7febc493bd90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:28:09.173095
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-3.1-70B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:28:28.388604
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f01100e6b90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:28:39.310956
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-3.1-70B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:28:47.365100
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f3e345a91b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:29:06.601589
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
meta-llama/Llama-3.1-70B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:29:24.978016
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
Qwen/Qwen1.5-1.8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f3c683b51b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:29:32.185074
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fb892547d90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:29:35.399520
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-1.8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:29:43.609336
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fcddc302b90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:29:59.685673
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f520c51a440>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:30:02.602905
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-1.8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:30:20.558509
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f7fd01e51b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:30:25.362147
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-1.8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:30:39.559354
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
Qwen/Qwen2.5-1.5B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f8fe1ddaf80>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:30:44.917529
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f18c0813d90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:30:53.207744
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-1.8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:30:58.207231
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
Qwen/Qwen2.5-1.5B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f87e3ee93f0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:31:13.761998
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f0bc0f26440>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:31:20.312199
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:31:39.093069
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
meta-llama/Llama-2-7b-chat-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f6564f111b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:31:42.215941
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
Qwen/Qwen2.5-1.5B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f55622bef80>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:31:58.166712
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
meta-llama/Llama-2-7b-chat-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fb4809d7490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:32:10.232384
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:32:18.818241
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
Qwen/Qwen2.5-1.5B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f42dc4e13f0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:32:25.568917
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:32:37.595622
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
meta-llama/Llama-2-7b-chat-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fe0279551b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:32:51.640341
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:32:59.468947
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
microsoft/git-base
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f6cc9145bd0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:33:06.251215
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
meta-llama/Llama-2-7b-chat-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f2167fe3490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:33:16.901035
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen1.5-0.5B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:33:18.330549
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
32vCPU-C7i
cpu
unquantized
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:33:39.966891
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
microsoft/git-base
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f1b18a17640>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:33:42.938387
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:34:01.349662
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
mistralai/Mixtral-8x7B-Instruct-v0.1
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fe0983367a0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:34:06.074595
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
microsoft/git-base
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fa804459bd0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:34:09.338585
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:34:20.851951
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xT4
cuda
torchao
pytorch
meta-llama/Llama-2-13b-chat-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7ff5ec1ed1b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:34:33.620695
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
mistralai/Mixtral-8x7B-Instruct-v0.1
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f4f704cc430>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:34:35.242160
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:34:40.946548
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
32vCPU-C7i
cpu
unquantized
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:35:01.441286
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
32vCPU-C7i
cpu
unquantized
pytorch
google/gemma-2-9b-it
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:35:21.821584
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
meta-llama/Llama-2-13b-chat-hf
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f950a7cd1b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:35:27.604640
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
lmsys/vicuna-7b-v1.5
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f70ad34c8b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:35:28.957136
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xT4
cuda
torchao
pytorch
lmsys/vicuna-7b-v1.5
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f0994d66d40>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:36:02.574698
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
lmsys/vicuna-7b-v1.5
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7ff6683848b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:36:29.193095
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
32vCPU-C7i
cpu
unquantized
pytorch
Qwen/Qwen2.5-1.5B-Instruct
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 57, in launch raise RuntimeError(f"Isolated process exited with non-zero code {isolated_process.exitcode}") RuntimeError: Isolated process exited with non-zero code -6
2025-02-06T10:36:41.233878
9fd70c10-e9cd-4ae1-ad3b-2f56445ec3b5
2025-02-06T10:09:24.419768
1xA10
cuda
torchao
pytorch
lmsys/vicuna-7b-v1.5
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f4da0446d40>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:36:58.724659
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xT4
cuda
torchao
pytorch
meta-llama/Llama-3.1-8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fb44414e050>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:37:11.147192
bd27598f-6f94-4e3d-9747-85c8ce835edd
2025-02-06T10:09:56.868153
1xA10
cuda
torchao
pytorch
meta-llama/Llama-3.1-8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fd1b054fac0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:37:38.063857
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
1xA10
cuda
torchao
pytorch
meta-llama/Llama-3.1-8B
false
Traceback (most recent call last): File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark benchmark_report = Benchmark.launch(benchmark_config) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch report = launcher.launch(worker=Benchmark.run, worker_args=[config]) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch raise ChildProcessError(response["traceback"]) ChildProcessError: Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target report = worker(*worker_args) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run report = scenario.run(backend) File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run self.run_model_loading_tracking() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking self.backend.load() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load self.load_transformers_model() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model self.process_quantization_config() File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config self.quantization_config = AutoQuantizationConfig.from_dict( File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict return target_cls.from_dict(quantization_config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict config = cls(**config_dict) File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__ self.post_init() File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init raise ValueError( ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fc544172050>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
2025-02-06T10:38:04.142606
fc85a78b-b92a-46d9-b36b-715516040a48
2025-02-06T10:12:52.699176
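
Every torchao row above fails with the same ValueError: when the quantization config dict is round-tripped through from_dict, the stray quant_method key is forwarded to torchao's int4_weight_only API, which only accepts group_size, layout, use_hqq, and zero_point_domain. The following is a minimal sketch of that failure path, assuming a transformers version with TorchAoConfig matching the traceback and torchao installed; the dict keys shown are illustrative, not taken from these runs.

# Hypothetical reproduction of the recurring ValueError recorded above.
# Assumes transformers' TorchAoConfig validates extra kwargs against the
# signature of torchao's int4_weight_only() in post_init(), as the traceback shows.
from transformers import TorchAoConfig

# A serialized config that still carries "quant_method" re-feeds that key to
# int4_weight_only(), which does not accept it -> ValueError as logged above.
bad = {"quant_type": "int4_weight_only", "quant_method": "torchao", "group_size": 128}
try:
    TorchAoConfig.from_dict(bad)
except ValueError as err:
    print(err)  # "Unexpected keyword arg: quant_method for API: <function int4_weight_only ...>"

# Building the config without the stray key passes validation (sketch only, not benchmarked here).
ok = TorchAoConfig("int4_weight_only", group_size=128)

A likely remedy on the calling side is to drop quant_method from the dict before forwarding it to the target config class, though that is inferred from the traceback rather than verified in these runs. The CPU rows that exit with code -6 are a separate failure mode and are not covered by this sketch.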