Device-side assertion?

I'm trying to run this on 3x A5000. The only change from your script to mine is:

# Originally for 2 x 24 GB, adapted here for 3 GPUs. If using 1 x 48 GB or more (lucky you), you can just use device_map="auto"
device_map = {
    "model.vision_backbone": "cpu", # Seems to be required to not run out of memory at 48 GB
    "model.transformer.wte": 0,
    "model.transformer.ln_f": 0,
    "model.transformer.ff_out": 1,
}
# In the original 2 x 24 GB setup, this works for *only* 38 or 39. Any higher or lower and it'll either only work for 1 token of output or fail completely.
switch_point = 27 # layer index to switch to the second GPU
switch_point_2 = 54 # layer index to switch to the third GPU
device_map |= {f"model.transformer.blocks.{i}": 0 for i in range(0, switch_point)}
device_map |= {f"model.transformer.blocks.{i}": 1 for i in range(switch_point, switch_point_2)}
device_map |= {f"model.transformer.blocks.{i}": 2 for i in range(switch_point_2, 80)}

It's failing with a lot of:

...
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:94: operator(): block: [44,0,0], thread: [86,0,0] Assertion `-sizes[i] <= index && index < sizes[i] && "index out of bounds"` failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:94: operator(): block: [44,0,0], thread: [87,0,0] Assertion `-sizes[i] <= index && index < sizes[i] && "index out of bounds"` failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:94: operator(): block: [44,0,0], thread: [88,0,0] Assertion `-sizes[i] <= index && index < sizes[i] && "index out of bounds"` failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:94: operator(): block: [44,0,0], thread: [89,0,0] Assertion `-sizes[i] <= index && index < sizes[i] && "index out of bounds"` failed.
/pytorch/aten/src/ATen/native/cuda/IndexKernel.cu:94: operator(): block: [44,0,0], thread: [90,0,0] Assertion `-sizes[i] <= index && index < sizes[i] && "index out of bounds"` failed.

And this, which seems to be the traceback:

Traceback (most recent call last):
  File "/mnt/0n1/molmo.py", line 46, in <module>
    output = model.generate_from_batch(
             ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/.cache/huggingface/modules/transformers_modules/SeanScripts/Molmo-72B-0924-nf4/74538bbc40d3164c11d9646003b28891c21133e7/modeling_molmo.py", line 2213, in generat
e_from_batch
    out = super().generate(
          ^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/transformers/generation/utils.py", line 2255, in generate
    result = self._sample(
             ^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/transformers/generation/utils.py", line 3254, in _sample
    outputs = self(**model_inputs, return_dict=True)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/accelerate/hooks.py", line 170, in new_forward
    output = module._old_forward(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/.cache/huggingface/modules/transformers_modules/SeanScripts/Molmo-72B-0924-nf4/74538bbc40d3164c11d9646003b28891c21133e7/modeling_molmo.py", line 2107, in forward
    outputs = self.model.forward(
              ^^^^^^^^^^^^^^^^^^^
  File "/home/test/.cache/huggingface/modules/transformers_modules/SeanScripts/Molmo-72B-0924-nf4/74538bbc40d3164c11d9646003b28891c21133e7/modeling_molmo.py", line 1956, in forward
    x, cache = block(x, attention_bias=attention_bias, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/accelerate/hooks.py", line 170, in new_forward
    output = module._old_forward(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/.cache/huggingface/modules/transformers_modules/SeanScripts/Molmo-72B-0924-nf4/74538bbc40d3164c11d9646003b28891c21133e7/modeling_molmo.py", line 498, in forward
    att, cache = self.attention(q, k, v, attention_bias, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache)
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/.cache/huggingface/modules/transformers_modules/SeanScripts/Molmo-72B-0924-nf4/74538bbc40d3164c11d9646003b28891c21133e7/modeling_molmo.py", line 364, in attentio
n
    q, k = self.rotary_emb(q, k, position_ids=position_ids)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/miniconda3/envs/axolotl/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/test/.cache/huggingface/modules/transformers_modules/SeanScripts/Molmo-72B-0924-nf4/74538bbc40d3164c11d9646003b28891c21133e7/modeling_molmo.py", line 177, in forward
    pos_sin = pos_sin[0, 0][position_ids].view(
              ~~~~~~~~~~~~~^^^^^^^^^^^^^^
RuntimeError: CUDA error: device-side assert triggered
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
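
The frame that trips the assert is the pos_sin[0, 0][position_ids] indexing, so the device-side assertion looks like the generic out-of-bounds check for advanced indexing on the GPU. A minimal, standalone illustration of the same assertion (nothing to do with Molmo's actual tensors or shapes):

import torch

# Advanced indexing on CUDA asserts in IndexKernel.cu when the index tensor
# holds a value outside [-size, size) for the indexed dimension.
table = torch.randn(8, device="cuda")
bad_ids = torch.tensor([3, 12], device="cuda")  # 12 is out of range for size 8
out = table[bad_ids]        # the kernel launch is asynchronous
torch.cuda.synchronize()    # the device-side assert surfaces here

If I'm reading it right, that would mean something in position_ids is outside the range of the cached pos_sin table by the time it reaches the rotary embedding.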

I tried with JPG, PNG, and specifically a 336x336 PNG.
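
For reference, the image goes in the same way as in the example script; roughly this (the standard Molmo pattern from the model card, with a placeholder filename, and assuming the processor loads from the same repo):

from PIL import Image
from transformers import AutoProcessor, GenerationConfig

# Preprocessing and generation assumed to follow the Molmo example;
# only the image file differs.
processor = AutoProcessor.from_pretrained(
    "SeanScripts/Molmo-72B-0924-nf4", trust_remote_code=True
)
inputs = processor.process(
    images=[Image.open("test_336x336.png")],  # placeholder name for the 336x336 PNG
    text="Describe this image.",
)
inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
output = model.generate_from_batch(
    inputs,
    GenerationConfig(max_new_tokens=200, stop_strings="<|endoftext|>"),
    tokenizer=processor.tokenizer,
)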
