No matter how much I increase split_num or other parameters, it always crashes.
The att library is one I compiled using the address you provided.
A10 graphics card, Python 3.12.11, CUDA 12.8
error info:
infer tiny mode
8
0%|          | 0/9 [00:00<?, ?it/s]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 510, in execute
output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 324, in get_output_data
return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 298, in _async_map_node_over_list
await process_inputs(input_dict, i)
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 286, in process_inputs
result = f(**inputs)
^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/comfy_api/internal/init.py", line 149, in wrapped_func
return method(locked_class, **inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/comfy_api/latest/_io.py", line 1275, in EXECUTE_NORMALIZED
to_return = cls.execute(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR_node.py", line 129, in execute
images=run_inference_tiny(model,image,seed,scale,kv_ratio,local_range,steps,cfg,sparse_ratio,color_fix,fix_method,split_num )
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/examples/WanVSR/infer_flashvsr_tiny.py", line 270, in run_inference_tiny
frames,LQ_cur_idx = pipe(
^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/utils/_contextlib.py", line 120, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/diffsynth/pipelines/flashvsr_tiny.py", line 392, in call
noise_pred_posi, pre_cache_k, pre_cache_v = model_fn_wan_video(
^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/diffsynth/pipelines/flashvsr_tiny.py", line 556, in model_fn_wan_video
x, last_pre_cache_k, last_pre_cache_v = block(
^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1784, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/diffsynth/models/wan_video_dit.py", line 470, in forward
self_attn_output, self_attn_cache_k, self_attn_cache_v = self.self_attn(
^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1784, in _call_impl
return forward_call(*args, **kwargs)
No matter how much I increase split_num or other parameters, it always crashes. The att library is one I compiled using the address you provided.
A10 graphics card, Python 3.12.11, CUDA 12.8
error info:
infer tiny mode 8
0%|          | 0/9 [00:00<?, ?it/s]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 510, in execute
output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 324, in get_output_data
return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 298, in _async_map_node_over_list
await process_inputs(input_dict, i)
File "/ai/comfyui-vi-env/ComfyUI/execution.py", line 286, in process_inputs
result = f(**inputs)
^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/comfy_api/internal/init.py", line 149, in wrapped_func
return method(locked_class, **inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/comfy_api/latest/_io.py", line 1275, in EXECUTE_NORMALIZED
to_return = cls.execute(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR_node.py", line 129, in execute
images=run_inference_tiny(model,image,seed,scale,kv_ratio,local_range,steps,cfg,sparse_ratio,color_fix,fix_method,split_num )
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/examples/WanVSR/infer_flashvsr_tiny.py", line 270, in run_inference_tiny
frames,LQ_cur_idx = pipe(
^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/utils/_contextlib.py", line 120, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/diffsynth/pipelines/flashvsr_tiny.py", line 392, in call
noise_pred_posi, pre_cache_k, pre_cache_v = model_fn_wan_video(
^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/diffsynth/pipelines/flashvsr_tiny.py", line 556, in model_fn_wan_video
x, last_pre_cache_k, last_pre_cache_v = block(
^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1784, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/ComfyUI/custom_nodes/flashvsr/FlashVSR/diffsynth/models/wan_video_dit.py", line 470, in forward
self_attn_output, self_attn_cache_k, self_attn_cache_v = self.self_attn(
^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ai/comfyui-vi-env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1784, in _call_impl
return forward_call(*args, **kwargs)