import os

import torch
import folder_paths
import comfy.model_patcher
import comfy.model_management
import comfy.sd  # provides the CLIP wrapper type used in annotations below
import comfy.utils
from omegaconf import OmegaConf
from typing_extensions import override
from comfy_api.latest import io

from .kandinsky_patcher import KandinskyModelHandler, KandinskyPatcher
from .src.kandinsky.magcache_utils import set_magcache_params
from .src.kandinsky.models.text_embedders import Qwen2_5_VLTextEmbedder
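
# ComfyUI nodes for the Kandinsky-5 text-to-video pipeline: a model loader,
# a dual (CLIP + Qwen2.5-VL) text encoder, latent helpers for text-to-video and
# image-to-video, a frame-pruning utility, and a Hunyuan Video VAE loader/decoder.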
class KandinskyLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        from .kandinsky_patcher import KANDINSKY_CONFIGS

        # Make sure .gguf files show up in the diffusion_models file list.
        if "diffusion_models" in folder_paths.folder_names_and_paths:
            paths, extensions = folder_paths.folder_names_and_paths["diffusion_models"]
            if '.gguf' not in extensions:
                extensions.add('.gguf')
                folder_paths.folder_names_and_paths["diffusion_models"] = (paths, extensions)
        checkpoint_files = folder_paths.get_filename_list("diffusion_models")

        return io.Schema(
            node_id="KandinskyV5_Loader",
            display_name="Kandinsky 5 Loader",
            category="Kandinsky",
            description="Loads a Kandinsky-5 text-to-video model variant.",
            inputs=[
                io.Combo.Input("variant", options=list(KANDINSKY_CONFIGS.keys()), default="sft_5s"),
                io.Combo.Input("checkpoint_file", options=checkpoint_files,
                               tooltip="Select a checkpoint file (.safetensors or .gguf) from the diffusion_models folder. Leave at the default to use the variant's default checkpoint."),
                io.Boolean.Input("use_magcache", default=False, tooltip="Enable MagCache for faster inference with non-distilled models. Use 50 steps with it."),
                io.Float.Input("magcache_threshold", default=0.12, min=0.01, max=0.3, step=0.01,
                               tooltip="MagCache quality threshold. Lower = better quality but slower; higher = faster but more artifacts. Recommended: 0.02-0.06."),
                io.Int.Input("blocks_in_memory", default=0, min=0, max=60,
                             tooltip="Block swapping: keep N transformer blocks in VRAM and swap the rest to RAM. 0 = disabled (load everything to VRAM). The 2B model has 32 blocks, the 20B model has 60."),
                io.Combo.Input("quantize_to_fp8", options=["off", "on"], default="off",
                               tooltip="Quantize to FP8: converts a standard float32/float16 checkpoint to FP8 for faster inference. Not compatible with GGUF."),
            ],
            outputs=[io.Model.Output()],
        )

    @classmethod
    def execute(cls, variant: str, checkpoint_file: str, use_magcache: bool, magcache_threshold: float, blocks_in_memory: int, quantize_to_fp8: str) -> io.NodeOutput:
        from .kandinsky_patcher import KANDINSKY_CONFIGS

        config_data = KANDINSKY_CONFIGS[variant]
        base_path = os.path.dirname(__file__)
        config_path = os.path.join(base_path, 'src', 'configs', config_data["config"])
        if not os.path.exists(config_path):
            raise FileNotFoundError(f"Config file not found at '{config_path}'.")

        try:
            ckpt_path = folder_paths.get_full_path_or_raise("diffusion_models", checkpoint_file)
        except FileNotFoundError:
            raise FileNotFoundError(f"Checkpoint file '{checkpoint_file}' not found in the diffusion_models folder.")

        conf = OmegaConf.load(config_path)
        conf.blocks_in_memory = blocks_in_memory

        is_gguf = checkpoint_file.lower().endswith('.gguf')
        is_fp8 = not is_gguf and quantize_to_fp8 == "on"
        if is_gguf:
            print(f"Loading GGUF model: {checkpoint_file}")
        elif is_fp8:
            print("Quantizing model to FP8 on-the-fly...")
        else:
            print(f"Loading model: {checkpoint_file}")

        handler = KandinskyModelHandler(conf, ckpt_path)
        patcher = KandinskyPatcher(
            handler,
            load_device=comfy.model_management.get_torch_device(),
            offload_device=comfy.model_management.unet_offload_device(),
        )
        handler.conf.use_magcache = use_magcache
        handler.conf.magcache_threshold = magcache_threshold
        handler.conf.use_fp8 = is_fp8
        handler.conf.use_gguf = is_gguf
        handler.conf.fp8_mode = "on_the_fly" if is_fp8 else "off"
        return io.NodeOutput(patcher)
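
# Kandinsky-5 conditions on two text encoders: a pooled CLIP-L embedding and
# per-token Qwen2.5-VL hidden states. The node below runs both, pads the positive
# and negative sequences to a common length, and packs the result into the
# "kandinsky_embeds" conditioning dict.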
class KandinskyTextEncode(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="KandinskyV5_TextEncode",
            display_name="Kandinsky 5 Text Encode",
            category="Kandinsky",
            description="Encodes text using Kandinsky's combined CLIP and Qwen2.5-VL embedding logic.",
            inputs=[
                io.Clip.Input("clip", tooltip="A standard CLIP-L/14 model. Use CLIPLoader with 'stable_diffusion' type."),
                io.Clip.Input("qwen_vl", tooltip="The Qwen2.5-VL model. Use CLIPLoader with 'qwen_image' type."),
                io.String.Input("text", multiline=True, dynamic_prompts=True),
                io.String.Input("negative_text", multiline=True, dynamic_prompts=True, default="Static, 2D cartoon, cartoon, 2d animation, paintings, images, worst quality, low quality, ugly, deformed, walking backwards"),
                io.Combo.Input("content_type", options=["video", "image"], default="video"),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
            ],
        )

    @classmethod
    def _get_raw_embeds(cls, text: str, clip_wrapper: comfy.sd.CLIP, qwen_vl_wrapper: comfy.sd.CLIP, content_type: str):
        import comfy.model_management as mm
        load_device = mm.get_torch_device()

        # Move both text encoders onto the compute device before encoding.
        try:
            if hasattr(clip_wrapper, 'cond_stage_model'):
                clip_wrapper.cond_stage_model.to(load_device)
            if hasattr(qwen_vl_wrapper, 'cond_stage_model'):
                qwen_vl_wrapper.cond_stage_model.to(load_device)
        except Exception:
            pass

        # Pooled CLIP embedding.
        clip_tokens = clip_wrapper.tokenize(text)
        _, pooled_embed = clip_wrapper.encode_from_tokens(clip_tokens, return_pooled=True)

        # Per-token Qwen2.5-VL embeddings, using the content-type specific prompt template.
        qwen_template_config = Qwen2_5_VLTextEmbedder.PROMPT_TEMPLATE
        prompt_template = "\n".join(qwen_template_config["template"][content_type])
        full_text = prompt_template.format(text)
        qwen_tokens = qwen_vl_wrapper.tokenize(full_text)
        encoded_output = qwen_vl_wrapper.encode_from_tokens(qwen_tokens, return_dict=True)
        text_embeds_padded = encoded_output.get('cond')
        attention_mask = encoded_output.get('attention_mask')
        if attention_mask is None:
            # Reconstruct the mask by treating all-zero embedding rows as padding.
            is_padding = torch.abs(text_embeds_padded).sum(dim=-1) < 1e-6
            attention_mask = (~is_padding).long()
        return text_embeds_padded, attention_mask, pooled_embed

    @classmethod
    def execute(cls, clip, qwen_vl, text, negative_text, content_type) -> io.NodeOutput:
        import comfy.model_management as mm
        offload_device = mm.unet_offload_device()

        # Only the first line of each prompt is encoded.
        pos_text_embeds, pos_attn_mask, pos_pooled_embed = cls._get_raw_embeds(text.split('\n')[0], clip, qwen_vl, content_type)
        neg_text_embeds, neg_attn_mask, neg_pooled_embed = cls._get_raw_embeds(negative_text.split('\n')[0], clip, qwen_vl, content_type)

        # Offload the text encoders back to CPU once the embeddings are computed.
        offloaded_models = []
        try:
            if hasattr(clip, 'cond_stage_model'):
                clip.cond_stage_model.to(offload_device)
                offloaded_models.append("CLIP")
            if hasattr(qwen_vl, 'cond_stage_model'):
                qwen_vl.cond_stage_model.to(offload_device)
                offloaded_models.append("Qwen-VL")
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            if offloaded_models:
                print(f"CLIP models offloaded to CPU: {', '.join(offloaded_models)}")
        except Exception as e:
            print(f"Warning: CLIP offload failed: {e}")

        # Zero-pad the shorter sequence so positive and negative embeddings match in length.
        if pos_text_embeds.shape[1] != neg_text_embeds.shape[1]:
            max_len = max(pos_text_embeds.shape[1], neg_text_embeds.shape[1])
            if pos_text_embeds.shape[1] < max_len:
                pad_amount = max_len - pos_text_embeds.shape[1]
                padding = torch.zeros((1, pad_amount, pos_text_embeds.shape[2]), dtype=pos_text_embeds.dtype, device=pos_text_embeds.device)
                pos_text_embeds = torch.cat([pos_text_embeds, padding], dim=1)
                mask_padding = torch.zeros((1, pad_amount), dtype=pos_attn_mask.dtype, device=pos_attn_mask.device)
                pos_attn_mask = torch.cat([pos_attn_mask, mask_padding], dim=1)
            if neg_text_embeds.shape[1] < max_len:
                pad_amount = max_len - neg_text_embeds.shape[1]
                padding = torch.zeros((1, pad_amount, neg_text_embeds.shape[2]), dtype=neg_text_embeds.dtype, device=neg_text_embeds.device)
                neg_text_embeds = torch.cat([neg_text_embeds, padding], dim=1)
                mask_padding = torch.zeros((1, pad_amount), dtype=neg_attn_mask.dtype, device=neg_attn_mask.device)
                neg_attn_mask = torch.cat([neg_attn_mask, mask_padding], dim=1)

        pos_embeds = {"text_embeds": pos_text_embeds, "pooled_embed": pos_pooled_embed, "attention_mask": pos_attn_mask}
        positive = [[torch.zeros(1), {"kandinsky_embeds": pos_embeds}]]
        neg_embeds = {"text_embeds": neg_text_embeds, "pooled_embed": neg_pooled_embed, "attention_mask": neg_attn_mask}
        negative = [[torch.zeros(1), {"kandinsky_embeds": neg_embeds}]]
        return io.NodeOutput(positive, negative)
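
# The latent tensor is [batch, 16, frames, height // 8, width // 8]; at 24 fps with a
# temporal compression factor of 4, a clip of t seconds needs t * 24 // 4 + 1 latent frames.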
class EmptyKandinskyLatent(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="EmptyKandinskyV5_Latent",
            display_name="Empty Kandinsky 5 Latent",
            category="Kandinsky",
            description="Creates an empty latent tensor with the correct shape for Kandinsky-5.",
            inputs=[
                io.Int.Input("width", default=768, min=64, max=4096, step=64),
                io.Int.Input("height", default=512, min=64, max=4096, step=64),
                io.Float.Input("time_length", default=5.0, min=0.0, max=30.0, step=0.1, tooltip="Time in seconds. 0 for image generation."),
                io.Int.Input("batch_size", default=1, min=1, max=64),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, width, height, time_length, batch_size) -> io.NodeOutput:
        if time_length == 0:
            latent_frames = 1
        else:
            latent_frames = int(time_length * 24 // 4 + 1)
        latent = torch.zeros([batch_size, 16, latent_frames, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        return io.NodeOutput({"samples": latent})
class KandinskyImageToVideoLatent(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="KandinskyV5_ImageToVideoLatent",
            display_name="Kandinsky 5 Image to Video Latent",
            category="Kandinsky",
            description="Encodes an image for image-to-video generation with Kandinsky-5.",
            inputs=[
                io.Vae.Input("vae", tooltip="The Kandinsky VAE from the VAE Loader."),
                io.Image.Input("image", tooltip="Input image to condition the video generation."),
                io.Float.Input("time_length", default=5.0, min=0.1, max=30.0, step=0.1, tooltip="Time in seconds for the output video."),
                io.Int.Input("batch_size", default=1, min=1, max=64),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, vae, image, time_length, batch_size) -> io.NodeOutput:
        from math import floor, sqrt
        import comfy.model_management as mm

        device = mm.get_torch_device()
        offload_device = mm.unet_offload_device()

        # ComfyUI images are BHWC in [0, 1]; convert to BCHW for the VAE.
        image = image.permute(0, 3, 1, 2).to(device)

        # Resize so the area roughly matches 768x512 while keeping both sides divisible by 16.
        MAX_AREA = 768 * 512
        B, C, H, W = image.shape
        area = H * W
        k = sqrt(MAX_AREA / area) / 16
        new_h = int(floor(H * k) * 16)
        new_w = int(floor(W * k) * 16)
        if new_h != H or new_w != W:
            import torch.nn.functional as F_torch
            image = F_torch.interpolate(image, size=(new_h, new_w), mode='bilinear', align_corners=False)

        # Add a temporal dimension: (B, C, 1, H, W).
        image = image.unsqueeze(2)

        with torch.no_grad():
            image_scaled = image * 2.0 - 1.0
            vae_model = vae.first_stage_model
            vae_model = vae_model.to(device)
            vae_dtype = next(vae_model.parameters()).dtype
            image_scaled = image_scaled.to(dtype=vae_dtype)

            # Disable tiling for the single-frame encode, then restore the original settings.
            original_use_tiling = getattr(vae_model, 'use_tiling', None)
            original_use_framewise = getattr(vae_model, 'use_framewise_encoding', None)
            if hasattr(vae_model, 'use_tiling'):
                vae_model.use_tiling = False
            if hasattr(vae_model, 'use_framewise_encoding'):
                vae_model.use_framewise_encoding = False
            try:
                encoded = vae_model.encode(image_scaled, opt_tiling=False)
            except TypeError:
                encoded = vae_model.encode(image_scaled)
            if original_use_tiling is not None:
                vae_model.use_tiling = original_use_tiling
            if original_use_framewise is not None:
                vae_model.use_framewise_encoding = original_use_framewise

            # The encoder may return a distribution, an object with .sample, or a raw tensor.
            if hasattr(encoded, 'latent_dist'):
                image_latent = encoded.latent_dist.sample()
            elif hasattr(encoded, 'sample'):
                image_latent = encoded.sample
            else:
                image_latent = encoded

            scaling_factor = 0.476986
            image_latent = image_latent * scaling_factor

        try:
            vae_model.to(offload_device)
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        except Exception as e:
            print(f"Warning: VAE offload failed: {e}")

        latent_frames = int(time_length * 24 // 4 + 1)
        _, C_latent, _, H_latent, W_latent = image_latent.shape

        video_latent = torch.zeros(
            [batch_size, C_latent, latent_frames, H_latent, W_latent],
            device=comfy.model_management.intermediate_device(),
            dtype=image_latent.dtype
        )
        if image_latent.shape[0] == 1 and batch_size > 1:
            image_latent = image_latent.repeat(batch_size, 1, 1, 1, 1)

        # Conditioning mask: 1.0 for the first latent frame (the input image), 0.0 elsewhere.
        visual_cond_mask = torch.zeros(
            [batch_size, latent_frames, H_latent, W_latent, 1],
            device=comfy.model_management.intermediate_device(),
            dtype=image_latent.dtype
        )
        visual_cond_mask[:, 0:1, :, :, :] = 1.0

        image_latent = image_latent.to(comfy.model_management.intermediate_device())
        return io.NodeOutput({
            "samples": video_latent,
            "visual_cond": image_latent,
            "visual_cond_mask": visual_cond_mask
        })
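
# Simple post-processing helper: drops a fixed number of frames from either end of a
# decoded image batch (the frame dimension is the batch axis of a ComfyUI IMAGE tensor).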
class KandinskyPruneFrames(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="KandinskyV5_PruneFrames",
            display_name="Kandinsky 5 Prune Frames",
            category="Kandinsky",
            description="Remove frames from the start or end of a video batch.",
            inputs=[
                io.Image.Input("images", tooltip="Video frames to prune."),
                io.Int.Input("prune_start", default=0, min=0, max=100, tooltip="Number of frames to remove from the start."),
                io.Int.Input("prune_end", default=0, min=0, max=100, tooltip="Number of frames to remove from the end."),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, images, prune_start, prune_end) -> io.NodeOutput:
        total_frames = images.shape[0]
        if prune_start + prune_end >= total_frames:
            raise ValueError(f"Cannot prune {prune_start + prune_end} frames from a video with {total_frames} frames. Must leave at least 1 frame.")
        start_idx = prune_start
        end_idx = total_frames - prune_end if prune_end > 0 else total_frames
        pruned_images = images[start_idx:end_idx]
        return io.NodeOutput(pruned_images)
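
# Kandinsky-5 reuses the Hunyuan Video VAE. The loader below builds it either from a
# single state-dict file or from a pretrained folder, and wraps it so downstream nodes
# can reach the model via .first_stage_model.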
class KandinskyVAELoader(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        import folder_paths
        vae_files = folder_paths.get_filename_list("vae")
        return io.Schema(
            node_id="KandinskyV5_VAELoader",
            display_name="Kandinsky 5 VAE Loader",
            category="Kandinsky",
            description="Loads the Hunyuan Video VAE for Kandinsky-5. Use hunyuan_video_vae_bf16.safetensors.",
            inputs=[
                io.Combo.Input("vae_name", options=vae_files, tooltip="Select the Hunyuan Video VAE (hunyuan_video_vae_bf16.safetensors)."),
            ],
            outputs=[io.Vae.Output()],
        )

    @classmethod
    def execute(cls, vae_name) -> io.NodeOutput:
        import folder_paths
        import os
        import comfy.utils
        from .src.kandinsky.models.vae import AutoencoderKLHunyuanVideo

        vae_path = folder_paths.get_full_path_or_raise("vae", vae_name)
        if os.path.isfile(vae_path):
            # Single state-dict file: build the Hunyuan Video VAE and load the weights into it.
            sd = comfy.utils.load_torch_file(vae_path)
            vae = AutoencoderKLHunyuanVideo(
                in_channels=3,
                out_channels=3,
                latent_channels=16,
                block_out_channels=(128, 256, 512, 512),
                layers_per_block=2,
                act_fn="silu",
                norm_num_groups=32,
                scaling_factor=0.476986,
                spatial_compression_ratio=8,
                temporal_compression_ratio=4,
            )
            vae.load_state_dict(sd, strict=True)
            vae = vae.to(dtype=torch.bfloat16)
        else:
            # Directory path: load a pretrained checkpoint.
            vae = AutoencoderKLHunyuanVideo.from_pretrained(
                vae_path,
                torch_dtype=torch.bfloat16
            )
        vae.eval()

        class VAEWrapper:
            """Minimal wrapper so downstream nodes can access the model via .first_stage_model."""
            def __init__(self, vae_model):
                self.first_stage_model = vae_model

        return io.NodeOutput(VAEWrapper(vae))
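
# Decoding maps the 5D latent [B, C, F, H, W] back to a ComfyUI image batch
# [B * F, H, W, C] in [0, 1], optionally using the VAE's spatio-temporal tiling
# to bound VRAM usage.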
class KandinskyVAEDecode(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="KandinskyV5_VAEDecode",
            category="Kandinsky",
            description="Decodes Kandinsky-5 5D video latents to frames using the VAE's built-in tiling.",
            inputs=[
                io.Vae.Input("vae", tooltip="The Kandinsky VAE from the VAE Loader."),
                io.Latent.Input("samples", tooltip="Latent samples from the Kandinsky 5 Sampler."),
                io.Boolean.Input("enable_tiling", default=True, tooltip="Enable tiling to reduce VRAM usage. Disable only for very small videos."),
                io.Int.Input("tile_min_frames", default=16, min=1, max=64, tooltip="Temporal tile size. Lower = more VRAM efficient but slower."),
                io.Int.Input("tile_min_height", default=256, min=64, max=1024, step=64, tooltip="Tile height. Lower = more VRAM efficient but slower."),
                io.Int.Input("tile_min_width", default=256, min=64, max=1024, step=64, tooltip="Tile width. Lower = more VRAM efficient but slower."),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, vae, samples, enable_tiling, tile_min_frames, tile_min_height, tile_min_width) -> io.NodeOutput:
        import comfy.model_management as mm

        device = mm.get_torch_device()
        offload_device = mm.unet_offload_device()

        latent = samples["samples"]
        B, C, F, H, W = latent.shape
        latent = latent.to(device)

        # Undo the scaling applied at encode time.
        scaling_factor = 0.476986
        latent = latent / scaling_factor

        vae_model = vae.first_stage_model
        vae_model = vae_model.to(device)
        vae_dtype = next(vae_model.parameters()).dtype
        latent = latent.to(dtype=vae_dtype)

        has_tiling = hasattr(vae_model, 'use_tiling')
        if not has_tiling and enable_tiling:
            raise RuntimeError(
                "This VAE does not support tiled decoding. Please use the 'Kandinsky 5 VAE Loader' node."
            )

        if enable_tiling:
            # Save the VAE's tiling settings so they can be restored after decoding.
            original_use_tiling = vae_model.use_tiling
            original_use_framewise = vae_model.use_framewise_decoding
            original_tile_min_frames = vae_model.tile_sample_min_num_frames
            original_tile_min_height = vae_model.tile_sample_min_height
            original_tile_min_width = vae_model.tile_sample_min_width

            vae_model.use_tiling = True
            vae_model.use_framewise_decoding = True
            vae_model.tile_sample_min_num_frames = tile_min_frames
            vae_model.tile_sample_min_height = tile_min_height
            vae_model.tile_sample_min_width = tile_min_width
            # Overlap tiles by 25% to hide seams.
            vae_model.tile_sample_stride_num_frames = max(1, tile_min_frames * 3 // 4)
            vae_model.tile_sample_stride_height = tile_min_height * 3 // 4
            vae_model.tile_sample_stride_width = tile_min_width * 3 // 4
        else:
            original_use_tiling = vae_model.use_tiling if has_tiling else None
            original_use_framewise = vae_model.use_framewise_decoding if has_tiling else None
            if has_tiling:
                vae_model.use_tiling = False
                vae_model.use_framewise_decoding = False

        with torch.no_grad():
            autocast_dtype = torch.bfloat16 if vae_dtype == torch.bfloat16 else torch.float16
            with torch.autocast(device_type=device.type, dtype=autocast_dtype, enabled=(vae_dtype != torch.float32)):
                decoded = vae_model.decode(latent, return_dict=False)[0]

        # Convert (B, C, F, H, W) in [-1, 1] to ComfyUI image batches (B*F, H, W, C) in [0, 1].
        B, C, F, H, W = decoded.shape
        decoded = decoded.permute(0, 2, 3, 4, 1)
        decoded = decoded.reshape(B * F, H, W, C)
        decoded = (decoded + 1.0) / 2.0
        decoded = decoded.clamp(0.0, 1.0)

        # Restore the VAE's original tiling configuration.
        if enable_tiling and has_tiling:
            vae_model.use_tiling = original_use_tiling
            vae_model.use_framewise_decoding = original_use_framewise
            vae_model.tile_sample_min_num_frames = original_tile_min_frames
            vae_model.tile_sample_min_height = original_tile_min_height
            vae_model.tile_sample_min_width = original_tile_min_width
        elif has_tiling and original_use_tiling is not None:
            vae_model.use_tiling = original_use_tiling
            vae_model.use_framewise_decoding = original_use_framewise

        try:
            vae_model.to(offload_device)
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        except Exception as e:
            print(f"Warning: VAE offload failed: {e}")

        decoded = decoded.cpu().float()
        return io.NodeOutput(decoded)