Implement lazy evaluation for expensive operations:
class ExpensiveNode(io.ComfyNode):
    """Node whose costly ``iterations`` input is resolved lazily.

    ``iterations`` is declared with ``lazy=True`` so the upstream work that
    produces it only runs when ``check_lazy_status`` actually requests it —
    i.e. when processing is enabled.
    """

    @classmethod
    def define_schema(cls) -> io.Schema:
        """Declare the node's inputs and outputs; ``iterations`` is lazy."""
        return io.Schema(
            node_id="ExpensiveNode",
            inputs=[
                io.Image.Input("image"),
                io.Boolean.Input("enable_processing"),
                io.Int.Input(
                    "iterations",
                    lazy=True,  # Only evaluate if needed
                    default=10,
                ),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def check_lazy_status(cls, image, enable_processing, iterations):
        """Request the lazy ``iterations`` input only when it will be used."""
        return ["iterations"] if enable_processing else []

    @classmethod
    def execute(cls, image, enable_processing, iterations):
        """Apply ``process`` for ``iterations`` rounds, or pass through."""
        if not enable_processing:
            return io.NodeOutput(image)
        # ``iterations`` is resolved by now: check_lazy_status requested it
        # whenever enable_processing was truthy.
        for _ in range(iterations):
            image = process(image)
        return io.NodeOutput(image)
Avoid re-execution when inputs haven’t meaningfully changed:
import hashlib  # NOTE(review): appears unused in this snippet — confirm before removing


class ImageLoader(io.ComfyNode):
    """Node that skips re-execution when its source file has not changed."""

    @classmethod
    def fingerprint_inputs(cls, image_path):
        """Return a cache key that changes only when the file itself changes.

        The key combines the path with the file's mtime and size, so the
        node is re-executed only when the file on disk is actually modified.

        Falls back to the bare path when the file is missing or unreadable,
        keeping the fingerprint deterministic.
        """
        import os
        # EAFP fix: stat directly and handle the failure, instead of the
        # original exists()+stat() pair, which is racy if the file is
        # deleted between the two calls (TOCTOU).
        try:
            stat = os.stat(image_path)
        except OSError:
            return image_path
        return f"{image_path}:{stat.st_mtime}:{stat.st_size}"
import torch


class MemoryEfficientNode(io.ComfyNode):
    """Node that processes a large tensor in small batches to cap GPU memory."""

    @classmethod
    def execute(cls, large_tensor):
        """Process ``large_tensor`` in fixed-size chunks, staging on the CPU.

        Each batch result is moved to the CPU so only one batch's worth of
        output lives on the GPU at a time. The concatenated result is
        returned on the input tensor's original device.
        """
        batch_size = 8
        results = []
        for i in range(0, len(large_tensor), batch_size):
            batch = large_tensor[i:i + batch_size]
            result = process_batch(batch)
            results.append(result.cpu())  # Stage on CPU to free GPU memory
        # Release cached allocations held by the freed per-batch results.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        # Bug fix: the original called .cuda() unconditionally, which crashes
        # on CPU-only builds (and contradicts the availability guard above).
        # Return on the same device as the input instead.
        return io.NodeOutput(torch.cat(results).to(large_tensor.device))
class ImageBlend(io.ComfyNode):
    """Blend two images using a selectable blend mode.

    Supported modes include normal, multiply, screen, and overlay, with the
    blend strength controlled by an opacity parameter. The two images are
    expected to share dimensions; when they differ, the second image is
    automatically resized to match the first.
    """

    @classmethod
    def define_schema(cls) -> io.Schema:
        ...
@classmethod
def execute(cls, images):
    """Process the whole image batch in a single vectorized call.

    Batch-level processing is preferred over a per-image Python loop
    (e.g. mapping process_single over each image and stacking) because it
    is more efficient.
    """
    return io.NodeOutput(batch_process(images))
Always test your nodes with different batch sizes to ensure correct behavior.