Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
dcbdcfe
Preliminary work for ComfyUI native API integration.
BuffMcBigHuge Mar 18, 2025
b3a95ad
Cleanup of pre/post processing of frames.
BuffMcBigHuge Mar 25, 2025
50ca4a1
Added built-in nodes for base64 string and websocket image send, redu…
BuffMcBigHuge Mar 25, 2025
d66dac4
Preliminary work on multi-Comfy server inference.
BuffMcBigHuge Mar 25, 2025
d3b0b3c
Work on frame timing and management, added max_frame_wait argument, a…
BuffMcBigHuge Mar 25, 2025
63014e3
Cleaned up prompt manipulation with custom nodes.
BuffMcBigHuge Mar 25, 2025
15b51a8
Added frame tracking, add frame timing stability, added mismatched fr…
BuffMcBigHuge Mar 25, 2025
80636b6
Removed requirement for workspace in app startup.
BuffMcBigHuge Apr 1, 2025
2cdb6fa
Cleanup of logging, added log_level argument, testing of send tensor …
BuffMcBigHuge Apr 1, 2025
c4d2ea1
Setting a few logs to debug.
BuffMcBigHuge Apr 1, 2025
0ee6222
Update requirements.txt
BuffMcBigHuge Apr 1, 2025
61a03fa
Added native nodes into root nodes.
BuffMcBigHuge Apr 1, 2025
77e28ff
Rebuilt get_available_nodes using native ComfyUI api for retrofit to …
BuffMcBigHuge Apr 8, 2025
13b511a
Modified base64 processing to use torchvision instead of numpy interm…
BuffMcBigHuge Apr 8, 2025
ee88589
Merged upstream of main 0.0.5, small modification to logging.
BuffMcBigHuge Apr 15, 2025
c9aff80
Built Comfy subprocess spawn client mode, built dynamic output pacer …
BuffMcBigHuge Apr 15, 2025
0def790
Merge branch 'main' into comfy-native-local.
BuffMcBigHuge Apr 15, 2025
937384f
Small fix, cleanup.
BuffMcBigHuge Apr 15, 2025
3b1cdd7
Merge branch 'main' into comfy-native-local
BuffMcBigHuge Apr 16, 2025
f7326c4
Added cuda-devices and workers-start-port params for multi-gpu spawni…
BuffMcBigHuge Apr 16, 2025
75bd88e
Merge branch 'main' into comfy-native-local
BuffMcBigHuge Apr 22, 2025
0c1aa0e
Fixed issue with cleanup not properly resetting the clients for subse…
BuffMcBigHuge Apr 22, 2025
6c07c65
Better error handling for Comfy instances via spawn, reorganization o…
BuffMcBigHuge Apr 23, 2025
93308b6
Fix to linux subprocess command.
BuffMcBigHuge Apr 24, 2025
98b78e8
Added spawned comfy specific logging, modified client logging, modifi…
BuffMcBigHuge Apr 25, 2025
5b3ac66
Modification to logging handler for subprocesses.
BuffMcBigHuge Apr 29, 2025
766277b
Merge branch 'main' into comfy-native-local
BuffMcBigHuge Apr 29, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions configs/comfy.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Configuration for multiple ComfyUI servers

[[servers]]
host = "127.0.0.1"
port = 8188
client_id = "client1"

# Adding more servers:

# [[servers]]
# host = "127.0.0.1"
# port = 8189
# client_id = "client2"
3 changes: 2 additions & 1 deletion nodes/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from .audio_utils import *
from .tensor_utils import *
from .video_stream_utils import *
from .native_utils import *
from .api import *
from .web import *

Expand All @@ -11,7 +12,7 @@
NODE_DISPLAY_NAME_MAPPINGS = {}

# Import and update mappings from submodules
for module in [audio_utils, tensor_utils, video_stream_utils, api, web]:
for module in [audio_utils, tensor_utils, video_stream_utils, api, web, native_utils]:
if hasattr(module, 'NODE_CLASS_MAPPINGS'):
NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
if hasattr(module, 'NODE_DISPLAY_NAME_MAPPINGS'):
Expand Down
20 changes: 20 additions & 0 deletions nodes/native_utils/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
from .load_image_base64 import LoadImageBase64
from .send_image_websocket import SendImageWebsocket
from .send_tensor_websocket import SendTensorWebSocket

# Mapping of node identifiers to classes; ComfyUI uses this to register the nodes.
NODE_CLASS_MAPPINGS = {
    "LoadImageBase64": LoadImageBase64,
    "SendImageWebsocket": SendImageWebsocket,
    "SendTensorWebSocket": SendTensorWebSocket
}

# Human-readable display names shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "LoadImageBase64": "Load Image Base64 (ComfyStream)",
    "SendImageWebsocket": "Send Image Websocket (ComfyStream)",
    # Fixed: previously "Save Tensor WebSocket" — inconsistent with the class
    # name SendTensorWebSocket and the "Send ..." naming of the other nodes.
    "SendTensorWebSocket": "Send Tensor WebSocket (ComfyStream)"
}

# Export these variables for ComfyUI to use
__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
37 changes: 37 additions & 0 deletions nodes/native_utils/load_image_base64.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
# borrowed from Acly's comfyui-tooling-nodes
# https://github.com/Acly/comfyui-tooling-nodes/blob/main/nodes.py

# TODO: I think we can receive tensor data directly from the pipeline through the /prompt endpoint as JSON
# This may be more efficient than sending base64 encoded images through the websocket and
# allow for alternative data formats.

from PIL import Image
import base64
import numpy as np
import torch
from io import BytesIO

class LoadImageBase64:
    """Decode a base64-encoded image string into a ComfyUI IMAGE and MASK.

    Returns an image tensor of shape (1, H, W, 3), float32 in [0, 1], and a
    mask tensor of shape (H, W), float32 in [0, 1], taken from the alpha
    channel when the image has one and all-zero otherwise.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("STRING", {"multiline": False})}}

    RETURN_TYPES = ("IMAGE", "MASK")
    CATEGORY = "external_tooling"
    FUNCTION = "load_image"

    def load_image(self, image):
        imgdata = base64.b64decode(image)
        img = Image.open(BytesIO(imgdata))

        if "A" in img.getbands():
            # Normalize the 8-bit alpha channel to [0, 1].
            mask = np.array(img.getchannel("A")).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
        else:
            # Fix: the original returned None here, which breaks downstream
            # nodes that expect the MASK socket to carry a tensor. Match
            # ComfyUI's LoadImage behavior and emit an all-zero mask sized
            # to the image instead.
            mask = torch.zeros((img.height, img.width), dtype=torch.float32)

        img = img.convert("RGB")
        img = np.array(img).astype(np.float32) / 255.0
        img = torch.from_numpy(img)[None,]

        return (img, mask)
44 changes: 44 additions & 0 deletions nodes/native_utils/send_image_websocket.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# borrowed from Acly's comfyui-tooling-nodes
# https://github.com/Acly/comfyui-tooling-nodes/blob/main/nodes.py

# TODO: I think we can send tensor data directly to the pipeline in the websocket response.
# Worth talking to ComfyAnonymous about this.

import numpy as np
from PIL import Image
from server import PromptServer, BinaryEventTypes

class SendImageWebsocket:
    """Send rendered IMAGE tensors to the client over the server websocket.

    Each tensor in the batch is converted to an 8-bit PIL image and pushed
    through the ComfyUI PromptServer as an unencoded preview; the server
    encodes it in the requested format (PNG or JPEG). Output node only —
    returns UI metadata, no data sockets.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "images": ("IMAGE",),
                "format": (["PNG", "JPEG"], {"default": "PNG"}),
            }
        }

    RETURN_TYPES = ()
    FUNCTION = "send_images"
    OUTPUT_NODE = True
    CATEGORY = "external_tooling"

    def send_images(self, images, format):
        # Hoist the loop-invariant singleton lookup out of the per-frame loop
        # (the original re-read PromptServer.instance for every image).
        server = PromptServer.instance
        results = []
        for tensor in images:
            # Tensors follow the ComfyUI IMAGE convention: float in [0, 1].
            # Scale to 8-bit and clamp before handing off to PIL.
            array = 255.0 * tensor.cpu().numpy()
            image = Image.fromarray(np.clip(array, 0, 255).astype(np.uint8))

            server.send_sync(
                BinaryEventTypes.UNENCODED_PREVIEW_IMAGE,
                [format, image, None],
                server.client_id,
            )
            results.append({
                "source": "websocket",
                "content-type": f"image/{format.lower()}",
                "type": "output",
            })

        return {"ui": {"images": results}}
Loading