
Commit 5fbc8a1

Merge branch 'master' into dr-support-pip-cm
2 parents: b180f47 + 7a88384

23 files changed: +1348 −1325 lines changed

README.md

Lines changed: 3 additions & 1 deletion
@@ -197,7 +197,9 @@ comfy install

 ## Manual Install (Windows, Linux)

-Python 3.13 is very well supported. If you have trouble with some custom node dependencies you can try 3.12
+Python 3.14 will work if you comment out the `kornia` dependency in the requirements.txt file (breaks the canny node) and install pytorch nightly but it is not recommended.
+
+Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12

 Git clone this repo.

comfy/ops.py

Lines changed: 12 additions & 0 deletions
@@ -24,6 +24,8 @@
 import comfy.rmsnorm
 import contextlib

+def run_every_op():
+    comfy.model_management.throw_exception_if_processing_interrupted()

 def scaled_dot_product_attention(q, k, v, *args, **kwargs):
     return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs)
@@ -109,6 +111,7 @@ def forward_comfy_cast_weights(self, input):
             return torch.nn.functional.linear(input, weight, bias)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -123,6 +126,7 @@ def forward_comfy_cast_weights(self, input):
             return self._conv_forward(input, weight, bias)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -137,6 +141,7 @@ def forward_comfy_cast_weights(self, input):
             return self._conv_forward(input, weight, bias)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -151,6 +156,7 @@ def forward_comfy_cast_weights(self, input):
             return self._conv_forward(input, weight, bias)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -165,6 +171,7 @@ def forward_comfy_cast_weights(self, input):
             return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -183,6 +190,7 @@ def forward_comfy_cast_weights(self, input):
             return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -202,6 +210,7 @@ def forward_comfy_cast_weights(self, input):
             # return torch.nn.functional.rms_norm(input, self.normalized_shape, weight, self.eps)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -223,6 +232,7 @@ def forward_comfy_cast_weights(self, input, output_size=None):
                                 output_padding, self.groups, self.dilation)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -244,6 +254,7 @@ def forward_comfy_cast_weights(self, input, output_size=None):
                                 output_padding, self.groups, self.dilation)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
@@ -262,6 +273,7 @@ def forward_comfy_cast_weights(self, input, out_dtype=None):
             return torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype)

         def forward(self, *args, **kwargs):
+            run_every_op()
             if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
                 return self.forward_comfy_cast_weights(*args, **kwargs)
             else:
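
Taken together, these hunks make every wrapped op in comfy/ops.py check for a pending user interrupt at the top of its forward(), so a cancel request takes effect between individual layers rather than only between larger steps. A minimal, self-contained sketch of the pattern, using a hypothetical interrupt flag in place of comfy.model_management's internal state:

import threading

import torch

# Hypothetical stand-in for the interrupt flag that
# comfy.model_management.throw_exception_if_processing_interrupted() consults.
_interrupt_requested = threading.Event()


class ProcessingInterrupted(Exception):
    pass


def run_every_op():
    # Cheap flag check executed at the top of every wrapped forward().
    if _interrupt_requested.is_set():
        raise ProcessingInterrupted("processing was interrupted by the user")


class Linear(torch.nn.Linear):
    def forward(self, *args, **kwargs):
        run_every_op()  # abort promptly if a cancel has been requested
        return super().forward(*args, **kwargs)

Because the check is a single flag read, the per-layer overhead is negligible next to the work each forward() performs.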

comfy_api/latest/__init__.py

Lines changed: 5 additions & 2 deletions
@@ -114,7 +114,9 @@ class Types:
     ComfyAPISync: Type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub]
 ComfyAPISync = create_sync_class(ComfyAPI_latest)

-comfy_io = io # create the new alias for io
+# create new aliases for io and ui
+IO = io
+UI = ui

 __all__ = [
     "ComfyAPI",
@@ -124,6 +126,7 @@ class Types:
     "Types",
     "ComfyExtension",
     "io",
-    "comfy_io",
+    "IO",
     "ui",
+    "UI",
 ]
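
As a usage note (not part of the diff), IO and UI are plain aliases of the existing io and ui modules, so both spellings resolve to the same objects:

from comfy_api.latest import IO, UI, io, ui

# The new exports are aliases, not copies; both names point at the same modules.
assert IO is io
assert UI is ui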

comfy_api_nodes/apinode_utils.py

Lines changed: 14 additions & 0 deletions
@@ -3,6 +3,7 @@
 import io
 import logging
 import mimetypes
+import os
 from typing import Optional, Union
 from comfy.utils import common_upscale
 from comfy_api.input_impl import VideoFromFile
@@ -702,3 +703,16 @@ def image_tensor_pair_to_batch(
         "center",
     ).movedim(1, -1)
     return torch.cat((image1, image2), dim=0)
+
+
+def get_size(path_or_object: Union[str, io.BytesIO]) -> int:
+    if isinstance(path_or_object, str):
+        return os.path.getsize(path_or_object)
+    return len(path_or_object.getvalue())
+
+
+def validate_container_format_is_mp4(video: VideoInput) -> None:
+    """Validates video container format is MP4."""
+    container_format = video.get_container_format()
+    if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]:
+        raise ValueError(f"Only MP4 container format supported. Got: {container_format}")

comfy_api_nodes/apis/client.py

Lines changed: 1 addition & 1 deletion
@@ -845,7 +845,7 @@ def _display_text_on_node(self, text: str):
         if not self.node_id:
             return
         if self.extracted_price is not None:
-            text = f"Price: {self.extracted_price}$\n{text}"
+            text = f"Price: ${self.extracted_price}\n{text}"
         PromptServer.instance.send_progress_text(text, self.node_id)

     def _display_time_progress_on_node(self, time_completed: int | float):
