Commit 98b5183

Merge branch 'master' into dr-support-pip-cm

2 parents: 16a0b24 + 260a5ca

6 files changed (+48, −33 lines)

comfy/sd.py

Lines changed: 9 additions & 2 deletions
@@ -18,6 +18,7 @@
 import comfy.ldm.ace.vae.music_dcae_pipeline
 import yaml
 import math
+import os

 import comfy.utils

@@ -977,6 +978,12 @@ def load_gligen(ckpt_path):
         model = model.half()
     return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())

+def model_detection_error_hint(path, state_dict):
+    filename = os.path.basename(path)
+    if 'lora' in filename.lower():
+        return "\nHINT: This seems to be a Lora file and Lora files should be put in the lora folder and loaded with a lora loader node."
+    return ""
+
 def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
     logging.warning("Warning: The load checkpoint with config function is deprecated and will eventually be removed, please use the other one.")
     model, clip, vae, _ = load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=False, embedding_directory=embedding_directory, output_model=True)

@@ -1005,7 +1012,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
     sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True)
     out = load_state_dict_guess_config(sd, output_vae, output_clip, output_clipvision, embedding_directory, output_model, model_options, te_model_options=te_model_options, metadata=metadata)
     if out is None:
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(ckpt_path, model_detection_error_hint(ckpt_path, sd)))
     return out

@@ -1177,7 +1184,7 @@ def load_diffusion_model(unet_path, model_options={}):
     model = load_diffusion_model_state_dict(sd, model_options=model_options)
     if model is None:
         logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path))
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd)))
     return model

 def load_unet(unet_path, dtype=None):
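Note that the new hint is purely a filename heuristic: it fires whenever "lora" appears in the basename, regardless of the state dict contents. A minimal sketch of the resulting message (assumes comfy.sd is importable; the file path is hypothetical):

from comfy.sd import model_detection_error_hint

path = "models/checkpoints/my_style_lora.safetensors"  # hypothetical misplaced LoRA
print("ERROR: Could not detect model type of: {}\n{}".format(
    path, model_detection_error_hint(path, {})))
# The second line of the output is the new HINT pointing users at the lora folder.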

comfy_extras/nodes_pixart.py

Lines changed: 24 additions & 24 deletions
Every line in this file is removed and re-added with identical text, so the change is content-neutral (most likely a line-ending or whitespace normalization picked up by the merge). The file after the change:

from nodes import MAX_RESOLUTION

class CLIPTextEncodePixArtAlpha:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
            "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
            }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"
    CATEGORY = "advanced/conditioning"
    DESCRIPTION = "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma."

    def encode(self, clip, width, height, text):
        tokens = clip.tokenize(text)
        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}),)

NODE_CLASS_MAPPINGS = {
    "CLIPTextEncodePixArtAlpha": CLIPTextEncodePixArtAlpha,
}
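For context, the node tokenizes the prompt and attaches width/height to the conditioning. A hypothetical invocation (assumes ComfyUI is importable and `clip` is a CLIP object produced by a checkpoint loader, not constructed here):

from comfy_extras.nodes_pixart import CLIPTextEncodePixArtAlpha

node = CLIPTextEncodePixArtAlpha()
# `clip` is assumed to come from a checkpoint loader node.
(conditioning,) = node.encode(clip, width=1024, height=1024, text="a watercolor fox")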

execution.py

Lines changed: 7 additions & 0 deletions
@@ -123,6 +123,8 @@ def recursive_debug_dump(self):
         }
         return result

+SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")
+
 def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
     input_data_all = {}

@@ -1045,6 +1047,11 @@ def task_done(self, item_id, history_result,
         if status is not None:
             status_dict = copy.deepcopy(status._asdict())

+        # Remove sensitive data from extra_data before storing in history
+        for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS:
+            if sensitive_val in prompt[3]:
+                prompt[3].pop(sensitive_val)
+
         self.history[prompt[1]] = {
             "prompt": prompt,
             "outputs": {},

requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.23.4
-comfyui-workflow-templates==0.1.35
+comfyui-workflow-templates==0.1.36
 comfyui-embedded-docs==0.2.4
 comfyui_manager
 torch

script_examples/websockets_api_example.py

Lines changed: 6 additions & 5 deletions
@@ -10,11 +10,11 @@
 server_address = "127.0.0.1:8188"
 client_id = str(uuid.uuid4())

-def queue_prompt(prompt):
-    p = {"prompt": prompt, "client_id": client_id}
+def queue_prompt(prompt, prompt_id):
+    p = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
     data = json.dumps(p).encode('utf-8')
-    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
-    return json.loads(urllib.request.urlopen(req).read())
+    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
+    urllib.request.urlopen(req).read()

 def get_image(filename, subfolder, folder_type):
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}

@@ -27,7 +27,8 @@ def get_history(prompt_id):
     return json.loads(response.read())

 def get_images(ws, prompt):
-    prompt_id = queue_prompt(prompt)['prompt_id']
+    prompt_id = str(uuid.uuid4())
+    queue_prompt(prompt, prompt_id)
     output_images = {}
     while True:
         out = ws.recv()
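The example now mints the prompt_id on the client and sends it along, instead of parsing it out of the HTTP response; paired with the server.py change below, the client knows the id before the request is even made, so it can match websocket messages without waiting on the POST. A condensed sketch of the new flow (assumes a ComfyUI server on 127.0.0.1:8188 and a real workflow dict in place of the placeholder):

import json
import uuid
import urllib.request

server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())

workflow = {}  # placeholder: a real ComfyUI workflow graph goes here
prompt_id = str(uuid.uuid4())  # chosen client-side, reused when polling /history

payload = json.dumps({"prompt": workflow, "client_id": client_id,
                      "prompt_id": prompt_id}).encode('utf-8')
req = urllib.request.Request("http://{}/prompt".format(server_address), data=payload)
urllib.request.urlopen(req).read()
# prompt_id can now be used to filter websocket messages and query /history.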

server.py

Lines changed: 1 addition & 1 deletion
@@ -684,7 +684,7 @@ async def post_prompt(request):

        if "prompt" in json_data:
            prompt = json_data["prompt"]
-            prompt_id = str(uuid.uuid4())
+            prompt_id = str(json_data.get("prompt_id", uuid.uuid4()))
            valid = await execution.validate_prompt(prompt_id, prompt)
            extra_data = {}
            if "extra_data" in json_data:
