Skip to content

Commit e44d0ac

Browse files
Make --novram completely offload weights.
This flag is mainly used for testing the weight offloading; it shouldn't actually be used in practice. Also remove a useless import.
1 parent 56bc64f commit e44d0ac

File tree

2 files changed

+1
-2
lines changed

2 files changed

+1
-2
lines changed

comfy/ldm/pixart/blocks.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
import torch.nn.functional as F
77
from einops import rearrange
88

9-
from comfy import model_management
109
from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, Mlp, timestep_embedding
1110
from comfy.ldm.modules.attention import optimized_attention
1211

comfy/model_management.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -521,7 +521,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
521521
lowvram_model_memory = 0
522522

523523
if vram_set_state == VRAMState.NO_VRAM:
524-
lowvram_model_memory = 64 * 1024 * 1024
524+
lowvram_model_memory = 0.1
525525

526526
loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
527527
current_loaded_models.insert(0, loaded_model)

0 commit comments

Comments
 (0)