
Commit c88e3de

merge PR lllyasviel#158

1 parent 4096d33 commit c88e3de

2 files changed, +5 -24 lines

README.md

Lines changed: 0 additions & 12 deletions
@@ -1,9 +1,3 @@
-Fork info:
-
-To use lora, launch with the '--lora path_to_lora' argument
-
-(I've replaced the model_config.json to avoid confusion and simplify usage)
-
 <p align="center">
 <img src="https://github.com/user-attachments/assets/2cc030b4-87e1-40a0-b5bf-1b7d6b62820b" width="300">
 </p>
@@ -477,12 +471,6 @@ You can also write prompts yourself. Concise prompts are usually preferred, for
 
 and so on.
 
-# LoRA
-
-Experimental LoRA support. Retrain of LoRA is necessary.
-
-Launch with the '--lora path_to_your_lora' argument.
-
 # Cite
 
 @article{zhang2025framepack,

demo_gradio.py

Lines changed: 5 additions & 12 deletions
@@ -34,8 +34,6 @@
 parser.add_argument("--server", type=str, default='0.0.0.0')
 parser.add_argument("--port", type=int, required=False)
 parser.add_argument("--inbrowser", action='store_true')
-parser.add_argument("--lora", type=str, default=None, help="Lora path")
-parser.add_argument("--lora_is_diffusers", action='store_true', help="Lora is diffusers format")
 args = parser.parse_args()
 
 # for win desktop probably use --server 127.0.0.1 --inbrowser
@@ -84,12 +82,6 @@
 image_encoder.requires_grad_(False)
 transformer.requires_grad_(False)
 
-if args.lora:
-    lora = args.lora
-    lora_path, lora_name = os.path.split(lora)
-    print("Loading lora")
-    transformer = load_lora(transformer, lora_path, lora_name, args.lora_is_diffusers)
-
 if not high_vram:
     # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
     DynamicSwapInstaller.install_model(transformer, device=gpu)
@@ -108,7 +100,7 @@
 
 
 @torch.no_grad()
-def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf, resolution):
     total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
     total_latent_sections = int(max(round(total_latent_sections), 1))
 
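The hunk above only widens the signature; the place where worker actually consumes resolution is outside this excerpt. In the upstream demo the working size comes from find_nearest_bucket(H, W, resolution=640) in diffusers_helper.bucket_tools, so the slider value presumably replaces that hard-coded 640. As a self-contained illustration of resolution-dependent bucket selection (not the PR's code; nearest_bucket and its rounding rule here are assumptions):

    # Illustrative sketch only: pick a working (height, width) near the requested
    # resolution, rounded to the slider's 16-pixel step. This mirrors the role the
    # new `resolution` argument plays, not the PR's actual implementation.
    def nearest_bucket(h: int, w: int, resolution: int, step: int = 16) -> tuple[int, int]:
        # Scale so the shorter side lands close to `resolution`.
        scale = resolution / min(h, w)
        # Round both sides to the nearest multiple of `step` (the slider step).
        bucket_h = max(step, round(h * scale / step) * step)
        bucket_w = max(step, round(w * scale / step) * step)
        return bucket_h, bucket_w

    # Example: a 720x1280 input at the default slider value of 640 -> (640, 1136).
    print(nearest_bucket(720, 1280, 640))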
@@ -328,14 +320,14 @@ def callback(d):
     return
 
 
-def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf, resolution):
     global stream
 
     yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
 
     stream = AsyncStream()
 
-    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
+    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf, resolution)
 
     output_filename = None
 
@@ -372,7 +364,8 @@ def end_process():
     gr.Markdown('# FramePack')
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
+            input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
+            resolution = gr.Slider(label="Resolution", minimum=240, maximum=720, value=640, step=16)
             prompt = gr.Textbox(label="Prompt", value='')
             example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
             example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
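
The slider only reaches process() if its component is also appended to the inputs that the Start button passes in; that wiring is not part of this excerpt. A minimal, runnable Gradio sketch of the same pattern, with process_stub and these component names standing in for the demo's real handler and UI:

    # Minimal sketch of the wiring pattern: the gr.Slider value arrives in the click
    # handler as one more positional argument, mirroring how `resolution` was added
    # to process()/worker(). Identifiers here are illustrative, not the PR's code.
    import gradio as gr

    def process_stub(prompt, resolution):
        # Stand-in for process(): report what the worker would receive.
        return f"would generate '{prompt}' at a {int(resolution)}px bucket"

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Prompt")
        resolution = gr.Slider(label="Resolution", minimum=240, maximum=720, value=640, step=16)
        start_button = gr.Button("Start Generation")
        result = gr.Textbox(label="Result")
        start_button.click(fn=process_stub, inputs=[prompt, resolution], outputs=result)

    demo.launch()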
