Commit 0e14ad3
fix: update packages
justinmerrell committed Oct 18, 2023
1 parent f9c71cb commit 0e14ad3
Showing 3 changed files with 43 additions and 36 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -158,3 +158,4 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+runpod.toml
11 changes: 5 additions & 6 deletions builder/requirements.txt
@@ -1,16 +1,15 @@
 # Required Python packages get listed here, one per line.
 # Recommended to lock the version number to avoid unexpected changes.
-
-runpod==1.2.1
 # git+https://github.com/runpod/runpod-python.git
+runpod==1.3.0
 
-diffusers>=0.18.3
-transformers==4.31.0
-accelerate==0.21.0
+diffusers==0.21.4
+transformers==4.34.0
+accelerate==0.23.0
 safetensors==0.3.2
 scipy == 1.11.2
 numpy>=1.17
 PyWavelets>=1.1.1
 opencv-python>=4.1.0.25
 invisible-watermark==0.2.0
-xformers==0.0.20
+xformers==0.0.22
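
Since several pins move at once here, it can be worth confirming that a fresh environment actually resolved to the bumped versions after pip install -r builder/requirements.txt. A minimal sanity-check sketch; the script and its EXPECTED mapping are illustrative (copied from the pins above), not part of this repo:

# check_pins.py -- sanity check that installed versions match the pins above.
# Assumes the packages were installed from builder/requirements.txt.
from importlib.metadata import version

EXPECTED = {
    "runpod": "1.3.0",
    "diffusers": "0.21.4",
    "transformers": "4.34.0",
    "accelerate": "0.23.0",
    "xformers": "0.0.22",
}

for package, expected in EXPECTED.items():
    installed = version(package)  # raises PackageNotFoundError if missing
    status = "OK" if installed == expected else f"MISMATCH (expected {expected})"
    print(f"{package}=={installed}  {status}")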
67 changes: 37 additions & 30 deletions src/rp_handler.py
@@ -25,32 +25,39 @@
 from rp_schemas import INPUT_SCHEMA
 
 
-# -------------------------------- Load Models ------------------------------- #
-def load_base():
-    base_pipe = StableDiffusionXLPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-xl-base-1.0",
-        torch_dtype=torch.float16, variant="fp16", use_safetensors=True, add_watermarker=False
-    ).to("cuda", silence_dtype_warnings=True)
-    base_pipe.enable_xformers_memory_efficient_attention()
-    return base_pipe
-
-
-def load_refiner():
-    refiner_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-xl-refiner-1.0",
-        torch_dtype=torch.float16, variant="fp16", use_safetensors=True, add_watermarker=False
-    ).to("cuda", silence_dtype_warnings=True)
-    refiner_pipe.enable_xformers_memory_efficient_attention()
-    return refiner_pipe
-
-
-with concurrent.futures.ThreadPoolExecutor() as executor:
-    future_base = executor.submit(load_base)
-    future_refiner = executor.submit(load_refiner)
-
-    base = future_base.result()
-    refiner = future_refiner.result()
-
+# ------------------------------- Model Handler ------------------------------ #
+class ModelHandler:
+    def __init__(self):
+        self.base = None
+        self.refiner = None
+        self.load_models()
+
+    def load_base(self):
+        base_pipe = StableDiffusionXLPipeline.from_pretrained(
+            "stabilityai/stable-diffusion-xl-base-1.0",
+            torch_dtype=torch.float16, variant="fp16", use_safetensors=True, add_watermarker=False
+        ).to("cuda", silence_dtype_warnings=True)
+        base_pipe.enable_xformers_memory_efficient_attention()
+        return base_pipe
+
+
+    def load_refiner(self):
+        refiner_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
+            "stabilityai/stable-diffusion-xl-refiner-1.0",
+            torch_dtype=torch.float16, variant="fp16", use_safetensors=True, add_watermarker=False
+        ).to("cuda", silence_dtype_warnings=True)
+        refiner_pipe.enable_xformers_memory_efficient_attention()
+        return refiner_pipe
+
+    def load_models(self):
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            future_base = executor.submit(self.load_base)
+            future_refiner = executor.submit(self.load_refiner)
+
+            self.base = future_base.result()
+            self.refiner = future_refiner.result()
+
+MODELS = ModelHandler()
 
 # ---------------------------------- Helper ---------------------------------- #
 def _save_and_upload_images(images, job_id):
@@ -103,11 +110,11 @@ def generate_image(job):
 
     generator = torch.Generator("cuda").manual_seed(job_input['seed'])
 
-    base.scheduler = make_scheduler(job_input['scheduler'], base.scheduler.config)
+    MODELS.base.scheduler = make_scheduler(job_input['scheduler'], MODELS.base.scheduler.config)
 
     if starting_image:  # If image_url is provided, run only the refiner pipeline
         init_image = load_image(starting_image).convert("RGB")
-        output = refiner(
+        output = MODELS.refiner(
             prompt=job_input['prompt'],
             num_inference_steps=job_input['refiner_inference_steps'],
             strength=job_input['strength'],
@@ -116,7 +123,7 @@ def generate_image(job):
         ).images
     else:
         # Generate latent image using pipe
-        image = base(
+        image = MODELS.base(
             prompt=job_input['prompt'],
             negative_prompt=job_input['negative_prompt'],
             height=job_input['height'],
@@ -129,7 +136,7 @@ def generate_image(job):
         ).images
 
         # Refine the image using refiner with refiner_inference_steps
-        output = refiner(
+        output = MODELS.refiner(
             prompt=job_input['prompt'],
             num_inference_steps=job_input['refiner_inference_steps'],
             strength=job_input['strength'],
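
The refactor replaces the module-level base/refiner globals with a single MODELS = ModelHandler() instance, while keeping the same trick of loading both pipelines concurrently through a ThreadPoolExecutor so startup cost is roughly the slower of the two loads rather than their sum. A standalone sketch of that pattern, using a hypothetical load_model stand-in for the SDXL pipeline constructors so it runs without a GPU:

# Sketch of the concurrent-load pattern used by ModelHandler above.
# load_model is a hypothetical stand-in for the real from_pretrained() calls.
import concurrent.futures
import time

def load_model(name: str) -> str:
    time.sleep(1)  # stands in for a slow model download/load
    return f"<{name} pipeline>"

class ModelHandler:
    def __init__(self):
        self.base = None
        self.refiner = None
        self.load_models()

    def load_models(self):
        # Both loads are submitted before either result is awaited,
        # so the two I/O-bound loads overlap instead of running serially.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_base = executor.submit(load_model, "base")
            future_refiner = executor.submit(load_model, "refiner")

            self.base = future_base.result()
            self.refiner = future_refiner.result()

MODELS = ModelHandler()
print(MODELS.base, MODELS.refiner)  # finishes in ~1s, not ~2s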
