
Commit 5085a8c

Rename ConcatenatedLoRALayer to MergedLayerPatch. And other minor cleanup.

1 parent 31cea61
File tree

4 files changed: +9 -9 lines changed


invokeai/app/invocations/flux_text_encoder.py

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
             raise ValueError(f"Unsupported model format: {t5_encoder_config.format}")
 
         # Apply LoRA models to the T5 encoder.
-        # Note: We apply the LoRA after the transformer has been moved to its target device for faster patching.
+        # Note: We apply the LoRA after the encoder has been moved to its target device for faster patching.
         exit_stack.enter_context(
             LayerPatcher.apply_smart_model_patches(
                 model=t5_text_encoder,
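The reasoning behind that note, in miniature: adding a LoRA delta to a weight that already lives on the target device avoids copying the patched tensor to the GPU a second time. A self-contained sketch of the principle in plain PyTorch (illustrative only, not InvokeAI's actual patching code):

import torch

# Stand-ins: a small linear layer and a precomputed LoRA-style weight delta.
model = torch.nn.Linear(64, 64)
lora_delta = torch.randn(64, 64) * 0.01

# Move the model to its target device first...
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# ...then patch. The delta is added to a weight that already lives on the
# device, so the patched weight never makes a second host-to-device trip.
with torch.no_grad():
    model.weight += lora_delta.to(device)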

invokeai/backend/patches/layers/concatenated_lora_layer.py renamed to invokeai/backend/patches/layers/merged_layer_patch.py

Lines changed: 2 additions & 2 deletions
@@ -13,8 +13,8 @@ class Range:
     end: int
 
 
-class ConcatenatedLoRALayer(BaseLayerPatch):
-    """A patch layer that is composed of multiple sub-layers concatenated together.
+class MergedLayerPatch(BaseLayerPatch):
+    """A patch layer that is composed of multiple sub-layers merged together.
 
     This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
     Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
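The special case the docstring describes is easiest to see with concrete numbers. Below is a hedged sketch of the merging idea, not the actual MergedLayerPatch implementation: each sub-layer contributes a delta for one contiguous row range of the concatenated QKV weight. The local Range here mirrors the dataclass shown in the diff.

from dataclasses import dataclass

import torch


@dataclass
class Range:
    start: int
    end: int


def merge_sub_layer_deltas(
    sub_layer_deltas: list[torch.Tensor],  # one delta per sub-layer (e.g. Q, K, V)
    ranges: list[Range],                   # the row range each sub-layer covers
    merged_shape: tuple[int, int],         # shape of the concatenated QKV weight
) -> torch.Tensor:
    # Scatter each per-sub-layer delta into its row range of one merged delta.
    merged = torch.zeros(merged_shape)
    for delta, r in zip(sub_layer_deltas, ranges):
        merged[r.start : r.end] = delta
    return merged


# Three diffusers-format sub-layers land in disjoint row ranges of one
# 64-row merged weight, mirroring BFL's concatenated QKV layout.
deltas = [torch.ones(16, 8), 2 * torch.ones(16, 8), 3 * torch.ones(32, 8)]
ranges = [Range(0, 16), Range(16, 32), Range(32, 64)]
merged = merge_sub_layer_deltas(deltas, ranges, (64, 8))
assert merged.shape == (64, 8)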

invokeai/backend/patches/lora_conversions/flux_diffusers_lora_conversion_utils.py

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@
 import torch
 
 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
-from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer, Range
+from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
 from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw

@@ -113,7 +113,7 @@ def add_qkv_lora_layer_if_present(
 
         dim_0_offset += src_weight_shape[0]
 
-    layers[dst_qkv_key] = ConcatenatedLoRALayer(sub_layers, sub_layer_ranges)
+    layers[dst_qkv_key] = MergedLayerPatch(sub_layers, sub_layer_ranges)
 
     # time_text_embed.timestep_embedder -> time_in.
     add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")

tests/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/test_all_custom_modules.py

Lines changed: 4 additions & 4 deletions
@@ -13,10 +13,10 @@
 )
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
-from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer, Range
 from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
 from invokeai.backend.patches.layers.lokr_layer import LoKRLayer
 from invokeai.backend.patches.layers.lora_layer import LoRALayer
+from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
 from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
 from tests.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.test_custom_invoke_linear_8_bit_lt import (
     build_linear_8bit_lt_layer,

@@ -328,7 +328,7 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
     elif layer_type == "concatenated_lora":
         sub_layer_out_features = [16, 16, 32]
 
-        # Create a ConcatenatedLoRA layer.
+        # Create a MergedLayerPatch.
         sub_layers: list[LoRALayer] = []
         sub_layer_ranges: list[Range] = []
         dim_0_offset = 0

@@ -339,10 +339,10 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
             sub_layers.append(LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=bias))
             sub_layer_ranges.append(Range(dim_0_offset, dim_0_offset + out_features))
             dim_0_offset += out_features
-        concatenated_lora_layer = ConcatenatedLoRALayer(sub_layers, sub_layer_ranges)
+        merged_layer_patch = MergedLayerPatch(sub_layers, sub_layer_ranges)
 
         input = torch.randn(1, in_features)
-        return ([(concatenated_lora_layer, 0.7)], input)
+        return ([(merged_layer_patch, 0.7)], input)
     elif layer_type == "flux_control_lora":
         # Create a FluxControlLoRALayer.
         patched_in_features = 40
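Worked through with the fixture's numbers: sub_layer_out_features = [16, 16, 32] yields the ranges [0, 16), [16, 32) and [32, 64), and the merged patch is returned paired with a weight of 0.7. A hedged sketch of how such a per-patch weight typically scales the applied delta (illustrative, not the exact LayerPatcher logic):

import torch

orig_weight = torch.randn(64, 8)
merged_delta = torch.randn(64, 8) * 0.01  # stand-in for the merged LoRA delta
patch_weight = 0.7                        # the weight paired with the patch

# A patch weight scales the delta before it is added to the original weight.
patched_weight = orig_weight + patch_weight * merged_delta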
