@@ -14,6 +14,7 @@
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
 from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
+from invokeai.backend.patches.layers.diffusers_ada_ln_lora_layer import DiffusersAdaLN_LoRALayer
 from invokeai.backend.patches.layers.lokr_layer import LoKRLayer
 from invokeai.backend.patches.layers.lora_layer import LoRALayer
 from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
@@ -283,6 +284,7 @@ def test_inference_autocast_from_cpu_to_device(device: str, layer_under_test: La
         "multiple_loras",
         "concatenated_lora",
         "flux_control_lora",
+        "diffusers_adaLN_lora",
         "single_lokr",
     ]
 )
@@ -370,6 +372,16 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
         )
         input = torch.randn(1, in_features)
         return ([(lokr_layer, 0.7)], input)
+    elif layer_type == "diffusers_adaLN_lora":
+        lora_layer = DiffusersAdaLN_LoRALayer(
+            up=torch.randn(out_features, rank),
+            mid=None,
+            down=torch.randn(rank, in_features),
+            alpha=1.0,
+            bias=torch.randn(out_features),
+        )
+        input = torch.randn(1, in_features)
+        return ([(lora_layer, 0.7)], input)
     else:
         raise ValueError(f"Unsupported layer_type: {layer_type}")
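For orientation, here is a minimal standalone sketch of the generic LoRA patch arithmetic that fixtures like the one above feed into the patcher. It assumes conventional alpha/rank LoRA scaling and hypothetical dimensions, and it deliberately does not reproduce whatever AdaLN-specific weight handling DiffusersAdaLN_LoRALayer itself performs; only the 0.7 patch weight comes from the fixture.

import torch

# Hypothetical dimensions standing in for the fixture's in_features / out_features / rank.
in_features, out_features, rank = 32, 64, 4

up = torch.randn(out_features, rank)
down = torch.randn(rank, in_features)
bias = torch.randn(out_features)
alpha = 1.0
patch_weight = 0.7  # the weight returned alongside the layer in the fixture

# Conventional LoRA weight delta: low-rank product scaled by alpha / rank.
delta_w = (alpha / rank) * (up @ down)  # shape: (out_features, in_features)

base = torch.nn.Linear(in_features, out_features)
with torch.no_grad():
    base.weight += patch_weight * delta_w
    base.bias += patch_weight * bias

x = torch.randn(1, in_features)
print(base(x).shape)  # torch.Size([1, 64])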