@@ -14,7 +14,6 @@
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
 from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
-from invokeai.backend.patches.layers.diffusers_ada_ln_lora_layer import DiffusersAdaLN_LoRALayer
 from invokeai.backend.patches.layers.lokr_layer import LoKRLayer
 from invokeai.backend.patches.layers.lora_layer import LoRALayer
 from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
@@ -284,7 +283,6 @@ def test_inference_autocast_from_cpu_to_device(device: str, layer_under_test: La
         "multiple_loras",
         "concatenated_lora",
         "flux_control_lora",
-        "diffusers_adaLN_lora",
         "single_lokr",
     ]
 )
@@ -372,16 +370,6 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
         )
         input = torch.randn(1, in_features)
         return ([(lokr_layer, 0.7)], input)
-    elif layer_type == "diffusers_adaLN_lora":
-        lora_layer = DiffusersAdaLN_LoRALayer(
-            up=torch.randn(out_features, rank),
-            mid=None,
-            down=torch.randn(rank, in_features),
-            alpha=1.0,
-            bias=torch.randn(out_features),
-        )
-        input = torch.randn(1, in_features)
-        return ([(lora_layer, 0.7)], input)
     else:
         raise ValueError(f"Unsupported layer_type: {layer_type}")