From 42c19fdd0d49fba6c17f7b9565e290444fdf70ad Mon Sep 17 00:00:00 2001
From: DN6
Date: Tue, 26 Aug 2025 08:35:26 +0530
Subject: [PATCH 1/7] update

---
 tests/conftest.py | 4 +-
 tests/hooks/__init__.py | 0
 tests/hooks/test_group_offloading.py | 3 +-
 tests/hooks/test_hooks.py | 3 +-
 tests/lora/__init__.py | 0
 tests/lora/test_lora_layers_auraflow.py | 5 +-
 tests/lora/test_lora_layers_cogvideox.py | 5 +-
 tests/lora/test_lora_layers_cogview4.py | 5 +-
 tests/lora/test_lora_layers_flux.py | 5 +-
 tests/lora/test_lora_layers_hunyuanvideo.py | 5 +-
 tests/lora/test_lora_layers_ltx_video.py | 5 +-
 tests/lora/test_lora_layers_lumina2.py | 5 +-
 tests/lora/test_lora_layers_mochi.py | 5 +-
 tests/lora/test_lora_layers_qwenimage.py | 5 +-
 tests/lora/test_lora_layers_sana.py | 5 +-
 tests/lora/test_lora_layers_sd.py | 5 +-
 tests/lora/test_lora_layers_sd3.py | 5 +-
 tests/lora/test_lora_layers_sdxl.py | 5 +-
 tests/lora/test_lora_layers_wan.py | 5 +-
 tests/lora/test_lora_layers_wanvace.py | 5 +-
 tests/lora/utils.py | 3 +-
 .../test_models_asymmetric_autoencoder_kl.py | 4 +-
 .../test_models_autoencoder_cosmos.py | 2 +-
 .../test_models_autoencoder_dc.py | 4 +-
 .../test_models_autoencoder_hunyuan_video.py | 4 +-
 .../test_models_autoencoder_kl.py | 4 +-
 .../test_models_autoencoder_kl_cogvideox.py | 4 +-
 ..._models_autoencoder_kl_temporal_decoder.py | 4 +-
 .../test_models_autoencoder_ltx_video.py | 4 +-
 .../test_models_autoencoder_magvit.py | 2 +-
 .../test_models_autoencoder_mochi.py | 4 +-
 .../test_models_autoencoder_oobleck.py | 4 +-
 .../test_models_autoencoder_tiny.py | 4 +-
 .../test_models_autoencoder_wan.py | 2 +-
 .../test_models_consistency_decoder_vae.py | 6 +-
 .../autoencoders/test_models_vae_flax.py | 2 +-
 tests/models/autoencoders/test_models_vq.py | 4 +-
 tests/models/test_attention_processor.py | 3 +-
 tests/models/test_layers_utils.py | 3 +-
 tests/models/test_modeling_common.py | 8 +-
 tests/models/test_modeling_common_flax.py | 3 +-
 .../test_models_dit_transformer2d.py | 4 +-
 .../test_models_pixart_transformer2d.py | 4 +-
 .../models/transformers/test_models_prior.py | 4 +-
 .../test_models_transformer_allegro.py | 4 +-
 .../test_models_transformer_aura_flow.py | 2 +-
 .../test_models_transformer_bria.py | 2 +-
 .../test_models_transformer_chroma.py | 2 +-
 .../test_models_transformer_cogvideox.py | 4 +-
 .../test_models_transformer_cogview3plus.py | 4 +-
 .../test_models_transformer_cogview4.py | 2 +-
 .../test_models_transformer_consisid.py | 4 +-
 .../test_models_transformer_cosmos.py | 2 +-
 .../test_models_transformer_easyanimate.py | 2 +-
 .../test_models_transformer_flux.py | 2 +-
 .../test_models_transformer_hidream.py | 4 +-
 .../test_models_transformer_hunyuan_dit.py | 4 +-
 .../test_models_transformer_hunyuan_video.py | 4 +-
 ...els_transformer_hunyuan_video_framepack.py | 4 +-
 .../test_models_transformer_latte.py | 4 +-
 .../test_models_transformer_ltx.py | 2 +-
 .../test_models_transformer_lumina.py | 4 +-
 .../test_models_transformer_lumina2.py | 4 +-
 .../test_models_transformer_mochi.py | 2 +-
 .../test_models_transformer_omnigen.py | 2 +-
 .../test_models_transformer_qwenimage.py | 2 +-
 .../test_models_transformer_sana.py | 4 +-
 .../test_models_transformer_sd3.py | 4 +-
 .../test_models_transformer_skyreels_v2.py | 4 +-
 .../test_models_transformer_temporal.py | 4 +-
 .../test_models_transformer_wan.py | 4 +-
 tests/models/unets/test_models_unet_1d.py | 4 +-
 tests/models/unets/test_models_unet_2d.py | 4 +-
 .../unets/test_models_unet_2d_condition.py | 4 +-
 .../models/unets/test_models_unet_2d_flax.py | 3 +-
 .../unets/test_models_unet_3d_condition.py | 2 +-
 .../unets/test_models_unet_controlnetxs.py | 2 +-
 tests/models/unets/test_models_unet_motion.py | 4 +-
 .../unets/test_models_unet_spatiotemporal.py | 4 +-
 tests/models/unets/test_unet_2d_blocks.py | 2 +-
 tests/models/unets/test_unet_blocks_common.py | 5 +-
 ...st_modular_pipeline_stable_diffusion_xl.py | 10 +-
 .../test_modular_pipelines_common.py | 3 +-
 tests/others/test_config.py | 3 +-
 tests/others/test_ema.py | 3 +-
 tests/others/test_outputs.py | 3 +-
 tests/others/test_training.py | 3 +-
 tests/others/test_utils.py | 3 +-
 tests/pipelines/allegro/test_allegro.py | 4 +-
 .../pipelines/animatediff/test_animatediff.py | 4 +-
 .../test_animatediff_controlnet.py | 2 +-
 .../animatediff/test_animatediff_sdxl.py | 2 +-
 .../test_animatediff_sparsectrl.py | 2 +-
 .../test_animatediff_video2video.py | 2 +-
 ...test_animatediff_video2video_controlnet.py | 2 +-
 tests/pipelines/audioldm2/test_audioldm2.py | 4 +-
 tests/pipelines/bria/test_pipeline_bria.py | 9 +-
 .../pipelines/chroma/test_pipeline_chroma.py | 2 +-
 .../chroma/test_pipeline_chroma_img2img.py | 2 +-
 tests/pipelines/cogvideo/test_cogvideox.py | 4 +-
 .../cogvideo/test_cogvideox_fun_control.py | 4 +-
 .../cogvideo/test_cogvideox_image2video.py | 4 +-
 .../cogvideo/test_cogvideox_video2video.py | 2 +-
 tests/pipelines/cogview3/test_cogview3plus.py | 4 +-
 tests/pipelines/cogview4/test_cogview4.py | 2 +-
 tests/pipelines/consisid/test_consisid.py | 4 +-
 .../test_consistency_models.py | 6 +-
 tests/pipelines/controlnet/test_controlnet.py | 6 +-
 .../controlnet/test_controlnet_img2img.py | 6 +-
 .../controlnet/test_controlnet_inpaint.py | 6 +-
 .../test_controlnet_inpaint_sdxl.py | 4 +-
 .../controlnet/test_controlnet_sdxl.py | 6 +-
 .../test_controlnet_sdxl_img2img.py | 4 +-
 .../controlnet/test_flax_controlnet.py | 3 +-
 .../controlnet_flux/test_controlnet_flux.py | 6 +-
 .../test_controlnet_flux_img2img.py | 6 +-
 .../test_controlnet_flux_inpaint.py | 6 +-
 .../test_controlnet_hunyuandit.py | 6 +-
 .../test_controlnet_inpaint_sd3.py | 6 +-
 .../controlnet_sd3/test_controlnet_sd3.py | 6 +-
 tests/pipelines/cosmos/test_cosmos.py | 2 +-
 .../cosmos/test_cosmos2_text2image.py | 2 +-
 .../cosmos/test_cosmos2_video2world.py | 2 +-
 .../cosmos/test_cosmos_video2world.py | 2 +-
 tests/pipelines/ddim/test_ddim.py | 2 +-
 tests/pipelines/ddpm/test_ddpm.py | 3 +-
 tests/pipelines/deepfloyd_if/__init__.py | 2 +-
 tests/pipelines/deepfloyd_if/test_if.py | 4 +-
 .../pipelines/deepfloyd_if/test_if_img2img.py | 4 +-
 .../test_if_img2img_superresolution.py | 4 +-
 .../deepfloyd_if/test_if_inpainting.py | 4 +-
 .../test_if_inpainting_superresolution.py | 4 +-
 .../deepfloyd_if/test_if_superresolution.py | 4 +-
 tests/pipelines/dit/test_dit.py | 4 +-
 .../pipelines/easyanimate/test_easyanimate.py | 4 +-
 tests/pipelines/flux/test_pipeline_flux.py | 4 +-
 .../flux/test_pipeline_flux_control.py | 2 +-
 .../test_pipeline_flux_control_img2img.py | 2 +-
 .../test_pipeline_flux_control_inpaint.py | 4 +-
 .../pipelines/flux/test_pipeline_flux_fill.py | 4 +-
 .../flux/test_pipeline_flux_img2img.py | 4 +-
 .../flux/test_pipeline_flux_inpaint.py | 4 +-
 .../flux/test_pipeline_flux_kontext.py | 2 +-
 .../test_pipeline_flux_kontext_inpaint.py | 2 +-
 .../flux/test_pipeline_flux_redux.py | 3 +-
 .../hidream_image/test_pipeline_hidream.py | 2 +-
 .../hunyuan_video/test_hunyuan_image2video.py | 2 +-
 .../test_hunyuan_skyreels_image2video.py | 2 +-
 .../hunyuan_video/test_hunyuan_video.py | 2 +-
 .../test_hunyuan_video_framepack.py | 4 +-
 .../pipelines/hunyuandit/test_hunyuan_dit.py | 4 +-
 tests/pipelines/ip_adapters/__init__.py | 0
 .../test_ip_adapter_stable_diffusion.py | 3 +-
 tests/pipelines/kandinsky/test_kandinsky.py | 4 +-
 .../kandinsky/test_kandinsky_combined.py | 2 +-
 .../kandinsky/test_kandinsky_img2img.py | 4 +-
 .../kandinsky/test_kandinsky_inpaint.py | 4 +-
 .../kandinsky/test_kandinsky_prior.py | 2 +-
 .../pipelines/kandinsky2_2/test_kandinsky.py | 4 +-
 .../kandinsky2_2/test_kandinsky_combined.py | 2 +-
 .../kandinsky2_2/test_kandinsky_controlnet.py | 4 +-
 .../test_kandinsky_controlnet_img2img.py | 4 +-
 .../kandinsky2_2/test_kandinsky_img2img.py | 4 +-
 .../kandinsky2_2/test_kandinsky_inpaint.py | 4 +-
 .../kandinsky2_2/test_kandinsky_prior.py | 2 +-
 .../test_kandinsky_prior_emb2emb.py | 4 +-
 tests/pipelines/kandinsky3/test_kandinsky3.py | 4 +-
 .../kandinsky3/test_kandinsky3_img2img.py | 4 +-
 tests/pipelines/kolors/test_kolors.py | 2 +-
 tests/pipelines/kolors/test_kolors_img2img.py | 4 +-
 .../test_latent_consistency_models.py | 4 +-
 .../test_latent_consistency_models_img2img.py | 4 +-
 .../latent_diffusion/test_latent_diffusion.py | 4 +-
 .../test_latent_diffusion_superresolution.py | 3 +-
 tests/pipelines/latte/test_latte.py | 4 +-
 .../test_ledits_pp_stable_diffusion.py | 3 +-
 .../test_ledits_pp_stable_diffusion_xl.py | 2 +-
 tests/pipelines/ltx/test_ltx.py | 2 +-
 tests/pipelines/ltx/test_ltx_condition.py | 2 +-
 tests/pipelines/ltx/test_ltx_image2video.py | 2 +-
 .../pipelines/ltx/test_ltx_latent_upsample.py | 2 +-
 tests/pipelines/lumina/test_lumina_nextdit.py | 4 +-
 .../pipelines/marigold/test_marigold_depth.py | 4 +-
 .../marigold/test_marigold_intrinsics.py | 4 +-
 .../marigold/test_marigold_normals.py | 4 +-
 tests/pipelines/mochi/test_mochi.py | 4 +-
 .../omnigen/test_pipeline_omnigen.py | 4 +-
 tests/pipelines/pag/test_pag_animatediff.py | 2 +-
 tests/pipelines/pag/test_pag_controlnet_sd.py | 2 +-
 .../pag/test_pag_controlnet_sd_inpaint.py | 2 +-
 .../pipelines/pag/test_pag_controlnet_sdxl.py | 2 +-
 .../pag/test_pag_controlnet_sdxl_img2img.py | 2 +-
 tests/pipelines/pag/test_pag_hunyuan_dit.py | 2 +-
 tests/pipelines/pag/test_pag_kolors.py | 2 +-
 tests/pipelines/pag/test_pag_pixart_sigma.py | 4 +-
 tests/pipelines/pag/test_pag_sana.py | 2 +-
 tests/pipelines/pag/test_pag_sd.py | 4 +-
 tests/pipelines/pag/test_pag_sd3.py | 4 +-
 tests/pipelines/pag/test_pag_sd3_img2img.py | 4 +-
 tests/pipelines/pag/test_pag_sd_img2img.py | 4 +-
 tests/pipelines/pag/test_pag_sd_inpaint.py | 4 +-
 tests/pipelines/pag/test_pag_sdxl.py | 4 +-
 tests/pipelines/pag/test_pag_sdxl_img2img.py | 4 +-
 tests/pipelines/pag/test_pag_sdxl_inpaint.py | 4 +-
 tests/pipelines/pixart_alpha/test_pixart.py | 4 +-
 tests/pipelines/pixart_sigma/test_pixart.py | 4 +-
 tests/pipelines/pndm/test_pndm.py | 3 +-
 tests/pipelines/qwenimage/test_qwenimage.py | 2 +-
 .../qwenimage/test_qwenimage_edit.py | 2 +-
 .../qwenimage/test_qwenimage_img2img.py | 4 +-
 .../qwenimage/test_qwenimage_inpaint.py | 2 +-
 tests/pipelines/sana/test_sana.py | 4 +-
 tests/pipelines/sana/test_sana_controlnet.py | 6 +-
 tests/pipelines/sana/test_sana_sprint.py | 4 +-
 .../sana/test_sana_sprint_img2img.py | 6 +-
 tests/pipelines/shap_e/test_shap_e.py | 4 +-
 tests/pipelines/shap_e/test_shap_e_img2img.py | 4 +-
 .../pipelines/skyreels_v2/test_skyreels_v2.py | 4 +-
 .../skyreels_v2/test_skyreels_v2_df.py | 4 +-
 .../test_skyreels_v2_df_image_to_video.py | 2 +-
 .../test_skyreels_v2_df_video_to_video.py | 4 +-
 .../test_skyreels_v2_image_to_video.py | 2 +-
 .../stable_audio/test_stable_audio.py | 4 +-
 .../test_stable_cascade_combined.py | 2 +-
 .../test_stable_cascade_decoder.py | 6 +-
 .../test_stable_cascade_prior.py | 3 +-
 .../test_onnx_stable_diffusion.py | 2 +-
 .../test_onnx_stable_diffusion_img2img.py | 4 +-
 .../test_onnx_stable_diffusion_inpaint.py | 4 +-
 .../test_onnx_stable_diffusion_upscale.py | 4 +-
 .../stable_diffusion/test_stable_diffusion.py | 4 +-
 .../test_stable_diffusion_img2img.py | 4 +-
 .../test_stable_diffusion_inpaint.py | 4 +-
 ...st_stable_diffusion_instruction_pix2pix.py | 4 +-
 .../test_stable_diffusion.py | 4 +-
 .../test_stable_diffusion_depth.py | 4 +-
 .../test_stable_diffusion_flax.py | 3 +-
 .../test_stable_diffusion_flax_inpaint.py | 3 +-
 .../test_stable_diffusion_inpaint.py | 4 +-
 .../test_stable_diffusion_latent_upscale.py | 4 +-
 .../test_stable_diffusion_upscale.py | 3 +-
 .../test_stable_diffusion_v_pred.py | 3 +-
 .../test_pipeline_stable_diffusion_3.py | 4 +-
 ...est_pipeline_stable_diffusion_3_img2img.py | 4 +-
 ...est_pipeline_stable_diffusion_3_inpaint.py | 4 +-
 .../test_stable_diffusion_adapter.py | 4 +-
 .../test_stable_diffusion_image_variation.py | 4 +-
 .../test_stable_diffusion_xl.py | 4 +-
 .../test_stable_diffusion_xl_adapter.py | 4 +-
 .../test_stable_diffusion_xl_img2img.py | 4 +-
 .../test_stable_diffusion_xl_inpaint.py | 4 +-
 ...stable_diffusion_xl_instruction_pix2pix.py | 2 +-
 .../stable_unclip/test_stable_unclip.py | 4 +-
 .../test_stable_unclip_img2img.py | 4 +-
 .../test_stable_video_diffusion.py | 4 +-
 tests/pipelines/test_pipeline_utils.py | 3 +-
 tests/pipelines/test_pipelines.py | 5 +-
 tests/pipelines/test_pipelines_auto.py | 3 +-
 tests/pipelines/test_pipelines_common.py | 26 +-
 tests/pipelines/test_pipelines_flax.py | 3 +-
 tests/pipelines/test_pipelines_onnx_common.py | 2 +-
 .../test_pipeline_visualcloze_combined.py | 4 +-
 .../test_pipeline_visualcloze_generation.py | 4 +-
 tests/pipelines/wan/test_wan.py | 4 +-
 tests/pipelines/wan/test_wan_22.py | 4 +-
 .../wan/test_wan_22_image_to_video.py | 4 +-
 .../pipelines/wan/test_wan_image_to_video.py | 2 +-
 tests/pipelines/wan/test_wan_vace.py | 2 +-
 .../pipelines/wan/test_wan_video_to_video.py | 4 +-
 tests/quantization/bnb/test_4bit.py | 4 +-
 tests/quantization/bnb/test_mixed_int8.py | 4 +-
 tests/quantization/gguf/test_gguf.py | 4 +-
 tests/quantization/quanto/test_quanto.py | 3 +-
 .../test_pipeline_level_quantization.py | 3 +-
 .../quantization/test_torch_compile_utils.py | 3 +-
 tests/quantization/torchao/test_torchao.py | 4 +-
 tests/quantization/utils.py | 3 +-
 tests/remote/test_remote_decode.py | 5 +-
 tests/remote/test_remote_encode.py | 3 +-
 tests/schedulers/test_scheduler_dpm_sde.py | 2 +-
 tests/schedulers/test_scheduler_euler.py | 2 +-
 .../test_scheduler_euler_ancestral.py | 2 +-
 tests/schedulers/test_scheduler_flax.py | 3 +-
 tests/schedulers/test_scheduler_heun.py | 2 +-
 .../test_scheduler_kdpm2_ancestral.py | 2 +-
 .../test_scheduler_kdpm2_discrete.py | 2 +-
 tests/schedulers/test_scheduler_lcm.py | 2 +-
 tests/schedulers/test_scheduler_lms.py | 2 +-
 tests/schedulers/test_scheduler_sasolver.py | 2 +-
 tests/schedulers/test_schedulers.py | 2 +-
 .../single_file/single_file_testing_utils.py | 3 +-
 tests/single_file/test_lumina2_transformer.py | 3 +-
 .../test_model_autoencoder_dc_single_file.py | 3 +-
 .../test_model_controlnet_single_file.py | 3 +-
 ...test_model_flux_transformer_single_file.py | 3 +-
 .../test_model_motion_adapter_single_file.py | 3 +-
 .../test_model_sd_cascade_unet_single_file.py | 3 +-
 .../single_file/test_model_vae_single_file.py | 3 +-
 .../test_model_wan_autoencoder_single_file.py | 3 +-
 ...est_model_wan_transformer3d_single_file.py | 3 +-
 tests/single_file/test_sana_transformer.py | 3 +-
 ...iffusion_controlnet_img2img_single_file.py | 4 +-
 ...iffusion_controlnet_inpaint_single_file.py | 4 +-
 ...stable_diffusion_controlnet_single_file.py | 4 +-
 ...st_stable_diffusion_img2img_single_file.py | 4 +-
 ...st_stable_diffusion_inpaint_single_file.py | 4 +-
 .../test_stable_diffusion_single_file.py | 4 +-
 ...st_stable_diffusion_upscale_single_file.py | 4 +-
 ...stable_diffusion_xl_adapter_single_file.py | 4 +-
 ...ble_diffusion_xl_controlnet_single_file.py | 4 +-
 ...stable_diffusion_xl_img2img_single_file.py | 4 +-
 ...st_stable_diffusion_xl_instruct_pix2pix.py | 3 +-
 .../test_stable_diffusion_xl_single_file.py | 4 +-
 tests/testing_utils.py | 1557 +++++++++++++++++
 314 files changed, 2149 insertions(+), 527 deletions(-)
 create mode 100644 tests/hooks/__init__.py
 create mode 100644 tests/lora/__init__.py
 create mode 100644 tests/pipelines/ip_adapters/__init__.py
 create mode 100644 tests/testing_utils.py

diff --git a/tests/conftest.py b/tests/conftest.py
index 3237fb9c7bb0..fd76d1c84ee7 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -35,13 +35,13 @@ def pytest_configure(config):
 
 
 def pytest_addoption(parser):
-    from diffusers.utils.testing_utils import pytest_addoption_shared
+    from .testing_utils import pytest_addoption_shared
 
     pytest_addoption_shared(parser)
 
 
 def pytest_terminal_summary(terminalreporter):
-    from diffusers.utils.testing_utils import pytest_terminal_summary_main
+    from .testing_utils import pytest_terminal_summary_main
 
     make_reports = terminalreporter.config.getoption("--make-reports")
     if make_reports:
diff --git a/tests/hooks/__init__.py b/tests/hooks/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/tests/hooks/test_group_offloading.py b/tests/hooks/test_group_offloading.py
index ea08dec19cfc..96cbecfbf530 100644
--- a/tests/hooks/test_group_offloading.py
+++ b/tests/hooks/test_group_offloading.py
@@ -24,7 +24,8 @@
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline
 from diffusers.utils import get_logger
 from diffusers.utils.import_utils import compare_versions
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_peak_memory_stats,
diff --git a/tests/hooks/test_hooks.py b/tests/hooks/test_hooks.py
index 7f60acf8d3b4..1e845cc40c7d 100644
--- a/tests/hooks/test_hooks.py
+++ b/tests/hooks/test_hooks.py
@@ -20,7 +20,8 @@
 from diffusers.hooks import HookRegistry, ModelHook
 from diffusers.training_utils import free_memory
 from diffusers.utils.logging import get_logger
-from diffusers.utils.testing_utils import CaptureLogger, torch_device
+
+from ..testing_utils import CaptureLogger, torch_device
 
 
 logger = get_logger(__name__)  # pylint: disable=invalid-name
diff --git a/tests/lora/__init__.py b/tests/lora/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/tests/lora/test_lora_layers_auraflow.py b/tests/lora/test_lora_layers_auraflow.py
index d119feae20d0..67084dd6d078 100644
--- a/tests/lora/test_lora_layers_auraflow.py
+++ b/tests/lora/test_lora_layers_auraflow.py
@@ -23,7 +23,8 @@
     AuraFlowTransformer2DModel,
     FlowMatchEulerDiscreteScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     is_peft_available,
     require_peft_backend,
@@ -35,7 +36,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_cogvideox.py b/tests/lora/test_lora_layers_cogvideox.py
index 565d6db69727..16147f35c71d 100644
--- a/tests/lora/test_lora_layers_cogvideox.py
+++ b/tests/lora/test_lora_layers_cogvideox.py
@@ -26,7 +26,8 @@
     CogVideoXPipeline,
     CogVideoXTransformer3DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     require_torch_accelerator,
@@ -35,7 +36,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_cogview4.py b/tests/lora/test_lora_layers_cogview4.py
index b7367d9b0946..3b8a56c40302 100644
--- a/tests/lora/test_lora_layers_cogview4.py
+++ b/tests/lora/test_lora_layers_cogview4.py
@@ -22,7 +22,8 @@
 from transformers import AutoTokenizer, GlmModel
 
 from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     require_torch_accelerator,
@@ -33,7 +34,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 class TokenizerWrapper:
diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py
index 95f1e137e94b..7d99bcad8087 100644
--- a/tests/lora/test_lora_layers_flux.py
+++ b/tests/lora/test_lora_layers_flux.py
@@ -28,7 +28,8 @@
 
 from diffusers import FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxPipeline, FluxTransformer2DModel
 from diffusers.utils import load_image, logging
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     floats_tensor,
@@ -48,7 +49,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_hunyuanvideo.py b/tests/lora/test_lora_layers_hunyuanvideo.py
index 4cbd6523e712..62d045f8364d 100644
--- a/tests/lora/test_lora_layers_hunyuanvideo.py
+++ b/tests/lora/test_lora_layers_hunyuanvideo.py
@@ -26,7 +26,8 @@
     HunyuanVideoPipeline,
     HunyuanVideoTransformer3DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     Expectations,
     backend_empty_cache,
     floats_tensor,
@@ -42,7 +43,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_ltx_video.py b/tests/lora/test_lora_layers_ltx_video.py
index 88949227cf94..a8ad30e44827 100644
--- a/tests/lora/test_lora_layers_ltx_video.py
+++ b/tests/lora/test_lora_layers_ltx_video.py
@@ -24,12 +24,13 @@
     LTXPipeline,
     LTXVideoTransformer3DModel,
 )
-from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
+
+from ..testing_utils import floats_tensor, require_peft_backend
 
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_lumina2.py b/tests/lora/test_lora_layers_lumina2.py
index d7096e79b93c..0ebc831b1147 100644
--- a/tests/lora/test_lora_layers_lumina2.py
+++ b/tests/lora/test_lora_layers_lumina2.py
@@ -26,12 +26,13 @@
     Lumina2Pipeline,
     Lumina2Transformer2DModel,
 )
-from diffusers.utils.testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device
+
+from ..testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device
 
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_mochi.py b/tests/lora/test_lora_layers_mochi.py
index 501a4b35f48e..21cc5f11a352 100644
--- a/tests/lora/test_lora_layers_mochi.py
+++ b/tests/lora/test_lora_layers_mochi.py
@@ -19,7 +19,8 @@
 from transformers import AutoTokenizer, T5EncoderModel
 
 from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     skip_mps,
@@ -28,7 +29,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_qwenimage.py b/tests/lora/test_lora_layers_qwenimage.py
index 5850626308ef..44ef9b0a37b3 100644
--- a/tests/lora/test_lora_layers_qwenimage.py
+++ b/tests/lora/test_lora_layers_qwenimage.py
@@ -24,12 +24,13 @@
     QwenImagePipeline,
     QwenImageTransformer2DModel,
 )
-from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
+
+from ..testing_utils import floats_tensor, require_peft_backend
 
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_sana.py b/tests/lora/test_lora_layers_sana.py
index 24beb46b95ff..a08908c6108a 100644
--- a/tests/lora/test_lora_layers_sana.py
+++ b/tests/lora/test_lora_layers_sana.py
@@ -19,12 +19,13 @@
 from transformers import Gemma2Model, GemmaTokenizer
 
 from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel
-from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
+
+from ..testing_utils import floats_tensor, require_peft_backend
 
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_sd.py b/tests/lora/test_lora_layers_sd.py
index 1c5a9b00e9da..933bf2336a59 100644
--- a/tests/lora/test_lora_layers_sd.py
+++ b/tests/lora/test_lora_layers_sd.py
@@ -32,7 +32,8 @@
     StableDiffusionPipeline,
 )
 from diffusers.utils.import_utils import is_accelerate_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     Expectations,
     backend_empty_cache,
     load_image,
@@ -47,7 +48,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
 
 
 if is_accelerate_available():
diff --git a/tests/lora/test_lora_layers_sd3.py b/tests/lora/test_lora_layers_sd3.py
index 8928ccbac2dd..95f6f325e4c7 100644
--- a/tests/lora/test_lora_layers_sd3.py
+++ b/tests/lora/test_lora_layers_sd3.py
@@ -28,7 +28,8 @@
 )
 from diffusers.utils import load_image
 from diffusers.utils.import_utils import is_accelerate_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_empty_cache,
     is_flaky,
     nightly,
@@ -42,7 +43,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 if is_accelerate_available():
diff --git a/tests/lora/test_lora_layers_sdxl.py b/tests/lora/test_lora_layers_sdxl.py
index 267650056aad..ac1d65abdaa7 100644
--- a/tests/lora/test_lora_layers_sdxl.py
+++ b/tests/lora/test_lora_layers_sdxl.py
@@ -35,7 +35,8 @@
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_accelerate_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     is_flaky,
@@ -51,7 +52,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal  # noqa: E402
 
 
 if is_accelerate_available():
diff --git a/tests/lora/test_lora_layers_wan.py b/tests/lora/test_lora_layers_wan.py
index fe26a56e77cf..0ba80d2be1d1 100644
--- a/tests/lora/test_lora_layers_wan.py
+++ b/tests/lora/test_lora_layers_wan.py
@@ -24,7 +24,8 @@
     WanPipeline,
     WanTransformer3DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     require_peft_backend,
     skip_mps,
@@ -33,7 +34,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/test_lora_layers_wanvace.py b/tests/lora/test_lora_layers_wanvace.py
index a0954fa4fa05..d8dde32dd8ec 100644
--- a/tests/lora/test_lora_layers_wanvace.py
+++ b/tests/lora/test_lora_layers_wanvace.py
@@ -25,7 +25,8 @@
 
 from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel
 from diffusers.utils.import_utils import is_peft_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     floats_tensor,
     is_flaky,
     require_peft_backend,
@@ -40,7 +41,7 @@
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests  # noqa: E402
+from .utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
diff --git a/tests/lora/utils.py b/tests/lora/utils.py
index f09f0d8ecb89..72c1dddaa228 100644
--- a/tests/lora/utils.py
+++ b/tests/lora/utils.py
@@ -32,7 +32,8 @@
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_peft_available
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     CaptureLogger,
     check_if_dicts_are_equal,
     floats_tensor,
diff --git a/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py b/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py
index 47416483591a..7eb830cd5097 100644
--- a/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py
+++ b/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py
@@ -21,7 +21,8 @@
 
 from diffusers import AsymmetricAutoencoderKL
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     Expectations,
     backend_empty_cache,
     enable_full_determinism,
@@ -34,7 +35,6 @@
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_cosmos.py b/tests/models/autoencoders/test_models_autoencoder_cosmos.py
index bc0011a2f0f1..ceccc2364e26 100644
--- a/tests/models/autoencoders/test_models_autoencoder_cosmos.py
+++ b/tests/models/autoencoders/test_models_autoencoder_cosmos.py
@@ -15,8 +15,8 @@
 import unittest
 
 from diffusers import AutoencoderKLCosmos
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
 
+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_dc.py b/tests/models/autoencoders/test_models_autoencoder_dc.py
index 4d2c3dc663c2..56f172f1c869 100644
--- a/tests/models/autoencoders/test_models_autoencoder_dc.py
+++ b/tests/models/autoencoders/test_models_autoencoder_dc.py
@@ -16,12 +16,12 @@
 import unittest
 
 from diffusers import AutoencoderDC
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py b/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py
index 40479991e986..6f91f8bfa91b 100644
--- a/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py
+++ b/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py
@@ -19,12 +19,12 @@
 
 from diffusers import AutoencoderKLHunyuanVideo
 from diffusers.models.autoencoders.autoencoder_kl_hunyuan_video import prepare_causal_attention_mask
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_kl.py b/tests/models/autoencoders/test_models_autoencoder_kl.py
index 2c323e4f03fa..662a3f1b80b7 100644
--- a/tests/models/autoencoders/test_models_autoencoder_kl.py
+++ b/tests/models/autoencoders/test_models_autoencoder_kl.py
@@ -21,7 +21,8 @@
 
 from diffusers import AutoencoderKL
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -34,7 +35,6 @@
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_kl_cogvideox.py b/tests/models/autoencoders/test_models_autoencoder_kl_cogvideox.py
index 7ab9520ce64a..739daf2a492d 100644
--- a/tests/models/autoencoders/test_models_autoencoder_kl_cogvideox.py
+++ b/tests/models/autoencoders/test_models_autoencoder_kl_cogvideox.py
@@ -18,12 +18,12 @@
 import torch
 
 from diffusers import AutoencoderKLCogVideoX
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_kl_temporal_decoder.py b/tests/models/autoencoders/test_models_autoencoder_kl_temporal_decoder.py
index 618a448ecab3..6cb427bff8e1 100644
--- a/tests/models/autoencoders/test_models_autoencoder_kl_temporal_decoder.py
+++ b/tests/models/autoencoders/test_models_autoencoder_kl_temporal_decoder.py
@@ -16,12 +16,12 @@
 import unittest
 
 from diffusers import AutoencoderKLTemporalDecoder
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_ltx_video.py b/tests/models/autoencoders/test_models_autoencoder_ltx_video.py
index c056930a5e0b..21ab3896c890 100644
--- a/tests/models/autoencoders/test_models_autoencoder_ltx_video.py
+++ b/tests/models/autoencoders/test_models_autoencoder_ltx_video.py
@@ -18,12 +18,12 @@
 import torch
 
 from diffusers import AutoencoderKLLTXVideo
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_magvit.py b/tests/models/autoencoders/test_models_autoencoder_magvit.py
index c1711766273e..58cbfc05bd03 100644
--- a/tests/models/autoencoders/test_models_autoencoder_magvit.py
+++ b/tests/models/autoencoders/test_models_autoencoder_magvit.py
@@ -16,8 +16,8 @@
 import unittest
 
 from diffusers import AutoencoderKLMagvit
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
 
+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_mochi.py b/tests/models/autoencoders/test_models_autoencoder_mochi.py
index d646693c575a..b8c5aaaa1eb6 100755
--- a/tests/models/autoencoders/test_models_autoencoder_mochi.py
+++ b/tests/models/autoencoders/test_models_autoencoder_mochi.py
@@ -16,12 +16,12 @@
 import unittest
 
 from diffusers import AutoencoderKLMochi
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_oobleck.py b/tests/models/autoencoders/test_models_autoencoder_oobleck.py
index 5c1d7c8b71f1..eb7bd50f4a54 100644
--- a/tests/models/autoencoders/test_models_autoencoder_oobleck.py
+++ b/tests/models/autoencoders/test_models_autoencoder_oobleck.py
@@ -21,7 +21,8 @@
 from parameterized import parameterized
 
 from diffusers import AutoencoderOobleck
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -29,7 +30,6 @@
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_tiny.py b/tests/models/autoencoders/test_models_autoencoder_tiny.py
index fba2c9eb1b67..4d1dc69cfaad 100644
--- a/tests/models/autoencoders/test_models_autoencoder_tiny.py
+++ b/tests/models/autoencoders/test_models_autoencoder_tiny.py
@@ -21,7 +21,8 @@
 from parameterized import parameterized
 
 from diffusers import AutoencoderTiny
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -30,7 +31,6 @@
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_autoencoder_wan.py b/tests/models/autoencoders/test_models_autoencoder_wan.py
index c0af4f5834b7..cc9c88868157 100644
--- a/tests/models/autoencoders/test_models_autoencoder_wan.py
+++ b/tests/models/autoencoders/test_models_autoencoder_wan.py
@@ -18,8 +18,8 @@
 import torch
 
 from diffusers import AutoencoderKLWan
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
 
+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/autoencoders/test_models_consistency_decoder_vae.py b/tests/models/autoencoders/test_models_consistency_decoder_vae.py
index cdce013cfb29..7e44edba3624 100644
--- a/tests/models/autoencoders/test_models_consistency_decoder_vae.py
+++ b/tests/models/autoencoders/test_models_consistency_decoder_vae.py
@@ -20,7 +20,9 @@
 import torch
 
 from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     load_image,
@@ -28,8 +30,6 @@
     torch_all_close,
     torch_device,
 )
-from diffusers.utils.torch_utils import randn_tensor
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/autoencoders/test_models_vae_flax.py b/tests/models/autoencoders/test_models_vae_flax.py
index 8fedb85eccfc..3023a7c32c0d 100644
--- a/tests/models/autoencoders/test_models_vae_flax.py
+++ b/tests/models/autoencoders/test_models_vae_flax.py
@@ -2,8 +2,8 @@
 
 from diffusers import FlaxAutoencoderKL
 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import require_flax
 
+from ...testing_utils import require_flax
 from ..test_modeling_common_flax import FlaxModelTesterMixin
diff --git a/tests/models/autoencoders/test_models_vq.py b/tests/models/autoencoders/test_models_vq.py
index e8ed98f44a26..1c636b081733 100644
--- a/tests/models/autoencoders/test_models_vq.py
+++ b/tests/models/autoencoders/test_models_vq.py
@@ -18,13 +18,13 @@
 import torch
 
 from diffusers import VQModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_manual_seed,
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/test_attention_processor.py b/tests/models/test_attention_processor.py
index d070f6ea33e3..ccf36b092b46 100644
--- a/tests/models/test_attention_processor.py
+++ b/tests/models/test_attention_processor.py
@@ -7,7 +7,8 @@
 
 from diffusers import DiffusionPipeline
 from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
-from diffusers.utils.testing_utils import torch_device
+
+from ..testing_utils import torch_device
 
 
 class AttnAddedKVProcessorTests(unittest.TestCase):
diff --git a/tests/models/test_layers_utils.py b/tests/models/test_layers_utils.py
index ec8e01b4b76b..eaeffa699db2 100644
--- a/tests/models/test_layers_utils.py
+++ b/tests/models/test_layers_utils.py
@@ -24,7 +24,8 @@
 from diffusers.models.embeddings import get_timestep_embedding
 from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
 from diffusers.models.transformers.transformer_2d import Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_manual_seed,
     require_torch_accelerator_with_fp64,
     require_torch_version_greater_equal,
diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index 1e08191f56aa..36eb2c1ef488 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -59,7 +59,10 @@
     logging,
 )
 from diffusers.utils.hub_utils import _add_variant
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import get_torch_cuda_device_capability
+
+from ..others.test_utils import TOKEN, USER, is_staging_test
+from ..testing_utils import (
     CaptureLogger,
     _check_safetensors_serialization,
     backend_empty_cache,
@@ -82,9 +85,6 @@
     torch_all_close,
     torch_device,
 )
-from diffusers.utils.torch_utils import get_torch_cuda_device_capability
-
-from ..others.test_utils import TOKEN, USER, is_staging_test
 
 
 if is_peft_available():
diff --git a/tests/models/test_modeling_common_flax.py b/tests/models/test_modeling_common_flax.py
index 8945aed7c93f..41e970b56664 100644
--- a/tests/models/test_modeling_common_flax.py
+++ b/tests/models/test_modeling_common_flax.py
@@ -1,7 +1,8 @@
 import inspect
 
 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import require_flax
+
+from ..testing_utils import require_flax
 
 
 if is_flax_available():
diff --git a/tests/models/transformers/test_models_dit_transformer2d.py b/tests/models/transformers/test_models_dit_transformer2d.py
index 307032167347..473a87637578 100644
--- a/tests/models/transformers/test_models_dit_transformer2d.py
+++ b/tests/models/transformers/test_models_dit_transformer2d.py
@@ -18,13 +18,13 @@
 import torch
 
 from diffusers import DiTTransformer2DModel, Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     slow,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_pixart_transformer2d.py b/tests/models/transformers/test_models_pixart_transformer2d.py
index 38fada0b4be2..17c400cf1911 100644
--- a/tests/models/transformers/test_models_pixart_transformer2d.py
+++ b/tests/models/transformers/test_models_pixart_transformer2d.py
@@ -18,13 +18,13 @@
 import torch
 
 from diffusers import PixArtTransformer2DModel, Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     slow,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_prior.py b/tests/models/transformers/test_models_prior.py
index 5d66aadb1b89..af5ac4bbbd76 100644
--- a/tests/models/transformers/test_models_prior.py
+++ b/tests/models/transformers/test_models_prior.py
@@ -21,7 +21,8 @@
 from parameterized import parameterized
 
 from diffusers import PriorTransformer
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -29,7 +30,6 @@
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_allegro.py b/tests/models/transformers/test_models_transformer_allegro.py
index 8a0c4755838d..7c002f87819e 100644
--- a/tests/models/transformers/test_models_transformer_allegro.py
+++ b/tests/models/transformers/test_models_transformer_allegro.py
@@ -17,11 +17,11 @@
 import torch
 
 from diffusers import AllegroTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_aura_flow.py b/tests/models/transformers/test_models_transformer_aura_flow.py
index 8dff07373ece..ae8c3b7234a3 100644
--- a/tests/models/transformers/test_models_transformer_aura_flow.py
+++ b/tests/models/transformers/test_models_transformer_aura_flow.py
@@ -18,8 +18,8 @@
 import torch
 
 from diffusers import AuraFlowTransformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_bria.py b/tests/models/transformers/test_models_transformer_bria.py
index 8a8d0dcecffc..9056590edffe 100644
--- a/tests/models/transformers/test_models_transformer_bria.py
+++ b/tests/models/transformers/test_models_transformer_bria.py
@@ -20,8 +20,8 @@
 from diffusers import BriaTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_chroma.py b/tests/models/transformers/test_models_transformer_chroma.py
index e9fd5a0bfb6e..92ac8198ed06 100644
--- a/tests/models/transformers/test_models_transformer_chroma.py
+++ b/tests/models/transformers/test_models_transformer_chroma.py
@@ -20,8 +20,8 @@
 from diffusers import ChromaTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_cogvideox.py b/tests/models/transformers/test_models_transformer_cogvideox.py
index 54d1242bf731..f632add7e5a7 100644
--- a/tests/models/transformers/test_models_transformer_cogvideox.py
+++ b/tests/models/transformers/test_models_transformer_cogvideox.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import CogVideoXTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_cogview3plus.py b/tests/models/transformers/test_models_transformer_cogview3plus.py
index 57131dc3f105..d38d77531d4c 100644
--- a/tests/models/transformers/test_models_transformer_cogview3plus.py
+++ b/tests/models/transformers/test_models_transformer_cogview3plus.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import CogView3PlusTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_cogview4.py b/tests/models/transformers/test_models_transformer_cogview4.py
index 798453e86d31..084c3b7cea41 100644
--- a/tests/models/transformers/test_models_transformer_cogview4.py
+++ b/tests/models/transformers/test_models_transformer_cogview4.py
@@ -17,8 +17,8 @@
 import torch
 
 from diffusers import CogView4Transformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_consisid.py b/tests/models/transformers/test_models_transformer_consisid.py
index af2e1e6338d0..77fc172d078a 100644
--- a/tests/models/transformers/test_models_transformer_consisid.py
+++ b/tests/models/transformers/test_models_transformer_consisid.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import ConsisIDTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_cosmos.py b/tests/models/transformers/test_models_transformer_cosmos.py
index 7d26004d7557..d7390e105c45 100644
--- a/tests/models/transformers/test_models_transformer_cosmos.py
+++ b/tests/models/transformers/test_models_transformer_cosmos.py
@@ -17,8 +17,8 @@
 import torch
 
 from diffusers import CosmosTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_easyanimate.py b/tests/models/transformers/test_models_transformer_easyanimate.py
index 0a255f4d4ed7..d7b90a47d974 100644
--- a/tests/models/transformers/test_models_transformer_easyanimate.py
+++ b/tests/models/transformers/test_models_transformer_easyanimate.py
@@ -18,8 +18,8 @@
 import torch
 
 from diffusers import EasyAnimateTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_flux.py b/tests/models/transformers/test_models_transformer_flux.py
index 14ef6f1514d7..3ab02f797b5b 100644
--- a/tests/models/transformers/test_models_transformer_flux.py
+++ b/tests/models/transformers/test_models_transformer_flux.py
@@ -20,8 +20,8 @@
 from diffusers import FluxTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, is_peft_available, torch_device
 
+from ...testing_utils import enable_full_determinism, is_peft_available, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_hidream.py b/tests/models/transformers/test_models_transformer_hidream.py
index fa0fa5123ac8..fdd5f8c7fd07 100644
--- a/tests/models/transformers/test_models_transformer_hidream.py
+++ b/tests/models/transformers/test_models_transformer_hidream.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import HiDreamImageTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_hunyuan_dit.py b/tests/models/transformers/test_models_transformer_hunyuan_dit.py
index 242ce1f283a6..d82a62d58ec3 100644
--- a/tests/models/transformers/test_models_transformer_hunyuan_dit.py
+++ b/tests/models/transformers/test_models_transformer_hunyuan_dit.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import HunyuanDiT2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_hunyuan_video.py b/tests/models/transformers/test_models_transformer_hunyuan_video.py
index b42a3cb5dcad..385a5eefd58b 100644
--- a/tests/models/transformers/test_models_transformer_hunyuan_video.py
+++ b/tests/models/transformers/test_models_transformer_hunyuan_video.py
@@ -17,11 +17,11 @@
 import torch
 
 from diffusers import HunyuanVideoTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py b/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py
index ddb79925a7fe..00a2b27e02b6 100644
--- a/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py
+++ b/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py
@@ -17,11 +17,11 @@
 import torch
 
 from diffusers import HunyuanVideoFramepackTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_latte.py b/tests/models/transformers/test_models_transformer_latte.py
index db93421b4488..7bf2c52e6269 100644
--- a/tests/models/transformers/test_models_transformer_latte.py
+++ b/tests/models/transformers/test_models_transformer_latte.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import LatteTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_ltx.py b/tests/models/transformers/test_models_transformer_ltx.py
index 2c61658f58dc..e912463bbf6a 100644
--- a/tests/models/transformers/test_models_transformer_ltx.py
+++ b/tests/models/transformers/test_models_transformer_ltx.py
@@ -18,8 +18,8 @@
 import torch
 
 from diffusers import LTXVideoTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_lumina.py b/tests/models/transformers/test_models_transformer_lumina.py
index d0103eb47373..0024aa106c6d 100644
--- a/tests/models/transformers/test_models_transformer_lumina.py
+++ b/tests/models/transformers/test_models_transformer_lumina.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import LuminaNextDiT2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_lumina2.py b/tests/models/transformers/test_models_transformer_lumina2.py
index 731e2ff3d536..4efae3d4b713 100644
--- a/tests/models/transformers/test_models_transformer_lumina2.py
+++ b/tests/models/transformers/test_models_transformer_lumina2.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers import Lumina2Transformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_mochi.py b/tests/models/transformers/test_models_transformer_mochi.py
index db65c03292ec..931b5874ee78 100644
--- a/tests/models/transformers/test_models_transformer_mochi.py
+++ b/tests/models/transformers/test_models_transformer_mochi.py
@@ -18,8 +18,8 @@
 import torch
 
 from diffusers import MochiTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_omnigen.py b/tests/models/transformers/test_models_transformer_omnigen.py
index 25f25a8d6300..f1963ddb7709 100644
--- a/tests/models/transformers/test_models_transformer_omnigen.py
+++ b/tests/models/transformers/test_models_transformer_omnigen.py
@@ -18,8 +18,8 @@
 import torch
 
 from diffusers import OmniGenTransformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_qwenimage.py b/tests/models/transformers/test_models_transformer_qwenimage.py
index 498acb8d73c9..b24fa90503ef 100644
--- a/tests/models/transformers/test_models_transformer_qwenimage.py
+++ b/tests/models/transformers/test_models_transformer_qwenimage.py
@@ -19,8 +19,8 @@
 import torch
 
 from diffusers import QwenImageTransformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_sana.py b/tests/models/transformers/test_models_transformer_sana.py
index 6586af0e8213..2e316c3aedc1 100644
--- a/tests/models/transformers/test_models_transformer_sana.py
+++ b/tests/models/transformers/test_models_transformer_sana.py
@@ -17,11 +17,11 @@
 import torch
 
 from diffusers import SanaTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_sd3.py b/tests/models/transformers/test_models_transformer_sd3.py
index 10469c0ca964..c4ee7017a380 100644
--- a/tests/models/transformers/test_models_transformer_sd3.py
+++ b/tests/models/transformers/test_models_transformer_sd3.py
@@ -19,11 +19,11 @@
 
 from diffusers import SD3Transformer2DModel
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_skyreels_v2.py b/tests/models/transformers/test_models_transformer_skyreels_v2.py
index 884f168308cc..8c36d8256ee9 100644
--- a/tests/models/transformers/test_models_transformer_skyreels_v2.py
+++ b/tests/models/transformers/test_models_transformer_skyreels_v2.py
@@ -17,11 +17,11 @@
 import torch
 
 from diffusers import SkyReelsV2Transformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_temporal.py b/tests/models/transformers/test_models_transformer_temporal.py
index 183ef2298247..aff83be51124 100644
--- a/tests/models/transformers/test_models_transformer_temporal.py
+++ b/tests/models/transformers/test_models_transformer_temporal.py
@@ -18,11 +18,11 @@
 import torch
 
 from diffusers.models.transformers import TransformerTemporalModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin
diff --git a/tests/models/transformers/test_models_transformer_wan.py b/tests/models/transformers/test_models_transformer_wan.py
index 932f25598439..9f248f990c8a 100644
--- a/tests/models/transformers/test_models_transformer_wan.py
+++ b/tests/models/transformers/test_models_transformer_wan.py
@@ -17,11 +17,11 @@
 import torch
 
 from diffusers import WanTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
diff --git a/tests/models/unets/test_models_unet_1d.py b/tests/models/unets/test_models_unet_1d.py
index e3dd608a250b..bac017e7e7d3 100644
--- a/tests/models/unets/test_models_unet_1d.py
+++ b/tests/models/unets/test_models_unet_1d.py
@@ -19,13 +19,13 @@
 import torch
 
 from diffusers import UNet1DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_manual_seed,
     floats_tensor,
     slow,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/unets/test_models_unet_2d.py b/tests/models/unets/test_models_unet_2d.py
index f6fa82aeb713..e289f44303f2 100644
--- a/tests/models/unets/test_models_unet_2d.py
+++ b/tests/models/unets/test_models_unet_2d.py
@@ -21,7 +21,8 @@
 
 from diffusers import UNet2DModel
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -30,7 +31,6 @@
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/unets/test_models_unet_2d_condition.py b/tests/models/unets/test_models_unet_2d_condition.py
index 123dff16f8b0..4dbb8ca7c075 100644
--- a/tests/models/unets/test_models_unet_2d_condition.py
+++ b/tests/models/unets/test_models_unet_2d_condition.py
@@ -34,7 +34,8 @@
 from diffusers.models.embeddings import ImageProjection, IPAdapterFaceIDImageProjection, IPAdapterPlusImageProjection
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
@@ -51,7 +52,6 @@
     torch_all_close,
     torch_device,
 )
-
 from ..test_modeling_common import (
     LoraHotSwappingForModelTesterMixin,
     ModelTesterMixin,
diff --git a/tests/models/unets/test_models_unet_2d_flax.py b/tests/models/unets/test_models_unet_2d_flax.py
index 69a0704dca9d..3bc9a04b3c04 100644
--- a/tests/models/unets/test_models_unet_2d_flax.py
+++ b/tests/models/unets/test_models_unet_2d_flax.py
@@ -5,7 +5,8 @@
 
 from diffusers import FlaxUNet2DConditionModel
 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
+
+from ...testing_utils import load_hf_numpy, require_flax, slow
 
 
 if is_flax_available():
diff --git a/tests/models/unets/test_models_unet_3d_condition.py b/tests/models/unets/test_models_unet_3d_condition.py
index 72d692b6e73d..f73e3461c38e 100644
--- a/tests/models/unets/test_models_unet_3d_condition.py
+++ b/tests/models/unets/test_models_unet_3d_condition.py
@@ -21,8 +21,8 @@
 from diffusers.models import ModelMixin, UNet3DConditionModel
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device
 
+from ...testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/unets/test_models_unet_controlnetxs.py b/tests/models/unets/test_models_unet_controlnetxs.py
index cebd18c10d8e..40773536df70 100644
--- a/tests/models/unets/test_models_unet_controlnetxs.py
+++ b/tests/models/unets/test_models_unet_controlnetxs.py
@@ -21,8 +21,8 @@
 
 from diffusers import ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, is_flaky, torch_device
 
+from ...testing_utils import enable_full_determinism, floats_tensor, is_flaky, torch_device
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/unets/test_models_unet_motion.py b/tests/models/unets/test_models_unet_motion.py
index bf8d6bd007fa..d931b345fd09 100644
--- a/tests/models/unets/test_models_unet_motion.py
+++ b/tests/models/unets/test_models_unet_motion.py
@@ -24,12 +24,12 @@
 
 from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/unets/test_models_unet_spatiotemporal.py b/tests/models/unets/test_models_unet_spatiotemporal.py
index 86aa0f6a0e66..7df868c9e95b 100644
--- a/tests/models/unets/test_models_unet_spatiotemporal.py
+++ b/tests/models/unets/test_models_unet_spatiotemporal.py
@@ -21,13 +21,13 @@
 
 from diffusers import UNetSpatioTemporalConditionModel
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     skip_mps,
     torch_device,
 )
-
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
diff --git a/tests/models/unets/test_unet_2d_blocks.py b/tests/models/unets/test_unet_2d_blocks.py
index 21c0c0f08b26..5c006963e30c 100644
--- a/tests/models/unets/test_unet_2d_blocks.py
+++ b/tests/models/unets/test_unet_2d_blocks.py
@@ -15,8 +15,8 @@
 import unittest
 
 from diffusers.models.unets.unet_2d_blocks import *  # noqa F403
-from diffusers.utils.testing_utils import torch_device
 
+from ...testing_utils import torch_device
 from .test_unet_blocks_common import UNetBlockTesterMixin
diff --git a/tests/models/unets/test_unet_blocks_common.py b/tests/models/unets/test_unet_blocks_common.py
index ada7c832697f..85f9bf8353bf 100644
--- a/tests/models/unets/test_unet_blocks_common.py
+++ b/tests/models/unets/test_unet_blocks_common.py
@@ -16,14 +16,15 @@
 
 import torch
 
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import (
     floats_tensor,
     require_torch,
     require_torch_accelerator_with_training,
     torch_all_close,
     torch_device,
 )
-from diffusers.utils.torch_utils import randn_tensor
 
 
 @require_torch
diff --git a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py
index 044cdd57daea..86062394a018 100644
--- a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py
+++ b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py
@@ -27,11 +27,6 @@
     StableDiffusionXLModularPipeline,
 )
 from diffusers.loaders import ModularIPAdapterMixin
-from diffusers.utils.testing_utils import (
-    enable_full_determinism,
-    floats_tensor,
-    torch_device,
-)
 
 from ...models.unets.test_models_unet_2d_condition import (
     create_ip_adapter_state_dict,
@@ -39,6 +34,11 @@
 from ..test_modular_pipelines_common import (
     ModularPipelineTesterMixin,
 )
+from ..testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    torch_device,
+)
 
 
 enable_full_determinism()
diff --git a/tests/modular_pipelines/test_modular_pipelines_common.py b/tests/modular_pipelines/test_modular_pipelines_common.py
index 36595b02a24c..6e612726939c 100644
--- a/tests/modular_pipelines/test_modular_pipelines_common.py
+++ b/tests/modular_pipelines/test_modular_pipelines_common.py
@@ -9,7 +9,8 @@
 import diffusers
 from diffusers import ComponentsManager, ModularPipeline, ModularPipelineBlocks
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import (
+
+from ..testing_utils import (
     backend_empty_cache,
     numpy_cosine_similarity_distance,
     require_accelerator,
diff --git a/tests/others/test_config.py b/tests/others/test_config.py
index a8f93024f751..232bf9d473b8 100644
--- a/tests/others/test_config.py
+++ b/tests/others/test_config.py
@@ -28,7 +28,8 @@
     logging,
 )
 from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.utils.testing_utils import CaptureLogger
+
+from ..testing_utils import CaptureLogger
 
 
 class SampleObject(ConfigMixin):
diff --git a/tests/others/test_ema.py b/tests/others/test_ema.py
index 14808b9e58e1..436bbe1d53ff 100644
--- a/tests/others/test_ema.py
+++ b/tests/others/test_ema.py
@@ -20,7 +20,8 @@
 
 from diffusers import UNet2DConditionModel
 from diffusers.training_utils import EMAModel
-from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
+
+from ..testing_utils import enable_full_determinism, skip_mps, torch_device
 
 
 enable_full_determinism()
diff --git a/tests/others/test_outputs.py b/tests/others/test_outputs.py
index cf709d93f709..c8069e6916ed 100644
--- a/tests/others/test_outputs.py
+++ b/tests/others/test_outputs.py
@@
-7,7 +7,8 @@ import PIL.Image from diffusers.utils.outputs import BaseOutput -from diffusers.utils.testing_utils import require_torch + +from ..testing_utils import require_torch @dataclass diff --git a/tests/others/test_training.py b/tests/others/test_training.py index fb6420530159..2038a98a813e 100644 --- a/tests/others/test_training.py +++ b/tests/others/test_training.py @@ -19,7 +19,8 @@ from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel from diffusers.training_utils import set_seed -from diffusers.utils.testing_utils import slow + +from ..testing_utils import slow torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/others/test_utils.py b/tests/others/test_utils.py index 01b423f556f1..747b8d584058 100755 --- a/tests/others/test_utils.py +++ b/tests/others/test_utils.py @@ -20,7 +20,8 @@ from diffusers import __version__ from diffusers.utils import deprecate -from diffusers.utils.testing_utils import Expectations, str_to_bool + +from ..testing_utils import Expectations, str_to_bool # Used to test the hub diff --git a/tests/pipelines/allegro/test_allegro.py b/tests/pipelines/allegro/test_allegro.py index c33b48e7e90b..b2e588de0647 100644 --- a/tests/pipelines/allegro/test_allegro.py +++ b/tests/pipelines/allegro/test_allegro.py @@ -23,7 +23,8 @@ from transformers import AutoTokenizer, T5Config, T5EncoderModel from diffusers import AllegroPipeline, AllegroTransformer3DModel, AutoencoderKLAllegro, DDIMScheduler -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -33,7 +34,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py index 4088d46df5b2..8d4cd4cf2c1a 100644 --- a/tests/pipelines/animatediff/test_animatediff.py +++ b/tests/pipelines/animatediff/test_animatediff.py @@ -19,7 +19,8 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, numpy_cosine_similarity_distance, require_accelerator, @@ -27,7 +28,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( IPAdapterTesterMixin, diff --git a/tests/pipelines/animatediff/test_animatediff_controlnet.py b/tests/pipelines/animatediff/test_animatediff_controlnet.py index 7bde663b111e..4b0eb01d067c 100644 --- a/tests/pipelines/animatediff/test_animatediff_controlnet.py +++ b/tests/pipelines/animatediff/test_animatediff_controlnet.py @@ -21,8 +21,8 @@ from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import require_accelerator, torch_device +from ...testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( IPAdapterTesterMixin, diff --git a/tests/pipelines/animatediff/test_animatediff_sdxl.py b/tests/pipelines/animatediff/test_animatediff_sdxl.py index f9686ec005f7..b5dcd8779623 100644 --- 
a/tests/pipelines/animatediff/test_animatediff_sdxl.py +++ b/tests/pipelines/animatediff/test_animatediff_sdxl.py @@ -14,8 +14,8 @@ UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import require_accelerator, torch_device +from ...testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( IPAdapterTesterMixin, diff --git a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py index 3e33326c8a87..6b9f672cc4a1 100644 --- a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py +++ b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py @@ -20,8 +20,8 @@ ) from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import require_accelerator, torch_device +from ...testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( IPAdapterTesterMixin, diff --git a/tests/pipelines/animatediff/test_animatediff_video2video.py b/tests/pipelines/animatediff/test_animatediff_video2video.py index bc771e148eb2..1adb13dc4cc5 100644 --- a/tests/pipelines/animatediff/test_animatediff_video2video.py +++ b/tests/pipelines/animatediff/test_animatediff_video2video.py @@ -19,8 +19,8 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import require_accelerator, torch_device +from ...testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin diff --git a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py index 3babbbe4ba11..c71c8c8817dc 100644 --- a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py +++ b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py @@ -20,8 +20,8 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import require_accelerator, torch_device +from ...testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin diff --git a/tests/pipelines/audioldm2/test_audioldm2.py b/tests/pipelines/audioldm2/test_audioldm2.py index 12b96945674b..e4bc5cc11003 100644 --- a/tests/pipelines/audioldm2/test_audioldm2.py +++ b/tests/pipelines/audioldm2/test_audioldm2.py @@ -46,14 +46,14 @@ PNDMScheduler, ) from diffusers.utils import is_transformers_version -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, is_torch_version, nightly, torch_device, ) - from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/bria/test_pipeline_bria.py 
b/tests/pipelines/bria/test_pipeline_bria.py index b290160a65e8..844488e76f2e 100644 --- a/tests/pipelines/bria/test_pipeline_bria.py +++ b/tests/pipelines/bria/test_pipeline_bria.py @@ -27,7 +27,11 @@ FlowMatchEulerDiscreteScheduler, ) from diffusers.pipelines.bria import BriaPipeline -from diffusers.utils.testing_utils import ( + +# from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist +from tests.pipelines.test_pipelines_common import PipelineTesterMixin, to_np + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -36,9 +40,6 @@ torch_device, ) -# from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist -from tests.pipelines.test_pipelines_common import PipelineTesterMixin, to_np - enable_full_determinism() diff --git a/tests/pipelines/chroma/test_pipeline_chroma.py b/tests/pipelines/chroma/test_pipeline_chroma.py index 5121a2b52d75..3edd58b75f82 100644 --- a/tests/pipelines/chroma/test_pipeline_chroma.py +++ b/tests/pipelines/chroma/test_pipeline_chroma.py @@ -5,8 +5,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKL, ChromaPipeline, ChromaTransformer2DModel, FlowMatchEulerDiscreteScheduler -from diffusers.utils.testing_utils import torch_device +from ...testing_utils import torch_device from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin, check_qkv_fused_layers_exist diff --git a/tests/pipelines/chroma/test_pipeline_chroma_img2img.py b/tests/pipelines/chroma/test_pipeline_chroma_img2img.py index d518e1b7b8d1..4ed1393037b9 100644 --- a/tests/pipelines/chroma/test_pipeline_chroma_img2img.py +++ b/tests/pipelines/chroma/test_pipeline_chroma_img2img.py @@ -6,8 +6,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKL, ChromaImg2ImgPipeline, ChromaTransformer2DModel, FlowMatchEulerDiscreteScheduler -from diffusers.utils.testing_utils import floats_tensor, torch_device +from ...testing_utils import floats_tensor, torch_device from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin, check_qkv_fused_layers_exist diff --git a/tests/pipelines/cogvideo/test_cogvideox.py b/tests/pipelines/cogvideo/test_cogvideox.py index a6cb558513e7..dca1725d8a74 100644 --- a/tests/pipelines/cogvideo/test_cogvideox.py +++ b/tests/pipelines/cogvideo/test_cogvideox.py @@ -21,7 +21,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLCogVideoX, CogVideoXPipeline, CogVideoXTransformer3DModel, DDIMScheduler -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -29,7 +30,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( FasterCacheTesterMixin, diff --git a/tests/pipelines/cogvideo/test_cogvideox_fun_control.py b/tests/pipelines/cogvideo/test_cogvideox_fun_control.py index 685823dc06d9..097e8df7b35f 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_fun_control.py +++ b/tests/pipelines/cogvideo/test_cogvideox_fun_control.py @@ -21,11 +21,11 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLCogVideoX, CogVideoXFunControlPipeline, CogVideoXTransformer3DModel, DDIMScheduler -from diffusers.utils.testing_utils import ( + +from ...testing_utils 
import ( enable_full_determinism, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, diff --git a/tests/pipelines/cogvideo/test_cogvideox_image2video.py b/tests/pipelines/cogvideo/test_cogvideox_image2video.py index 90f767f9a748..1dd5e2ae1405 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_image2video.py +++ b/tests/pipelines/cogvideo/test_cogvideox_image2video.py @@ -23,7 +23,8 @@ from diffusers import AutoencoderKLCogVideoX, CogVideoXImageToVideoPipeline, CogVideoXTransformer3DModel, DDIMScheduler from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -31,7 +32,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, diff --git a/tests/pipelines/cogvideo/test_cogvideox_video2video.py b/tests/pipelines/cogvideo/test_cogvideox_video2video.py index ba48079e74c5..3a1da7c4e7f7 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_video2video.py +++ b/tests/pipelines/cogvideo/test_cogvideox_video2video.py @@ -21,8 +21,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel, CogVideoXVideoToVideoPipeline, DDIMScheduler -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, diff --git a/tests/pipelines/cogview3/test_cogview3plus.py b/tests/pipelines/cogview3/test_cogview3plus.py index d868beffba20..819d4b952fc7 100644 --- a/tests/pipelines/cogview3/test_cogview3plus.py +++ b/tests/pipelines/cogview3/test_cogview3plus.py @@ -21,7 +21,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKL, CogVideoXDDIMScheduler, CogView3PlusPipeline, CogView3PlusTransformer2DModel -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -29,7 +30,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, diff --git a/tests/pipelines/cogview4/test_cogview4.py b/tests/pipelines/cogview4/test_cogview4.py index 20d2afaea997..a1f0fc7a715b 100644 --- a/tests/pipelines/cogview4/test_cogview4.py +++ b/tests/pipelines/cogview4/test_cogview4.py @@ -20,8 +20,8 @@ from transformers import AutoTokenizer, GlmConfig, GlmForCausalLM from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np diff --git a/tests/pipelines/consisid/test_consisid.py b/tests/pipelines/consisid/test_consisid.py index 66bb7acf25a2..4fd9e536cddc 100644 --- 
a/tests/pipelines/consisid/test_consisid.py +++ b/tests/pipelines/consisid/test_consisid.py @@ -23,7 +23,8 @@ from diffusers import AutoencoderKLCogVideoX, ConsisIDPipeline, ConsisIDTransformer3DModel, DDIMScheduler from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -31,7 +32,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, diff --git a/tests/pipelines/consistency_models/test_consistency_models.py b/tests/pipelines/consistency_models/test_consistency_models.py index 7c7cecdfb014..0ab0c0af2588 100644 --- a/tests/pipelines/consistency_models/test_consistency_models.py +++ b/tests/pipelines/consistency_models/test_consistency_models.py @@ -10,7 +10,9 @@ ConsistencyModelPipeline, UNet2DModel, ) -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( Expectations, backend_empty_cache, enable_full_determinism, @@ -19,8 +21,6 @@ require_torch_accelerator, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/controlnet/test_controlnet.py b/tests/pipelines/controlnet/test_controlnet.py index bd558a50cf19..b142c2baf957 100644 --- a/tests/pipelines/controlnet/test_controlnet.py +++ b/tests/pipelines/controlnet/test_controlnet.py @@ -32,7 +32,9 @@ ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, @@ -44,8 +46,6 @@ slow, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, diff --git a/tests/pipelines/controlnet/test_controlnet_img2img.py b/tests/pipelines/controlnet/test_controlnet_img2img.py index dd7bb002a1b4..c5d438e93427 100644 --- a/tests/pipelines/controlnet/test_controlnet_img2img.py +++ b/tests/pipelines/controlnet/test_controlnet_img2img.py @@ -35,7 +35,9 @@ from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, @@ -44,8 +46,6 @@ slow, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, diff --git a/tests/pipelines/controlnet/test_controlnet_inpaint.py b/tests/pipelines/controlnet/test_controlnet_inpaint.py index c457c324c520..ebbe869e9e5e 100644 --- a/tests/pipelines/controlnet/test_controlnet_inpaint.py +++ b/tests/pipelines/controlnet/test_controlnet_inpaint.py @@ -35,7 +35,9 @@ from diffusers.pipelines.controlnet.pipeline_controlnet import 
MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, @@ -45,8 +47,6 @@ slow, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, diff --git a/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py b/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py index ee12ce17233d..c91f2c700c15 100644 --- a/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py +++ b/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py @@ -37,13 +37,13 @@ UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, floats_tensor, require_torch_accelerator, torch_device, ) - from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, diff --git a/tests/pipelines/controlnet/test_controlnet_sdxl.py b/tests/pipelines/controlnet/test_controlnet_sdxl.py index 47d0920b7458..42ec446dbfae 100644 --- a/tests/pipelines/controlnet/test_controlnet_sdxl.py +++ b/tests/pipelines/controlnet/test_controlnet_sdxl.py @@ -34,7 +34,9 @@ from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, load_image, @@ -42,8 +44,6 @@ slow, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, diff --git a/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py b/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py index 5a8dd10ad53b..bd4a233741e8 100644 --- a/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py +++ b/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py @@ -28,13 +28,13 @@ UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, floats_tensor, require_torch_accelerator, torch_device, ) - from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, diff --git a/tests/pipelines/controlnet/test_flax_controlnet.py b/tests/pipelines/controlnet/test_flax_controlnet.py index 07d3a09e5d27..e9cff4c9571e 100644 --- a/tests/pipelines/controlnet/test_flax_controlnet.py +++ b/tests/pipelines/controlnet/test_flax_controlnet.py @@ -18,7 +18,8 @@ from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image -from diffusers.utils.testing_utils import require_flax, slow + +from ...testing_utils import require_flax, slow if is_flax_available(): diff --git a/tests/pipelines/controlnet_flux/test_controlnet_flux.py b/tests/pipelines/controlnet_flux/test_controlnet_flux.py index 5b336edc7a88..0895d9de3581 100644 --- a/tests/pipelines/controlnet_flux/test_controlnet_flux.py +++ 
b/tests/pipelines/controlnet_flux/test_controlnet_flux.py @@ -29,7 +29,9 @@ ) from diffusers.models import FluxControlNetModel from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, nightly, @@ -37,8 +39,6 @@ require_big_accelerator, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin diff --git a/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py b/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py index ab4cf3273489..3d8378a5786d 100644 --- a/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py +++ b/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py @@ -11,11 +11,11 @@ FluxControlNetModel, FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import ( - torch_device, -) from diffusers.utils.torch_utils import randn_tensor +from ...testing_utils import ( + torch_device, +) from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist diff --git a/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py b/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py index 94d97e9962b7..3ba475deb8a8 100644 --- a/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py +++ b/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py @@ -20,13 +20,13 @@ FluxControlNetModel, FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py b/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py index d6e7af34bdcc..961984377901 100644 --- a/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py +++ b/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py @@ -28,15 +28,15 @@ ) from diffusers.models import HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py b/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py index fcf8cade6731..34c34b7a2ce7 100644 --- a/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py +++ b/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py @@ -26,12 +26,12 @@ StableDiffusion3ControlNetInpaintingPipeline, ) from diffusers.models import SD3ControlNetModel -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( enable_full_determinism, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py 
b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py index 1f1f800bcf23..2b6cf8d1e8be 100644 --- a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py +++ b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py @@ -29,7 +29,9 @@ ) from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -37,8 +39,6 @@ slow, torch_device, ) -from diffusers.utils.torch_utils import randn_tensor - from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/cosmos/test_cosmos.py b/tests/pipelines/cosmos/test_cosmos.py index 4d3202f78508..32eea9c98c2c 100644 --- a/tests/pipelines/cosmos/test_cosmos.py +++ b/tests/pipelines/cosmos/test_cosmos.py @@ -23,8 +23,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLCosmos, CosmosTextToWorldPipeline, CosmosTransformer3DModel, EDMEulerScheduler -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np from .cosmos_guardrail import DummyCosmosSafetyChecker diff --git a/tests/pipelines/cosmos/test_cosmos2_text2image.py b/tests/pipelines/cosmos/test_cosmos2_text2image.py index cc2fcec64175..8e3c5e4c29f4 100644 --- a/tests/pipelines/cosmos/test_cosmos2_text2image.py +++ b/tests/pipelines/cosmos/test_cosmos2_text2image.py @@ -28,8 +28,8 @@ CosmosTransformer3DModel, FlowMatchEulerDiscreteScheduler, ) -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np from .cosmos_guardrail import DummyCosmosSafetyChecker diff --git a/tests/pipelines/cosmos/test_cosmos2_video2world.py b/tests/pipelines/cosmos/test_cosmos2_video2world.py index b23c8aed1734..b0ca0e160d98 100644 --- a/tests/pipelines/cosmos/test_cosmos2_video2world.py +++ b/tests/pipelines/cosmos/test_cosmos2_video2world.py @@ -29,8 +29,8 @@ CosmosTransformer3DModel, FlowMatchEulerDiscreteScheduler, ) -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np from .cosmos_guardrail import DummyCosmosSafetyChecker diff --git a/tests/pipelines/cosmos/test_cosmos_video2world.py b/tests/pipelines/cosmos/test_cosmos_video2world.py index d0dba5575bb7..2633c2007ac2 100644 --- a/tests/pipelines/cosmos/test_cosmos_video2world.py +++ b/tests/pipelines/cosmos/test_cosmos_video2world.py @@ -24,8 +24,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLCosmos, CosmosTransformer3DModel, CosmosVideoToWorldPipeline, EDMEulerScheduler -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from 
..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np from .cosmos_guardrail import DummyCosmosSafetyChecker diff --git a/tests/pipelines/ddim/test_ddim.py b/tests/pipelines/ddim/test_ddim.py index 57b97b4649b8..731635bea605 100644 --- a/tests/pipelines/ddim/test_ddim.py +++ b/tests/pipelines/ddim/test_ddim.py @@ -19,8 +19,8 @@ import torch from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device +from ...testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/ddpm/test_ddpm.py b/tests/pipelines/ddpm/test_ddpm.py index 97bb53128d34..04ee741d8eb8 100644 --- a/tests/pipelines/ddpm/test_ddpm.py +++ b/tests/pipelines/ddpm/test_ddpm.py @@ -19,7 +19,8 @@ import torch from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device + +from ...testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device enable_full_determinism() diff --git a/tests/pipelines/deepfloyd_if/__init__.py b/tests/pipelines/deepfloyd_if/__init__.py index 094254a61875..d47374b07e22 100644 --- a/tests/pipelines/deepfloyd_if/__init__.py +++ b/tests/pipelines/deepfloyd_if/__init__.py @@ -7,8 +7,8 @@ from diffusers import DDPMScheduler, UNet2DConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker -from diffusers.utils.testing_utils import torch_device +from ...testing_utils import torch_device from ..test_pipelines_common import to_np diff --git a/tests/pipelines/deepfloyd_if/test_if.py b/tests/pipelines/deepfloyd_if/test_if.py index 633d802ab92b..e1870ddcbae9 100644 --- a/tests/pipelines/deepfloyd_if/test_if.py +++ b/tests/pipelines/deepfloyd_if/test_if.py @@ -23,7 +23,8 @@ ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, @@ -37,7 +38,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . 
import IFPipelineTesterMixin diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img.py b/tests/pipelines/deepfloyd_if/test_if_img2img.py index 739d2a0e16d1..9d3c96052be6 100644 --- a/tests/pipelines/deepfloyd_if/test_if_img2img.py +++ b/tests/pipelines/deepfloyd_if/test_if_img2img.py @@ -22,7 +22,8 @@ from diffusers import IFImg2ImgPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, @@ -37,7 +38,6 @@ slow, torch_device, ) - from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py index fb89aab8e288..e2114910edb0 100644 --- a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py @@ -22,7 +22,8 @@ from diffusers import IFImg2ImgSuperResolutionPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, @@ -37,7 +38,6 @@ slow, torch_device, ) - from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting.py b/tests/pipelines/deepfloyd_if/test_if_inpainting.py index 127ae19aa6db..2679e0b77690 100644 --- a/tests/pipelines/deepfloyd_if/test_if_inpainting.py +++ b/tests/pipelines/deepfloyd_if/test_if_inpainting.py @@ -22,7 +22,8 @@ from diffusers import IFInpaintingPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, @@ -37,7 +38,6 @@ slow, torch_device, ) - from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py index 8b5210194ae8..3d64556c6e41 100644 --- a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py @@ -22,7 +22,8 @@ from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, @@ -37,7 +38,6 @@ slow, torch_device, ) - from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, diff --git a/tests/pipelines/deepfloyd_if/test_if_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_superresolution.py index c16b3d6a563b..fa7c0fb2e062 100644 --- a/tests/pipelines/deepfloyd_if/test_if_superresolution.py +++ 
b/tests/pipelines/deepfloyd_if/test_if_superresolution.py @@ -22,7 +22,8 @@ from diffusers import IFSuperResolutionPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, @@ -37,7 +38,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin diff --git a/tests/pipelines/dit/test_dit.py b/tests/pipelines/dit/test_dit.py index 46e28a28e1d3..cd5c08ced3fc 100644 --- a/tests/pipelines/dit/test_dit.py +++ b/tests/pipelines/dit/test_dit.py @@ -21,7 +21,8 @@ from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DiTTransformer2DModel, DPMSolverMultistepScheduler from diffusers.utils import is_xformers_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, load_numpy, @@ -30,7 +31,6 @@ require_torch_accelerator, torch_device, ) - from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, diff --git a/tests/pipelines/easyanimate/test_easyanimate.py b/tests/pipelines/easyanimate/test_easyanimate.py index 161734a166f4..2dbb8639f174 100644 --- a/tests/pipelines/easyanimate/test_easyanimate.py +++ b/tests/pipelines/easyanimate/test_easyanimate.py @@ -26,7 +26,8 @@ EasyAnimateTransformer3DModel, FlowMatchEulerDiscreteScheduler, ) -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -34,7 +35,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np diff --git a/tests/pipelines/flux/test_pipeline_flux.py b/tests/pipelines/flux/test_pipeline_flux.py index cc8266e1a54c..c3e8517d6407 100644 --- a/tests/pipelines/flux/test_pipeline_flux.py +++ b/tests/pipelines/flux/test_pipeline_flux.py @@ -13,7 +13,8 @@ FluxPipeline, FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, nightly, numpy_cosine_similarity_distance, @@ -21,7 +22,6 @@ slow, torch_device, ) - from ..test_pipelines_common import ( FasterCacheTesterMixin, FirstBlockCacheTesterMixin, diff --git a/tests/pipelines/flux/test_pipeline_flux_control.py b/tests/pipelines/flux/test_pipeline_flux_control.py index 42283da6fd03..7e966470a336 100644 --- a/tests/pipelines/flux/test_pipeline_flux_control.py +++ b/tests/pipelines/flux/test_pipeline_flux_control.py @@ -6,8 +6,8 @@ from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxTransformer2DModel -from diffusers.utils.testing_utils import torch_device +from ...testing_utils import torch_device from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist diff --git a/tests/pipelines/flux/test_pipeline_flux_control_img2img.py b/tests/pipelines/flux/test_pipeline_flux_control_img2img.py index 966543f63aeb..e56136f2e91b 100644 --- 
a/tests/pipelines/flux/test_pipeline_flux_control_img2img.py +++ b/tests/pipelines/flux/test_pipeline_flux_control_img2img.py @@ -11,8 +11,8 @@ FluxControlImg2ImgPipeline, FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py b/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py index 0abd08e37300..e42c5fc2aab5 100644 --- a/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py +++ b/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py @@ -11,10 +11,10 @@ FluxControlInpaintPipeline, FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( torch_device, ) - from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist diff --git a/tests/pipelines/flux/test_pipeline_flux_fill.py b/tests/pipelines/flux/test_pipeline_flux_fill.py index 04d4c68db8f3..25a4a3354820 100644 --- a/tests/pipelines/flux/test_pipeline_flux_fill.py +++ b/tests/pipelines/flux/test_pipeline_flux_fill.py @@ -6,12 +6,12 @@ from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxFillPipeline, FluxTransformer2DModel -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) - from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/flux/test_pipeline_flux_img2img.py b/tests/pipelines/flux/test_pipeline_flux_img2img.py index 6d33ca721b6c..6f435760aef5 100644 --- a/tests/pipelines/flux/test_pipeline_flux_img2img.py +++ b/tests/pipelines/flux/test_pipeline_flux_img2img.py @@ -6,12 +6,12 @@ from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxImg2ImgPipeline, FluxTransformer2DModel -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) - from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin diff --git a/tests/pipelines/flux/test_pipeline_flux_inpaint.py b/tests/pipelines/flux/test_pipeline_flux_inpaint.py index 161348455ca4..6324ff236e10 100644 --- a/tests/pipelines/flux/test_pipeline_flux_inpaint.py +++ b/tests/pipelines/flux/test_pipeline_flux_inpaint.py @@ -6,12 +6,12 @@ from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxInpaintPipeline, FluxTransformer2DModel -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) - from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin diff --git a/tests/pipelines/flux/test_pipeline_flux_kontext.py b/tests/pipelines/flux/test_pipeline_flux_kontext.py index 7471d78ad58c..5c78964ea54f 100644 --- a/tests/pipelines/flux/test_pipeline_flux_kontext.py +++ b/tests/pipelines/flux/test_pipeline_flux_kontext.py @@ -12,8 +12,8 @@ FluxKontextPipeline, FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import torch_device +from ...testing_utils import torch_device from ..test_pipelines_common 
import ( FasterCacheTesterMixin, FluxIPAdapterTesterMixin, diff --git a/tests/pipelines/flux/test_pipeline_flux_kontext_inpaint.py b/tests/pipelines/flux/test_pipeline_flux_kontext_inpaint.py index 615209264d15..9a2e32056dcb 100644 --- a/tests/pipelines/flux/test_pipeline_flux_kontext_inpaint.py +++ b/tests/pipelines/flux/test_pipeline_flux_kontext_inpaint.py @@ -12,8 +12,8 @@ FluxKontextInpaintPipeline, FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import floats_tensor, torch_device +from ...testing_utils import floats_tensor, torch_device from ..test_pipelines_common import ( FasterCacheTesterMixin, FluxIPAdapterTesterMixin, diff --git a/tests/pipelines/flux/test_pipeline_flux_redux.py b/tests/pipelines/flux/test_pipeline_flux_redux.py index b73050a64df9..bbeee28e6a62 100644 --- a/tests/pipelines/flux/test_pipeline_flux_redux.py +++ b/tests/pipelines/flux/test_pipeline_flux_redux.py @@ -6,7 +6,8 @@ from diffusers import FluxPipeline, FluxPriorReduxPipeline from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( Expectations, backend_empty_cache, numpy_cosine_similarity_distance, diff --git a/tests/pipelines/hidream_image/test_pipeline_hidream.py b/tests/pipelines/hidream_image/test_pipeline_hidream.py index 1c5f30e8704f..ec8d36e1d355 100644 --- a/tests/pipelines/hidream_image/test_pipeline_hidream.py +++ b/tests/pipelines/hidream_image/test_pipeline_hidream.py @@ -32,8 +32,8 @@ HiDreamImagePipeline, HiDreamImageTransformer2DModel, ) -from diffusers.utils.testing_utils import enable_full_determinism +from ...testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/hunyuan_video/test_hunyuan_image2video.py b/tests/pipelines/hunyuan_video/test_hunyuan_image2video.py index 82281f28bc84..27b5bde31050 100644 --- a/tests/pipelines/hunyuan_video/test_hunyuan_image2video.py +++ b/tests/pipelines/hunyuan_video/test_hunyuan_image2video.py @@ -36,8 +36,8 @@ HunyuanVideoImageToVideoPipeline, HunyuanVideoTransformer3DModel, ) -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np diff --git a/tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py b/tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py index fad159c06b0f..7ebe797febfa 100644 --- a/tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py +++ b/tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py @@ -26,8 +26,8 @@ HunyuanSkyreelsImageToVideoPipeline, HunyuanVideoTransformer3DModel, ) -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np diff --git a/tests/pipelines/hunyuan_video/test_hunyuan_video.py b/tests/pipelines/hunyuan_video/test_hunyuan_video.py index 26ec861522a9..4bdf3ee20e1b 100644 --- a/tests/pipelines/hunyuan_video/test_hunyuan_video.py +++ b/tests/pipelines/hunyuan_video/test_hunyuan_video.py @@ -26,8 +26,8 @@ HunyuanVideoPipeline, HunyuanVideoTransformer3DModel, ) -from diffusers.utils.testing_utils import enable_full_determinism, 
torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..test_pipelines_common import ( FasterCacheTesterMixin, FirstBlockCacheTesterMixin, diff --git a/tests/pipelines/hunyuan_video/test_hunyuan_video_framepack.py b/tests/pipelines/hunyuan_video/test_hunyuan_video_framepack.py index 297c3df45a10..51c258b15c38 100644 --- a/tests/pipelines/hunyuan_video/test_hunyuan_video_framepack.py +++ b/tests/pipelines/hunyuan_video/test_hunyuan_video_framepack.py @@ -36,11 +36,11 @@ HunyuanVideoFramepackPipeline, HunyuanVideoFramepackTransformer3DModel, ) -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, torch_device, ) - from ..test_pipelines_common import ( FasterCacheTesterMixin, PipelineTesterMixin, diff --git a/tests/pipelines/hunyuandit/test_hunyuan_dit.py b/tests/pipelines/hunyuandit/test_hunyuan_dit.py index 7a5f807213ca..2a329f10bc80 100644 --- a/tests/pipelines/hunyuandit/test_hunyuan_dit.py +++ b/tests/pipelines/hunyuandit/test_hunyuan_dit.py @@ -22,7 +22,8 @@ from transformers import AutoTokenizer, BertModel, T5EncoderModel from diffusers import AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPipeline -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -30,7 +31,6 @@ slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, diff --git a/tests/pipelines/ip_adapters/__init__.py b/tests/pipelines/ip_adapters/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py b/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py index f5980f218a70..32590111cdf3 100644 --- a/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py +++ b/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py @@ -33,7 +33,8 @@ ) from diffusers.image_processor import IPAdapterMaskProcessor from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( Expectations, backend_empty_cache, enable_full_determinism, diff --git a/tests/pipelines/kandinsky/test_kandinsky.py b/tests/pipelines/kandinsky/test_kandinsky.py index 65a5195a8b64..911c6e49aeba 100644 --- a/tests/pipelines/kandinsky/test_kandinsky.py +++ b/tests/pipelines/kandinsky/test_kandinsky.py @@ -23,7 +23,8 @@ from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, @@ -32,7 +33,6 @@ slow, torch_device, ) - from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py index 6dd889595293..d744d1082135 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -18,8 +18,8 @@ import numpy as np from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline -from diffusers.utils.testing_utils import enable_full_determinism, 
require_torch_accelerator, torch_device
+from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
 from ..test_pipelines_common import PipelineTesterMixin
 from .test_kandinsky import Dummies
 from .test_kandinsky_img2img import Dummies as Img2ImgDummies
diff --git a/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/tests/pipelines/kandinsky/test_kandinsky_img2img.py
index 5a0107838a76..4074c8db22a0 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_img2img.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_img2img.py
@@ -31,7 +31,8 @@
     VQModel,
 )
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -42,7 +43,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
index 03b555b2f036..b789a63cdd03 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
@@ -24,7 +24,8 @@
 from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -34,7 +35,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
diff --git a/tests/pipelines/kandinsky/test_kandinsky_prior.py b/tests/pipelines/kandinsky/test_kandinsky_prior.py
index 8ecf2d855fde..903a1e5decfa 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_prior.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_prior.py
@@ -28,8 +28,8 @@
 )
 from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler
-from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
+from ...testing_utils import enable_full_determinism, skip_mps, torch_device
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky.py b/tests/pipelines/kandinsky2_2/test_kandinsky.py
index 0ad5620eee9c..38294aa4c111 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky.py
@@ -21,7 +21,8 @@
 import torch

 from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -31,7 +32,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py b/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py
index 1e064d3368c6..476fc584cc56 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py
@@ -22,8 +22,8 @@
     KandinskyV22Img2ImgCombinedPipeline,
     KandinskyV22InpaintCombinedPipeline,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
+from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
 from ..test_pipelines_common import PipelineTesterMixin
 from .test_kandinsky import Dummies
 from .test_kandinsky_img2img import Dummies as Img2ImgDummies
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
index b2d6f0fc0590..4054e38c5691 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
@@ -27,7 +27,8 @@
     UNet2DConditionModel,
     VQModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -38,7 +39,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
index 4f50f51819d0..a4346605929b 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
@@ -28,7 +28,8 @@
     UNet2DConditionModel,
     VQModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -39,7 +40,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py
index bc1477b97e2a..99f3fe0f40f1 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py
@@ -28,7 +28,8 @@
     UNet2DConditionModel,
     VQModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -39,7 +40,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py
index 8b3d8f74ec94..d4eb650263af 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py
@@ -28,7 +28,8 @@
     UNet2DConditionModel,
     VQModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -40,7 +41,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py b/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py
index f5c2d6037bec..adcc6cc2167c 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py
@@ -29,8 +29,8 @@
 )
 from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
-from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
+from ...testing_utils import enable_full_determinism, skip_mps, torch_device
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py b/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py
index 54a9cf6d6002..5377d917791a 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py
@@ -30,13 +30,13 @@
 )
 from diffusers import KandinskyV22PriorEmb2EmbPipeline, PriorTransformer, UnCLIPScheduler
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     skip_mps,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/kandinsky3/test_kandinsky3.py b/tests/pipelines/kandinsky3/test_kandinsky3.py
index 1acf076b3d67..55500f729bbb 100644
--- a/tests/pipelines/kandinsky3/test_kandinsky3.py
+++ b/tests/pipelines/kandinsky3/test_kandinsky3.py
@@ -30,7 +30,8 @@
 )
 from diffusers.image_processor import VaeImageProcessor
 from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     load_image,
@@ -38,7 +39,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py b/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py
index edad5b7d378c..503fdb242dff 100644
--- a/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py
+++ b/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py
@@ -30,7 +30,8 @@
 )
 from diffusers.image_processor import VaeImageProcessor
 from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -39,7 +40,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/kolors/test_kolors.py b/tests/pipelines/kolors/test_kolors.py
index 839d06ab93bb..f1d4982d4d74 100644
--- a/tests/pipelines/kolors/test_kolors.py
+++ b/tests/pipelines/kolors/test_kolors.py
@@ -25,8 +25,8 @@
     UNet2DConditionModel,
 )
 from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
-from diffusers.utils.testing_utils import enable_full_determinism
+from ...testing_utils import enable_full_determinism
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/kolors/test_kolors_img2img.py b/tests/pipelines/kolors/test_kolors_img2img.py
index c8429322cae6..5a5d31a46456 100644
--- a/tests/pipelines/kolors/test_kolors_img2img.py
+++ b/tests/pipelines/kolors/test_kolors_img2img.py
@@ -26,11 +26,11 @@
     UNet2DConditionModel,
 )
 from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py b/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py
index 570fa8fadf39..c7666244b35f 100644
--- a/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py
+++ b/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py
@@ -12,14 +12,14 @@
     LCMScheduler,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     require_torch_accelerator,
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
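Every hunk above and below repeats one mechanical change: the helper names stay the same, but they are now imported relative to the tests package (three dots from a pipeline-specific module such as tests/pipelines/kandinsky/..., two dots from modules directly under tests/pipelines/). As a minimal, self-contained sketch of why the three-dot form resolves to tests/testing_utils.py — the toy torch_device value and the demo.py module below are invented for illustration, and the sketch only assumes that tests and its subdirectories are importable packages with __init__.py files:

    import pathlib
    import subprocess
    import sys
    import tempfile

    # Build a hypothetical miniature of the tests/ layout in a temp directory.
    root = pathlib.Path(tempfile.mkdtemp())
    pkg = root / "tests" / "pipelines" / "kandinsky"
    pkg.mkdir(parents=True)
    (root / "tests" / "__init__.py").write_text("")
    (root / "tests" / "testing_utils.py").write_text("torch_device = 'cpu'\n")
    (root / "tests" / "pipelines" / "__init__.py").write_text("")
    (pkg / "__init__.py").write_text("")
    # '.' is the module's own package (tests.pipelines.kandinsky); each extra
    # dot climbs one level, so '...' names the tests package itself.
    (pkg / "demo.py").write_text("from ...testing_utils import torch_device\nprint(torch_device)\n")

    # Run the module as part of the package so the relative import can resolve.
    subprocess.run([sys.executable, "-m", "tests.pipelines.kandinsky.demo"], cwd=root, check=True)
    # prints: cpu

The same resolution applies to every test module touched in this patch; only the number of dots varies with the module's depth under tests/.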
diff --git a/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py b/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py
index 88e31a97aac5..d8e7745b7805 100644
--- a/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py
+++ b/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py
@@ -13,7 +13,8 @@
     LCMScheduler,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -22,7 +23,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
index ec52f5aebff8..21c5bcf5a5b9 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
@@ -21,7 +21,8 @@
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

 from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     load_numpy,
@@ -29,7 +30,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
index 2884dd35087d..b2cbdb9f5b45 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
@@ -21,7 +21,8 @@
 from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
 from diffusers.utils import PIL_INTERPOLATION
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     load_image,
diff --git a/tests/pipelines/latte/test_latte.py b/tests/pipelines/latte/test_latte.py
index 97b7edeb6c90..a40d4bf8eede 100644
--- a/tests/pipelines/latte/test_latte.py
+++ b/tests/pipelines/latte/test_latte.py
@@ -31,7 +31,8 @@
     PyramidAttentionBroadcastConfig,
 )
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     numpy_cosine_similarity_distance,
@@ -39,7 +40,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     FasterCacheTesterMixin,
diff --git a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
index ab0221dc815e..6db20a464f19 100644
--- a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
+++ b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
@@ -28,7 +28,8 @@
     LEditsPPPipelineStableDiffusion,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     Expectations,
     backend_empty_cache,
     enable_full_determinism,
diff --git a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py
index 75795a33422b..06c1ceb0cf5a 100644
--- a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py
+++ b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py
@@ -37,7 +37,7 @@
 )

 # from diffusers.image_processor import VaeImageProcessor
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     load_image,
diff --git a/tests/pipelines/ltx/test_ltx.py b/tests/pipelines/ltx/test_ltx.py
index bf0c7fde591f..aaf4161b51fb 100644
--- a/tests/pipelines/ltx/test_ltx.py
+++ b/tests/pipelines/ltx/test_ltx.py
@@ -20,8 +20,8 @@
 from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXPipeline, LTXVideoTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import FirstBlockCacheTesterMixin, PipelineTesterMixin, to_np
diff --git a/tests/pipelines/ltx/test_ltx_condition.py b/tests/pipelines/ltx/test_ltx_condition.py
index a586fadaa702..f5dfb0186209 100644
--- a/tests/pipelines/ltx/test_ltx_condition.py
+++ b/tests/pipelines/ltx/test_ltx_condition.py
@@ -26,8 +26,8 @@
     LTXVideoTransformer3DModel,
 )
 from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/ltx/test_ltx_image2video.py b/tests/pipelines/ltx/test_ltx_image2video.py
index f43f66df5395..2702993d4a59 100644
--- a/tests/pipelines/ltx/test_ltx_image2video.py
+++ b/tests/pipelines/ltx/test_ltx_image2video.py
@@ -25,8 +25,8 @@
     LTXImageToVideoPipeline,
     LTXVideoTransformer3DModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/ltx/test_ltx_latent_upsample.py b/tests/pipelines/ltx/test_ltx_latent_upsample.py
index f9ddb121869d..0044a85c644b 100644
--- a/tests/pipelines/ltx/test_ltx_latent_upsample.py
+++ b/tests/pipelines/ltx/test_ltx_latent_upsample.py
@@ -19,8 +19,8 @@
 from diffusers import AutoencoderKLLTXVideo, LTXLatentUpsamplePipeline
 from diffusers.pipelines.ltx.modeling_latent_upsampler import LTXLatentUpsamplerModel
-from diffusers.utils.testing_utils import enable_full_determinism
+from ...testing_utils import enable_full_determinism
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/lumina/test_lumina_nextdit.py b/tests/pipelines/lumina/test_lumina_nextdit.py
index c270a8384181..d2c114825d34 100644
--- a/tests/pipelines/lumina/test_lumina_nextdit.py
+++ b/tests/pipelines/lumina/test_lumina_nextdit.py
@@ -11,14 +11,14 @@
     LuminaNextDiT2DModel,
     LuminaPipeline,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     numpy_cosine_similarity_distance,
     require_torch_accelerator,
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/marigold/test_marigold_depth.py b/tests/pipelines/marigold/test_marigold_depth.py
index 13f9a421861b..3e8ccbf5c07e 100644
--- a/tests/pipelines/marigold/test_marigold_depth.py
+++ b/tests/pipelines/marigold/test_marigold_depth.py
@@ -31,7 +31,8 @@
     MarigoldDepthPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -41,7 +42,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/marigold/test_marigold_intrinsics.py b/tests/pipelines/marigold/test_marigold_intrinsics.py
index f00650634aee..3f7ab9bf6e17 100644
--- a/tests/pipelines/marigold/test_marigold_intrinsics.py
+++ b/tests/pipelines/marigold/test_marigold_intrinsics.py
@@ -32,7 +32,8 @@
     MarigoldIntrinsicsPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -41,7 +42,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/marigold/test_marigold_normals.py b/tests/pipelines/marigold/test_marigold_normals.py
index 1797f99b213b..108163bf22ec 100644
--- a/tests/pipelines/marigold/test_marigold_normals.py
+++ b/tests/pipelines/marigold/test_marigold_normals.py
@@ -31,7 +31,8 @@
     MarigoldNormalsPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -40,7 +41,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/mochi/test_mochi.py b/tests/pipelines/mochi/test_mochi.py
index f1684cce72e1..5615720a9343 100644
--- a/tests/pipelines/mochi/test_mochi.py
+++ b/tests/pipelines/mochi/test_mochi.py
@@ -21,7 +21,8 @@
 from transformers import AutoTokenizer, T5EncoderModel

 from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     nightly,
@@ -30,7 +31,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import FasterCacheTesterMixin, FirstBlockCacheTesterMixin, PipelineTesterMixin, to_np
diff --git a/tests/pipelines/omnigen/test_pipeline_omnigen.py b/tests/pipelines/omnigen/test_pipeline_omnigen.py
index e8f84eb9131c..28648aa76f00 100644
--- a/tests/pipelines/omnigen/test_pipeline_omnigen.py
+++ b/tests/pipelines/omnigen/test_pipeline_omnigen.py
@@ -6,7 +6,8 @@
 from transformers import AutoTokenizer

 from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, OmniGenPipeline, OmniGenTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     Expectations,
     backend_empty_cache,
     numpy_cosine_similarity_distance,
@@ -14,7 +15,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/pag/test_pag_animatediff.py b/tests/pipelines/pag/test_pag_animatediff.py
index b9ce29c70bdf..b1cbd82d7679 100644
--- a/tests/pipelines/pag/test_pag_animatediff.py
+++ b/tests/pipelines/pag/test_pag_animatediff.py
@@ -19,8 +19,8 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import require_accelerator, torch_device
+from ...testing_utils import require_accelerator, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     IPAdapterTesterMixin,
diff --git a/tests/pipelines/pag/test_pag_controlnet_sd.py b/tests/pipelines/pag/test_pag_controlnet_sd.py
index 378f0a130c03..36d5ae100a58 100644
--- a/tests/pipelines/pag/test_pag_controlnet_sd.py
+++ b/tests/pipelines/pag/test_pag_controlnet_sd.py
@@ -28,9 +28,9 @@
     StableDiffusionControlNetPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py b/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py
index 5eff71ed640b..948381f9769e 100644
--- a/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py
+++ b/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py
@@ -32,9 +32,9 @@
     StableDiffusionControlNetPAGInpaintPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
 from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_controlnet_sdxl.py b/tests/pipelines/pag/test_pag_controlnet_sdxl.py
index 4d7e4f072e77..51b00f6932bc 100644
--- a/tests/pipelines/pag/test_pag_controlnet_sdxl.py
+++ b/tests/pipelines/pag/test_pag_controlnet_sdxl.py
@@ -28,9 +28,9 @@
     StableDiffusionXLControlNetPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism
 from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import enable_full_determinism
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py b/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py
index dec029a499ef..3c1088adbcf2 100644
--- a/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py
+++ b/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py
@@ -29,8 +29,8 @@
     StableDiffusionXLControlNetPAGImg2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor
+from ...testing_utils import enable_full_determinism, floats_tensor
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_hunyuan_dit.py b/tests/pipelines/pag/test_pag_hunyuan_dit.py
index 65f39f585d70..f268a614f85c 100644
--- a/tests/pipelines/pag/test_pag_hunyuan_dit.py
+++ b/tests/pipelines/pag/test_pag_hunyuan_dit.py
@@ -28,8 +28,8 @@
     HunyuanDiTPAGPipeline,
     HunyuanDiTPipeline,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/pag/test_pag_kolors.py b/tests/pipelines/pag/test_pag_kolors.py
index b504b77801f4..1bbb4e79e4bc 100644
--- a/tests/pipelines/pag/test_pag_kolors.py
+++ b/tests/pipelines/pag/test_pag_kolors.py
@@ -27,8 +27,8 @@
     UNet2DConditionModel,
 )
 from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
-from diffusers.utils.testing_utils import enable_full_determinism
+from ...testing_utils import enable_full_determinism
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_pixart_sigma.py b/tests/pipelines/pag/test_pag_pixart_sigma.py
index eb9399c9b3da..c04ebad08fdc 100644
--- a/tests/pipelines/pag/test_pag_pixart_sigma.py
+++ b/tests/pipelines/pag/test_pag_pixart_sigma.py
@@ -30,12 +30,12 @@
     PixArtTransformer2DModel,
 )
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     CaptureLogger,
     enable_full_determinism,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_IMAGE_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_sana.py b/tests/pipelines/pag/test_pag_sana.py
index 31b384f3ebbf..5408595c729d 100644
--- a/tests/pipelines/pag/test_pag_sana.py
+++ b/tests/pipelines/pag/test_pag_sana.py
@@ -26,8 +26,8 @@
     SanaPipeline,
     SanaTransformer2DModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/pag/test_pag_sd.py b/tests/pipelines/pag/test_pag_sd.py
index ee9a74ed0384..064815d13693 100644
--- a/tests/pipelines/pag/test_pag_sd.py
+++ b/tests/pipelines/pag/test_pag_sd.py
@@ -29,14 +29,14 @@
     StableDiffusionPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     require_torch_accelerator,
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_sd3.py b/tests/pipelines/pag/test_pag_sd3.py
index 737e238e5fbf..26e6ca099286 100644
--- a/tests/pipelines/pag/test_pag_sd3.py
+++ b/tests/pipelines/pag/test_pag_sd3.py
@@ -12,10 +12,10 @@
     StableDiffusion3PAGPipeline,
     StableDiffusion3Pipeline,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     torch_device,
 )
-
 from ..test_pipelines_common import (
     PipelineTesterMixin,
     check_qkv_fusion_matches_attn_procs_length,
diff --git a/tests/pipelines/pag/test_pag_sd3_img2img.py b/tests/pipelines/pag/test_pag_sd3_img2img.py
index fe593d47dc75..19a36e283de4 100644
--- a/tests/pipelines/pag/test_pag_sd3_img2img.py
+++ b/tests/pipelines/pag/test_pag_sd3_img2img.py
@@ -15,7 +15,8 @@
     StableDiffusion3Img2ImgPipeline,
     StableDiffusion3PAGImg2ImgPipeline,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -24,7 +25,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_sd_img2img.py b/tests/pipelines/pag/test_pag_sd_img2img.py
index 668e79846377..0b440d5ec9fc 100644
--- a/tests/pipelines/pag/test_pag_sd_img2img.py
+++ b/tests/pipelines/pag/test_pag_sd_img2img.py
@@ -31,7 +31,8 @@
     StableDiffusionPAGImg2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -40,7 +41,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_sd_inpaint.py b/tests/pipelines/pag/test_pag_sd_inpaint.py
index f85604142226..709df6837055 100644
--- a/tests/pipelines/pag/test_pag_sd_inpaint.py
+++ b/tests/pipelines/pag/test_pag_sd_inpaint.py
@@ -29,7 +29,8 @@
     StableDiffusionPAGInpaintPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -38,7 +39,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_sdxl.py b/tests/pipelines/pag/test_pag_sdxl.py
index 5c1608d210d9..cca5c61651b3 100644
--- a/tests/pipelines/pag/test_pag_sdxl.py
+++ b/tests/pipelines/pag/test_pag_sdxl.py
@@ -29,14 +29,14 @@
     StableDiffusionXLPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     require_torch_accelerator,
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_sdxl_img2img.py b/tests/pipelines/pag/test_pag_sdxl_img2img.py
index 2e18fdcebb83..d311500d3ca7 100644
--- a/tests/pipelines/pag/test_pag_sdxl_img2img.py
+++ b/tests/pipelines/pag/test_pag_sdxl_img2img.py
@@ -38,7 +38,8 @@
     StableDiffusionXLPAGImg2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -47,7 +48,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/pag/test_pag_sdxl_inpaint.py b/tests/pipelines/pag/test_pag_sdxl_inpaint.py
index e36716b60302..00a07582e205 100644
--- a/tests/pipelines/pag/test_pag_sdxl_inpaint.py
+++ b/tests/pipelines/pag/test_pag_sdxl_inpaint.py
@@ -39,7 +39,8 @@
     StableDiffusionXLPAGInpaintPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -48,7 +49,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
diff --git a/tests/pipelines/pixart_alpha/test_pixart.py b/tests/pipelines/pixart_alpha/test_pixart.py
index 933a005c4a80..fd41c9887dcc 100644
--- a/tests/pipelines/pixart_alpha/test_pixart.py
+++ b/tests/pipelines/pixart_alpha/test_pixart.py
@@ -27,7 +27,8 @@
     PixArtAlphaPipeline,
     PixArtTransformer2DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     numpy_cosine_similarity_distance,
@@ -35,7 +36,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/pixart_sigma/test_pixart.py b/tests/pipelines/pixart_sigma/test_pixart.py
index cda7b442d732..2cb80df81adf 100644
--- a/tests/pipelines/pixart_sigma/test_pixart.py
+++ b/tests/pipelines/pixart_sigma/test_pixart.py
@@ -27,7 +27,8 @@
     PixArtSigmaPipeline,
     PixArtTransformer2DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     numpy_cosine_similarity_distance,
@@ -35,7 +36,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     PipelineTesterMixin,
diff --git a/tests/pipelines/pndm/test_pndm.py b/tests/pipelines/pndm/test_pndm.py
index 2c12690ad1b5..61d6efe88ccd 100644
--- a/tests/pipelines/pndm/test_pndm.py
+++ b/tests/pipelines/pndm/test_pndm.py
@@ -19,7 +19,8 @@
 import torch

 from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
+
+from ...testing_utils import enable_full_determinism, nightly, require_torch, torch_device


 enable_full_determinism()
diff --git a/tests/pipelines/qwenimage/test_qwenimage.py b/tests/pipelines/qwenimage/test_qwenimage.py
index a312d0658fea..8ebfe7d08bc1 100644
--- a/tests/pipelines/qwenimage/test_qwenimage.py
+++ b/tests/pipelines/qwenimage/test_qwenimage.py
@@ -24,8 +24,8 @@
     QwenImagePipeline,
     QwenImageTransformer2DModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/qwenimage/test_qwenimage_edit.py b/tests/pipelines/qwenimage/test_qwenimage_edit.py
index 647c65ada6bf..058548cf5f1b 100644
--- a/tests/pipelines/qwenimage/test_qwenimage_edit.py
+++ b/tests/pipelines/qwenimage/test_qwenimage_edit.py
@@ -26,8 +26,8 @@
     QwenImageEditPipeline,
     QwenImageTransformer2DModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/qwenimage/test_qwenimage_img2img.py b/tests/pipelines/qwenimage/test_qwenimage_img2img.py
index 9f21257299ed..07e683ec7f5a 100644
--- a/tests/pipelines/qwenimage/test_qwenimage_img2img.py
+++ b/tests/pipelines/qwenimage/test_qwenimage_img2img.py
@@ -11,12 +11,12 @@
     QwenImageImg2ImgPipeline,
     QwenImageTransformer2DModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/qwenimage/test_qwenimage_inpaint.py b/tests/pipelines/qwenimage/test_qwenimage_inpaint.py
index 1a40630a2db8..b564624540c3 100644
--- a/tests/pipelines/qwenimage/test_qwenimage_inpaint.py
+++ b/tests/pipelines/qwenimage/test_qwenimage_inpaint.py
@@ -25,8 +25,8 @@
     QwenImageInpaintPipeline,
     QwenImageTransformer2DModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/sana/test_sana.py b/tests/pipelines/sana/test_sana.py
index 26c06c1c9e4a..34ea3079b143 100644
--- a/tests/pipelines/sana/test_sana.py
+++ b/tests/pipelines/sana/test_sana.py
@@ -21,14 +21,14 @@
 from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer

 from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     require_torch_accelerator,
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/sana/test_sana_controlnet.py b/tests/pipelines/sana/test_sana_controlnet.py
index 9b5c9e439e29..043e276fcb84 100644
--- a/tests/pipelines/sana/test_sana_controlnet.py
+++ b/tests/pipelines/sana/test_sana_controlnet.py
@@ -26,12 +26,12 @@
     SanaControlNetPipeline,
     SanaTransformer2DModel,
 )
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-from diffusers.utils.torch_utils import randn_tensor
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/sana/test_sana_sprint.py b/tests/pipelines/sana/test_sana_sprint.py
index 021e5596373d..fee2304dce1b 100644
--- a/tests/pipelines/sana/test_sana_sprint.py
+++ b/tests/pipelines/sana/test_sana_sprint.py
@@ -20,11 +20,11 @@
 from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer

 from diffusers import AutoencoderDC, SanaSprintPipeline, SanaTransformer2DModel, SCMScheduler
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
diff --git a/tests/pipelines/sana/test_sana_sprint_img2img.py b/tests/pipelines/sana/test_sana_sprint_img2img.py
index c0e4bf8e356f..c218abb8e951 100644
--- a/tests/pipelines/sana/test_sana_sprint_img2img.py
+++ b/tests/pipelines/sana/test_sana_sprint_img2img.py
@@ -20,12 +20,12 @@
 from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer

 from diffusers import AutoencoderDC, SanaSprintImg2ImgPipeline, SanaTransformer2DModel, SCMScheduler
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-from diffusers.utils.torch_utils import randn_tensor
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/shap_e/test_shap_e.py b/tests/pipelines/shap_e/test_shap_e.py
index 47cc97844ecf..99fd28692981 100644
--- a/tests/pipelines/shap_e/test_shap_e.py
+++ b/tests/pipelines/shap_e/test_shap_e.py
@@ -21,14 +21,14 @@
 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     load_numpy,
     nightly,
     require_torch_accelerator,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
diff --git a/tests/pipelines/shap_e/test_shap_e_img2img.py b/tests/pipelines/shap_e/test_shap_e_img2img.py
index ba9f9fe521b8..b1867db249ea 100644
--- a/tests/pipelines/shap_e/test_shap_e_img2img.py
+++ b/tests/pipelines/shap_e/test_shap_e_img2img.py
@@ -22,7 +22,8 @@
 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     floats_tensor,
     load_image,
@@ -31,7 +32,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
diff --git a/tests/pipelines/skyreels_v2/test_skyreels_v2.py b/tests/pipelines/skyreels_v2/test_skyreels_v2.py
index adbbf05325f3..1bcec877c30d 100644
--- a/tests/pipelines/skyreels_v2/test_skyreels_v2.py
+++ b/tests/pipelines/skyreels_v2/test_skyreels_v2.py
@@ -24,10 +24,10 @@
     SkyReelsV2Transformer3DModel,
     UniPCMultistepScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     PipelineTesterMixin,
diff --git a/tests/pipelines/skyreels_v2/test_skyreels_v2_df.py b/tests/pipelines/skyreels_v2/test_skyreels_v2_df.py
index cf9070bb9533..74235d59efd6 100644
--- a/tests/pipelines/skyreels_v2/test_skyreels_v2_df.py
+++ b/tests/pipelines/skyreels_v2/test_skyreels_v2_df.py
@@ -24,10 +24,10 @@
     SkyReelsV2Transformer3DModel,
     UniPCMultistepScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     PipelineTesterMixin,
diff --git a/tests/pipelines/skyreels_v2/test_skyreels_v2_df_image_to_video.py b/tests/pipelines/skyreels_v2/test_skyreels_v2_df_image_to_video.py
index 7b8a2992815c..f0cbc710df05 100644
--- a/tests/pipelines/skyreels_v2/test_skyreels_v2_df_image_to_video.py
+++ b/tests/pipelines/skyreels_v2/test_skyreels_v2_df_image_to_video.py
@@ -28,8 +28,8 @@
     SkyReelsV2Transformer3DModel,
     UniPCMultistepScheduler,
 )
-from diffusers.utils.testing_utils import enable_full_determinism
+from ...testing_utils import enable_full_determinism
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/skyreels_v2/test_skyreels_v2_df_video_to_video.py b/tests/pipelines/skyreels_v2/test_skyreels_v2_df_video_to_video.py
index bc6a9acbf7e2..1b0b23318e63 100644
--- a/tests/pipelines/skyreels_v2/test_skyreels_v2_df_video_to_video.py
+++ b/tests/pipelines/skyreels_v2/test_skyreels_v2_df_video_to_video.py
@@ -26,11 +26,11 @@
     SkyReelsV2Transformer3DModel,
     UniPCMultistepScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     PipelineTesterMixin,
diff --git a/tests/pipelines/skyreels_v2/test_skyreels_v2_image_to_video.py b/tests/pipelines/skyreels_v2/test_skyreels_v2_image_to_video.py
index 3ca5862072c9..784f701a29d2 100644
--- a/tests/pipelines/skyreels_v2/test_skyreels_v2_image_to_video.py
+++ b/tests/pipelines/skyreels_v2/test_skyreels_v2_image_to_video.py
@@ -31,8 +31,8 @@
     SkyReelsV2Transformer3DModel,
     UniPCMultistepScheduler,
 )
-from diffusers.utils.testing_utils import enable_full_determinism
+from ...testing_utils import enable_full_determinism
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/stable_audio/test_stable_audio.py b/tests/pipelines/stable_audio/test_stable_audio.py
index 5167dfdf0c0f..dd03f4d07f07 100644
--- a/tests/pipelines/stable_audio/test_stable_audio.py
+++ b/tests/pipelines/stable_audio/test_stable_audio.py
@@ -32,7 +32,8 @@
     StableAudioProjectionModel,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     Expectations,
     backend_empty_cache,
     enable_full_determinism,
@@ -40,7 +41,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py
index 0a75b1e8b92a..afa0db39f3fa 100644
--- a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py
+++ b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py
@@ -22,8 +22,8 @@
 from diffusers import DDPMWuerstchenScheduler, StableCascadeCombinedPipeline
 from diffusers.models import StableCascadeUNet
 from diffusers.pipelines.wuerstchen import PaellaVQModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
+from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py b/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py
index d0c9fc891ff9..5b3acb8705b3 100644
--- a/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py
+++ b/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py
@@ -23,7 +23,9 @@
 from diffusers import DDPMWuerstchenScheduler, StableCascadeDecoderPipeline
 from diffusers.models import StableCascadeUNet
 from diffusers.pipelines.wuerstchen import PaellaVQModel
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     load_numpy,
@@ -34,8 +36,6 @@
     slow,
     torch_device,
 )
-from diffusers.utils.torch_utils import randn_tensor
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/stable_cascade/test_stable_cascade_prior.py b/tests/pipelines/stable_cascade/test_stable_cascade_prior.py
index 90633adea919..f8267186db14 100644
--- a/tests/pipelines/stable_cascade/test_stable_cascade_prior.py
+++ b/tests/pipelines/stable_cascade/test_stable_cascade_prior.py
@@ -23,7 +23,8 @@
 from diffusers import DDPMWuerstchenScheduler, StableCascadePriorPipeline
 from diffusers.models import StableCascadeUNet
 from diffusers.utils.import_utils import is_peft_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     load_numpy,
diff --git a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
index 69c105743b5e..62414f3f1947 100644
--- a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
@@ -27,8 +27,8 @@
     OnnxStableDiffusionPipeline,
     PNDMScheduler,
 )
-from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
+from ...testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
 from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
diff --git a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
index 8a470fc668ab..28d1d0f37ff8 100644
--- a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
+++ b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
@@ -26,7 +26,8 @@
     OnnxStableDiffusionImg2ImgPipeline,
     PNDMScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     floats_tensor,
     is_onnx_available,
     load_image,
@@ -34,7 +35,6 @@
     require_onnxruntime,
     require_torch_gpu,
 )
-
 from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
diff --git a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
index 6bca7b288c5f..1d46ff9a2f5f 100644
--- a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
@@ -18,14 +18,14 @@
 import numpy as np

 from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     is_onnx_available,
     load_image,
     nightly,
     require_onnxruntime,
     require_torch_gpu,
 )
-
 from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
diff --git a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py
index e25118575f3d..55d9d38d64bd 100644
--- a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py
+++ b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py
@@ -26,7 +26,8 @@
     OnnxStableDiffusionUpscalePipeline,
     PNDMScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     floats_tensor,
     is_onnx_available,
     load_image,
@@ -34,7 +35,6 @@
     require_onnxruntime,
     require_torch_gpu,
 )
-
 from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
index bcad693501c6..c9d9525b2e45 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
@@ -41,7 +41,8 @@
     UNet2DConditionModel,
     logging,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     backend_max_memory_allocated,
@@ -58,7 +59,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
index c80667656e59..a0b7268b9dd4 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
@@ -33,7 +33,8 @@
     StableDiffusionImg2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
@@ -48,7 +49,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
index 20a984811875..259806a9479c 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
@@ -35,7 +35,8 @@
     StableDiffusionInpaintPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     Expectations,
     backend_empty_cache,
     backend_max_memory_allocated,
@@ -50,7 +51,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
index 1654831a9988..4758c5dab44b 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
@@ -32,7 +32,8 @@
     UNet2DConditionModel,
 )
 from diffusers.image_processor import VaeImageProcessor
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
@@ -44,7 +45,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
index b3b5ba3de410..3b2552b432d3 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
@@ -31,7 +31,8 @@
     UNet2DConditionModel,
     logging,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     backend_max_memory_allocated,
@@ -45,7 +46,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
index 6f772e5df1d9..bea7c099046f 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -36,7 +36,8 @@
     StableDiffusionDepth2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -50,7 +51,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
index 77014bd7a518..92effcacadb5 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
@@ -18,7 +18,8 @@
 from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import nightly, require_flax
+
+from ...testing_utils import nightly, require_flax


 if is_flax_available():
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
index d83c69673676..cdd088b531b8 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
@@ -18,7 +18,8 @@
 from diffusers import FlaxStableDiffusionInpaintPipeline
 from diffusers.utils import is_flax_available, load_image
-from diffusers.utils.testing_utils import require_flax, slow
+
+from ...testing_utils import require_flax, slow


 if is_flax_available():
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
index 238874c7f863..f010c1b03fe3 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
@@ -23,7 +23,8 @@
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

 from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
@@ -36,7 +37,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
index 50cb9aa4b7f7..2e4b428dfeb5 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
@@ -30,7 +30,8 @@
     UNet2DConditionModel,
 )
 from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -40,7 +41,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
index a0949db7ee41..481ac7f2d10f 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
@@ -24,7 +24,8 @@
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

 from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
index 55d801fd6c21..37b309c4cac4 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
@@ -30,7 +30,8 @@
     StableDiffusionPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
diff --git a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py
index 43d91d55c949..3ccefe3de35d 100644
--- a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py
+++ b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py
@@ -6,14 +6,14 @@
 from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

 from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, StableDiffusion3Pipeline
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     numpy_cosine_similarity_distance,
     require_big_accelerator,
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import (
     PipelineTesterMixin,
     check_qkv_fusion_matches_attn_procs_length,
diff --git a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py
index 6714fd139695..9025b1060c9e 100644
--- a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py
+++ b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py
@@ -13,7 +13,8 @@
     StableDiffusion3Img2ImgPipeline,
 )
 from diffusers.utils import load_image
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     Expectations,
     backend_empty_cache,
     floats_tensor,
@@ -22,7 +23,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py
index b537d6a0b638..628930340294 100644
--- a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py
+++ b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py
@@ -11,12 +11,12 @@
     SD3Transformer2DModel,
     StableDiffusion3InpaintPipeline,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py b/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
index 009c75df4249..79b38d1cad1c 100644
--- a/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
+++ b/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
@@ -34,7 +34,8 @@
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -45,7 +46,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineFromPipeTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference
diff --git a/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py b/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py
index 5eca6c23804e..dbf5a7b68eae 100644
--- a/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py
+++ b/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py
@@ -29,7 +29,8 @@
     StableDiffusionImageVariationPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
@@ -44,7 +45,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
index 966d86484367..b318a505e9db 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
@@ -34,7 +34,8 @@
     UNet2DConditionModel,
     UniPCMultistepScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     load_image,
@@ -43,7 +44,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_TO_IMAGE_BATCH_PARAMS,
     TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
index c39c9bedafe2..3d72270dda5c 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
@@ -32,12 +32,12 @@
     UNet2DConditionModel,
 )
 from diffusers.utils import logging
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import (
     IPAdapterTesterMixin,
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py
index 450891b25744..c5499847069f 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py
@@ -38,7 +38,8 @@
     StableDiffusionXLImg2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -47,7 +48,6 @@
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
index 6ac820547d19..d3f5779c7633 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
@@ -41,14 +41,14 @@
     UNet2DConditionModel,
     UniPCMultistepScheduler,
 )
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     require_torch_accelerator,
     slow,
     torch_device,
 )
-
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py
index 932a24968995..20a03583e7a9 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py
@@ -29,8 +29,8 @@
 from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_instruct_pix2pix import (
     StableDiffusionXLInstructPix2PixPipeline,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
+from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
diff --git a/tests/pipelines/stable_unclip/test_stable_unclip.py b/tests/pipelines/stable_unclip/test_stable_unclip.py
index e3cbb1891b13..8923c2f63cee 100644
--- a/tests/pipelines/stable_unclip/test_stable_unclip.py
+++ b/tests/pipelines/stable_unclip/test_stable_unclip.py
@@ -13,7 +13,8 @@
     UNet2DConditionModel,
 )
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
@@ -24,7 +25,6 @@
     require_torch_accelerator,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
     PipelineKarrasSchedulerTesterMixin,
diff --git a/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py b/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
index 8ca5723ce634..e7a0fbccef67 100644
--- a/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
+++ b/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
@@ -17,7 +17,8 @@
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     backend_empty_cache,
     backend_max_memory_allocated,
     backend_reset_max_memory_allocated,
@@ -31,7 +32,6 @@
     skip_mps,
     torch_device,
 )
-
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import (
     PipelineKarrasSchedulerTesterMixin,
diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index f77a5b1620d2..52595f7a8cd9 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -20,7 +20,8 @@
 )
 from diffusers.utils import load_image, logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import (
+
+from ...testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     enable_full_determinism,
@@ -32,7 +33,6 @@
     slow,
     torch_device,
 )
-
 from ..test_pipelines_common import PipelineTesterMixin
diff --git a/tests/pipelines/test_pipeline_utils.py b/tests/pipelines/test_pipeline_utils.py
index f49ad282f37f..6d9e68197976 100644
--- a/tests/pipelines/test_pipeline_utils.py
+++ b/tests/pipelines/test_pipeline_utils.py
@@ -19,7 +19,8 @@
     UNet2DConditionModel,
 )
 from diffusers.pipelines.pipeline_loading_utils import is_safetensors_compatible, variant_compatible_siblings
-from diffusers.utils.testing_utils import require_torch_accelerator, torch_device
+
+from ..testing_utils import require_torch_accelerator, torch_device


 class IsSafetensorsCompatibleTests(unittest.TestCase):
diff --git a/tests/pipelines/test_pipelines.py b/tests/pipelines/test_pipelines.py
index 6c342bcbe6b3..09df140f1af8 100644
--- a/tests/pipelines/test_pipelines.py
+++ b/tests/pipelines/test_pipelines.py
@@ -66,7 +66,9 @@
     CONFIG_NAME,
     WEIGHTS_NAME,
 )
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import is_compiled_module
+
+from ..testing_utils import (
     CaptureLogger,
     backend_empty_cache,
     enable_full_determinism,
@@ -89,7 +91,6 @@
     slow,
     torch_device,
 )
-from diffusers.utils.torch_utils import is_compiled_module


 enable_full_determinism()
diff --git a/tests/pipelines/test_pipelines_auto.py b/tests/pipelines/test_pipelines_auto.py
index de4b447f66aa..f3c639c367f7 100644
--- a/tests/pipelines/test_pipelines_auto.py
+++ b/tests/pipelines/test_pipelines_auto.py
@@ -35,7 +35,8 @@
     AUTO_INPAINT_PIPELINES_MAPPING,
     AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
 )
-from diffusers.utils.testing_utils import slow
+
+from ..testing_utils import slow


 PRETRAINED_MODEL_REPO_MAPPING = OrderedDict(
diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index ed6a56c5faf5..dcef33897e6a 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -48,19 +48,6 @@
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.source_code_parsing_utils import ReturnNameVisitor
-from diffusers.utils.testing_utils import (
-    CaptureLogger,
-    backend_empty_cache,
-    numpy_cosine_similarity_distance,
-    require_accelerate_version_greater,
-    require_accelerator,
-    require_hf_hub_version_greater,
-    require_torch,
-    require_torch_accelerator,
-    require_transformers_version_greater,
-    skip_mps,
-    torch_device,
-)

 from ..models.autoencoders.vae import (
     get_asym_autoencoder_kl_config,
@@ -74,6 +61,19 @@
     create_ip_adapter_state_dict,
 )
 from ..others.test_utils import TOKEN, USER, is_staging_test
+from ..testing_utils import (
+    CaptureLogger,
+    backend_empty_cache,
+    numpy_cosine_similarity_distance,
+    require_accelerate_version_greater,
+    require_accelerator,
+    require_hf_hub_version_greater,
+    require_torch,
+    require_torch_accelerator,
+    require_transformers_version_greater,
+    skip_mps,
+    torch_device,
+)


 def to_np(tensor):
diff --git a/tests/pipelines/test_pipelines_flax.py b/tests/pipelines/test_pipelines_flax.py
index ffe43ac9d76d..dbb5c7bfed1d 100644
--- a/tests/pipelines/test_pipelines_flax.py
+++ b/tests/pipelines/test_pipelines_flax.py
@@ -20,7 +20,8 @@
 import numpy as np

 from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import
require_flax, slow + +from ..testing_utils import require_flax, slow if is_flax_available(): diff --git a/tests/pipelines/test_pipelines_onnx_common.py b/tests/pipelines/test_pipelines_onnx_common.py index 575ecd007531..fa077efb8ab0 100644 --- a/tests/pipelines/test_pipelines_onnx_common.py +++ b/tests/pipelines/test_pipelines_onnx_common.py @@ -1,4 +1,4 @@ -from diffusers.utils.testing_utils import require_onnxruntime +from ..testing_utils import require_onnxruntime @require_onnxruntime diff --git a/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py b/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py index 7e2aa257099c..00ae0441fe99 100644 --- a/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py +++ b/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py @@ -10,14 +10,14 @@ import diffusers from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel, VisualClozePipeline from diffusers.utils import logging -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( CaptureLogger, enable_full_determinism, floats_tensor, require_accelerator, torch_device, ) - from ..test_pipelines_common import PipelineTesterMixin, to_np diff --git a/tests/pipelines/visualcloze/test_pipeline_visualcloze_generation.py b/tests/pipelines/visualcloze/test_pipeline_visualcloze_generation.py index 0cd714af1789..ab6b3ca5c587 100644 --- a/tests/pipelines/visualcloze/test_pipeline_visualcloze_generation.py +++ b/tests/pipelines/visualcloze/test_pipeline_visualcloze_generation.py @@ -15,14 +15,14 @@ VisualClozeGenerationPipeline, ) from diffusers.utils import logging -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( CaptureLogger, enable_full_determinism, floats_tensor, require_accelerator, torch_device, ) - from ..test_pipelines_common import PipelineTesterMixin, to_np diff --git a/tests/pipelines/wan/test_wan.py b/tests/pipelines/wan/test_wan.py index 90b7978ec760..106a7b294646 100644 --- a/tests/pipelines/wan/test_wan.py +++ b/tests/pipelines/wan/test_wan.py @@ -21,14 +21,14 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/wan/test_wan_22.py b/tests/pipelines/wan/test_wan_22.py index 9fdae6698069..56ef5ceb97ed 100644 --- a/tests/pipelines/wan/test_wan_22.py +++ b/tests/pipelines/wan/test_wan_22.py @@ -20,11 +20,11 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, WanPipeline, WanTransformer3DModel -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/wan/test_wan_22_image_to_video.py b/tests/pipelines/wan/test_wan_22_image_to_video.py index 3f72a74e4498..6294d62044f3 100644 --- a/tests/pipelines/wan/test_wan_22_image_to_video.py +++ 
b/tests/pipelines/wan/test_wan_22_image_to_video.py @@ -21,11 +21,11 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, WanImageToVideoPipeline, WanTransformer3DModel -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, torch_device, ) - from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/wan/test_wan_image_to_video.py b/tests/pipelines/wan/test_wan_image_to_video.py index 1c938ce2dea3..07a9142f2553 100644 --- a/tests/pipelines/wan/test_wan_image_to_video.py +++ b/tests/pipelines/wan/test_wan_image_to_video.py @@ -27,8 +27,8 @@ ) from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanImageToVideoPipeline, WanTransformer3DModel -from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/wan/test_wan_vace.py b/tests/pipelines/wan/test_wan_vace.py index 885defcfb432..ed13d5649dc3 100644 --- a/tests/pipelines/wan/test_wan_vace.py +++ b/tests/pipelines/wan/test_wan_vace.py @@ -20,8 +20,8 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel -from diffusers.utils.testing_utils import enable_full_determinism +from ...testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/wan/test_wan_video_to_video.py b/tests/pipelines/wan/test_wan_video_to_video.py index f4bb0960acee..27ada121ca48 100644 --- a/tests/pipelines/wan/test_wan_video_to_video.py +++ b/tests/pipelines/wan/test_wan_video_to_video.py @@ -19,10 +19,10 @@ from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, WanTransformer3DModel, WanVideoToVideoPipeline -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( enable_full_determinism, ) - from ..pipeline_params import TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, diff --git a/tests/quantization/bnb/test_4bit.py b/tests/quantization/bnb/test_4bit.py index 08c0fee43b80..c1da8f1ece78 100644 --- a/tests/quantization/bnb/test_4bit.py +++ b/tests/quantization/bnb/test_4bit.py @@ -32,7 +32,8 @@ ) from diffusers.quantizers import PipelineQuantizationConfig from diffusers.utils import is_accelerate_version, logging -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( CaptureLogger, backend_empty_cache, is_bitsandbytes_available, @@ -50,7 +51,6 @@ slow, torch_device, ) - from ..test_torch_compile_utils import QuantCompileTests diff --git a/tests/quantization/bnb/test_mixed_int8.py b/tests/quantization/bnb/test_mixed_int8.py index 8ddbf11cfd62..fde3966dec97 100644 --- a/tests/quantization/bnb/test_mixed_int8.py +++ b/tests/quantization/bnb/test_mixed_int8.py @@ -32,7 +32,8 @@ ) from diffusers.quantizers import PipelineQuantizationConfig from diffusers.utils import 
is_accelerate_version -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( CaptureLogger, backend_empty_cache, is_bitsandbytes_available, @@ -51,7 +52,6 @@ slow, torch_device, ) - from ..test_torch_compile_utils import QuantCompileTests diff --git a/tests/quantization/gguf/test_gguf.py b/tests/quantization/gguf/test_gguf.py index 3bd454c5a500..38322459e761 100644 --- a/tests/quantization/gguf/test_gguf.py +++ b/tests/quantization/gguf/test_gguf.py @@ -20,7 +20,8 @@ WanVACETransformer3DModel, ) from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( Expectations, backend_empty_cache, backend_max_memory_allocated, @@ -38,7 +39,6 @@ require_torch_version_greater, torch_device, ) - from ..test_torch_compile_utils import QuantCompileTests diff --git a/tests/quantization/quanto/test_quanto.py b/tests/quantization/quanto/test_quanto.py index d7bde6591dcf..28555a6076b8 100644 --- a/tests/quantization/quanto/test_quanto.py +++ b/tests/quantization/quanto/test_quanto.py @@ -5,7 +5,8 @@ from diffusers import FluxPipeline, FluxTransformer2DModel, QuantoConfig from diffusers.models.attention_processor import Attention from diffusers.utils import is_optimum_quanto_available, is_torch_available -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_reset_peak_memory_stats, enable_full_determinism, diff --git a/tests/quantization/test_pipeline_level_quantization.py b/tests/quantization/test_pipeline_level_quantization.py index e91fe6d4cbab..51cf4057d64e 100644 --- a/tests/quantization/test_pipeline_level_quantization.py +++ b/tests/quantization/test_pipeline_level_quantization.py @@ -22,7 +22,8 @@ from diffusers import BitsAndBytesConfig, DiffusionPipeline, QuantoConfig from diffusers.quantizers import PipelineQuantizationConfig from diffusers.utils import logging -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( CaptureLogger, is_transformers_available, require_accelerate, diff --git a/tests/quantization/test_torch_compile_utils.py b/tests/quantization/test_torch_compile_utils.py index 91ed173fc69b..29758cbdd735 100644 --- a/tests/quantization/test_torch_compile_utils.py +++ b/tests/quantization/test_torch_compile_utils.py @@ -18,7 +18,8 @@ import torch from diffusers import DiffusionPipeline -from diffusers.utils.testing_utils import backend_empty_cache, require_torch_accelerator, slow, torch_device + +from ..testing_utils import backend_empty_cache, require_torch_accelerator, slow, torch_device @require_torch_accelerator diff --git a/tests/quantization/torchao/test_torchao.py b/tests/quantization/torchao/test_torchao.py index 5dcc207e655b..920c3a55f56c 100644 --- a/tests/quantization/torchao/test_torchao.py +++ b/tests/quantization/torchao/test_torchao.py @@ -31,7 +31,8 @@ ) from diffusers.models.attention_processor import Attention from diffusers.quantizers import PipelineQuantizationConfig -from diffusers.utils.testing_utils import ( + +from ...testing_utils import ( backend_empty_cache, backend_synchronize, enable_full_determinism, @@ -45,7 +46,6 @@ slow, torch_device, ) - from ..test_torch_compile_utils import QuantCompileTests diff --git a/tests/quantization/utils.py b/tests/quantization/utils.py index d458a3e6d554..a74ece5a3a3a 100644 --- a/tests/quantization/utils.py +++ b/tests/quantization/utils.py @@ -1,5 +1,6 @@ from diffusers.utils import is_torch_available -from diffusers.utils.testing_utils import ( + +from ..testing_utils 
import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_peak_memory_stats, diff --git a/tests/remote/test_remote_decode.py b/tests/remote/test_remote_decode.py index cec96e729a48..27170cba0835 100644 --- a/tests/remote/test_remote_decode.py +++ b/tests/remote/test_remote_decode.py @@ -30,13 +30,14 @@ from diffusers.utils.remote_utils import ( remote_decode, ) -from diffusers.utils.testing_utils import ( +from diffusers.video_processor import VideoProcessor + +from ..testing_utils import ( enable_full_determinism, slow, torch_all_close, torch_device, ) -from diffusers.video_processor import VideoProcessor enable_full_determinism() diff --git a/tests/remote/test_remote_encode.py b/tests/remote/test_remote_encode.py index 62ed97ee8f49..4c0daf08fd8c 100644 --- a/tests/remote/test_remote_encode.py +++ b/tests/remote/test_remote_encode.py @@ -31,7 +31,8 @@ remote_decode, remote_encode, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( enable_full_determinism, slow, ) diff --git a/tests/schedulers/test_scheduler_dpm_sde.py b/tests/schedulers/test_scheduler_dpm_sde.py index 69b611173423..e4dde67344ac 100644 --- a/tests/schedulers/test_scheduler_dpm_sde.py +++ b/tests/schedulers/test_scheduler_dpm_sde.py @@ -1,8 +1,8 @@ import torch from diffusers import DPMSolverSDEScheduler -from diffusers.utils.testing_utils import require_torchsde, torch_device +from ..testing_utils import require_torchsde, torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_euler.py b/tests/schedulers/test_scheduler_euler.py index 01e173a631cd..ee99465abfc3 100644 --- a/tests/schedulers/test_scheduler_euler.py +++ b/tests/schedulers/test_scheduler_euler.py @@ -1,8 +1,8 @@ import torch from diffusers import EulerDiscreteScheduler -from diffusers.utils.testing_utils import torch_device +from ..testing_utils import torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_euler_ancestral.py b/tests/schedulers/test_scheduler_euler_ancestral.py index 9f22ab38ddaf..c4fe61bfc387 100644 --- a/tests/schedulers/test_scheduler_euler_ancestral.py +++ b/tests/schedulers/test_scheduler_euler_ancestral.py @@ -1,8 +1,8 @@ import torch from diffusers import EulerAncestralDiscreteScheduler -from diffusers.utils.testing_utils import torch_device +from ..testing_utils import torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_flax.py b/tests/schedulers/test_scheduler_flax.py index c8121d334164..e6e4fd7d7631 100644 --- a/tests/schedulers/test_scheduler_flax.py +++ b/tests/schedulers/test_scheduler_flax.py @@ -19,7 +19,8 @@ from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler from diffusers.utils import is_flax_available -from diffusers.utils.testing_utils import require_flax + +from ..testing_utils import require_flax if is_flax_available(): diff --git a/tests/schedulers/test_scheduler_heun.py b/tests/schedulers/test_scheduler_heun.py index 90012f5525ab..97bef50048ba 100644 --- a/tests/schedulers/test_scheduler_heun.py +++ b/tests/schedulers/test_scheduler_heun.py @@ -1,8 +1,8 @@ import torch from diffusers import HeunDiscreteScheduler -from diffusers.utils.testing_utils import torch_device +from ..testing_utils import torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_kdpm2_ancestral.py b/tests/schedulers/test_scheduler_kdpm2_ancestral.py index 
fa85c2be45ed..135534db4536 100644 --- a/tests/schedulers/test_scheduler_kdpm2_ancestral.py +++ b/tests/schedulers/test_scheduler_kdpm2_ancestral.py @@ -1,8 +1,8 @@ import torch from diffusers import KDPM2AncestralDiscreteScheduler -from diffusers.utils.testing_utils import torch_device +from ..testing_utils import torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_kdpm2_discrete.py b/tests/schedulers/test_scheduler_kdpm2_discrete.py index 4d8923b6946b..370ba2253ee2 100644 --- a/tests/schedulers/test_scheduler_kdpm2_discrete.py +++ b/tests/schedulers/test_scheduler_kdpm2_discrete.py @@ -1,8 +1,8 @@ import torch from diffusers import KDPM2DiscreteScheduler -from diffusers.utils.testing_utils import torch_device +from ..testing_utils import torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_lcm.py b/tests/schedulers/test_scheduler_lcm.py index f3f6e9ba5837..f54970e0eba3 100644 --- a/tests/schedulers/test_scheduler_lcm.py +++ b/tests/schedulers/test_scheduler_lcm.py @@ -4,8 +4,8 @@ import torch from diffusers import LCMScheduler -from diffusers.utils.testing_utils import torch_device +from ..testing_utils import torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_lms.py b/tests/schedulers/test_scheduler_lms.py index 3bfcd57c1b6d..c4abca3ac973 100644 --- a/tests/schedulers/test_scheduler_lms.py +++ b/tests/schedulers/test_scheduler_lms.py @@ -1,8 +1,8 @@ import torch from diffusers import LMSDiscreteScheduler -from diffusers.utils.testing_utils import torch_device +from ..testing_utils import torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_scheduler_sasolver.py b/tests/schedulers/test_scheduler_sasolver.py index baa2736b2fcc..2c2d2c0397bb 100644 --- a/tests/schedulers/test_scheduler_sasolver.py +++ b/tests/schedulers/test_scheduler_sasolver.py @@ -1,8 +1,8 @@ import torch from diffusers import SASolverScheduler -from diffusers.utils.testing_utils import require_torchsde, torch_device +from ..testing_utils import require_torchsde, torch_device from .test_schedulers import SchedulerCommonTest diff --git a/tests/schedulers/test_schedulers.py b/tests/schedulers/test_schedulers.py index cd8dc5ccf1c3..5a8380e659fc 100755 --- a/tests/schedulers/test_schedulers.py +++ b/tests/schedulers/test_schedulers.py @@ -41,9 +41,9 @@ from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import logging -from diffusers.utils.testing_utils import CaptureLogger, torch_device from ..others.test_utils import TOKEN, USER, is_staging_test +from ..testing_utils import CaptureLogger, torch_device torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/single_file/single_file_testing_utils.py b/tests/single_file/single_file_testing_utils.py index 4e1713c9ceb1..3510d3371ca5 100644 --- a/tests/single_file/single_file_testing_utils.py +++ b/tests/single_file/single_file_testing_utils.py @@ -7,7 +7,8 @@ from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name from diffusers.models.attention_processor import AttnProcessor -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( numpy_cosine_similarity_distance, torch_device, ) diff --git a/tests/single_file/test_lumina2_transformer.py b/tests/single_file/test_lumina2_transformer.py index 
2ac681897d4e..99d9b71395c6 100644 --- a/tests/single_file/test_lumina2_transformer.py +++ b/tests/single_file/test_lumina2_transformer.py @@ -19,7 +19,8 @@ from diffusers import ( Lumina2Transformer2DModel, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, diff --git a/tests/single_file/test_model_autoencoder_dc_single_file.py b/tests/single_file/test_model_autoencoder_dc_single_file.py index 184498ca2f5c..5195f8e52f8d 100644 --- a/tests/single_file/test_model_autoencoder_dc_single_file.py +++ b/tests/single_file/test_model_autoencoder_dc_single_file.py @@ -21,7 +21,8 @@ from diffusers import ( AutoencoderDC, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, load_hf_numpy, diff --git a/tests/single_file/test_model_controlnet_single_file.py b/tests/single_file/test_model_controlnet_single_file.py index ade6f63a507d..e5214fe3f209 100644 --- a/tests/single_file/test_model_controlnet_single_file.py +++ b/tests/single_file/test_model_controlnet_single_file.py @@ -21,7 +21,8 @@ from diffusers import ( ControlNetModel, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, diff --git a/tests/single_file/test_model_flux_transformer_single_file.py b/tests/single_file/test_model_flux_transformer_single_file.py index 2f837bd18eac..a7e07e517f4d 100644 --- a/tests/single_file/test_model_flux_transformer_single_file.py +++ b/tests/single_file/test_model_flux_transformer_single_file.py @@ -19,7 +19,8 @@ from diffusers import ( FluxTransformer2DModel, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, diff --git a/tests/single_file/test_model_motion_adapter_single_file.py b/tests/single_file/test_model_motion_adapter_single_file.py index dc08a95b841e..7aaf4b577e4b 100644 --- a/tests/single_file/test_model_motion_adapter_single_file.py +++ b/tests/single_file/test_model_motion_adapter_single_file.py @@ -18,7 +18,8 @@ from diffusers import ( MotionAdapter, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( enable_full_determinism, ) diff --git a/tests/single_file/test_model_sd_cascade_unet_single_file.py b/tests/single_file/test_model_sd_cascade_unet_single_file.py index a16278c6b040..a5ec9dba30df 100644 --- a/tests/single_file/test_model_sd_cascade_unet_single_file.py +++ b/tests/single_file/test_model_sd_cascade_unet_single_file.py @@ -20,7 +20,8 @@ from diffusers import StableCascadeUNet from diffusers.utils import logging -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, diff --git a/tests/single_file/test_model_vae_single_file.py b/tests/single_file/test_model_vae_single_file.py index 9d994b5b4978..3b9e619f13e6 100644 --- a/tests/single_file/test_model_vae_single_file.py +++ b/tests/single_file/test_model_vae_single_file.py @@ -21,7 +21,8 @@ from diffusers import ( AutoencoderKL, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, load_hf_numpy, diff --git a/tests/single_file/test_model_wan_autoencoder_single_file.py b/tests/single_file/test_model_wan_autoencoder_single_file.py index 7f0e1c1a4b0b..a1f7155c1072 100644 --- 
a/tests/single_file/test_model_wan_autoencoder_single_file.py +++ b/tests/single_file/test_model_wan_autoencoder_single_file.py @@ -19,7 +19,8 @@ from diffusers import ( AutoencoderKLWan, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, diff --git a/tests/single_file/test_model_wan_transformer3d_single_file.py b/tests/single_file/test_model_wan_transformer3d_single_file.py index 72b4b3a58aa6..d7c758d3d933 100644 --- a/tests/single_file/test_model_wan_transformer3d_single_file.py +++ b/tests/single_file/test_model_wan_transformer3d_single_file.py @@ -21,7 +21,8 @@ from diffusers import ( WanTransformer3DModel, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_big_accelerator, diff --git a/tests/single_file/test_sana_transformer.py b/tests/single_file/test_sana_transformer.py index e74c5be6ff86..c1543ba17137 100644 --- a/tests/single_file/test_sana_transformer.py +++ b/tests/single_file/test_sana_transformer.py @@ -4,7 +4,8 @@ from diffusers import ( SanaTransformer2DModel, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, diff --git a/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py b/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py index 7589b48028c2..e558eeaf6f47 100644 --- a/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py +++ b/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py @@ -7,7 +7,8 @@ from diffusers import ControlNetModel, StableDiffusionControlNetPipeline from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -15,7 +16,6 @@ slow, torch_device, ) - from .single_file_testing_utils import ( SDSingleFileTesterMixin, download_diffusers_config, diff --git a/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py b/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py index 1555831db6db..54224f51a9b5 100644 --- a/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py +++ b/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py @@ -7,7 +7,8 @@ from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -15,7 +16,6 @@ slow, torch_device, ) - from .single_file_testing_utils import ( SDSingleFileTesterMixin, download_diffusers_config, diff --git a/tests/single_file/test_stable_diffusion_controlnet_single_file.py b/tests/single_file/test_stable_diffusion_controlnet_single_file.py index 2c1e414e5e36..e90e648a9de9 100644 --- a/tests/single_file/test_stable_diffusion_controlnet_single_file.py +++ b/tests/single_file/test_stable_diffusion_controlnet_single_file.py @@ -7,7 +7,8 @@ from diffusers import ControlNetModel, StableDiffusionControlNetPipeline from diffusers.loaders.single_file_utils import 
_extract_repo_id_and_weights_name from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -15,7 +16,6 @@ slow, torch_device, ) - from .single_file_testing_utils import ( SDSingleFileTesterMixin, download_diffusers_config, diff --git a/tests/single_file/test_stable_diffusion_img2img_single_file.py b/tests/single_file/test_stable_diffusion_img2img_single_file.py index 9ad935582409..387f09471dd7 100644 --- a/tests/single_file/test_stable_diffusion_img2img_single_file.py +++ b/tests/single_file/test_stable_diffusion_img2img_single_file.py @@ -7,14 +7,14 @@ StableDiffusionImg2ImgPipeline, ) from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) - from .single_file_testing_utils import SDSingleFileTesterMixin diff --git a/tests/single_file/test_stable_diffusion_inpaint_single_file.py b/tests/single_file/test_stable_diffusion_inpaint_single_file.py index b05a098c0bcb..84636ec0f0fa 100644 --- a/tests/single_file/test_stable_diffusion_inpaint_single_file.py +++ b/tests/single_file/test_stable_diffusion_inpaint_single_file.py @@ -7,14 +7,14 @@ StableDiffusionInpaintPipeline, ) from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) - from .single_file_testing_utils import SDSingleFileTesterMixin diff --git a/tests/single_file/test_stable_diffusion_single_file.py b/tests/single_file/test_stable_diffusion_single_file.py index 78baeb94929c..4601b75c3ab6 100644 --- a/tests/single_file/test_stable_diffusion_single_file.py +++ b/tests/single_file/test_stable_diffusion_single_file.py @@ -7,7 +7,8 @@ from diffusers import EulerDiscreteScheduler, StableDiffusionInstructPix2PixPipeline, StableDiffusionPipeline from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, nightly, @@ -15,7 +16,6 @@ slow, torch_device, ) - from .single_file_testing_utils import ( SDSingleFileTesterMixin, download_original_config, diff --git a/tests/single_file/test_stable_diffusion_upscale_single_file.py b/tests/single_file/test_stable_diffusion_upscale_single_file.py index 398fc9ece359..39ec7b0194a6 100644 --- a/tests/single_file/test_stable_diffusion_upscale_single_file.py +++ b/tests/single_file/test_stable_diffusion_upscale_single_file.py @@ -8,7 +8,8 @@ StableDiffusionUpscalePipeline, ) from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -16,7 +17,6 @@ slow, torch_device, ) - from .single_file_testing_utils import SDSingleFileTesterMixin diff --git a/tests/single_file/test_stable_diffusion_xl_adapter_single_file.py b/tests/single_file/test_stable_diffusion_xl_adapter_single_file.py index fb5f8725b86e..3de9ee736417 100644 --- a/tests/single_file/test_stable_diffusion_xl_adapter_single_file.py +++ b/tests/single_file/test_stable_diffusion_xl_adapter_single_file.py @@ -10,7 +10,8 @@ ) from diffusers.loaders.single_file_utils import 
_extract_repo_id_and_weights_name from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -18,7 +19,6 @@ slow, torch_device, ) - from .single_file_testing_utils import ( SDXLSingleFileTesterMixin, download_diffusers_config, diff --git a/tests/single_file/test_stable_diffusion_xl_controlnet_single_file.py b/tests/single_file/test_stable_diffusion_xl_controlnet_single_file.py index 6d8c4369e1e1..a0a1aba1030f 100644 --- a/tests/single_file/test_stable_diffusion_xl_controlnet_single_file.py +++ b/tests/single_file/test_stable_diffusion_xl_controlnet_single_file.py @@ -7,7 +7,8 @@ from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -15,7 +16,6 @@ slow, torch_device, ) - from .single_file_testing_utils import ( SDXLSingleFileTesterMixin, download_diffusers_config, diff --git a/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py b/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py index 7df8b84bc235..810f412f8def 100644 --- a/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py +++ b/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py @@ -8,7 +8,8 @@ StableDiffusionXLImg2ImgPipeline, ) from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, @@ -16,7 +17,6 @@ slow, torch_device, ) - from .single_file_testing_utils import SDXLSingleFileTesterMixin diff --git a/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py b/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py index 5a014638633b..011d59222a5b 100644 --- a/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py +++ b/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py @@ -4,7 +4,8 @@ import torch from diffusers import StableDiffusionXLInstructPix2PixPipeline -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, diff --git a/tests/single_file/test_stable_diffusion_xl_single_file.py b/tests/single_file/test_stable_diffusion_xl_single_file.py index 77f58d859209..0ad180de17db 100644 --- a/tests/single_file/test_stable_diffusion_xl_single_file.py +++ b/tests/single_file/test_stable_diffusion_xl_single_file.py @@ -6,14 +6,14 @@ from diffusers import ( StableDiffusionXLPipeline, ) -from diffusers.utils.testing_utils import ( + +from ..testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) - from .single_file_testing_utils import SDXLSingleFileTesterMixin diff --git a/tests/testing_utils.py b/tests/testing_utils.py new file mode 100644 index 000000000000..7f849219c16f --- /dev/null +++ b/tests/testing_utils.py @@ -0,0 +1,1557 @@ +import functools +import glob +import importlib +import importlib.metadata +import inspect +import io +import logging +import multiprocessing +import os +import random +import re +import struct +import sys +import tempfile +import time +import unittest +import urllib.parse +from collections import 
UserDict
+from contextlib import contextmanager
+from io import BytesIO, StringIO
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Union
+
+import numpy as np
+import PIL.Image
+import PIL.ImageOps
+import requests
+from numpy.linalg import norm
+from packaging import version
+
+from diffusers.utils.constants import DIFFUSERS_REQUEST_TIMEOUT
+from diffusers.utils.import_utils import (
+    BACKENDS_MAPPING,
+    is_accelerate_available,
+    is_bitsandbytes_available,
+    is_compel_available,
+    is_flax_available,
+    is_gguf_available,
+    is_kernels_available,
+    is_note_seq_available,
+    is_onnx_available,
+    is_opencv_available,
+    is_optimum_quanto_available,
+    is_peft_available,
+    is_timm_available,
+    is_torch_available,
+    is_torch_version,
+    is_torchao_available,
+    is_torchsde_available,
+    is_transformers_available,
+)
+from diffusers.utils.logging import get_logger
+
+
+if is_torch_available():
+    import torch
+
+    IS_ROCM_SYSTEM = torch.version.hip is not None
+    IS_CUDA_SYSTEM = torch.version.cuda is not None
+    IS_XPU_SYSTEM = getattr(torch.version, "xpu", None) is not None
+else:
+    IS_ROCM_SYSTEM = False
+    IS_CUDA_SYSTEM = False
+    IS_XPU_SYSTEM = False
+
+global_rng = random.Random()
+
+logger = get_logger(__name__)
+
+_required_peft_version = is_peft_available() and version.parse(
+    version.parse(importlib.metadata.version("peft")).base_version
+) > version.parse("0.5")
+_required_transformers_version = is_transformers_available() and version.parse(
+    version.parse(importlib.metadata.version("transformers")).base_version
+) > version.parse("4.33")
+
+USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
+BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40))
+
+if is_torch_available():
+    import torch
+
+    # Set a backend environment variable for any extra module import required for a custom accelerator
+    if "DIFFUSERS_TEST_BACKEND" in os.environ:
+        backend = os.environ["DIFFUSERS_TEST_BACKEND"]
+        try:
+            _ = importlib.import_module(backend)
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError(
+                f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed"
+                f" module to enable a specified backend:\n{e}"
+            ) from e
+
+    if "DIFFUSERS_TEST_DEVICE" in os.environ:
+        torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]
+        try:
+            # try creating device to see if provided device is valid
+            _ = torch.device(torch_device)
+        except RuntimeError as e:
+            raise RuntimeError(
+                f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}"
+            ) from e
+        logger.info(f"torch_device overridden to {torch_device}")
+    else:
+        if torch.cuda.is_available():
+            torch_device = "cuda"
+        elif torch.xpu.is_available():
+            torch_device = "xpu"
+        else:
+            torch_device = "cpu"
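+    # A minimal sketch of the override this block supports (assumed shell
+    # usage; any value accepted by `torch.device` works):
+    #
+    #     DIFFUSERS_TEST_DEVICE=cpu pytest tests/pipelines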
+    is_torch_higher_equal_than_1_12 = version.parse(
+        version.parse(torch.__version__).base_version
+    ) >= version.parse("1.12")
+
+    if is_torch_higher_equal_than_1_12:
+        # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
+        mps_backend_registered = hasattr(torch.backends, "mps")
+        torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device
+
+    from diffusers.utils.torch_utils import get_torch_cuda_device_capability
+
+
+def torch_all_close(a, b, *args, **kwargs):
+    if not is_torch_available():
+        raise ValueError("PyTorch needs to be installed to use this function.")
+    if not torch.allclose(a, b, *args, **kwargs):
+        assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
+    return True
+
+
+def numpy_cosine_similarity_distance(a, b):
+    similarity = np.dot(a, b) / (norm(a) * norm(b))
+    distance = 1.0 - similarity.mean()
+
+    return distance
+
+
+def check_if_dicts_are_equal(dict1, dict2):
+    dict1, dict2 = dict1.copy(), dict2.copy()
+
+    for key, value in dict1.items():
+        if isinstance(value, set):
+            dict1[key] = sorted(value)
+    for key, value in dict2.items():
+        if isinstance(value, set):
+            dict2[key] = sorted(value)
+
+    for key in dict1:
+        if key not in dict2:
+            return False
+        if dict1[key] != dict2[key]:
+            return False
+
+    for key in dict2:
+        if key not in dict1:
+            return False
+
+    return True
+
+
+def print_tensor_test(
+    tensor,
+    limit_to_slices=None,
+    max_torch_print=None,
+    filename="test_corrections.txt",
+    expected_tensor_name="expected_slice",
+):
+    if max_torch_print:
+        torch.set_printoptions(threshold=10_000)
+
+    test_name = os.environ.get("PYTEST_CURRENT_TEST")
+    if not torch.is_tensor(tensor):
+        tensor = torch.from_numpy(tensor)
+    if limit_to_slices:
+        tensor = tensor[0, -3:, -3:, -1]
+
+    tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
+    # format is usually:
+    # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
+    output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
+    test_file, test_class, test_fn = test_name.split("::")
+    test_fn = test_fn.split()[0]
+    with open(filename, "a") as f:
+        print("::".join([test_file, test_class, test_fn, output_str]), file=f)
+
+
+def get_tests_dir(append_path=None):
+    """
+    Args:
+        append_path: optional path to append to the tests dir path
+    Return:
+        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
+        joined after the `tests` dir if the former is provided.
+    """
+    # this function caller's __file__
+    caller__file__ = inspect.stack()[1][1]
+    tests_dir = os.path.abspath(os.path.dirname(caller__file__))
+
+    while not tests_dir.endswith("tests"):
+        tests_dir = os.path.dirname(tests_dir)
+
+    if append_path:
+        return Path(tests_dir, append_path).as_posix()
+    else:
+        return tests_dir
+
+
+# Taken from the following PR:
+# https://github.com/huggingface/accelerate/pull/1964
+def str_to_bool(value) -> int:
+    """
+    Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`,
+    `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
+    """
+    value = value.lower()
+    if value in ("y", "yes", "t", "true", "on", "1"):
+        return 1
+    elif value in ("n", "no", "f", "false", "off", "0"):
+        return 0
+    else:
+        raise ValueError(f"invalid truth value {value}")
+
+
+def parse_flag_from_env(key, default=False):
+    try:
+        value = os.environ[key]
+    except KeyError:
+        # KEY isn't set, default to `default`.
+        _value = default
+    else:
+        # KEY is set, convert it to True or False.
+        try:
+            _value = str_to_bool(value)
+        except ValueError:
+            # More values are supported, but let's keep the message simple.
+            raise ValueError(f"If set, {key} must be yes or no.")
+    return _value
+
+
+_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
+_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
+_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False)
+
+
+def floats_tensor(shape, scale=1.0, rng=None, name=None):
+    """Creates a random float32 tensor"""
+    if rng is None:
+        rng = global_rng
+
+    total_dims = 1
+    for dim in shape:
+        total_dims *= dim
+
+    values = []
+    for _ in range(total_dims):
+        values.append(rng.random() * scale)
+
+    return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
+
+
+def slow(test_case):
+    """
+    Decorator marking a test as slow.
+
+    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
+
+    """
+    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
+
+
+def nightly(test_case):
+    """
+    Decorator marking a test that runs nightly in the diffusers CI.
+
+    Nightly tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
+
+    """
+    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
+
+
+def is_torch_compile(test_case):
+    """
+    Decorator marking a test that runs compile tests in the diffusers CI.
+
+    Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them.
+
+    """
+    return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case)
+
+
+def require_torch(test_case):
+    """
+    Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
+    """
+    return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
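+# A minimal sketch of how the markers above are meant to be combined on a test
+# (hypothetical test case; a truthy RUN_SLOW in the environment enables it):
+#
+#     @slow
+#     @require_torch
+#     class ExamplePipelineSlowTests(unittest.TestCase):
+#         def test_inference(self):
+#             sample = floats_tensor((1, 3, 32, 32))
+#             ...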
+ """ + return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( + test_case + ) + + +def require_torch_version_greater_equal(torch_version): + """Decorator marking a test that requires torch with a specific version or greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">=", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than or equal to {torch_version}" + )(test_case) + + return decorator + + +def require_torch_version_greater(torch_version): + """Decorator marking a test that requires torch with a specific version greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than {torch_version}" + )(test_case) + + return decorator + + +def require_torch_gpu(test_case): + """Decorator marking a test that requires CUDA and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( + test_case + ) + + +def require_torch_cuda_compatibility(expected_compute_capability): + def decorator(test_case): + if torch.cuda.is_available(): + current_compute_capability = get_torch_cuda_device_capability() + return unittest.skipUnless( + float(current_compute_capability) == float(expected_compute_capability), + "Test not supported for this compute capability.", + ) + + return decorator + + +# These decorators are for accelerator-specific behaviours that are not GPU-specific +def require_torch_accelerator(test_case): + """Decorator marking a test that requires an accelerator backend and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")( + test_case + ) + + +def require_torch_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without + multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests + -k "multi_gpu" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_torch_multi_accelerator(test_case): + """ + Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine + without multiple hardware accelerators. 
+ """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless( + torch.cuda.device_count() > 1 or torch.xpu.device_count() > 1, "test requires multiple hardware accelerators" + )(test_case) + + +def require_torch_accelerator_with_fp16(test_case): + """Decorator marking a test that requires an accelerator with support for the FP16 data type.""" + return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")( + test_case + ) + + +def require_torch_accelerator_with_fp64(test_case): + """Decorator marking a test that requires an accelerator with support for the FP64 data type.""" + return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")( + test_case + ) + + +def require_big_gpu_with_torch_cuda(test_case): + """ + Decorator marking a test that requires a bigger GPU (24GB) for execution. Some example pipelines: Flux, SD3, Cog, + etc. + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not torch.cuda.is_available(): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + device_properties = torch.cuda.get_device_properties(0) + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory" + )(test_case) + + +def require_big_accelerator(test_case): + """ + Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines: + Flux, SD3, Cog, etc. + """ + import pytest + + test_case = pytest.mark.big_accelerator(test_case) + + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not (torch.cuda.is_available() or torch.xpu.is_available()): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + if torch.xpu.is_available(): + device_properties = torch.xpu.get_device_properties(0) + else: + device_properties = torch.cuda.get_device_properties(0) + + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, + f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory", + )(test_case) + + +def require_torch_accelerator_with_training(test_case): + """Decorator marking a test that requires an accelerator with support for training.""" + return unittest.skipUnless( + is_torch_available() and backend_supports_training(torch_device), + "test requires accelerator with training support", + )(test_case) + + +def skip_mps(test_case): + """Decorator marking a test to skip if torch_device is 'mps'""" + return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) + + +def require_flax(test_case): + """ + Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed + """ + return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) + + +def require_compel(test_case): + """ + Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when + the library is not installed. + """ + return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case) + + +def require_onnxruntime(test_case): + """ + Decorator marking a test that requires onnxruntime. 
These tests are skipped when onnxruntime isn't installed. + """ + return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case) + + +def require_note_seq(test_case): + """ + Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed. + """ + return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) + + +def require_accelerator(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no + hardware accelerator available. + """ + return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case) + + +def require_torchsde(test_case): + """ + Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed. + """ + return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case) + + +def require_peft_backend(test_case): + """ + Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and + transformers. + """ + return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires timm. These tests are skipped when timm isn't installed. + """ + return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case) + + +def require_bitsandbytes(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when bitsandbytes isn't installed. + """ + return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case) + + +def require_quanto(test_case): + """ + Decorator marking a test that requires quanto. These tests are skipped when quanto isn't installed. + """ + return unittest.skipUnless(is_optimum_quanto_available(), "test requires quanto")(test_case) + + +def require_accelerate(test_case): + """ + Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed. + """ + return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case) + + +def require_peft_version_greater(peft_version): + """ + Decorator marking a test that requires PEFT backend with a specific version, this would require some specific + versions of PEFT and transformers. + """ + + def decorator(test_case): + correct_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version + ) > version.parse(peft_version) + return unittest.skipUnless( + correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}" + )(test_case) + + return decorator + + +def require_transformers_version_greater(transformers_version): + """ + Decorator marking a test that requires transformers with a specific version, this would require some specific + versions of PEFT and transformers. 
+ """ + + def decorator(test_case): + correct_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version + ) > version.parse(transformers_version) + return unittest.skipUnless( + correct_transformers_version, + f"test requires transformers with the version greater than {transformers_version}", + )(test_case) + + return decorator + + +def require_accelerate_version_greater(accelerate_version): + def decorator(test_case): + correct_accelerate_version = is_accelerate_available() and version.parse( + version.parse(importlib.metadata.version("accelerate")).base_version + ) > version.parse(accelerate_version) + return unittest.skipUnless( + correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}." + )(test_case) + + return decorator + + +def require_bitsandbytes_version_greater(bnb_version): + def decorator(test_case): + correct_bnb_version = is_bitsandbytes_available() and version.parse( + version.parse(importlib.metadata.version("bitsandbytes")).base_version + ) > version.parse(bnb_version) + return unittest.skipUnless( + correct_bnb_version, f"Test requires bitsandbytes with the version greater than {bnb_version}." + )(test_case) + + return decorator + + +def require_hf_hub_version_greater(hf_hub_version): + def decorator(test_case): + correct_hf_hub_version = version.parse( + version.parse(importlib.metadata.version("huggingface_hub")).base_version + ) > version.parse(hf_hub_version) + return unittest.skipUnless( + correct_hf_hub_version, f"Test requires huggingface_hub with the version greater than {hf_hub_version}." + )(test_case) + + return decorator + + +def require_gguf_version_greater_or_equal(gguf_version): + def decorator(test_case): + correct_gguf_version = is_gguf_available() and version.parse( + version.parse(importlib.metadata.version("gguf")).base_version + ) >= version.parse(gguf_version) + return unittest.skipUnless( + correct_gguf_version, f"Test requires gguf with the version greater than {gguf_version}." + )(test_case) + + return decorator + + +def require_torchao_version_greater_or_equal(torchao_version): + def decorator(test_case): + correct_torchao_version = is_torchao_available() and version.parse( + version.parse(importlib.metadata.version("torchao")).base_version + ) >= version.parse(torchao_version) + return unittest.skipUnless( + correct_torchao_version, f"Test requires torchao with version greater than {torchao_version}." + )(test_case) + + return decorator + + +def require_kernels_version_greater_or_equal(kernels_version): + def decorator(test_case): + correct_kernels_version = is_kernels_available() and version.parse( + version.parse(importlib.metadata.version("kernels")).base_version + ) >= version.parse(kernels_version) + return unittest.skipUnless( + correct_kernels_version, f"Test requires kernels with version greater than {kernels_version}." 
+ )(test_case) + + return decorator + + +def deprecate_after_peft_backend(test_case): + """ + Decorator marking a test to be skipped once the PEFT backend is in use. + """ + return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case) + + +def get_python_version(): + sys_info = sys.version_info + major, minor = sys_info.major, sys_info.minor + return major, minor + + +def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray: + if isinstance(arry, str): + if local_path is not None: + # local_path can be passed to redirect test assets to local copies + return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix() + elif arry.startswith("http://") or arry.startswith("https://"): + response = requests.get(arry, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = np.load(BytesIO(response.content)) + elif os.path.isfile(arry): + arry = np.load(arry) + else: + raise ValueError( + f"Incorrect path or URL, URLs must start with `http://` or `https://`, and {arry} is not a valid path" + ) + elif isinstance(arry, np.ndarray): + pass + else: + raise ValueError( + "Incorrect format used for numpy ndarray. Should be a URL linking to a numpy file, a local path, or an" + " ndarray." + ) + + return arry + + +def load_pt(url: str, map_location: Optional[str] = None, weights_only: Optional[bool] = True): + response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = torch.load(BytesIO(response.content), map_location=map_location, weights_only=weights_only) + return arry + + +def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or URL, URLs must start with `http://` or `https://`, and {image} is not a valid path" + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for image. Should be a URL linking to an image, a local path, or a PIL image." + ) + image = PIL.ImageOps.exif_transpose(image) + image = image.convert("RGB") + return image + + +def preprocess_image(image: PIL.Image.Image, batch_size: int): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=100, + loop=0, + ) + return output_gif_path + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh.
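As a quick illustration of the image helpers above: `load_image` normalizes EXIF orientation and converts to RGB, and `preprocess_image` maps pixels into `[-1, 1]` at dimensions snapped down to multiples of 8. A sketch, where the URL is a placeholder assumption:

```python
# Sketch only: the URL is a placeholder.
img = load_image("https://example.com/sample.png")  # EXIF-transposed RGB PIL image
batch = preprocess_image(img, batch_size=2)         # float tensor, shape (2, 3, H, W)
assert batch.min() >= -1.0 and batch.max() <= 1.0   # pixels rescaled from [0, 1] to [-1, 1]
```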
+ """ + if output_ply_path is None: + output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name + + coords = mesh.verts.detach().cpu().numpy() + faces = mesh.faces.cpu().numpy() + rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) + + with buffered_writer(open(output_ply_path, "wb")) as f: + f.write(b"ply\n") + f.write(b"format binary_little_endian 1.0\n") + f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) + f.write(b"property float x\n") + f.write(b"property float y\n") + f.write(b"property float z\n") + if rgb is not None: + f.write(b"property uchar red\n") + f.write(b"property uchar green\n") + f.write(b"property uchar blue\n") + if faces is not None: + f.write(bytes(f"element face {len(faces)}\n", "ascii")) + f.write(b"property list uchar int vertex_index\n") + f.write(b"end_header\n") + + if rgb is not None: + rgb = (rgb * 255.499).round().astype(int) + vertices = [ + (*coord, *rgb) + for coord, rgb in zip( + coords.tolist(), + rgb.tolist(), + ) + ] + format = struct.Struct("<3f3B") + for item in vertices: + f.write(format.pack(*item)) + else: + format = struct.Struct("<3f") + for vertex in coords.tolist(): + f.write(format.pack(*vertex)) + + if faces is not None: + format = struct.Struct(" str: + if is_opencv_available(): + import cv2 + else: + raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video")) + if output_video_path is None: + output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name + + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + h, w, c = video_frames[0].shape + video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h)) + for i in range(len(video_frames)): + img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) + video_writer.write(img) + return output_video_path + + +def load_hf_numpy(path) -> np.ndarray: + base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main" + + if not path.startswith("http://") and not path.startswith("https://"): + path = os.path.join(base_url, urllib.parse.quote(path)) + + return load_numpy(path) + + +# --- pytest conf functions --- # + +# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once +pytest_opt_registered = {} + + +def pytest_addoption_shared(parser): + """ + This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. + + It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` + option. + + """ + option = "--make-reports" + if option not in pytest_opt_registered: + parser.addoption( + option, + action="store", + default=False, + help="generate report files. The value of this option is used as a prefix to report names", + ) + pytest_opt_registered[option] = 1 + + +def pytest_terminal_summary_main(tr, id): + """ + Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current + directory. The report files are prefixed with the test suite name. + + This function emulates --duration and -rA pytest arguments. + + This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined + there. + + Args: + - tr: `terminalreporter` passed from `conftest.py` + - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is + needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. 
+ + NB: this function taps into a private _pytest API and, while unlikely, it could break should + pytest make internal changes - it also calls default internal methods of terminalreporter which + can be hijacked by various `pytest-` plugins and interfere. + + """ + from _pytest.config import create_terminal_writer + + if not len(id): + id = "tests" + + config = tr.config + orig_writer = config.get_terminal_writer() + orig_tbstyle = config.option.tbstyle + orig_reportchars = tr.reportchars + + dir = "reports" + Path(dir).mkdir(parents=True, exist_ok=True) + report_files = { + k: f"{dir}/{id}_{k}.txt" + for k in [ + "durations", + "errors", + "failures_long", + "failures_short", + "failures_line", + "passes", + "stats", + "summary_short", + "warnings", + ] + } + + # custom durations report + # note: there is no need to call pytest --durations=XX to get this separate report + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66 + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if dlist: + dlist.sort(key=lambda x: x.duration, reverse=True) + with open(report_files["durations"], "w") as f: + durations_min = 0.05 # sec + f.write("slowest durations\n") + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted") + break + f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n") + + def summary_failures_short(tr): + # expecting that the reports were --tb=long (default) so we chop them off here to the last frame + reports = tr.getreports("failed") + if not reports: + return + tr.write_sep("=", "FAILURES SHORT STACK") + for rep in reports: + msg = tr._getfailureheadline(rep) + tr.write_sep("_", msg, red=True, bold=True) + # chop off the optional leading extra frames, leaving only the last one + longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S) + tr._tw.line(longrepr) + # note: not printing out any rep.sections to keep the report short + + # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814 + # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that) + + # report failures with line/short/long styles + config.option.tbstyle = "auto" # full tb + with open(report_files["failures_long"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + # config.option.tbstyle = "short" # short tb + with open(report_files["failures_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + summary_failures_short(tr) + + config.option.tbstyle = "line" # one line per error + with open(report_files["failures_line"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + with open(report_files["errors"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_errors() + + with open(report_files["warnings"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_warnings() # normal warnings + tr.summary_warnings() # final warnings + + tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary()) + with open(report_files["passes"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_passes() + + with open(report_files["summary_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.short_test_summary() + + with open(report_files["stats"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_stats() + + # restore: + tr._tw = orig_writer + tr.reportchars = orig_reportchars + config.option.tbstyle = orig_tbstyle + + +# Adapted from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905 +def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): + """ + To decorate flaky tests (methods or entire classes). They will be retried on failures. + + Args: + max_attempts (`int`, *optional*, defaults to 5): + The maximum number of attempts to retry the flaky test. + wait_before_retry (`float`, *optional*): + If provided, will wait that number of seconds before retrying the test. + description (`str`, *optional*): + A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors, + etc.) + """ + + def decorator(obj): + # If decorating a class, wrap each test method in it + if inspect.isclass(obj): + for attr_name, attr_value in list(obj.__dict__.items()): + if callable(attr_value) and attr_name.startswith("test"): + # recursively decorate the method + setattr(obj, attr_name, decorator(attr_value)) + return obj + + # Otherwise we're decorating a single test function / method + @functools.wraps(obj) + def wrapper(*args, **kwargs): + retry_count = 1 + while retry_count < max_attempts: + try: + return obj(*args, **kwargs) + except Exception as err: + msg = ( + f"[FLAKY] {description or obj.__name__!r} " + f"failed on attempt {retry_count}/{max_attempts}: {err}" + ) + print(msg, file=sys.stderr) + if wait_before_retry is not None: + time.sleep(wait_before_retry) + retry_count += 1 + + return obj(*args, **kwargs) + + return wrapper + + return decorator + + +# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787 +def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None): + """ + To run a test in a subprocess. In particular, this can avoid (GPU) memory issues. + + Args: + test_case (`unittest.TestCase`): + The test that will run `target_func`.
+ target_func (`Callable`): + The function implementing the actual testing logic. + inputs (`dict`, *optional*, defaults to `None`): + The inputs that will be passed to `target_func` through an (input) queue. + timeout (`int`, *optional*, defaults to `None`): + The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. + variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`. + """ + if timeout is None: + timeout = int(os.environ.get("PYTEST_TIMEOUT", 600)) + + start_method = "spawn" + ctx = multiprocessing.get_context(start_method) + + input_queue = ctx.Queue(1) + output_queue = ctx.JoinableQueue(1) + + # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle. + input_queue.put(inputs, timeout=timeout) + + process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout)) + process.start() + # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents + # the test from exiting properly. + try: + results = output_queue.get(timeout=timeout) + output_queue.task_done() + except Exception as e: + process.terminate() + test_case.fail(e) + process.join(timeout=timeout) + + if results["error"] is not None: + test_case.fail(f"{results['error']}") + + +class CaptureLogger: + """ + Context manager to capture `logging` streams. + + Args: + logger: `logging` logger object + Returns: + The captured output is available via `self.out` + Example: + ```python + >>> from diffusers import logging + >>> from diffusers.utils.testing_utils import CaptureLogger + + >>> msg = "Testing 1, 2, 3" + >>> logging.set_verbosity_info() + >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py") + >>> with CaptureLogger(logger) as cl: + ... logger.info(msg) + >>> assert cl.out == msg + "\n" + ``` + """ + + def __init__(self, logger): + self.logger = logger + self.io = StringIO() + self.sh = logging.StreamHandler(self.io) + self.out = "" + + def __enter__(self): + self.logger.addHandler(self.sh) + return self + + def __exit__(self, *exc): + self.logger.removeHandler(self.sh) + self.out = self.io.getvalue() + + def __repr__(self): + return f"captured: {self.out}\n" + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + # Enable PyTorch deterministic mode.
This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.backends.cuda.matmul.allow_tf32 = False + + +def disable_full_determinism(): + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" + torch.use_deterministic_algorithms(False) + + +# Utils for custom and alternative accelerator devices +def _is_torch_fp16_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float16).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +def _is_torch_fp64_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float64).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch +if is_torch_available(): + # Behaviour flags + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} + + # Function definitions + BACKEND_EMPTY_CACHE = { + "cuda": torch.cuda.empty_cache, + "xpu": torch.xpu.empty_cache, + "cpu": None, + "mps": torch.mps.empty_cache, + "default": None, + } + BACKEND_DEVICE_COUNT = { + "cuda": torch.cuda.device_count, + "xpu": torch.xpu.device_count, + "cpu": lambda: 0, + "mps": lambda: 0, + "default": 0, + } + BACKEND_MANUAL_SEED = { + "cuda": torch.cuda.manual_seed, + "xpu": torch.xpu.manual_seed, + "cpu": torch.manual_seed, + "mps": torch.mps.manual_seed, + "default": torch.manual_seed, + } + BACKEND_RESET_PEAK_MEMORY_STATS = { + "cuda": torch.cuda.reset_peak_memory_stats, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_RESET_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.reset_max_memory_allocated, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.max_memory_allocated, + "xpu": getattr(torch.xpu, "max_memory_allocated", None), + "cpu": 0, + "mps": 0, + "default": 0, + } + BACKEND_SYNCHRONIZE = { + "cuda": torch.cuda.synchronize, + "xpu": getattr(torch.xpu, "synchronize", None), + "cpu": None, + "mps": None, + "default": None, + } + + +# This dispatches a defined function according to the accelerator from the function definitions. 
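Together with the dispatcher defined next, these tables give tests a single call site per backend operation. A usage sketch, with `torch_device` supplied by the test environment:

```python
# Sketch: device-agnostic seeding, sync, and cache cleanup around a test body.
backend_manual_seed(torch_device, 0)  # torch.cuda/xpu/mps.manual_seed, or torch.manual_seed
# ... run the model under test ...
backend_synchronize(torch_device)     # returns the table's None entry on cpu/mps (a no-op)
backend_empty_cache(torch_device)     # frees cached allocator blocks on cuda/xpu/mps
```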
+def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against 'None' instead at + # user level + if not callable(fn): + return fn + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_synchronize(device: str): + return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +def backend_reset_peak_memory_stats(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS) + + +def backend_reset_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED) + + +def backend_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. +def backend_supports_training(device: str): + if not is_torch_available(): + return False + + if device not in BACKEND_SUPPORTS_TRAINING: + device = "default" + + return BACKEND_SUPPORTS_TRAINING[device] + + +# Guard for when Torch is not available +if is_torch_available(): + # Update device function dict mapping + def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str): + try: + # Try to import the function directly + spec_fn = getattr(device_spec_module, attribute_name) + device_fn_dict[torch_device] = spec_fn + except AttributeError as e: + # If the function doesn't exist, and there is no default, throw an error + if "default" not in device_fn_dict: + raise AttributeError( + f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found." + ) from e + + if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ: + device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"] + if not Path(device_spec_path).is_file(): + raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}") + + try: + import_name = device_spec_path[: device_spec_path.index(".py")] + except ValueError as e: + raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e + + device_spec_module = importlib.import_module(import_name) + + try: + device_name = device_spec_module.DEVICE_NAME + except AttributeError: + raise AttributeError("Device spec file did not contain `DEVICE_NAME`") + + if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name: + msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n" + msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name." + raise ValueError(msg) + + torch_device = device_name + + # Add one entry here for each `BACKEND_*` dictionary. 
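For reference, a device spec module consumed by this block could look like the following. Everything here is hypothetical except the attribute names, which must match the `update_mapping_from_spec` calls that follow:

```python
# Hypothetical my_device_spec.py, selected via DIFFUSERS_TEST_DEVICE_SPEC=my_device_spec.py
import torch

DEVICE_NAME = "npu"  # must agree with DIFFUSERS_TEST_DEVICE when that is also set

MANUAL_SEED_FN = torch.manual_seed
EMPTY_CACHE_FN = lambda: None
DEVICE_COUNT_FN = lambda: 1
SUPPORTS_TRAINING = True
```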
+ update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN") + update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN") + update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN") + update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING") + update_mapping_from_spec(BACKEND_RESET_PEAK_MEMORY_STATS, "RESET_PEAK_MEMORY_STATS_FN") + update_mapping_from_spec(BACKEND_RESET_MAX_MEMORY_ALLOCATED, "RESET_MAX_MEMORY_ALLOCATED_FN") + update_mapping_from_spec(BACKEND_MAX_MEMORY_ALLOCATED, "MAX_MEMORY_ALLOCATED_FN") + + +# Modified from https://github.com/huggingface/transformers/blob/cdfb018d0300fef3b07d9220f3efe9c2a9974662/src/transformers/testing_utils.py#L3090 + +# Type definition of key used in `Expectations` class. +DeviceProperties = Tuple[Union[str, None], Union[int, None]] + + +@functools.lru_cache +def get_device_properties() -> DeviceProperties: + """ + Get environment device properties. + """ + if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM: + import torch + + major, _ = torch.cuda.get_device_capability() + if IS_ROCM_SYSTEM: + return ("rocm", major) + else: + return ("cuda", major) + elif IS_XPU_SYSTEM: + import torch + + # For more info on the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def + arch = torch.xpu.get_device_capability()["architecture"] + gen_mask = 0x000000FF00000000 + gen = (arch & gen_mask) >> 32 + return ("xpu", gen) + else: + return (torch_device, None) + + +if TYPE_CHECKING: + DevicePropertiesUserDict = UserDict[DeviceProperties, Any] +else: + DevicePropertiesUserDict = UserDict + +if is_torch_available(): + from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS + from diffusers.hooks.group_offloading import ( + _GROUP_ID_LAZY_LEAF, + _compute_group_hash, + _find_parent_module_in_module_dict, + _gather_buffers_with_no_group_offloading_parent, + _gather_parameters_with_no_group_offloading_parent, + ) + + def _get_expected_safetensors_files( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> Set[str]: + expected_files = set() + + def get_hashed_filename(group_id: str) -> str: + short_hash = _compute_group_hash(group_id) + return os.path.join(offload_to_disk_path, f"group_{short_hash}.safetensors") + + if offload_type == "block_level": + if num_blocks_per_group is None: + raise ValueError("num_blocks_per_group must be provided for 'block_level' offloading.") + + # Handle groups of ModuleList and Sequential blocks + unmatched_modules = [] + for name, submodule in module.named_children(): + if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)): + unmatched_modules.append(module) + continue + + for i in range(0, len(submodule), num_blocks_per_group): + current_modules = submodule[i : i + num_blocks_per_group] + if not current_modules: + continue + group_id = f"{name}_{i}_{i + len(current_modules) - 1}" + expected_files.add(get_hashed_filename(group_id)) + + # Handle the group for unmatched top-level modules and parameters + for module in unmatched_modules: + expected_files.add(get_hashed_filename(f"{module.__class__.__name__}_unmatched_group")) + + elif offload_type == "leaf_level": + # Handle leaf-level module groups + for name, submodule in module.named_modules(): + if isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): + # These groups will always have parameters, so a file is expected + 
expected_files.add(get_hashed_filename(name)) + + # Handle groups for non-leaf parameters/buffers + modules_with_group_offloading = { + name for name, sm in module.named_modules() if isinstance(sm, _GO_LC_SUPPORTED_PYTORCH_LAYERS) + } + parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading) + buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading) + + all_orphans = parameters + buffers + if all_orphans: + parent_to_tensors = {} + module_dict = dict(module.named_modules()) + for tensor_name, _ in all_orphans: + parent_name = _find_parent_module_in_module_dict(tensor_name, module_dict) + if parent_name not in parent_to_tensors: + parent_to_tensors[parent_name] = [] + parent_to_tensors[parent_name].append(tensor_name) + + for parent_name in parent_to_tensors: + # A file is expected for each parent that gathers orphaned tensors + expected_files.add(get_hashed_filename(parent_name)) + expected_files.add(get_hashed_filename(_GROUP_ID_LAZY_LEAF)) + + else: + raise ValueError(f"Unsupported offload_type: {offload_type}") + + return expected_files + + def _check_safetensors_serialization( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> Tuple[bool, Optional[Set[str]], Optional[Set[str]]]: + if not os.path.isdir(offload_to_disk_path): + return False, None, None + + expected_files = _get_expected_safetensors_files( + module, offload_to_disk_path, offload_type, num_blocks_per_group + ) + actual_files = set(glob.glob(os.path.join(offload_to_disk_path, "*.safetensors"))) + missing_files = expected_files - actual_files + extra_files = actual_files - expected_files + + is_correct = not missing_files and not extra_files + return is_correct, extra_files, missing_files + + +class Expectations(DevicePropertiesUserDict): + def get_expectation(self) -> Any: + """ + Find the best matching expectation based on environment device properties. + """ + return self.find_expectation(get_device_properties()) + + @staticmethod + def is_default(key: DeviceProperties) -> bool: + return all(p is None for p in key) + + @staticmethod + def score(key: DeviceProperties, other: DeviceProperties) -> int: + """ + Returns a score indicating how similar two instances of the `DeviceProperties` tuple are. Points are + calculated as bit flags but documented as integers. Rules are as follows: + * Matching `type` gives 8 points. + * Semi-matching `type`, for example cuda and rocm, gives 4 points. + * Matching `major` (compute capability major version) gives 2 points. + * Default expectation (if present) gives 1 point. + """ + (device_type, major) = key + (other_device_type, other_major) = other + + score = 0b0 + if device_type == other_device_type: + score |= 0b1000 + elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]: + score |= 0b100 + + if major == other_major and other_major is not None: + score |= 0b10 + + if Expectations.is_default(other): + score |= 0b1 + + return int(score) + + def find_expectation(self, key: DeviceProperties = (None, None)) -> Any: + """ + Find the best matching expectation based on the provided device properties.
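A sketch of the intended lookup, with made-up expectation values: keys are `(device_type, major)` pairs, the highest-scoring key wins, and `(None, None)` acts as the default fallback:

```python
# Values are illustrative; (None, None) is the default fallback entry.
expected_slice = Expectations(
    {
        ("cuda", 8): [0.1716, 0.2849, 0.1812],   # e.g. compute capability 8.x (Ampere)
        ("rocm", None): [0.1721, 0.2843, 0.1811],
        (None, None): [0.1718, 0.2846, 0.1810],
    }
).get_expectation()
```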
+ """ + (result_key, result) = max(self.data.items(), key=lambda x: Expectations.score(key, x[0])) + + if Expectations.score(key, result_key) == 0: + raise ValueError(f"No matching expectation found for {key}") + + return result + + def __repr__(self): + return f"{self.data}" From 80b06b0d5f6893836e5644139796ef5616c719dd Mon Sep 17 00:00:00 2001 From: DN6 Date: Tue, 26 Aug 2025 08:36:19 +0530 Subject: [PATCH 2/7] update --- src/diffusers/utils/testing_utils.py | 5 +- src/diffusers/utils/torch_utils.py | 140 ++++++++++++++++++++++++++- 2 files changed, 143 insertions(+), 2 deletions(-) diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index a0307c108ad4..1231cea6c8e6 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -66,7 +66,10 @@ global_rng = random.Random() logger = get_logger(__name__) - +logger.warning( + "diffusers.utils.testing_utils' is deprecated and will be removed in a future version. " + "Please use `diffusers.utils.torch_utils` instead. " +) _required_peft_version = is_peft_available() and version.parse( version.parse(importlib.metadata.version("peft")).base_version ) > version.parse("0.5") diff --git a/src/diffusers/utils/torch_utils.py b/src/diffusers/utils/torch_utils.py index 5bc708a60c29..4bbbe059a95f 100644 --- a/src/diffusers/utils/torch_utils.py +++ b/src/diffusers/utils/torch_utils.py @@ -16,7 +16,8 @@ """ import functools -from typing import List, Optional, Tuple, Union +import os +from typing import Callable, Dict, List, Optional, Tuple, Union from . import logging from .import_utils import is_torch_available, is_torch_npu_available, is_torch_version @@ -36,6 +37,116 @@ def maybe_allow_in_graph(cls): return cls +# Behaviour flags +BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} +# Function definitions +BACKEND_EMPTY_CACHE = { + "cuda": torch.cuda.empty_cache, + "xpu": torch.xpu.empty_cache, + "cpu": None, + "mps": torch.mps.empty_cache, + "default": None, +} +BACKEND_DEVICE_COUNT = { + "cuda": torch.cuda.device_count, + "xpu": torch.xpu.device_count, + "cpu": lambda: 0, + "mps": lambda: 0, + "default": 0, +} +BACKEND_MANUAL_SEED = { + "cuda": torch.cuda.manual_seed, + "xpu": torch.xpu.manual_seed, + "cpu": torch.manual_seed, + "mps": torch.mps.manual_seed, + "default": torch.manual_seed, +} +BACKEND_RESET_PEAK_MEMORY_STATS = { + "cuda": torch.cuda.reset_peak_memory_stats, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, +} +BACKEND_RESET_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.reset_max_memory_allocated, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, +} +BACKEND_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.max_memory_allocated, + "xpu": getattr(torch.xpu, "max_memory_allocated", None), + "cpu": 0, + "mps": 0, + "default": 0, +} +BACKEND_SYNCHRONIZE = { + "cuda": torch.cuda.synchronize, + "xpu": getattr(torch.xpu, "synchronize", None), + "cpu": None, + "mps": None, + "default": None, +} + + +# This dispatches a defined function according to the accelerator from the function definitions. +def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. 
Need to guard against 'None' instead at + # user level + if not callable(fn): + return fn + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_synchronize(device: str): + return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +def backend_reset_peak_memory_stats(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS) + + +def backend_reset_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED) + + +def backend_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. +def backend_supports_training(device: str): + if not is_torch_available(): + return False + + if device not in BACKEND_SUPPORTS_TRAINING: + device = "default" + + return BACKEND_SUPPORTS_TRAINING[device] + + def randn_tensor( shape: Union[Tuple, List], generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None, @@ -197,3 +308,30 @@ def device_synchronize(device_type: Optional[str] = None): device_type = get_device() device_mod = getattr(torch, device_type, torch.cuda) device_mod.synchronize() + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + # Enable PyTorch deterministic mode. 
This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.backends.cuda.matmul.allow_tf32 = False + + +def disable_full_determinism(): + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" + torch.use_deterministic_algorithms(False) + + +torch_device = get_device() From ce12925a2335260d79656664571407d28d8571bb Mon Sep 17 00:00:00 2001 From: DN6 Date: Tue, 26 Aug 2025 09:39:50 +0530 Subject: [PATCH 3/7] update --- examples/conftest.py | 9 +- examples/controlnet/train_controlnet_sd3.py | 5 +- examples/vqgan/test_vqgan.py | 8 +- src/diffusers/utils/testing_utils.py | 99 +++++++++++++++------ 4 files changed, 87 insertions(+), 34 deletions(-) diff --git a/examples/conftest.py b/examples/conftest.py index 9b8996430fd1..ff7543ba8286 100644 --- a/examples/conftest.py +++ b/examples/conftest.py @@ -25,6 +25,11 @@ git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) +# Add parent directory to path so we can import from tests +repo_root = abspath(dirname(dirname(__file__))) +if repo_root not in sys.path: + sys.path.insert(0, repo_root) + # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality @@ -32,13 +37,13 @@ def pytest_addoption(parser): - from diffusers.utils.testing_utils import pytest_addoption_shared + from tests.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser) def pytest_terminal_summary(terminalreporter): - from diffusers.utils.testing_utils import pytest_terminal_summary_main + from tests.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: diff --git a/examples/controlnet/train_controlnet_sd3.py b/examples/controlnet/train_controlnet_sd3.py index 20ef5c31b9f1..1d6fc57640c3 100644 --- a/examples/controlnet/train_controlnet_sd3.py +++ b/examples/controlnet/train_controlnet_sd3.py @@ -24,6 +24,8 @@ import os import random import shutil + +# Add repo root to path to import from tests from pathlib import Path import accelerate @@ -54,8 +56,7 @@ from diffusers.training_utils import compute_density_for_timestep_sampling, compute_loss_weighting_for_sd3, free_memory from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card -from diffusers.utils.testing_utils import backend_empty_cache -from diffusers.utils.torch_utils import is_compiled_module +from diffusers.utils.torch_utils import backend_empty_cache, is_compiled_module if is_wandb_available(): diff --git a/examples/vqgan/test_vqgan.py b/examples/vqgan/test_vqgan.py index d13e102e7816..a3c8ee1e84b1 100644 --- a/examples/vqgan/test_vqgan.py +++ b/examples/vqgan/test_vqgan.py @@ -24,12 +24,18 @@ import torch from diffusers import VQModel -from diffusers.utils.testing_utils import require_timm +# Add parent directories to path to import from tests sys.path.append("..") +repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), 
"../..")) +if repo_root not in sys.path: + sys.path.insert(0, repo_root) + from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 +from tests.testing_utils import require_timm # noqa + logging.basicConfig(level=logging.DEBUG) diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index 1231cea6c8e6..6d6a7d6ce4bb 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -68,7 +68,7 @@ logger = get_logger(__name__) logger.warning( "diffusers.utils.testing_utils' is deprecated and will be removed in a future version. " - "Please use `diffusers.utils.torch_utils` instead. " + "Determinism and device backend utilities have been moved to `diffusers.utils.torch_utils`. " ) _required_peft_version = is_peft_available() and version.parse( version.parse(importlib.metadata.version("peft")).base_version @@ -804,10 +804,9 @@ def export_to_ply(mesh, output_ply_path: str = None): f.write(format.pack(*vertex)) if faces is not None: - format = struct.Struct(" Date: Tue, 26 Aug 2025 10:37:30 +0530 Subject: [PATCH 4/7] update --- src/diffusers/utils/torch_utils.py | 107 ++++++++++++++--------------- 1 file changed, 52 insertions(+), 55 deletions(-) diff --git a/src/diffusers/utils/torch_utils.py b/src/diffusers/utils/torch_utils.py index 4bbbe059a95f..a1ab8cda431f 100644 --- a/src/diffusers/utils/torch_utils.py +++ b/src/diffusers/utils/torch_utils.py @@ -27,6 +27,56 @@ import torch from torch.fft import fftn, fftshift, ifftn, ifftshift + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} + BACKEND_EMPTY_CACHE = { + "cuda": torch.cuda.empty_cache, + "xpu": torch.xpu.empty_cache, + "cpu": None, + "mps": torch.mps.empty_cache, + "default": None, + } + BACKEND_DEVICE_COUNT = { + "cuda": torch.cuda.device_count, + "xpu": torch.xpu.device_count, + "cpu": lambda: 0, + "mps": lambda: 0, + "default": 0, + } + BACKEND_MANUAL_SEED = { + "cuda": torch.cuda.manual_seed, + "xpu": torch.xpu.manual_seed, + "cpu": torch.manual_seed, + "mps": torch.mps.manual_seed, + "default": torch.manual_seed, + } + BACKEND_RESET_PEAK_MEMORY_STATS = { + "cuda": torch.cuda.reset_peak_memory_stats, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_RESET_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.reset_max_memory_allocated, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.max_memory_allocated, + "xpu": getattr(torch.xpu, "max_memory_allocated", None), + "cpu": 0, + "mps": 0, + "default": 0, + } + BACKEND_SYNCHRONIZE = { + "cuda": torch.cuda.synchronize, + "xpu": getattr(torch.xpu, "synchronize", None), + "cpu": None, + "mps": None, + "default": None, + } logger = logging.get_logger(__name__) # pylint: disable=invalid-name try: @@ -37,60 +87,6 @@ def maybe_allow_in_graph(cls): return cls -# Behaviour flags -BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} -# Function definitions -BACKEND_EMPTY_CACHE = { - "cuda": torch.cuda.empty_cache, - "xpu": torch.xpu.empty_cache, - "cpu": None, - "mps": torch.mps.empty_cache, - "default": None, -} -BACKEND_DEVICE_COUNT = { - "cuda": torch.cuda.device_count, - "xpu": torch.xpu.device_count, - "cpu": lambda: 0, - "mps": lambda: 0, - "default": 0, -} -BACKEND_MANUAL_SEED = { - "cuda": torch.cuda.manual_seed, - "xpu": 
torch.xpu.manual_seed, - "cpu": torch.manual_seed, - "mps": torch.mps.manual_seed, - "default": torch.manual_seed, -} -BACKEND_RESET_PEAK_MEMORY_STATS = { - "cuda": torch.cuda.reset_peak_memory_stats, - "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), - "cpu": None, - "mps": None, - "default": None, -} -BACKEND_RESET_MAX_MEMORY_ALLOCATED = { - "cuda": torch.cuda.reset_max_memory_allocated, - "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), - "cpu": None, - "mps": None, - "default": None, -} -BACKEND_MAX_MEMORY_ALLOCATED = { - "cuda": torch.cuda.max_memory_allocated, - "xpu": getattr(torch.xpu, "max_memory_allocated", None), - "cpu": 0, - "mps": 0, - "default": 0, -} -BACKEND_SYNCHRONIZE = { - "cuda": torch.cuda.synchronize, - "xpu": getattr(torch.xpu, "synchronize", None), - "cpu": None, - "mps": None, - "default": None, -} - - # This dispatches a defined function according to the accelerator from the function definitions. def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): if device not in dispatch_table: @@ -334,4 +330,5 @@ def disable_full_determinism(): torch.use_deterministic_algorithms(False) -torch_device = get_device() +if is_torch_available(): + torch_device = get_device() From 2a52a25b9ac264f0cf84e35365f6c7b6ef6bc3cd Mon Sep 17 00:00:00 2001 From: DN6 Date: Tue, 26 Aug 2025 10:50:02 +0530 Subject: [PATCH 5/7] update --- .../test_modular_pipeline_stable_diffusion_xl.py | 8 ++++---- tests/others/__init__.py | 0 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 tests/others/__init__.py diff --git a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py index 86062394a018..efc91416d0e2 100644 --- a/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py +++ b/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py @@ -31,14 +31,14 @@ from ...models.unets.test_models_unet_2d_condition import ( create_ip_adapter_state_dict, ) -from ..test_modular_pipelines_common import ( - ModularPipelineTesterMixin, -) -from ..testing_utils import ( +from ...testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) +from ..test_modular_pipelines_common import ( + ModularPipelineTesterMixin, +) enable_full_determinism() diff --git a/tests/others/__init__.py b/tests/others/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From 65efbcead58644b31596ed2d714f7cee0e0238d3 Mon Sep 17 00:00:00 2001 From: DN6 Date: Thu, 28 Aug 2025 14:56:46 +0530 Subject: [PATCH 6/7] merge main --- .github/workflows/pr_flax_dependency_test.yml | 38 - README.md | 10 +- docker/diffusers-flax-cpu/Dockerfile | 49 - docker/diffusers-flax-tpu/Dockerfile | 51 - docs/source/en/_toctree.yml | 12 +- docs/source/en/api/models/autoencoderkl.md | 12 - docs/source/en/api/models/controlnet.md | 8 - docs/source/en/api/models/overview.md | 4 - docs/source/en/api/models/unet2d-cond.md | 6 - docs/source/en/api/outputs.md | 4 - docs/source/en/api/pipelines/controlnet.md | 8 - docs/source/en/api/pipelines/overview.md | 4 - docs/source/en/api/pipelines/skyreels_v2.md | 275 +++--- .../api/pipelines/stable_diffusion/img2img.md | 10 - .../api/pipelines/stable_diffusion/inpaint.md | 10 - .../pipelines/stable_diffusion/text2img.md | 10 - docs/source/en/api/pipelines/wan.md | 4 +- docs/source/en/installation.md | 25 +- 
docs/source/en/optimization/fp16.md | 2 +- .../en/optimization/speed-memory-optims.md | 5 +- docs/source/en/training/controlnet.md | 83 +- docs/source/en/training/dreambooth.md | 80 +- docs/source/en/training/kandinsky.md | 2 +- docs/source/en/training/lora.md | 14 - docs/source/en/training/overview.md | 26 +- docs/source/en/training/sdxl.md | 2 +- docs/source/en/training/text2image.md | 85 +- docs/source/en/training/text_inversion.md | 84 +- .../en/tutorials/using_peft_for_inference.md | 4 +- docs/source/en/using-diffusers/loading.md | 641 +++--------- .../en/using-diffusers/other-formats.md | 3 +- .../en/using-diffusers/reusing_seeds.md | 119 +-- docs/source/en/using-diffusers/schedulers.md | 47 - .../stable_diffusion_jax_how_to.md | 225 ----- .../source/en/using-diffusers/text-img2vid.md | 2 +- examples/community/README.md | 2 + .../community/composable_stable_diffusion.py | 2 +- examples/community/imagic_stable_diffusion.py | 2 +- examples/community/img2img_inpainting.py | 2 +- .../community/interpolate_stable_diffusion.py | 2 +- examples/community/lpw_stable_diffusion.py | 4 +- .../community/lpw_stable_diffusion_onnx.py | 4 +- examples/community/lpw_stable_diffusion_xl.py | 2 +- .../multilingual_stable_diffusion.py | 2 +- .../pipeline_controlnet_xl_kolors.py | 2 +- .../pipeline_controlnet_xl_kolors_img2img.py | 2 +- .../pipeline_controlnet_xl_kolors_inpaint.py | 2 +- .../community/pipeline_demofusion_sdxl.py | 2 +- .../pipeline_faithdiff_stable_diffusion_xl.py | 2 +- .../pipeline_flux_differential_img2img.py | 4 +- .../pipeline_flux_kontext_multiple_images.py | 2 +- .../community/pipeline_flux_rf_inversion.py | 2 +- .../pipeline_flux_semantic_guidance.py | 2 +- examples/community/pipeline_flux_with_cfg.py | 2 +- .../pipeline_kolors_differential_img2img.py | 2 +- .../community/pipeline_kolors_inpainting.py | 2 +- examples/community/pipeline_prompt2prompt.py | 2 +- .../community/pipeline_sdxl_style_aligned.py | 2 +- ...stable_diffusion_3_differential_img2img.py | 2 +- ...ine_stable_diffusion_3_instruct_pix2pix.py | 2 +- ...ne_stable_diffusion_xl_attentive_eraser.py | 2 +- ..._stable_diffusion_xl_controlnet_adapter.py | 2 +- ...diffusion_xl_controlnet_adapter_inpaint.py | 2 +- ...table_diffusion_xl_differential_img2img.py | 2 +- .../pipeline_stable_diffusion_xl_ipex.py | 2 +- examples/community/pipeline_stg_cogvideox.py | 2 +- examples/community/pipeline_stg_ltx.py | 2 +- .../community/pipeline_stg_ltx_image2video.py | 2 +- examples/community/pipeline_stg_mochi.py | 2 +- examples/community/pipeline_zero1to3.py | 2 +- examples/community/rerender_a_video.py | 2 +- examples/community/run_onnx_controlnet.py | 2 +- examples/community/run_tensorrt_controlnet.py | 2 +- examples/community/sd_text2img_k_diffusion.py | 2 +- .../community/seed_resize_stable_diffusion.py | 2 +- .../community/stable_diffusion_comparison.py | 2 +- .../stable_diffusion_controlnet_img2img.py | 2 +- .../stable_diffusion_controlnet_inpaint.py | 2 +- ...le_diffusion_controlnet_inpaint_img2img.py | 2 +- .../stable_diffusion_controlnet_reference.py | 2 +- examples/community/stable_diffusion_ipex.py | 2 +- .../community/stable_diffusion_reference.py | 2 +- .../community/stable_diffusion_repaint.py | 2 +- .../stable_diffusion_xl_reference.py | 2 +- examples/community/text_inpainting.py | 2 +- examples/community/tiled_upscaling.py | 2 +- .../community/wildcard_stable_diffusion.py | 2 +- examples/dreambooth/README_qwen.md | 2 +- examples/dreambooth/train_dreambooth_flux.py | 8 + .../dreambooth/train_dreambooth_lora_flux.py | 9 + 
.../train_dreambooth_lora_flux_kontext.py | 8 + .../pipeline_pixart_alpha_controlnet.py | 2 +- .../research_projects/rdm/pipeline_rdm.py | 2 +- src/diffusers/models/attention_flax.py | 30 + .../models/controlnets/controlnet_flax.py | 15 +- src/diffusers/models/embeddings_flax.py | 15 + src/diffusers/models/modeling_flax_utils.py | 4 + src/diffusers/models/resnet_flax.py | 20 + .../models/transformers/transformer_flux.py | 17 +- .../transformers/transformer_skyreels_v2.py | 334 +++++-- .../models/unets/unet_2d_blocks_flax.py | 29 + .../models/unets/unet_2d_condition_flax.py | 10 +- src/diffusers/models/vae_flax.py | 54 +- .../modular_pipelines/modular_pipeline.py | 152 ++- .../stable_diffusion_xl/before_denoise.py | 2 +- .../pipelines/allegro/pipeline_allegro.py | 2 +- .../animatediff/pipeline_animatediff_sdxl.py | 2 +- .../pipelines/aura_flow/pipeline_aura_flow.py | 2 +- .../blip_diffusion/pipeline_blip_diffusion.py | 2 +- src/diffusers/pipelines/bria/pipeline_bria.py | 2 +- .../pipelines/chroma/pipeline_chroma.py | 2 +- .../chroma/pipeline_chroma_img2img.py | 2 +- .../pipelines/cogvideo/pipeline_cogvideox.py | 2 +- .../pipeline_cogvideox_fun_control.py | 2 +- .../pipeline_cogvideox_image2video.py | 2 +- .../pipeline_cogvideox_video2video.py | 2 +- .../cogview3/pipeline_cogview3plus.py | 2 +- .../pipelines/cogview4/pipeline_cogview4.py | 2 +- .../cogview4/pipeline_cogview4_control.py | 2 +- .../pipelines/consisid/pipeline_consisid.py | 2 +- .../pipeline_controlnet_blip_diffusion.py | 2 +- .../pipeline_controlnet_inpaint_sd_xl.py | 2 +- .../pipeline_controlnet_sd_xl_img2img.py | 2 +- ...pipeline_controlnet_union_inpaint_sd_xl.py | 2 +- ...pipeline_controlnet_union_sd_xl_img2img.py | 2 +- .../pipeline_stable_diffusion_3_controlnet.py | 2 +- ...table_diffusion_3_controlnet_inpainting.py | 2 +- .../pipeline_stable_diffusion_pix2pix_zero.py | 4 +- .../pipelines/flux/pipeline_flux_control.py | 2 +- .../flux/pipeline_flux_control_img2img.py | 2 +- .../flux/pipeline_flux_control_inpaint.py | 4 +- .../flux/pipeline_flux_controlnet.py | 2 +- .../pipelines/flux/pipeline_flux_fill.py | 4 +- .../pipelines/flux/pipeline_flux_img2img.py | 2 +- .../pipelines/flux/pipeline_flux_inpaint.py | 4 +- .../pipelines/flux/pipeline_flux_kontext.py | 2 +- .../flux/pipeline_flux_kontext_inpaint.py | 2 +- .../hidream_image/pipeline_hidream_image.py | 2 +- .../pipelines/kandinsky/pipeline_kandinsky.py | 2 +- .../kandinsky/pipeline_kandinsky_combined.py | 6 +- .../kandinsky/pipeline_kandinsky_inpaint.py | 2 +- .../kandinsky/pipeline_kandinsky_prior.py | 4 +- .../kandinsky2_2/pipeline_kandinsky2_2.py | 2 +- .../pipeline_kandinsky2_2_combined.py | 6 +- .../pipeline_kandinsky2_2_controlnet.py | 2 +- .../pipeline_kandinsky2_2_inpainting.py | 2 +- .../pipeline_kandinsky2_2_prior.py | 4 +- .../pipeline_kandinsky2_2_prior_emb2emb.py | 2 +- .../pipelines/kolors/pipeline_kolors.py | 2 +- .../kolors/pipeline_kolors_img2img.py | 2 +- .../pipelines/latte/pipeline_latte.py | 2 +- src/diffusers/pipelines/ltx/pipeline_ltx.py | 2 +- .../pipelines/ltx/pipeline_ltx_condition.py | 2 +- .../pipelines/ltx/pipeline_ltx_image2video.py | 2 +- .../pipelines/lumina/pipeline_lumina.py | 2 +- .../pipelines/lumina2/pipeline_lumina2.py | 2 +- .../pipelines/mochi/pipeline_mochi.py | 2 +- .../pipelines/omnigen/pipeline_omnigen.py | 2 +- .../pipeline_pag_controlnet_sd_xl_img2img.py | 2 +- .../pipelines/pag/pipeline_pag_kolors.py | 2 +- .../pag/pipeline_pag_pixart_sigma.py | 2 +- .../pipelines/pag/pipeline_pag_sana.py | 2 +- 
.../pipelines/pag/pipeline_pag_sd_3.py | 2 +- .../pag/pipeline_pag_sd_3_img2img.py | 2 +- .../pipelines/pag/pipeline_pag_sd_xl.py | 2 +- .../pag/pipeline_pag_sd_xl_img2img.py | 2 +- .../pag/pipeline_pag_sd_xl_inpaint.py | 2 +- .../pipelines/pipeline_flax_utils.py | 5 + src/diffusers/pipelines/pipeline_utils.py | 30 + .../pixart_alpha/pipeline_pixart_alpha.py | 2 +- .../pixart_alpha/pipeline_pixart_sigma.py | 2 +- .../pipelines/qwenimage/pipeline_qwenimage.py | 51 +- .../pipeline_qwenimage_controlnet.py | 46 +- .../qwenimage/pipeline_qwenimage_edit.py | 51 +- .../qwenimage/pipeline_qwenimage_img2img.py | 51 +- .../qwenimage/pipeline_qwenimage_inpaint.py | 53 +- src/diffusers/pipelines/sana/pipeline_sana.py | 2 +- .../sana/pipeline_sana_controlnet.py | 2 +- .../pipelines/sana/pipeline_sana_sprint.py | 2 +- .../sana/pipeline_sana_sprint_img2img.py | 2 +- .../stable_cascade/pipeline_stable_cascade.py | 2 +- .../pipeline_stable_cascade_combined.py | 2 +- .../pipeline_stable_cascade_prior.py | 2 +- .../pipeline_onnx_stable_diffusion.py | 2 +- .../pipeline_onnx_stable_diffusion_inpaint.py | 2 +- .../pipeline_onnx_stable_diffusion_upscale.py | 2 +- .../pipeline_stable_diffusion_3.py | 2 +- .../pipeline_stable_diffusion_3_img2img.py | 2 +- .../pipeline_stable_diffusion_3_inpaint.py | 4 +- .../pipeline_stable_diffusion_k_diffusion.py | 2 +- ...ipeline_stable_diffusion_xl_k_diffusion.py | 2 +- .../pipeline_stable_diffusion_xl.py | 2 +- .../pipeline_stable_diffusion_xl_img2img.py | 2 +- .../pipeline_stable_diffusion_xl_inpaint.py | 2 +- ...ne_stable_diffusion_xl_instruct_pix2pix.py | 2 +- .../pipeline_stable_diffusion_adapter.py | 2 +- .../pipeline_stable_diffusion_xl_adapter.py | 2 +- .../pipeline_text_to_video_zero_sdxl.py | 2 +- .../pipeline_visualcloze_combined.py | 2 +- .../pipeline_visualcloze_generation.py | 2 +- .../wuerstchen/pipeline_wuerstchen.py | 2 +- .../pipeline_wuerstchen_combined.py | 2 +- .../wuerstchen/pipeline_wuerstchen_prior.py | 2 +- .../schedulers/scheduling_utils_flax.py | 8 +- src/diffusers/utils/import_utils.py | 7 +- tests/hooks/test_hooks.py | 1 + .../autoencoders/test_models_vae_flax.py | 39 - tests/models/test_modeling_common_flax.py | 67 -- .../models/unets/test_models_unet_2d_flax.py | 105 -- .../controlnet/test_flax_controlnet.py | 128 --- .../qwenimage/test_qwenimage_controlnet.py | 339 +++++++ .../test_stable_diffusion_flax.py | 109 --- .../test_stable_diffusion_flax_inpaint.py | 83 -- tests/pipelines/test_pipelines_flax.py | 261 ----- tests/schedulers/test_scheduler_flax.py | 921 ------------------ 215 files changed, 1671 insertions(+), 3675 deletions(-) delete mode 100644 .github/workflows/pr_flax_dependency_test.yml delete mode 100644 docker/diffusers-flax-cpu/Dockerfile delete mode 100644 docker/diffusers-flax-tpu/Dockerfile delete mode 100644 docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md delete mode 100644 tests/models/autoencoders/test_models_vae_flax.py delete mode 100644 tests/models/test_modeling_common_flax.py delete mode 100644 tests/models/unets/test_models_unet_2d_flax.py delete mode 100644 tests/pipelines/controlnet/test_flax_controlnet.py create mode 100644 tests/pipelines/qwenimage/test_qwenimage_controlnet.py delete mode 100644 tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py delete mode 100644 tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py delete mode 100644 tests/pipelines/test_pipelines_flax.py delete mode 100644 tests/schedulers/test_scheduler_flax.py diff --git 
a/.github/workflows/pr_flax_dependency_test.yml b/.github/workflows/pr_flax_dependency_test.yml deleted file mode 100644 index e091b5f2d7b3..000000000000 --- a/.github/workflows/pr_flax_dependency_test.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Run Flax dependency tests - -on: - pull_request: - branches: - - main - paths: - - "src/diffusers/**.py" - push: - branches: - - main - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - check_flax_dependencies: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.8" - - name: Install dependencies - run: | - python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH" - python -m pip install --upgrade pip uv - python -m uv pip install -e . - python -m uv pip install "jax[cpu]>=0.2.16,!=0.3.2" - python -m uv pip install "flax>=0.4.1" - python -m uv pip install "jaxlib>=0.1.65" - python -m uv pip install pytest - - name: Check for soft dependencies - run: | - python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH" - pytest tests/others/test_dependencies.py diff --git a/README.md b/README.md index dac3b3598aaf..68202ba095ee 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ limitations under the License. ## Installation -We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation. +We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/), please refer to their official documentation. ### PyTorch @@ -53,14 +53,6 @@ With `conda` (maintained by the community): conda install -c conda-forge diffusers ``` -### Flax - -With `pip` (official package): - -```bash -pip install --upgrade diffusers[flax] -``` - ### Apple Silicon (M1/M2) support Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide. 
diff --git a/docker/diffusers-flax-cpu/Dockerfile b/docker/diffusers-flax-cpu/Dockerfile deleted file mode 100644 index 051008aa9a2e..000000000000 --- a/docker/diffusers-flax-cpu/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -FROM ubuntu:20.04 -LABEL maintainer="Hugging Face" -LABEL repository="diffusers" - -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt-get -y update \ - && apt-get install -y software-properties-common \ - && add-apt-repository ppa:deadsnakes/ppa - -RUN apt install -y bash \ - build-essential \ - git \ - git-lfs \ - curl \ - ca-certificates \ - libsndfile1-dev \ - libgl1 \ - python3.10 \ - python3-pip \ - python3.10-venv && \ - rm -rf /var/lib/apt/lists - -# make sure to use venv -RUN python3.10 -m venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) -# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container -RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \ - python3 -m uv pip install --upgrade --no-cache-dir \ - clu \ - "jax[cpu]>=0.2.16,!=0.3.2" \ - "flax>=0.4.1" \ - "jaxlib>=0.1.65" && \ - python3 -m uv pip install --no-cache-dir \ - accelerate \ - datasets \ - hf-doc-builder \ - huggingface-hub \ - Jinja2 \ - librosa \ - numpy==1.26.4 \ - scipy \ - tensorboard \ - transformers \ - hf_transfer - -CMD ["/bin/bash"] \ No newline at end of file diff --git a/docker/diffusers-flax-tpu/Dockerfile b/docker/diffusers-flax-tpu/Dockerfile deleted file mode 100644 index 405f068923b7..000000000000 --- a/docker/diffusers-flax-tpu/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -FROM ubuntu:20.04 -LABEL maintainer="Hugging Face" -LABEL repository="diffusers" - -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt-get -y update \ - && apt-get install -y software-properties-common \ - && add-apt-repository ppa:deadsnakes/ppa - -RUN apt install -y bash \ - build-essential \ - git \ - git-lfs \ - curl \ - ca-certificates \ - libsndfile1-dev \ - libgl1 \ - python3.10 \ - python3-pip \ - python3.10-venv && \ - rm -rf /var/lib/apt/lists - -# make sure to use venv -RUN python3.10 -m venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) -# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container -RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \ - python3 -m pip install --no-cache-dir \ - "jax[tpu]>=0.2.16,!=0.3.2" \ - -f https://storage.googleapis.com/jax-releases/libtpu_releases.html && \ - python3 -m uv pip install --upgrade --no-cache-dir \ - clu \ - "flax>=0.4.1" \ - "jaxlib>=0.1.65" && \ - python3 -m uv pip install --no-cache-dir \ - accelerate \ - datasets \ - hf-doc-builder \ - huggingface-hub \ - Jinja2 \ - librosa \ - numpy==1.26.4 \ - scipy \ - tensorboard \ - transformers \ - hf_transfer - -CMD ["/bin/bash"] \ No newline at end of file diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 42558b636cd2..a0ddf8f25654 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -9,11 +9,11 @@ - local: stable_diffusion title: Basic performance -- title: DiffusionPipeline +- title: Pipelines isExpanded: false sections: - local: using-diffusers/loading - title: Load pipelines + title: DiffusionPipeline - local: tutorials/autopipeline title: AutoPipeline - local: using-diffusers/custom_pipeline_overview @@ -21,7 
+21,7 @@ - local: using-diffusers/callback title: Pipeline callbacks - local: using-diffusers/reusing_seeds - title: Reproducible pipelines + title: Reproducibility - local: using-diffusers/schedulers title: Load schedulers and models - local: using-diffusers/scheduler_features @@ -62,8 +62,6 @@ title: Scheduler features - local: using-diffusers/callback title: Pipeline callbacks - - local: using-diffusers/reusing_seeds - title: Reproducible pipelines - local: using-diffusers/image_quality title: Controlling image quality @@ -77,7 +75,7 @@ - local: optimization/memory title: Reduce memory usage - local: optimization/speed-memory-optims - title: Compile and offloading quantized models + title: Compiling and offloading quantized models - title: Community optimizations sections: - local: optimization/pruna @@ -194,8 +192,6 @@ - title: Model accelerators and hardware isExpanded: false sections: - - local: using-diffusers/stable_diffusion_jax_how_to - title: JAX/Flax - local: optimization/onnx title: ONNX - local: optimization/open_vino diff --git a/docs/source/en/api/models/autoencoderkl.md b/docs/source/en/api/models/autoencoderkl.md index baeab4017be3..3d949e9bb06c 100644 --- a/docs/source/en/api/models/autoencoderkl.md +++ b/docs/source/en/api/models/autoencoderkl.md @@ -44,15 +44,3 @@ model = AutoencoderKL.from_single_file(url) ## DecoderOutput [[autodoc]] models.autoencoders.vae.DecoderOutput - -## FlaxAutoencoderKL - -[[autodoc]] FlaxAutoencoderKL - -## FlaxAutoencoderKLOutput - -[[autodoc]] models.vae_flax.FlaxAutoencoderKLOutput - -## FlaxDecoderOutput - -[[autodoc]] models.vae_flax.FlaxDecoderOutput diff --git a/docs/source/en/api/models/controlnet.md b/docs/source/en/api/models/controlnet.md index 7ce14f17d56a..f56b7383a0d7 100644 --- a/docs/source/en/api/models/controlnet.md +++ b/docs/source/en/api/models/controlnet.md @@ -40,11 +40,3 @@ pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=contro ## ControlNetOutput [[autodoc]] models.controlnets.controlnet.ControlNetOutput - -## FlaxControlNetModel - -[[autodoc]] FlaxControlNetModel - -## FlaxControlNetOutput - -[[autodoc]] models.controlnets.controlnet_flax.FlaxControlNetOutput diff --git a/docs/source/en/api/models/overview.md b/docs/source/en/api/models/overview.md index 1c6a2092e684..eb9722739f99 100644 --- a/docs/source/en/api/models/overview.md +++ b/docs/source/en/api/models/overview.md @@ -19,10 +19,6 @@ All models are built from the base [`ModelMixin`] class which is a [`torch.nn.Mo ## ModelMixin [[autodoc]] ModelMixin -## FlaxModelMixin - -[[autodoc]] FlaxModelMixin - ## PushToHubMixin [[autodoc]] utils.PushToHubMixin diff --git a/docs/source/en/api/models/unet2d-cond.md b/docs/source/en/api/models/unet2d-cond.md index 175fb1122019..99a7c41ab286 100644 --- a/docs/source/en/api/models/unet2d-cond.md +++ b/docs/source/en/api/models/unet2d-cond.md @@ -23,9 +23,3 @@ The abstract from the paper is: ## UNet2DConditionOutput [[autodoc]] models.unets.unet_2d_condition.UNet2DConditionOutput - -## FlaxUNet2DConditionModel -[[autodoc]] models.unets.unet_2d_condition_flax.FlaxUNet2DConditionModel - -## FlaxUNet2DConditionOutput -[[autodoc]] models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput diff --git a/docs/source/en/api/outputs.md b/docs/source/en/api/outputs.md index bed92f10f94a..a13bc89f2bea 100644 --- a/docs/source/en/api/outputs.md +++ b/docs/source/en/api/outputs.md @@ -54,10 +54,6 @@ To check a specific pipeline or model output, refer to its corresponding API doc [[autodoc]] 
pipelines.ImagePipelineOutput -## FlaxImagePipelineOutput - -[[autodoc]] pipelines.pipeline_flax_utils.FlaxImagePipelineOutput - ## AudioPipelineOutput [[autodoc]] pipelines.AudioPipelineOutput diff --git a/docs/source/en/api/pipelines/controlnet.md b/docs/source/en/api/pipelines/controlnet.md index eea3473d3609..2a654a37357f 100644 --- a/docs/source/en/api/pipelines/controlnet.md +++ b/docs/source/en/api/pipelines/controlnet.md @@ -72,11 +72,3 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput - -## FlaxStableDiffusionControlNetPipeline -[[autodoc]] FlaxStableDiffusionControlNetPipeline - - all - - __call__ - -## FlaxStableDiffusionControlNetPipelineOutput -[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/overview.md b/docs/source/en/api/pipelines/overview.md index b5e3825fef6d..d3cc318a5459 100644 --- a/docs/source/en/api/pipelines/overview.md +++ b/docs/source/en/api/pipelines/overview.md @@ -106,10 +106,6 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an [[autodoc]] pipelines.StableDiffusionMixin.disable_freeu -## FlaxDiffusionPipeline - -[[autodoc]] pipelines.pipeline_flax_utils.FlaxDiffusionPipeline - ## PushToHubMixin [[autodoc]] utils.PushToHubMixin diff --git a/docs/source/en/api/pipelines/skyreels_v2.md b/docs/source/en/api/pipelines/skyreels_v2.md index cd94f2a75c08..6730f1551607 100644 --- a/docs/source/en/api/pipelines/skyreels_v2.md +++ b/docs/source/en/api/pipelines/skyreels_v2.md @@ -1,4 +1,4 @@ - -# Compile and offloading quantized models +# Compiling and offloading quantized models Optimizing models often involves trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it also increases memory consumption since it needs to store the outputs of intermediate attention layers. A more balanced optimization strategy combines quantizing a model, [torch.compile](./fp16#torchcompile) and various [offloading methods](./memory#offloading). @@ -28,7 +28,8 @@ The table below provides a comparison of optimization strategy combinations and | quantization | 32.602 | 14.9453 | | quantization, torch.compile | 25.847 | 14.9448 | | quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 | -These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) if you're interested in evaluating your own model. + +These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the benchmarking script if you're interested in evaluating your own model. This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes. diff --git a/docs/source/en/training/controlnet.md b/docs/source/en/training/controlnet.md index 0170ff3da9ea..17da819db84b 100644 --- a/docs/source/en/training/controlnet.md +++ b/docs/source/en/training/controlnet.md @@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License. 
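A minimal sketch of the combined strategy (quantization, torch.compile, and model CPU offloading) described in the speed-memory-optims guide above. The `black-forest-labs/FLUX.1-dev` checkpoint and the 4-bit bitsandbytes settings are illustrative assumptions, not part of the benchmark setup quoted above:

```py
import torch
from diffusers import BitsAndBytesConfig, FluxPipeline, FluxTransformer2DModel

# quantize the transformer to 4-bit to reduce memory (settings are illustrative)
quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # assumed checkpoint for illustration
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)

pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# offload idle components to the CPU, then compile the quantized transformer
pipeline.enable_model_cpu_offload()
pipeline.transformer = torch.compile(pipeline.transformer)

image = pipeline("a photo of a cat", num_inference_steps=28).images[0]
```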
[ControlNet](https://hf.co/papers/2302.05543) models are adapters trained on top of another pretrained model. It allows for a greater degree of control over image generation by conditioning the model with an additional input image. The input image can be a canny edge, depth map, human pose, and many more. -If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing`, `gradient_accumulation_steps`, and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. You should have a GPU with >30GB of memory if you want to train faster with Flax. +If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing`, `gradient_accumulation_steps`, and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). This guide will explore the [train_controlnet.py](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py) training script to help you become familiar with it, and how you can adapt it for your own use-case. @@ -28,45 +28,10 @@ pip install . Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: - - ```bash cd examples/controlnet pip install -r requirements.txt ``` - - - -If you have access to a TPU, the Flax training script runs even faster! Let's run the training script on the [Google Cloud TPU VM](https://cloud.google.com/tpu/docs/run-calculation-jax). Create a single TPU v4-8 VM and connect to it: - -```bash -ZONE=us-central2-b -TPU_TYPE=v4-8 -VM_NAME=hg_flax - -gcloud alpha compute tpus tpu-vm create $VM_NAME \ - --zone $ZONE \ - --accelerator-type $TPU_TYPE \ - --version tpu-vm-v4-base - -gcloud alpha compute tpus tpu-vm ssh $VM_NAME --zone $ZONE -- \ -``` - -Install JAX 0.4.5: - -```bash -pip install "jax[tpu]==0.4.5" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html -``` - -Then install the required dependencies for the Flax script: - -```bash -cd examples/controlnet -pip install -r requirements_flax.txt -``` - - - @@ -120,7 +85,7 @@ Many of the basic and important parameters are described in the [Text-to-image]( ### Min-SNR weighting -The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script. +The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch. Add the `--snr_gamma` parameter and set it to the recommended value of 5.0: @@ -272,9 +237,6 @@ That's it! 
You don't need to add any additional parameters to your training comm - - - ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path/to/save/model" @@ -292,47 +254,6 @@ accelerate launch train_controlnet.py \ --push_to_hub ``` - - - -With Flax, you can [profile your code](https://jax.readthedocs.io/en/latest/profiling.html) by adding the `--profile_steps==5` parameter to your training command. Install the Tensorboard profile plugin: - -```bash -pip install tensorflow tensorboard-plugin-profile -tensorboard --logdir runs/fill-circle-100steps-20230411_165612/ -``` - -Then you can inspect the profile at [http://localhost:6006/#profile](http://localhost:6006/#profile). - - - -If you run into version conflicts with the plugin, try uninstalling and reinstalling all versions of TensorFlow and Tensorboard. The debugging functionality of the profile plugin is still experimental, and not all views are fully functional. The `trace_viewer` cuts off events after 1M, which can result in all your device traces getting lost if for example, you profile the compilation step by accident. - - - -```bash -python3 train_controlnet_flax.py \ - --pretrained_model_name_or_path=$MODEL_DIR \ - --output_dir=$OUTPUT_DIR \ - --dataset_name=fusing/fill50k \ - --resolution=512 \ - --learning_rate=1e-5 \ - --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ - --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ - --validation_steps=1000 \ - --train_batch_size=2 \ - --revision="non-ema" \ - --from_pt \ - --report_to="wandb" \ - --tracker_project_name=$HUB_MODEL_ID \ - --num_train_epochs=11 \ - --push_to_hub \ - --hub_model_id=$HUB_MODEL_ID -``` - - - - Once training is complete, you can use your newly trained model for inference! ```py diff --git a/docs/source/en/training/dreambooth.md b/docs/source/en/training/dreambooth.md index cff2bb500dab..3a5ba5aa39c3 100644 --- a/docs/source/en/training/dreambooth.md +++ b/docs/source/en/training/dreambooth.md @@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License. [DreamBooth](https://huggingface.co/papers/2208.12242) is a training technique that updates the entire diffusion model by training on just a few images of a subject or style. It works by associating a special word in the prompt with the example images. -If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. You should have a GPU with >30GB of memory if you want to train faster with Flax. +If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). This guide will explore the [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. @@ -28,25 +28,11 @@ pip install . 
Navigate to the example folder with the training script and install the required dependencies for the script you're using: - - - ```bash cd examples/dreambooth pip install -r requirements.txt ``` - - - -```bash -cd examples/dreambooth -pip install -r requirements_flax.txt -``` - - - - 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. @@ -110,7 +96,7 @@ Some basic and important parameters to know and specify are: ### Min-SNR weighting -The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script. +The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch. Add the `--snr_gamma` parameter and set it to the recommended value of 5.0: @@ -311,9 +297,6 @@ That's it! You don't need to add any additional parameters to your training comm - - - ```bash export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" export INSTANCE_DIR="./dog" @@ -334,29 +317,6 @@ accelerate launch train_dreambooth.py \ --push_to_hub ``` - - - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export INSTANCE_DIR="./dog" -export OUTPUT_DIR="path-to-save-model" - -python train_dreambooth_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --learning_rate=5e-6 \ - --max_train_steps=400 \ - --push_to_hub -``` - - - - Once training is complete, you can use your newly trained model for inference! 
@@ -383,9 +343,6 @@ image.save("dog-bucket.png") - - - ```py from diffusers import DiffusionPipeline import torch @@ -395,39 +352,6 @@ image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guida image.save("dog-bucket.png") ``` - - - -```py -import jax -import numpy as np -from flax.jax_utils import replicate -from flax.training.common_utils import shard -from diffusers import FlaxStableDiffusionPipeline - -pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path-to-your-trained-model", dtype=jax.numpy.bfloat16) - -prompt = "A photo of sks dog in a bucket" -prng_seed = jax.random.PRNGKey(0) -num_inference_steps = 50 - -num_samples = jax.device_count() -prompt = num_samples * [prompt] -prompt_ids = pipeline.prepare_inputs(prompt) - -# shard inputs and rng -params = replicate(params) -prng_seed = jax.random.split(prng_seed, jax.device_count()) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images -images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) -image.save("dog-bucket.png") -``` - - - - ## LoRA LoRA is a training technique for significantly reducing the number of trainable parameters. As a result, training is faster and it is easier to store the resulting weights because they are a lot smaller (~100MBs). Use the [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) script to train with LoRA. diff --git a/docs/source/en/training/kandinsky.md b/docs/source/en/training/kandinsky.md index 77f7af03b801..561bc1c351b7 100644 --- a/docs/source/en/training/kandinsky.md +++ b/docs/source/en/training/kandinsky.md @@ -88,7 +88,7 @@ Most of the parameters are identical to the parameters in the [Text-to-image](te ### Min-SNR weighting -The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script. +The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch. Add the `--snr_gamma` parameter and set it to the recommended value of 5.0: diff --git a/docs/source/en/training/lora.md b/docs/source/en/training/lora.md index 9a3512dd76df..e97d8acdac46 100644 --- a/docs/source/en/training/lora.md +++ b/docs/source/en/training/lora.md @@ -38,25 +38,11 @@ pip install . Navigate to the example folder with the training script and install the required dependencies for the script you're using: - - - ```bash cd examples/text_to_image pip install -r requirements.txt ``` - - - -```bash -cd examples/text_to_image -pip install -r requirements_flax.txt -``` - - - - 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. 
diff --git a/docs/source/en/training/overview.md b/docs/source/en/training/overview.md index 032900d9ac20..55d6b1966137 100644 --- a/docs/source/en/training/overview.md +++ b/docs/source/en/training/overview.md @@ -23,18 +23,18 @@ Each training script is: Our current collection of training scripts include: -| Training | SDXL-support | LoRA-support | Flax-support | -|---|---|---|---| -| [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | | | -| [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 | 👍 | -| [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | | 👍 | -| [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 | 👍 | -| [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | | 👍 | -| [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | | | -| [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | | | -| [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | | | -| [Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 | | -| [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 | | +| Training | SDXL-support | LoRA-support | +|---|---|---| +| [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | | +| [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 | +| [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | | +| [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 | +| [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | | +| [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | | +| [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | | +| [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | | +| 
[Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 | +| [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 | These examples are **actively** maintained, so please feel free to open an issue if they aren't working as expected. If you feel like another training example should be included, you're more than welcome to start a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) to discuss your feature idea with us and whether it meets our criteria of being self-contained, easy-to-tweak, beginner-friendly, and single-purpose. @@ -48,7 +48,7 @@ cd diffusers pip install . ``` -Then navigate to the folder of the training script (for example, [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)) and install the `requirements.txt` file. Some training scripts have a specific requirement file for SDXL, LoRA or Flax. If you're using one of these scripts, make sure you install its corresponding requirements file. +Then navigate to the folder of the training script (for example, [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)) and install the `requirements.txt` file. Some training scripts have a specific requirement file for SDXL or LoRA. If you're using one of these scripts, make sure you install its corresponding requirements file. ```bash cd examples/dreambooth diff --git a/docs/source/en/training/sdxl.md b/docs/source/en/training/sdxl.md index da8b93b6d690..12051b7c2d11 100644 --- a/docs/source/en/training/sdxl.md +++ b/docs/source/en/training/sdxl.md @@ -96,7 +96,7 @@ Most of the parameters are identical to the parameters in the [Text-to-image](te ### Min-SNR weighting -The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting either `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script. +The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting either `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch. Add the `--snr_gamma` parameter and set it to the recommended value of 5.0: diff --git a/docs/source/en/training/text2image.md b/docs/source/en/training/text2image.md index 182621e89bdf..5212fe8393bc 100644 --- a/docs/source/en/training/text2image.md +++ b/docs/source/en/training/text2image.md @@ -20,7 +20,7 @@ The text-to-image script is experimental, and it's easy to overfit and run into Text-to-image models like Stable Diffusion are conditioned to generate images given a text prompt. -Training a model can be taxing on your hardware, but if you enable `gradient_checkpointing` and `mixed_precision`, it is possible to train a model on a single 24GB GPU. If you're training with larger batch sizes or want to train faster, it's better to use GPUs with more than 30GB of memory. You can reduce your memory footprint by enabling memory-efficient attention with [xFormers](../optimization/xformers). 
JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing, gradient accumulation or xFormers. A GPU with at least 30GB of memory or a TPU v3 is recommended for training with Flax. +Training a model can be taxing on your hardware, but if you enable `gradient_checkpointing` and `mixed_precision`, it is possible to train a model on a single 24GB GPU. If you're training with larger batch sizes or want to train faster, it's better to use GPUs with more than 30GB of memory. You can reduce your memory footprint by enabling memory-efficient attention with [xFormers](../optimization/xformers). This guide will explore the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) training script to help you become familiar with it, and how you can adapt it for your own use-case. @@ -34,20 +34,10 @@ pip install . Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: - - ```bash cd examples/text_to_image pip install -r requirements.txt ``` - - -```bash -cd examples/text_to_image -pip install -r requirements_flax.txt -``` - - @@ -106,7 +96,7 @@ Some basic and important parameters include: ### Min-SNR weighting -The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script. +The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch. Add the `--snr_gamma` parameter and set it to the recommended value of 5.0: @@ -155,9 +145,6 @@ Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/8959c5 Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀 - - - Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. @@ -187,43 +174,8 @@ accelerate launch --mixed_precision="fp16" train_text_to_image.py \ --push_to_hub ``` - - - -Training with Flax can be faster on TPUs and GPUs thanks to [@duongna211](https://github.com/duongna21). Flax is more efficient on a TPU, but GPU performance is also great. - -Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). - - - -To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model to. 
- - - -```bash -export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" -export dataset_name="lambdalabs/naruto-blip-captions" - -python train_text_to_image_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --dataset_name=$dataset_name \ - --resolution=512 --center_crop --random_flip \ - --train_batch_size=1 \ - --max_train_steps=15000 \ - --learning_rate=1e-05 \ - --max_grad_norm=1 \ - --output_dir="sd-naruto-model" \ - --push_to_hub -``` - - - - Once training is complete, you can use your newly trained model for inference: - - - ```py from diffusers import StableDiffusionPipeline import torch @@ -234,39 +186,6 @@ image = pipeline(prompt="yoda").images[0] image.save("yoda-naruto.png") ``` - - - -```py -import jax -import numpy as np -from flax.jax_utils import replicate -from flax.training.common_utils import shard -from diffusers import FlaxStableDiffusionPipeline - -pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16) - -prompt = "yoda naruto" -prng_seed = jax.random.PRNGKey(0) -num_inference_steps = 50 - -num_samples = jax.device_count() -prompt = num_samples * [prompt] -prompt_ids = pipeline.prepare_inputs(prompt) - -# shard inputs and rng -params = replicate(params) -prng_seed = jax.random.split(prng_seed, jax.device_count()) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images -images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) -image.save("yoda-naruto.png") -``` - - - - ## Next steps Congratulations on training your own text-to-image model! To learn more about how to use your new model, the following guides may be helpful: diff --git a/docs/source/en/training/text_inversion.md b/docs/source/en/training/text_inversion.md index b7083ae589ed..91af2f6afb81 100644 --- a/docs/source/en/training/text_inversion.md +++ b/docs/source/en/training/text_inversion.md @@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License. [Textual Inversion](https://hf.co/papers/2208.01618) is a training technique for personalizing image generation models with just a few example images of what you want it to learn. This technique works by learning and updating the text embeddings (the new embeddings are tied to a special word you must use in the prompt) to match the example images you provide. -If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. With the same configuration and setup as PyTorch, the Flax training script should be at least ~70% faster! +If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). This guide will explore the [textual_inversion.py](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. @@ -28,25 +28,10 @@ pip install . 
Navigate to the example folder with the training script and install the required dependencies for the script you're using: - - - ```bash cd examples/textual_inversion pip install -r requirements.txt ``` - - - - -```bash -cd examples/textual_inversion -pip install -r requirements_flax.txt -``` - - - - 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. @@ -189,9 +174,6 @@ One more thing before you launch the script. If you're interested in following a --validation_steps=100 ``` - - - ```bash export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" export DATA_DIR="./cat" @@ -214,36 +196,8 @@ accelerate launch textual_inversion.py \ --push_to_hub ``` - - - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export DATA_DIR="./cat" - -python textual_inversion_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$DATA_DIR \ - --learnable_property="object" \ - --placeholder_token="" \ - --initializer_token="toy" \ - --resolution=512 \ - --train_batch_size=1 \ - --max_train_steps=3000 \ - --learning_rate=5.0e-04 \ - --scale_lr \ - --output_dir="textual_inversion_cat" \ - --push_to_hub -``` - - - - After training is complete, you can use your newly trained model for inference like: - - - ```py from diffusers import StableDiffusionPipeline import torch @@ -254,42 +208,6 @@ image = pipeline("A train", num_inference_steps=50).images[0] image.save("cat-train.png") ``` - - - -Flax doesn't support the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] method, but the textual_inversion_flax.py script [saves](https://github.com/huggingface/diffusers/blob/c0f058265161178f2a88849e92b37ffdc81f1dcc/examples/textual_inversion/textual_inversion_flax.py#L636C2-L636C2) the learned embeddings as a part of the model after training. This means you can use the model for inference like any other Flax model: - -```py -import jax -import numpy as np -from flax.jax_utils import replicate -from flax.training.common_utils import shard -from diffusers import FlaxStableDiffusionPipeline - -model_path = "path-to-your-trained-model" -pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16) - -prompt = "A train" -prng_seed = jax.random.PRNGKey(0) -num_inference_steps = 50 - -num_samples = jax.device_count() -prompt = num_samples * [prompt] -prompt_ids = pipeline.prepare_inputs(prompt) - -# shard inputs and rng -params = replicate(params) -prng_seed = jax.random.split(prng_seed, jax.device_count()) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images -images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) -image.save("cat-train.png") -``` - - - - ## Next steps Congratulations on training your own Textual Inversion model! 
🎉 To learn more about how to use your new model, the following guides may be helpful: diff --git a/docs/source/en/tutorials/using_peft_for_inference.md b/docs/source/en/tutorials/using_peft_for_inference.md index 5cd47f8674e1..7bdd2a1ee969 100644 --- a/docs/source/en/tutorials/using_peft_for_inference.md +++ b/docs/source/en/tutorials/using_peft_for_inference.md @@ -94,7 +94,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( pipeline.unet.load_lora_adapter( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", - adapter_name="cinematic" + adapter_name="cinematic", prefix="unet" ) # use cnmt in the prompt to trigger the LoRA @@ -688,4 +688,4 @@ Browse the [LoRA Studio](https://lorastudio.co/models) for different LoRAs to us You can find additional LoRAs in the [FLUX LoRA the Explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer) and [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer) Spaces. -Check out the [Fast LoRA inference for Flux with Diffusers and PEFT](https://huggingface.co/blog/lora-fast) blog post to learn how to optimize LoRA inference with methods like FlashAttention-3 and fp8 quantization. \ No newline at end of file +Check out the [Fast LoRA inference for Flux with Diffusers and PEFT](https://huggingface.co/blog/lora-fast) blog post to learn how to optimize LoRA inference with methods like FlashAttention-3 and fp8 quantization. diff --git a/docs/source/en/using-diffusers/loading.md b/docs/source/en/using-diffusers/loading.md index 20f0cc51e0af..f86ea104cf69 100644 --- a/docs/source/en/using-diffusers/loading.md +++ b/docs/source/en/using-diffusers/loading.md @@ -10,598 +10,267 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Load pipelines - [[open-in-colab]] -Diffusion systems consist of multiple components like parameterized models and schedulers that interact in complex ways. That is why we designed the [`DiffusionPipeline`] to wrap the complexity of the entire diffusion system into an easy-to-use API. At the same time, the [`DiffusionPipeline`] is entirely customizable so you can modify each component to build a diffusion system for your use case. - -This guide will show you how to load: - -- pipelines from the Hub and locally -- different components into a pipeline -- multiple pipelines without increasing memory usage -- checkpoint variants such as different floating point types or non-exponential mean averaged (EMA) weights - -## Load a pipeline - -> [!TIP] -> Skip to the [DiffusionPipeline explained](#diffusionpipeline-explained) section if you're interested in an explanation about how the [`DiffusionPipeline`] class works. +# DiffusionPipeline -There are two ways to load a pipeline for a task: +Diffusion models consist of multiple components like UNets or diffusion transformers (DiTs), text encoders, variational autoencoders (VAEs), and schedulers. The [`DiffusionPipeline`] wraps all of these components into a single easy-to-use API without giving up the flexibility to modify its components. -1. Load the generic [`DiffusionPipeline`] class and allow it to automatically detect the correct pipeline class from the checkpoint. -2. Load a specific pipeline class for a specific task. +This guide will show you how to load a [`DiffusionPipeline`].
- - +## Loading a pipeline -The [`DiffusionPipeline`] class is a simple and generic way to load the latest trending diffusion model from the [Hub](https://huggingface.co/models?library=diffusers&sort=trending). It uses the [`~DiffusionPipeline.from_pretrained`] method to automatically detect the correct pipeline class for a task from the checkpoint, downloads and caches all the required configuration and weight files, and returns a pipeline ready for inference. +[`DiffusionPipeline`] is a base pipeline class that automatically selects and returns an instance of a model's pipeline subclass, like [`QwenImagePipeline`], by scanning the `model_index.json` file for the class name. -```python -from diffusers import DiffusionPipeline - -pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) -``` - -This same checkpoint can also be used for an image-to-image task. The [`DiffusionPipeline`] class can handle any task as long as you provide the appropriate inputs. For example, for an image-to-image task, you need to pass an initial image to the pipeline. +Pass a model id to [`~DiffusionPipeline.from_pretrained`] to load a pipeline. ```py +import torch from diffusers import DiffusionPipeline -pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) - -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png") -prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" -image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=init_image).images[0] +pipeline = DiffusionPipeline.from_pretrained( + "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda" +) ``` - - - -Checkpoints can be loaded by their specific pipeline class if you already know it. For example, to load a Stable Diffusion model, use the [`StableDiffusionPipeline`] class. +Every model has a specific pipeline subclass that inherits from [`DiffusionPipeline`]. A subclass usually has a narrow focus and is task-specific. See the table below for an example. -```python -from diffusers import StableDiffusionPipeline - -pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) -``` +| pipeline subclass | task | +|---|---| +| [`QwenImagePipeline`] | text-to-image | +| [`QwenImageImg2ImgPipeline`] | image-to-image | +| [`QwenImageInpaintPipeline`] | inpaint | -This same checkpoint may also be used for another task like image-to-image. To differentiate what task you want to use the checkpoint for, you have to use the corresponding task-specific pipeline class. For example, to use the same checkpoint for image-to-image, use the [`StableDiffusionImg2ImgPipeline`] class. +You could use the subclass directly by passing a model id to [`~QwenImagePipeline.from_pretrained`]. ```py -from diffusers import StableDiffusionImg2ImgPipeline - -pipeline = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) -``` - - - - -Use the Space below to gauge a pipeline's memory requirements before you download and load it to see if it runs on your hardware. -
- -
- - -### Specifying Component-Specific Data Types - -You can customize the data types for individual sub-models by passing a dictionary to the `torch_dtype` parameter. This allows you to load different components of a pipeline in different floating point precisions. For instance, if you want to load the transformer with `torch.bfloat16` and all other components with `torch.float16`, you can pass a dictionary mapping: - -```python -from diffusers import HunyuanVideoPipeline import torch +from diffusers import QwenImagePipeline -pipe = HunyuanVideoPipeline.from_pretrained( - "hunyuanvideo-community/HunyuanVideo", - torch_dtype={"transformer": torch.bfloat16, "default": torch.float16}, +pipeline = QwenImagePipeline.from_pretrained( + "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda" ) -print(pipe.transformer.dtype, pipe.vae.dtype) # (torch.bfloat16, torch.float16) ``` -If a component is not explicitly specified in the dictionary and no `default` is provided, it will be loaded with `torch.float32`. +### Local pipelines -### Parallel loading +Pipelines can also be run locally. Use [`~huggingface_hub.snapshot_download`] to download a model repository. -Large models are often [sharded](../training/distributed_inference#model-sharding) into smaller files so that they are easier to load. Diffusers supports loading shards in parallel to speed up the loading process. - -Set the environment variables below to enable parallel loading. +```py +from huggingface_hub import snapshot_download -- Set `HF_ENABLE_PARALLEL_LOADING` to `"YES"` to enable parallel loading of shards. -- Set `HF_PARALLEL_LOADING_WORKERS` to configure the number of parallel threads to use when loading shards. More workers loads a model faster but uses more memory. +snapshot_download(repo_id="Qwen/Qwen-Image") +``` -The `device_map` argument should be set to `"cuda"` to pre-allocate a large chunk of memory based on the model size. This substantially reduces model load time because warming up the memory allocator now avoids many smaller calls to the allocator later. +The model is downloaded to your [cache](../installation#cache). Pass the folder path to [`~QwenImagePipeline.from_pretrained`] to load it. ```py -import os import torch -from diffusers import DiffusionPipeline +from diffusers import QwenImagePipeline -os.environ["HF_ENABLE_PARALLEL_LOADING"] = "YES" -pipeline = DiffusionPipeline.from_pretrained( - "Wan-AI/Wan2.2-I2V-A14B-Diffusers", - torch_dtype=torch.bfloat16, - device_map="cuda" +pipeline = QwenImagePipeline.from_pretrained( + "path/to/your/cache", torch_dtype=torch.bfloat16, device_map="cuda" ) ``` -### Local pipeline - -To load a pipeline locally, use [git-lfs](https://git-lfs.github.com/) to manually download a checkpoint to your local disk. - -```bash -git-lfs install -git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 -``` - -This creates a local folder, ./stable-diffusion-v1-5, on your disk and you should pass its path to [`~DiffusionPipeline.from_pretrained`]. - -```python -from diffusers import DiffusionPipeline - -stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) -``` - -The [`~DiffusionPipeline.from_pretrained`] method won't download files from the Hub when it detects a local path, but this also means it won't download and cache the latest changes to a checkpoint. - -## Customize a pipeline +The [`~QwenImagePipeline.from_pretrained`] method won't download files from the Hub when it detects a local path. 
But this also means it won't download and cache any updates that have been made to the model either. -You can customize a pipeline by loading different components into it. This is important because you can: +## Pipeline data types -- change to a scheduler with faster generation speed or higher generation quality depending on your needs (call the `scheduler.compatibles` method on your pipeline to see compatible schedulers) -- change a default pipeline component to a newer and better performing one +Use the `torch_dtype` argument in [`~DiffusionPipeline.from_pretrained`] to load a model with a specific data type. This allows you to load different models in different precisions. For example, loading a large transformer model in half-precision reduces the memory required. -For example, let's customize the default [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0) checkpoint with: - -- The [`HeunDiscreteScheduler`] to generate higher quality images at the expense of slower generation speed. You must pass the `subfolder="scheduler"` parameter in [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler configuration into the correct [subfolder](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/scheduler) of the pipeline repository. -- A more stable VAE that runs in fp16. +Pass the data type for each model as a dictionary to `torch_dtype`. Use the `default` key to set the default data type. If a model isn't in the dictionary and `default` isn't provided, it is loaded in full precision (`torch.float32`). ```py -from diffusers import StableDiffusionXLPipeline, HeunDiscreteScheduler, AutoencoderKL import torch +from diffusers import QwenImagePipeline -scheduler = HeunDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler") -vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True) +pipeline = QwenImagePipeline.from_pretrained( + "Qwen/Qwen-Image", + torch_dtype={"transformer": torch.bfloat16, "default": torch.float16}, +) +print(pipeline.transformer.dtype, pipeline.vae.dtype) ``` -Now pass the new scheduler and VAE to the [`StableDiffusionXLPipeline`]. +You don't need to use a dictionary if you're loading all the models in the same data type. ```py -pipeline = StableDiffusionXLPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - scheduler=scheduler, - vae=vae, - torch_dtype=torch.float16, - variant="fp16", - use_safetensors=True -).to("cuda") -``` - -## Reuse a pipeline - -When you load multiple pipelines that share the same model components, it makes sense to reuse the shared components instead of reloading everything into memory again, especially if your hardware is memory-constrained. For example: - -1. You generated an image with the [`StableDiffusionPipeline`] but you want to improve its quality with the [`StableDiffusionSAGPipeline`]. Both of these pipelines share the same pretrained model, so it'd be a waste of memory to load the same model twice. -2. You want to add a model component, like a [`MotionAdapter`](../api/pipelines/animatediff#animatediffpipeline), to [`AnimateDiffPipeline`] which was instantiated from an existing [`StableDiffusionPipeline`]. Again, both pipelines share the same pretrained model, so it'd be a waste of memory to load an entirely new pipeline again. 
- -With the [`DiffusionPipeline.from_pipe`] API, you can switch between multiple pipelines to take advantage of their different features without increasing memory-usage. It is similar to turning on and off a feature in your pipeline. - -> [!TIP] -> To switch between tasks (rather than features), use the [`~DiffusionPipeline.from_pipe`] method with the [AutoPipeline](../api/pipelines/auto_pipeline) class, which automatically identifies the pipeline class based on the task (learn more in the [AutoPipeline](../tutorials/autopipeline) tutorial). - -Let's start with a [`StableDiffusionPipeline`] and then reuse the loaded model components to create a [`StableDiffusionSAGPipeline`] to increase generation quality. You'll use the [`StableDiffusionPipeline`] with an [IP-Adapter](./ip_adapter) to generate a bear eating pizza. - -```python -from diffusers import DiffusionPipeline, StableDiffusionSAGPipeline import torch -import gc -from diffusers.utils import load_image -from accelerate.utils import compute_module_sizes - -image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png") - -pipe_sd = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", torch_dtype=torch.float16) -pipe_sd.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") -pipe_sd.set_ip_adapter_scale(0.6) -pipe_sd.to("cuda") - -generator = torch.Generator(device="cpu").manual_seed(33) -out_sd = pipe_sd( - prompt="bear eats pizza", - negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", - ip_adapter_image=image, - num_inference_steps=50, - generator=generator, -).images[0] -out_sd -``` - -
- -
- - -For reference, you can check how much memory this process consumed. -```python -def bytes_to_giga_bytes(bytes): - return bytes / 1024 / 1024 / 1024 -print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") -"Max memory allocated: 4.406213283538818 GB" +import torch +from diffusers import QwenImagePipeline + +pipeline = QwenImagePipeline.from_pretrained( + "Qwen/Qwen-Image", torch_dtype=torch.bfloat16 +) +print(pipeline.transformer.dtype, pipeline.vae.dtype) ``` -Now, reuse the same pipeline components from [`StableDiffusionPipeline`] in [`StableDiffusionSAGPipeline`] with the [`~DiffusionPipeline.from_pipe`] method. +## Device placement -> [!WARNING] -> Some pipeline methods may not function properly on new pipelines created with [`~DiffusionPipeline.from_pipe`]. For instance, the [`~DiffusionPipeline.enable_model_cpu_offload`] method installs hooks on the model components based on a unique offloading sequence for each pipeline. If the models are executed in a different order in the new pipeline, the CPU offloading may not work correctly. -> -> To ensure everything works as expected, we recommend re-applying a pipeline method on a new pipeline created with [`~DiffusionPipeline.from_pipe`]. +The `device_map` argument determines individual model or pipeline placement on an accelerator like a GPU. It is especially helpful when there are multiple GPUs. -```python -pipe_sag = StableDiffusionSAGPipeline.from_pipe( - pipe_sd -) +Diffusers currently provides three options for `device_map`: `"cuda"`, `"balanced"`, and `"auto"`. Refer to the table below to compare the three placement strategies. -generator = torch.Generator(device="cpu").manual_seed(33) -out_sag = pipe_sag( - prompt="bear eats pizza", - negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", - ip_adapter_image=image, - num_inference_steps=50, - generator=generator, - guidance_scale=1.0, - sag_scale=0.75 -).images[0] -out_sag ``` +| parameter | description | +|---|---| +| `"cuda"` | places model or pipeline on CUDA device | +| `"balanced"` | evenly distributes model or pipeline on all GPUs | +| `"auto"` | distributes model from fastest device first to slowest | -
- -
+

-If you check the memory usage, you'll see it remains the same as before because [`StableDiffusionPipeline`] and [`StableDiffusionSAGPipeline`] are sharing the same pipeline components. This allows you to use them interchangeably without any additional memory overhead.
+Use the `max_memory` argument in [`~DiffusionPipeline.from_pretrained`] to allocate a maximum amount of memory to use on each device. By default, Diffusers uses the maximum amount available.
+
+

```py
-print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB")
-"Max memory allocated: 4.406213283538818 GB"
+import torch
+from diffusers import DiffusionPipeline
+
+pipeline = DiffusionPipeline.from_pretrained(
+    "Qwen/Qwen-Image",
+    torch_dtype=torch.bfloat16,
+    device_map="cuda",
+)
```

-Let's animate the image with the [`AnimateDiffPipeline`] and also add a [`MotionAdapter`] module to the pipeline. For the [`AnimateDiffPipeline`], you need to unload the IP-Adapter first and reload it *after* you've created your new pipeline (this only applies to the [`AnimateDiffPipeline`]).
+
+

```py
-from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler
-from diffusers.utils import export_to_gif
-
-pipe_sag.unload_ip_adapter()
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
-
-pipe_animate = AnimateDiffPipeline.from_pipe(pipe_sd, motion_adapter=adapter)
-pipe_animate.scheduler = DDIMScheduler.from_config(pipe_animate.scheduler.config, beta_schedule="linear")
-# load IP-Adapter and LoRA weights again
-pipe_animate.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
-pipe_animate.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
-pipe_animate.to("cuda")
-
-generator = torch.Generator(device="cpu").manual_seed(33)
-pipe_animate.set_adapters("zoom-out", adapter_weights=0.75)
-out = pipe_animate(
-    prompt="bear eats pizza",
-    num_frames=16,
-    num_inference_steps=50,
-    ip_adapter_image=image,
-    generator=generator,
-).frames[0]
-export_to_gif(out, "out_animate.gif")
+import torch
+from diffusers import AutoModel
+
+max_memory = {0: "16GB", 1: "16GB"}
+transformer = AutoModel.from_pretrained(
+    "Qwen/Qwen-Image",
+    subfolder="transformer",
+    torch_dtype=torch.bfloat16,
+    device_map="cuda",
+    max_memory=max_memory
+)
```
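+
+The `"auto"` strategy from the table above can be sketched the same way at the model level (an illustrative variant of the example above; weights fill the fastest device first and only spill over to slower devices when needed):
+
+```py
+import torch
+from diffusers import AutoModel
+
+# "auto" fills the fastest device first, then falls back to slower ones
+transformer = AutoModel.from_pretrained(
+    "Qwen/Qwen-Image",
+    subfolder="transformer",
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+)
+```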
- -
+
+
-The [`AnimateDiffPipeline`] is more memory-intensive and consumes 15GB of memory (see the [Memory-usage of from_pipe](#memory-usage-of-from_pipe) section to learn what this means for your memory-usage). +The `hf_device_map` attribute allows you to access and view the `device_map`. ```py -print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") -"Max memory allocated: 15.178664207458496 GB" +print(pipeline.hf_device_map) +# {'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0} ``` -### Modify from_pipe components - -Pipelines loaded with [`~DiffusionPipeline.from_pipe`] can be customized with different model components or methods. However, whenever you modify the *state* of the model components, it affects all the other pipelines that share the same components. For example, if you call [`~diffusers.loaders.IPAdapterMixin.unload_ip_adapter`] on the [`StableDiffusionSAGPipeline`], you won't be able to use IP-Adapter with the [`StableDiffusionPipeline`] because it's been removed from their shared components. +Reset a pipeline's `device_map` with the [`~DiffusionPipeline.reset_device_map`] method. This is necessary if you want to use methods such as `.to()`, [`~DiffusionPipeline.enable_sequential_cpu_offload`], and [`~DiffusionPipeline.enable_model_cpu_offload`]. ```py -pipe.sag_unload_ip_adapter() - -generator = torch.Generator(device="cpu").manual_seed(33) -out_sd = pipe_sd( - prompt="bear eats pizza", - negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", - ip_adapter_image=image, - num_inference_steps=50, - generator=generator, -).images[0] -"AttributeError: 'NoneType' object has no attribute 'image_projection_layers'" +pipeline.reset_device_map() ``` -### Memory usage of from_pipe +## Parallel loading -The memory requirement of loading multiple pipelines with [`~DiffusionPipeline.from_pipe`] is determined by the pipeline with the highest memory-usage regardless of the number of pipelines you create. - -| Pipeline | Memory usage (GB) | -|---|---| -| StableDiffusionPipeline | 4.400 | -| StableDiffusionSAGPipeline | 4.400 | -| AnimateDiffPipeline | 15.178 | - -The [`AnimateDiffPipeline`] has the highest memory requirement, so the *total memory-usage* is based only on the [`AnimateDiffPipeline`]. Your memory-usage will not increase if you create additional pipelines as long as their memory requirements doesn't exceed that of the [`AnimateDiffPipeline`]. Each pipeline can be used interchangeably without any additional memory overhead. +Large models are often [sharded](../training/distributed_inference#model-sharding) into smaller files so that they are easier to load. Diffusers supports loading shards in parallel to speed up the loading process. -## Safety checker +Set `HF_ENABLE_PARALLEL_LOADING` to `"YES"` to enable parallel loading of shards. -Diffusers implements a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) for Stable Diffusion models which can generate harmful content. The safety checker screens the generated output against known hardcoded not-safe-for-work (NSFW) content. If for whatever reason you'd like to disable the safety checker, pass `safety_checker=None` to the [`~DiffusionPipeline.from_pretrained`] method. +The `device_map` argument should be set to `"cuda"` to pre-allocate a large chunk of memory based on the model size. 
This substantially reduces model load time because warming up the memory allocator now avoids many smaller calls to the allocator later. -```python +```py +import os +import torch from diffusers import DiffusionPipeline -pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, use_safetensors=True) -""" -You have disabled the safety checker for by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling it only for use cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 . -""" -``` - -## Checkpoint variants - -A checkpoint variant is usually a checkpoint whose weights are: - -- Stored in a different floating point type, such as [torch.float16](https://pytorch.org/docs/stable/tensors.html#data-types), because it only requires half the bandwidth and storage to download. You can't use this variant if you're continuing training or using a CPU. -- Non-exponential mean averaged (EMA) weights which shouldn't be used for inference. You should use this variant to continue finetuning a model. - -> [!TIP] -> When the checkpoints have identical model structures, but they were trained on different datasets and with a different training setup, they should be stored in separate repositories. For example, [stabilityai/stable-diffusion-2](https://hf.co/stabilityai/stable-diffusion-2) and [stabilityai/stable-diffusion-2-1](https://hf.co/stabilityai/stable-diffusion-2-1) are stored in separate repositories. - -Otherwise, a variant is **identical** to the original checkpoint. They have exactly the same serialization format (like [safetensors](./using_safetensors)), model structure, and their weights have identical tensor shapes. - -| **checkpoint type** | **weight name** | **argument for loading weights** | -|---------------------|---------------------------------------------|----------------------------------| -| original | diffusion_pytorch_model.safetensors | | -| floating point | diffusion_pytorch_model.fp16.safetensors | `variant`, `torch_dtype` | -| non-EMA | diffusion_pytorch_model.non_ema.safetensors | `variant` | - -There are two important arguments for loading variants: +os.environ["HF_ENABLE_PARALLEL_LOADING"] = "YES" -- `torch_dtype` specifies the floating point precision of the loaded checkpoint. For example, if you want to save bandwidth by loading a fp16 variant, you should set `variant="fp16"` and `torch_dtype=torch.float16` to *convert the weights* to fp16. Otherwise, the fp16 weights are converted to the default fp32 precision. +pipeline = DiffusionPipeline.from_pretrained( + "Wan-AI/Wan2.2-I2V-A14B-Diffusers", torch_dtype=torch.bfloat16, device_map="cuda" +) +``` - If you only set `torch_dtype=torch.float16`, the default fp32 weights are downloaded first and then converted to fp16. +## Replacing models in a pipeline -- `variant` specifies which files should be loaded from the repository. For example, if you want to load a non-EMA variant of a UNet from [stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main/unet), set `variant="non_ema"` to download the `non_ema` file. 
+[`DiffusionPipeline`] is flexible and accommodates loading different models or schedulers. You can experiment with different schedulers to optimize for generation speed or quality, and you can replace models with more performant ones.

-
-
+The example below swaps out the default scheduler for [`HeunDiscreteScheduler`] to generate higher quality images and swaps in a more numerically stable VAE. Pass the `subfolder` argument to [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler from the correct subfolder.

```py
-from diffusers import DiffusionPipeline
import torch
+from diffusers import DiffusionPipeline, HeunDiscreteScheduler, AutoModel

-pipeline = DiffusionPipeline.from_pretrained(
-    "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True
+scheduler = HeunDiscreteScheduler.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
+)
+vae = AutoModel.from_pretrained(
+    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)
-```
-
-
-
-```py
pipeline = DiffusionPipeline.from_pretrained(
-    "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    scheduler=scheduler,
+    vae=vae,
+    torch_dtype=torch.float16,
+    device_map="cuda"
)
```

-
-
+## Reusing models in multiple pipelines

-Use the `variant` parameter in the [`DiffusionPipeline.save_pretrained`] method to save a checkpoint as a different floating point type or as a non-EMA variant. You should try save a variant to the same folder as the original checkpoint, so you have the option of loading both from the same folder.
+When working with multiple pipelines that use the same model, the [`~DiffusionPipeline.from_pipe`] method enables reusing a model instead of reloading it each time. This allows you to use multiple pipelines without increasing memory usage.

-
-
+Memory usage is determined by the pipeline with the highest memory requirement regardless of the number of pipelines.

-```python
-from diffusers import DiffusionPipeline
+The example below loads a pipeline and then loads a second pipeline with [`~DiffusionPipeline.from_pipe`] to use [perturbed-attention guidance (PAG)](../api/pipelines/pag) to improve generation quality.

-pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16")
-```
-
-
-
+> [!WARNING]
+> Use [`AutoPipelineForText2Image`] because [`DiffusionPipeline`] doesn't support PAG. Refer to the [AutoPipeline](../tutorials/autopipeline) docs to learn more.

```py
-pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema")
-```
-
-
-
-
-If you don't save the variant to an existing folder, you must specify the `variant` argument otherwise it'll throw an `Exception` because it can't find the original checkpoint.
+import torch
+from diffusers import AutoPipelineForText2Image

-```python
-# 👎 this won't work
-pipeline = DiffusionPipeline.from_pretrained(
-    "./stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
-)
-# 👍 this works
-pipeline = DiffusionPipeline.from_pretrained(
-    "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True
+pipeline_sdxl = AutoPipelineForText2Image.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, device_map="cuda"
)
+prompt = """
+cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
+highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
+"""
+image = pipeline_sdxl(prompt).images[0]
+print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
+# Max memory allocated: 10.47 GB
```

-## DiffusionPipeline explained
-
-As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things:
-
-- Download the latest version of the folder structure required for inference and cache it. If the latest folder structure is available in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cache and won't redownload the files.
-- Load the cached weights into the correct pipeline [class](../api/pipelines/overview#diffusers-summary) - retrieved from the `model_index.json` file - and return an instance of it.
-
-The pipelines' underlying folder structure corresponds directly with their class instances. For example, the [`StableDiffusionPipeline`] corresponds to the folder structure in [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5).
-
-```python
-from diffusers import DiffusionPipeline
+Set `enable_pag=True` in the second pipeline to enable PAG. The second pipeline uses the same amount of memory because it shares model weights with the first one.

-repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
-pipeline = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True)
-print(pipeline)
+```py
+pipeline = AutoPipelineForText2Image.from_pipe(
+    pipeline_sdxl, enable_pag=True
+)
+prompt = """
+cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
+highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
+"""
+image = pipeline(prompt).images[0]
+print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
+# Max memory allocated: 10.47 GB
+```

-You'll see pipeline is an instance of [`StableDiffusionPipeline`], which consists of seven components:
-
-- `"feature_extractor"`: a [`~transformers.CLIPImageProcessor`] from 🤗 Transformers.
-- `"safety_checker"`: a [component](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) for screening against harmful content.
-- `"scheduler"`: an instance of [`PNDMScheduler`].
-- `"text_encoder"`: a [`~transformers.CLIPTextModel`] from 🤗 Transformers.
-- `"tokenizer"`: a [`~transformers.CLIPTokenizer`] from 🤗 Transformers.
-- `"unet"`: an instance of [`UNet2DConditionModel`].
-- `"vae"`: an instance of [`AutoencoderKL`].
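+
+The same mechanism also works across tasks. As a sketch, assuming an SDXL image-to-image pipeline that shares all of its models with the text-to-image pipeline above:
+
+```py
+from diffusers import AutoPipelineForImage2Image
+
+# reuses the models already in memory; only the pipeline class changes
+pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_sdxl)
+```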
- -```json -StableDiffusionPipeline { - "feature_extractor": [ - "transformers", - "CLIPImageProcessor" - ], - "safety_checker": [ - "stable_diffusion", - "StableDiffusionSafetyChecker" - ], - "scheduler": [ - "diffusers", - "PNDMScheduler" - ], - "text_encoder": [ - "transformers", - "CLIPTextModel" - ], - "tokenizer": [ - "transformers", - "CLIPTokenizer" - ], - "unet": [ - "diffusers", - "UNet2DConditionModel" - ], - "vae": [ - "diffusers", - "AutoencoderKL" - ] -} -``` +> [!WARNING] +> Pipelines created by [`~DiffusionPipeline.from_pipe`] share the same models and *state*. Modifying the state of a model in one pipeline affects all the other pipelines that share the same model. -Compare the components of the pipeline instance to the [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main) folder structure, and you'll see there is a separate folder for each of the components in the repository: +Some methods may not work correctly on pipelines created with [`~DiffusionPipeline.from_pipe`]. For example, [`~DiffusionPipeline.enable_model_cpu_offload`] relies on a unique model execution order, which may differ in the new pipeline. To ensure proper functionality, reapply these methods on the new pipeline. -``` -. -├── feature_extractor -│   └── preprocessor_config.json -├── model_index.json -├── safety_checker -│   ├── config.json -| ├── model.fp16.safetensors -│ ├── model.safetensors -│ ├── pytorch_model.bin -| └── pytorch_model.fp16.bin -├── scheduler -│   └── scheduler_config.json -├── text_encoder -│   ├── config.json -| ├── model.fp16.safetensors -│ ├── model.safetensors -│ |── pytorch_model.bin -| └── pytorch_model.fp16.bin -├── tokenizer -│   ├── merges.txt -│   ├── special_tokens_map.json -│   ├── tokenizer_config.json -│   └── vocab.json -├── unet -│   ├── config.json -│   ├── diffusion_pytorch_model.bin -| |── diffusion_pytorch_model.fp16.bin -│ |── diffusion_pytorch_model.f16.safetensors -│ |── diffusion_pytorch_model.non_ema.bin -│ |── diffusion_pytorch_model.non_ema.safetensors -│ └── diffusion_pytorch_model.safetensors -|── vae -. ├── config.json -. ├── diffusion_pytorch_model.bin - ├── diffusion_pytorch_model.fp16.bin - ├── diffusion_pytorch_model.fp16.safetensors - └── diffusion_pytorch_model.safetensors -``` +## Safety checker + +Diffusers provides a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) for older Stable Diffusion models to prevent generating harmful content. It screens the generated output against a set of hardcoded harmful concepts. -You can access each of the components of the pipeline as an attribute to view its configuration: +If you want to disable the safety checker, pass `safety_checker=None` in [`~DiffusionPipeline.from_pretrained`] as shown below. 
```py -pipeline.tokenizer -CLIPTokenizer( - name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer", - vocab_size=49408, - model_max_length=77, - is_fast=False, - padding_side="right", - truncation_side="right", - special_tokens={ - "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), - "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), - "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), - "pad_token": "<|endoftext|>", - }, - clean_up_tokenization_spaces=True -) -``` +from diffusers import DiffusionPipeline -Every pipeline expects a [`model_index.json`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/model_index.json) file that tells the [`DiffusionPipeline`]: - -- which pipeline class to load from `_class_name` -- which version of 🧨 Diffusers was used to create the model in `_diffusers_version` -- what components from which library are stored in the subfolders (`name` corresponds to the component and subfolder name, `library` corresponds to the name of the library to load the class from, and `class` corresponds to the class name) - -```json -{ - "_class_name": "StableDiffusionPipeline", - "_diffusers_version": "0.6.0", - "feature_extractor": [ - "transformers", - "CLIPImageProcessor" - ], - "safety_checker": [ - "stable_diffusion", - "StableDiffusionSafetyChecker" - ], - "scheduler": [ - "diffusers", - "PNDMScheduler" - ], - "text_encoder": [ - "transformers", - "CLIPTextModel" - ], - "tokenizer": [ - "transformers", - "CLIPTokenizer" - ], - "unet": [ - "diffusers", - "UNet2DConditionModel" - ], - "vae": [ - "diffusers", - "AutoencoderKL" - ] -} -``` +pipeline = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None +) +""" +You have disabled the safety checker for by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling it only for use cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 . +""" +``` \ No newline at end of file diff --git a/docs/source/en/using-diffusers/other-formats.md b/docs/source/en/using-diffusers/other-formats.md index 11afbf29d3f2..59835bbf2622 100644 --- a/docs/source/en/using-diffusers/other-formats.md +++ b/docs/source/en/using-diffusers/other-formats.md @@ -176,7 +176,7 @@ Benefits of using the Diffusers-multifolder layout include: ).to("cuda") turbo_pipeline.scheduler = EulerDiscreteScheduler.from_config( turbo_pipeline.scheduler.config, - timestep+spacing="trailing" + timestep_spacing="trailing" ) image = turbo_pipeline( "an astronaut riding a unicorn on mars", @@ -267,6 +267,7 @@ pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_d save_folder = "flux-dev" pipe.save_pretrained("flux-dev") export_folder_as_dduf("flux-dev.dduf", folder_path=save_folder) +``` > [!TIP] > Packaging and loading quantized checkpoints in the DDUF format is supported as long as they respect the multi-folder structure. 
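+
+To verify the export, the archive can be loaded back by passing the `dduf_file` argument to [`~DiffusionPipeline.from_pretrained`]. A minimal sketch, assuming the `flux-dev.dduf` file created above sits in the current working directory:
+
+```py
+import torch
+from diffusers import DiffusionPipeline
+
+# load the pipeline directly from the single-file DDUF archive
+pipe = DiffusionPipeline.from_pretrained(
+    ".", dduf_file="flux-dev.dduf", torch_dtype=torch.bfloat16
+)
+```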
diff --git a/docs/source/en/using-diffusers/reusing_seeds.md b/docs/source/en/using-diffusers/reusing_seeds.md index ac9350f24caa..b4aed0aa6354 100644 --- a/docs/source/en/using-diffusers/reusing_seeds.md +++ b/docs/source/en/using-diffusers/reusing_seeds.md @@ -10,129 +10,86 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Reproducible pipelines +# Reproducibility -Diffusion models are inherently random which is what allows it to generate different outputs every time it is run. But there are certain times when you want to generate the same output every time, like when you're testing, replicating results, and even [improving image quality](#deterministic-batch-generation). While you can't expect to get identical results across platforms, you can expect reproducible results across releases and platforms within a certain tolerance range (though even this may vary). +Diffusion is a random process that generates a different output every time. For certain situations like testing and replicating results, you want to generate the same result each time, across releases and platforms within a certain tolerance range. -This guide will show you how to control randomness for deterministic generation on a CPU and GPU. +This guide will show you how to control sources of randomness and enable deterministic algorithms. -> [!TIP] -> We strongly recommend reading PyTorch's [statement about reproducibility](https://pytorch.org/docs/stable/notes/randomness.html): -> -> "Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds." - -## Control randomness - -During inference, pipelines rely heavily on random sampling operations which include creating the -Gaussian noise tensors to denoise and adding noise to the scheduling step. - -Take a look at the tensor values in the [`DDIMPipeline`] after two inference steps. - -```python -from diffusers import DDIMPipeline -import numpy as np - -ddim = DDIMPipeline.from_pretrained( "google/ddpm-cifar10-32", use_safetensors=True) -image = ddim(num_inference_steps=2, output_type="np").images -print(np.abs(image).sum()) -``` - -Running the code above prints one value, but if you run it again you get a different value. - -Each time the pipeline is run, [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html) uses a different random seed to create the Gaussian noise tensors. This leads to a different result each time it is run and enables the diffusion pipeline to generate a different random image each time. +## Generator -But if you need to reliably generate the same image, that depends on whether you're running the pipeline on a CPU or GPU. +Pipelines rely on [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html), which uses a different random seed each time, to create the initial noisy tensors. To generate the same output on a CPU or GPU, use a [Generator](https://docs.pytorch.org/docs/stable/generated/torch.Generator.html) to manage how random values are generated. > [!TIP] -> It might seem unintuitive to pass `Generator` objects to a pipeline instead of the integer value representing the seed. 
However, this is the recommended design when working with probabilistic models in PyTorch because a `Generator` is a *random state* that can be passed to multiple pipelines in a sequence. As soon as the `Generator` is consumed, the *state* is changed in place which means even if you passed the same `Generator` to a different pipeline, it won't produce the same result because the state is already changed.
+> If reproducibility is important to your use case, we recommend always using a CPU `Generator`. The performance loss is often negligible and you'll generate more similar values across hardware.

-
-
+
+
+The GPU uses a different random number generator than the CPU. Diffusers solves this issue with the [`~utils.torch_utils.randn_tensor`] function, which creates the random tensor on the CPU and then moves it to the GPU. This function is used everywhere inside the pipeline, so you don't need to call it explicitly.

-To generate reproducible results on a CPU, you'll need to use a PyTorch [Generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed. Now when you run the code, it always prints a value of `1491.1711` because the `Generator` object with the seed is passed to all the random functions in the pipeline. You should get a similar, if not the same, result on whatever hardware and PyTorch version you're using.
+Use [manual_seed](https://docs.pytorch.org/docs/stable/generated/torch.manual_seed.html) as shown below to set a seed.

-```python
+```py
import torch
import numpy as np
from diffusers import DDIMPipeline

-ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
-generator = torch.Generator(device="cpu").manual_seed(0)
+ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", device_map="cuda")
+generator = torch.manual_seed(0)
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```

-
+

-Writing a reproducible pipeline on a GPU is a bit trickier, and full reproducibility across different hardware is not guaranteed because matrix multiplication - which diffusion pipelines require a lot of - is less deterministic on a GPU than a CPU. For example, if you run the same code example from the CPU example, you'll get a different result even though the seed is identical. This is because the GPU uses a different random number generator than the CPU.
+Set `device="cpu"` in the `Generator` and use [manual_seed](https://docs.pytorch.org/docs/stable/generated/torch.manual_seed.html) to set a seed for generating random numbers.

-```python
+```py
import torch
import numpy as np
from diffusers import DDIMPipeline

-ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
-ddim.to("cuda")
-generator = torch.Generator(device="cuda").manual_seed(0)
+ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
+generator = torch.Generator(device="cpu").manual_seed(0)
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```

-To avoid this issue, Diffusers has a [`~utils.torch_utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The [`~utils.torch_utils.randn_tensor`] function is used everywhere inside the pipeline. Now you can call [torch.manual_seed](https://pytorch.org/docs/stable/generated/torch.manual_seed.html) which automatically creates a CPU `Generator` that can be passed to the pipeline even if it is being run on a GPU.
+
+

-```python
-import torch
-import numpy as np
-from diffusers import DDIMPipeline
+The `Generator` object should be passed to the pipeline instead of an integer seed. `Generator` maintains a *random state* that is consumed and modified when used. Once consumed, the same `Generator` object produces different results in subsequent calls, even across different pipelines, because its *state* has changed.

-ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
-ddim.to("cuda")
+```diff
generator = torch.manual_seed(0)
-image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
-print(np.abs(image).sum())
-```
-
-> [!TIP]
-> If reproducibility is important to your use case, we recommend always passing a CPU `Generator`. The performance loss is often negligible and you'll generate more similar values than if the pipeline had been run on a GPU.
-
-Finally, more complex pipelines such as [`UnCLIPPipeline`], are often extremely
-susceptible to precision error propagation. You'll need to use
-exactly the same hardware and PyTorch version for full reproducibility.
-
-
+for _ in range(5):
+- image = pipeline(prompt, generator=generator)
++ image = pipeline(prompt, generator=torch.manual_seed(0))
+```

## Deterministic algorithms

-You can also configure PyTorch to use deterministic algorithms to create a reproducible pipeline. The downside is that deterministic algorithms may be slower than non-deterministic ones and you may observe a decrease in performance.
+PyTorch supports [deterministic algorithms](https://docs.pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms) - where available - for certain operations so they produce the same results. Deterministic algorithms may be slower and decrease performance.

-Non-deterministic behavior occurs when operations are launched in more than one CUDA stream. To avoid this, set the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during runtime.
-
-PyTorch typically benchmarks multiple algorithms to select the fastest one, but if you want reproducibility, you should disable this feature because the benchmark may select different algorithms each time. Set Diffusers [enable_full_determinism](https://github.com/huggingface/diffusers/blob/142f353e1c638ff1d20bd798402b68f72c1ebbdd/src/diffusers/utils/testing_utils.py#L861) to enable deterministic algorithms.
+Use Diffusers' [enable_full_determinism](https://github.com/huggingface/diffusers/blob/142f353e1c638ff1d20bd798402b68f72c1ebbdd/src/diffusers/utils/testing_utils.py#L861) function to enable deterministic algorithms.

```py
+import torch
+from diffusers.utils.testing_utils import enable_full_determinism
+
enable_full_determinism()
```

-Now when you run the same pipeline twice, you'll get identical results.
+Under the hood, `enable_full_determinism` works by:

-```py
-import torch
-from diffusers import DDIMScheduler, StableDiffusionPipeline
-
-pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True).to("cuda")
-pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-g = torch.Generator(device="cuda")
+- Setting the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during runtime. Non-deterministic behavior occurs when operations are used in more than one CUDA stream.
+- Disabling benchmarking, which otherwise searches for the fastest convolution algorithm and may pick a different one each run, by setting `torch.backends.cudnn.benchmark=False`. Non-deterministic behavior occurs because the benchmark may select different algorithms each time depending on hardware or benchmarking noise.
+- Disabling TensorFloat32 (TF32) operations in favor of more precise and consistent full-precision operations.

-prompt = "A bear is playing a guitar on Times Square"
-g.manual_seed(0)
-result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images
+## Resources

-g.manual_seed(0)
-result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images
-
-print("L_inf dist =", abs(result1 - result2).max())
-"L_inf dist = tensor(0., device='cuda:0')"
-```
+We strongly recommend reading PyTorch's developer notes about [Reproducibility](https://docs.pytorch.org/docs/stable/notes/randomness.html). You can try to limit randomness, but reproducibility is not *guaranteed* even with an identical seed.
\ No newline at end of file
diff --git a/docs/source/en/using-diffusers/schedulers.md b/docs/source/en/using-diffusers/schedulers.md
index aabb9dd31c96..6d928f8037c4 100644
--- a/docs/source/en/using-diffusers/schedulers.md
+++ b/docs/source/en/using-diffusers/schedulers.md
@@ -165,53 +165,6 @@ image

Most images look very similar and are comparable in quality. Again, it often comes down to your specific use case so a good approach is to run multiple different schedulers and compare the results.

-### Flax schedulers
-
-To compare Flax schedulers, you need to additionally load the scheduler state into the model parameters. For example, let's change the default scheduler in [`FlaxStableDiffusionPipeline`] to use the super fast [`FlaxDPMSolverMultistepScheduler`].
-
-> [!WARNING]
-> The [`FlaxLMSDiscreteScheduler`] and [`FlaxDDPMScheduler`] are not compatible with the [`FlaxStableDiffusionPipeline`] yet.
-
-```py
-import jax
-import numpy as np
-from flax.jax_utils import replicate
-from flax.training.common_utils import shard
-from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler
-
-scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
-    "stable-diffusion-v1-5/stable-diffusion-v1-5",
-    subfolder="scheduler"
-)
-pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-    "stable-diffusion-v1-5/stable-diffusion-v1-5",
-    scheduler=scheduler,
-    variant="bf16",
-    dtype=jax.numpy.bfloat16,
-)
-params["scheduler"] = scheduler_state
-```
-
-Then you can take advantage of Flax's compatibility with TPUs to generate a number of images in parallel. You'll need to make a copy of the model parameters for each available device and then split the inputs across them to generate your desired number of images.
-
-```py
-# Generate 1 image per parallel device (8 on TPUv2-8 or TPUv3-8)
-prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition."
-num_samples = jax.device_count() -prompt_ids = pipeline.prepare_inputs([prompt] * num_samples) - -prng_seed = jax.random.PRNGKey(0) -num_inference_steps = 25 - -# shard inputs and rng -params = replicate(params) -prng_seed = jax.random.split(prng_seed, jax.device_count()) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images -images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) -``` - ## Models Models are loaded from the [`ModelMixin.from_pretrained`] method, which downloads and caches the latest version of the model weights and configurations. If the latest files are available in the local cache, [`~ModelMixin.from_pretrained`] reuses files in the cache instead of re-downloading them. diff --git a/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md b/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md deleted file mode 100644 index ac9ffe0dfc11..000000000000 --- a/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md +++ /dev/null @@ -1,225 +0,0 @@ - - -# JAX/Flax - -[[open-in-colab]] - -🤗 Diffusers supports Flax for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform. This guide shows you how to run inference with Stable Diffusion using JAX/Flax. - -Before you begin, make sure you have the necessary libraries installed: - -```py -# uncomment to install the necessary libraries in Colab -#!pip install -q jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy -#!pip install -q diffusers -``` - -You should also make sure you're using a TPU backend. While JAX does not run exclusively on TPUs, you'll get the best performance on a TPU because each server has 8 TPU accelerators working in parallel. - -If you are running this guide in Colab, select *Runtime* in the menu above, select the option *Change runtime type*, and then select *TPU* under the *Hardware accelerator* setting. Import JAX and quickly check whether you're using a TPU: - -```python -import jax -import jax.tools.colab_tpu -jax.tools.colab_tpu.setup_tpu() - -num_devices = jax.device_count() -device_type = jax.devices()[0].device_kind - -print(f"Found {num_devices} JAX devices of type {device_type}.") -assert ( - "TPU" in device_type, - "Available device is not a TPU, please select TPU from Runtime > Change runtime type > Hardware accelerator" -) -# Found 8 JAX devices of type Cloud TPU. -``` - -Great, now you can import the rest of the dependencies you'll need: - -```python -import jax.numpy as jnp -from jax import pmap -from flax.jax_utils import replicate -from flax.training.common_utils import shard - -from diffusers import FlaxStableDiffusionPipeline -``` - -## Load a model - -Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading a pretrained Flax pipeline returns *both* the pipeline and the model weights (or parameters). In this guide, you'll use `bfloat16`, a more efficient half-float type that is supported by TPUs (you can also use `float32` for full precision if you want). - -```python -dtype = jnp.bfloat16 -pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - variant="bf16", - dtype=dtype, -) -``` - -## Inference - -TPUs usually have 8 devices working in parallel, so let's use the same prompt for each device. This means you can perform inference on 8 devices at once, with each device generating one image. 
As a result, you'll get 8 images in the same amount of time it takes for one chip to generate a single image! - - - -Learn more details in the [How does parallelization work?](#how-does-parallelization-work) section. - - - -After replicating the prompt, get the tokenized text ids by calling the `prepare_inputs` function on the pipeline. The length of the tokenized text is set to 77 tokens as required by the configuration of the underlying CLIP text model. - -```python -prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic" -prompt = [prompt] * jax.device_count() -prompt_ids = pipeline.prepare_inputs(prompt) -prompt_ids.shape -# (8, 77) -``` - -Model parameters and inputs have to be replicated across the 8 parallel devices. The parameters dictionary is replicated with [`flax.jax_utils.replicate`](https://flax.readthedocs.io/en/latest/api_reference/flax.jax_utils.html#flax.jax_utils.replicate) which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`. - -```python -# parameters -p_params = replicate(params) - -# arrays -prompt_ids = shard(prompt_ids) -prompt_ids.shape -# (8, 1, 77) -``` - -This shape means each one of the 8 devices receives as an input a `jnp` array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, you could have a batch size larger than `1` if you want to generate multiple images (per chip) at once. - -Next, create a random number generator to pass to the generation function. This is standard procedure in Flax, which is very serious and opinionated about random numbers. All functions that deal with random numbers are expected to receive a generator to ensure reproducibility, even when you're training across multiple distributed devices. - -The helper function below uses a seed to initialize a random number generator. As long as you use the same seed, you'll get the exact same results. Feel free to use different seeds when exploring results later in the guide. - -```python -def create_key(seed=0): - return jax.random.PRNGKey(seed) -``` - -The helper function, or `rng`, is split 8 times so each device receives a different generator and generates a different image. - -```python -rng = create_key(0) -rng = jax.random.split(rng, jax.device_count()) -``` - -To take advantage of JAX's optimized speed on a TPU, pass `jit=True` to the pipeline to compile the JAX code into an efficient representation and to ensure the model runs in parallel across the 8 devices. - - - -You need to ensure all your inputs have the same shape in subsequent calls, otherwise JAX will need to recompile the code which is slower. - - - -The first inference run takes more time because it needs to compile the code, but subsequent calls (even with different inputs) are much faster. For example, it took more than a minute to compile on a TPU v2-8, but then it takes about **7s** on a future inference run! - -```py -%%time -images = pipeline(prompt_ids, p_params, rng, jit=True)[0] - -# CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s -# Wall time: 1min 29s -``` - -The returned array has shape `(8, 1, 512, 512, 3)` which should be reshaped to remove the second dimension and get 8 images of `512 × 512 × 3`. Then you can use the [`~utils.numpy_to_pil`] function to convert the arrays into images. 
- -```python -from diffusers.utils import make_image_grid - -images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) -images = pipeline.numpy_to_pil(images) -make_image_grid(images, rows=2, cols=4) -``` - -![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_38_output_0.jpeg) - -## Using different prompts - -You don't necessarily have to use the same prompt on all devices. For example, to generate 8 different prompts: - -```python -prompts = [ - "Labrador in the style of Hokusai", - "Painting of a squirrel skating in New York", - "HAL-9000 in the style of Van Gogh", - "Times Square under water, with fish and a dolphin swimming around", - "Ancient Roman fresco showing a man working on his laptop", - "Close-up photograph of young black woman against urban background, high quality, bokeh", - "Armchair in the shape of an avocado", - "Clown astronaut in space, with Earth in the background", -] - -prompt_ids = pipeline.prepare_inputs(prompts) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, p_params, rng, jit=True).images -images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) -images = pipeline.numpy_to_pil(images) - -make_image_grid(images, 2, 4) -``` - -![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_43_output_0.jpeg) - -## How does parallelization work? - -The Flax pipeline in 🤗 Diffusers automatically compiles the model and runs it in parallel on all available devices. Let's take a closer look at how that process works. - -JAX parallelization can be done in multiple ways. The easiest one revolves around using the [`jax.pmap`](https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html) function to achieve single-program multiple-data (SPMD) parallelization. It means running several copies of the same code, each on different data inputs. More sophisticated approaches are possible, and you can go over to the JAX [documentation](https://jax.readthedocs.io/en/latest/index.html) to explore this topic in more detail if you are interested! - -`jax.pmap` does two things: - -1. Compiles (or "`jit`s") the code which is similar to `jax.jit()`. This does not happen when you call `pmap`, and only the first time the `pmap`ped function is called. -2. Ensures the compiled code runs in parallel on all available devices. - -To demonstrate, call `pmap` on the pipeline's `_generate` method (this is a private method that generates images and may be renamed or removed in future releases of 🤗 Diffusers): - -```python -p_generate = pmap(pipeline._generate) -``` - -After calling `pmap`, the prepared function `p_generate` will: - -1. Make a copy of the underlying function, `pipeline._generate`, on each device. -2. Send each device a different portion of the input arguments (this is why it's necessary to call the *shard* function). In this case, `prompt_ids` has shape `(8, 1, 77, 768)` so the array is split into 8 and each copy of `_generate` receives an input with shape `(1, 77, 768)`. - -The most important thing to pay attention to here is the batch size (1 in this example), and the input dimensions that make sense for your code. You don't have to change anything else to make the code work in parallel. - -The first time you call the pipeline takes more time, but the calls afterward are much faster. 
The `block_until_ready` function is used to correctly measure inference time because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking occurs automatically when you want to use the result of a computation that has not yet been materialized. - -```py -%%time -images = p_generate(prompt_ids, p_params, rng) -images = images.block_until_ready() - -# CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s -# Wall time: 1min 15s -``` - -Check your image dimensions to see if they're correct: - -```python -images.shape -# (8, 1, 512, 512, 3) -``` - -## Resources - -To learn more about how JAX works with Stable Diffusion, you may be interested in reading: - -* [Accelerating Stable Diffusion XL Inference with JAX on Cloud TPU v5e](https://hf.co/blog/sdxl_jax) diff --git a/docs/source/en/using-diffusers/text-img2vid.md b/docs/source/en/using-diffusers/text-img2vid.md index 67d1fd118e4d..ade3e0de329f 100644 --- a/docs/source/en/using-diffusers/text-img2vid.md +++ b/docs/source/en/using-diffusers/text-img2vid.md @@ -287,7 +287,7 @@ export_to_video(output, "output.mp4", fps=16) ## Reduce memory usage -Recent video models like [`HunyuanVideoPipeline`] and [`WanPipeline`], which have 10B+ parameters, require a lot of memory and it often exceeds the memory availabe on consumer hardware. Diffusers offers several techniques for reducing the memory requirements of these large models. +Recent video models like [`HunyuanVideoPipeline`] and [`WanPipeline`], which have 10B+ parameters, require a lot of memory and it often exceeds the memory available on consumer hardware. Diffusers offers several techniques for reducing the memory requirements of these large models. > [!TIP] > Refer to the [Reduce memory usage](../optimization/memory) guide for more details about other memory saving techniques. diff --git a/examples/community/README.md b/examples/community/README.md index e4fbd7936686..e314463077f0 100644 --- a/examples/community/README.md +++ b/examples/community/README.md @@ -88,6 +88,8 @@ PIXART-α Controlnet pipeline | Implementation of the controlnet model for pixar | FaithDiff Stable Diffusion XL Pipeline | Implementation of [(CVPR 2025) FaithDiff: Unleashing Diffusion Priors for Faithful Image Super-resolutionUnleashing Diffusion Priors for Faithful Image Super-resolution](https://huggingface.co/papers/2411.18824) - FaithDiff is a faithful image super-resolution method that leverages latent diffusion models by actively adapting the diffusion prior and jointly fine-tuning its components (encoder and diffusion model) with an alignment module to ensure high fidelity and structural consistency. 
| [FaithDiff Stable Diffusion XL Pipeline](#faithdiff-stable-diffusion-xl-pipeline) | [![Hugging Face Models](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-blue)](https://huggingface.co/jychen9811/FaithDiff) | [Junyang Chen, Jinshan Pan, Jiangxin Dong, IMAG Lab, (Adapted by Eliseu Silva)](https://github.com/JyChen9811/FaithDiff) | | Stable Diffusion 3 InstructPix2Pix Pipeline | Implementation of Stable Diffusion 3 InstructPix2Pix Pipeline | [Stable Diffusion 3 InstructPix2Pix Pipeline](#stable-diffusion-3-instructpix2pix-pipeline) | [![Hugging Face Models](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-blue)](https://huggingface.co/BleachNick/SD3_UltraEdit_freeform) [![Hugging Face Models](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-blue)](https://huggingface.co/CaptainZZZ/sd3-instructpix2pix) | [Jiayu Zhang](https://github.com/xduzhangjiayu) and [Haozhe Zhao](https://github.com/HaozheZhao)| | Flux Kontext multiple images | A modified version of the `FluxKontextPipeline` that supports calling Flux Kontext with multiple reference images.| [Flux Kontext multiple input Pipeline](#flux-kontext-multiple-images) | - | [Net-Mist](https://github.com/Net-Mist) | + + To load a custom pipeline you just need to pass the `custom_pipeline` argument to `DiffusionPipeline`, as one of the files in `diffusers/examples/community`. Feel free to send a PR with your own pipelines, we will merge them quickly. ```py diff --git a/examples/community/composable_stable_diffusion.py b/examples/community/composable_stable_diffusion.py index ec653bcdb4c6..a7c540ceb984 100644 --- a/examples/community/composable_stable_diffusion.py +++ b/examples/community/composable_stable_diffusion.py @@ -398,7 +398,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/imagic_stable_diffusion.py b/examples/community/imagic_stable_diffusion.py index a2561c919858..091d0fbf8d3a 100644 --- a/examples/community/imagic_stable_diffusion.py +++ b/examples/community/imagic_stable_diffusion.py @@ -147,7 +147,7 @@ def train( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`. 
diff --git a/examples/community/img2img_inpainting.py b/examples/community/img2img_inpainting.py index 7b9bd043d099..499230b1e2cd 100644 --- a/examples/community/img2img_inpainting.py +++ b/examples/community/img2img_inpainting.py @@ -197,7 +197,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/interpolate_stable_diffusion.py b/examples/community/interpolate_stable_diffusion.py index 460bb464f3b1..5b96c14d6367 100644 --- a/examples/community/interpolate_stable_diffusion.py +++ b/examples/community/interpolate_stable_diffusion.py @@ -173,7 +173,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/lpw_stable_diffusion.py b/examples/community/lpw_stable_diffusion.py index ccb17a51e615..cb017c0bbe29 100644 --- a/examples/community/lpw_stable_diffusion.py +++ b/examples/community/lpw_stable_diffusion.py @@ -888,7 +888,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. @@ -1131,7 +1131,7 @@ def text2img( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/lpw_stable_diffusion_onnx.py b/examples/community/lpw_stable_diffusion_onnx.py index ab1462b81b39..92effc193329 100644 --- a/examples/community/lpw_stable_diffusion_onnx.py +++ b/examples/community/lpw_stable_diffusion_onnx.py @@ -721,7 +721,7 @@ def __call__( latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): @@ -918,7 +918,7 @@ def text2img( latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): diff --git a/examples/community/lpw_stable_diffusion_xl.py b/examples/community/lpw_stable_diffusion_xl.py index ea67738ab74c..272c5d5652c5 100644 --- a/examples/community/lpw_stable_diffusion_xl.py +++ b/examples/community/lpw_stable_diffusion_xl.py @@ -1519,7 +1519,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. prompt_embeds (`torch.Tensor`, *optional*): diff --git a/examples/community/multilingual_stable_diffusion.py b/examples/community/multilingual_stable_diffusion.py index 5e7453ed1201..afef4e9e9719 100644 --- a/examples/community/multilingual_stable_diffusion.py +++ b/examples/community/multilingual_stable_diffusion.py @@ -187,7 +187,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 
diff --git a/examples/community/pipeline_controlnet_xl_kolors.py b/examples/community/pipeline_controlnet_xl_kolors.py index af5586990e2e..dc90aacdbc6b 100644 --- a/examples/community/pipeline_controlnet_xl_kolors.py +++ b/examples/community/pipeline_controlnet_xl_kolors.py @@ -888,7 +888,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_controlnet_xl_kolors_img2img.py b/examples/community/pipeline_controlnet_xl_kolors_img2img.py index c0831945ed8e..189d0312143f 100644 --- a/examples/community/pipeline_controlnet_xl_kolors_img2img.py +++ b/examples/community/pipeline_controlnet_xl_kolors_img2img.py @@ -1066,7 +1066,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_controlnet_xl_kolors_inpaint.py b/examples/community/pipeline_controlnet_xl_kolors_inpaint.py index db15d99ac3ea..4b6123cc1f8b 100644 --- a/examples/community/pipeline_controlnet_xl_kolors_inpaint.py +++ b/examples/community/pipeline_controlnet_xl_kolors_inpaint.py @@ -1298,7 +1298,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_demofusion_sdxl.py b/examples/community/pipeline_demofusion_sdxl.py index c9b57a6ece8c..119b39cefe68 100644 --- a/examples/community/pipeline_demofusion_sdxl.py +++ b/examples/community/pipeline_demofusion_sdxl.py @@ -724,7 +724,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. 
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py index 43ef55d32c3d..aa95d2ec719e 100644 --- a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py +++ b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py @@ -1906,7 +1906,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_differential_img2img.py b/examples/community/pipeline_flux_differential_img2img.py index 7d6358cb3258..3677e73136f7 100644 --- a/examples/community/pipeline_flux_differential_img2img.py +++ b/examples/community/pipeline_flux_differential_img2img.py @@ -730,7 +730,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -769,7 +769,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_kontext_multiple_images.py b/examples/community/pipeline_flux_kontext_multiple_images.py index ef0c643a405e..7e4a9ed0fadc 100644 --- a/examples/community/pipeline_flux_kontext_multiple_images.py +++ b/examples/community/pipeline_flux_kontext_multiple_images.py @@ -885,7 +885,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. 
prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_rf_inversion.py b/examples/community/pipeline_flux_rf_inversion.py index 631d04b762d4..8f8b4817acf2 100644 --- a/examples/community/pipeline_flux_rf_inversion.py +++ b/examples/community/pipeline_flux_rf_inversion.py @@ -711,7 +711,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_semantic_guidance.py b/examples/community/pipeline_flux_semantic_guidance.py index 93bcd3af75e6..b3d2b3a4b4e1 100644 --- a/examples/community/pipeline_flux_semantic_guidance.py +++ b/examples/community/pipeline_flux_semantic_guidance.py @@ -853,7 +853,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_flux_with_cfg.py b/examples/community/pipeline_flux_with_cfg.py index 1b8dc9ecb85e..3916aff257f0 100644 --- a/examples/community/pipeline_flux_with_cfg.py +++ b/examples/community/pipeline_flux_with_cfg.py @@ -639,7 +639,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_kolors_differential_img2img.py b/examples/community/pipeline_kolors_differential_img2img.py index 9491447409e2..d299c839815e 100644 --- a/examples/community/pipeline_kolors_differential_img2img.py +++ b/examples/community/pipeline_kolors_differential_img2img.py @@ -904,7 +904,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_kolors_inpainting.py b/examples/community/pipeline_kolors_inpainting.py index cce9f10ded3d..3cab8ecac002 100644 --- a/examples/community/pipeline_kolors_inpainting.py +++ b/examples/community/pipeline_kolors_inpainting.py @@ -1246,7 +1246,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_prompt2prompt.py b/examples/community/pipeline_prompt2prompt.py index 065edc0cfbe8..8d94dc9248c1 100644 --- a/examples/community/pipeline_prompt2prompt.py +++ b/examples/community/pipeline_prompt2prompt.py @@ -611,7 +611,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_sdxl_style_aligned.py b/examples/community/pipeline_sdxl_style_aligned.py index ea168036c196..10438af365f9 100644 --- a/examples/community/pipeline_sdxl_style_aligned.py +++ b/examples/community/pipeline_sdxl_style_aligned.py @@ -1480,7 +1480,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py index 693485d1758d..643386232bc3 100644 --- a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py +++ b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py @@ -748,7 +748,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py b/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py index 6923db23a6d3..d9cee800e8ad 100644 --- a/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py +++ b/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py @@ -945,7 +945,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py b/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py index ab8064c6e378..a881814c2a91 100644 --- a/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py +++ b/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py @@ -1786,7 +1786,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py index ccf1098c614c..564a19e923d2 100644 --- a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py +++ b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py @@ -973,7 +973,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py index 38db19148d43..c73433b20f88 100644 --- a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +++ b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py @@ -1329,7 +1329,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py b/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py index b9f00cb82d83..89388e10cb19 100644 --- a/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py +++ b/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py @@ -1053,7 +1053,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stable_diffusion_xl_ipex.py b/examples/community/pipeline_stable_diffusion_xl_ipex.py index eda6089f594f..aa2b24f3965a 100644 --- a/examples/community/pipeline_stable_diffusion_xl_ipex.py +++ b/examples/community/pipeline_stable_diffusion_xl_ipex.py @@ -832,7 +832,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/examples/community/pipeline_stg_cogvideox.py b/examples/community/pipeline_stg_cogvideox.py index 1c98ae0f6d8e..bdb6aecc30c3 100644 --- a/examples/community/pipeline_stg_cogvideox.py +++ b/examples/community/pipeline_stg_cogvideox.py @@ -632,7 +632,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stg_ltx.py b/examples/community/pipeline_stg_ltx.py index f7ccf99e96ae..70069a33f5d9 100644 --- a/examples/community/pipeline_stg_ltx.py +++ b/examples/community/pipeline_stg_ltx.py @@ -620,7 +620,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stg_ltx_image2video.py b/examples/community/pipeline_stg_ltx_image2video.py index 3b3d2333805d..c32805e1419f 100644 --- a/examples/community/pipeline_stg_ltx_image2video.py +++ b/examples/community/pipeline_stg_ltx_image2video.py @@ -682,7 +682,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_stg_mochi.py b/examples/community/pipeline_stg_mochi.py index b6ab1b192c1e..dbe5d2525ad3 100644 --- a/examples/community/pipeline_stg_mochi.py +++ b/examples/community/pipeline_stg_mochi.py @@ -603,7 +603,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/pipeline_zero1to3.py b/examples/community/pipeline_zero1to3.py index 0db543b1697c..9e29566978e8 100644 --- a/examples/community/pipeline_zero1to3.py +++ b/examples/community/pipeline_zero1to3.py @@ -657,7 +657,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/rerender_a_video.py b/examples/community/rerender_a_video.py index 133c23294395..78a15a03b099 100644 --- a/examples/community/rerender_a_video.py +++ b/examples/community/rerender_a_video.py @@ -656,7 +656,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/run_onnx_controlnet.py b/examples/community/run_onnx_controlnet.py index 2221fc09dbde..f0ab2a2b9643 100644 --- a/examples/community/run_onnx_controlnet.py +++ b/examples/community/run_onnx_controlnet.py @@ -591,7 +591,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/run_tensorrt_controlnet.py b/examples/community/run_tensorrt_controlnet.py index b9e71724c046..e4f1abc83b0b 100644 --- a/examples/community/run_tensorrt_controlnet.py +++ b/examples/community/run_tensorrt_controlnet.py @@ -695,7 +695,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/sd_text2img_k_diffusion.py b/examples/community/sd_text2img_k_diffusion.py index ab6cf2d9cd3f..4d5cea497f8c 100755 --- a/examples/community/sd_text2img_k_diffusion.py +++ b/examples/community/sd_text2img_k_diffusion.py @@ -326,7 +326,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/seed_resize_stable_diffusion.py b/examples/community/seed_resize_stable_diffusion.py index 3c823012c102..eafe7572aab5 100644 --- a/examples/community/seed_resize_stable_diffusion.py +++ b/examples/community/seed_resize_stable_diffusion.py @@ -122,7 +122,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/stable_diffusion_comparison.py b/examples/community/stable_diffusion_comparison.py index 36e7dba2de62..22f3b3e0c385 100644 --- a/examples/community/stable_diffusion_comparison.py +++ b/examples/community/stable_diffusion_comparison.py @@ -279,7 +279,7 @@ def _call_( latents (`torch.Tensor`, optional): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, optional, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/stable_diffusion_controlnet_img2img.py b/examples/community/stable_diffusion_controlnet_img2img.py index 877464454a61..6d8038cfd4ae 100644 --- a/examples/community/stable_diffusion_controlnet_img2img.py +++ b/examples/community/stable_diffusion_controlnet_img2img.py @@ -670,7 +670,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_controlnet_inpaint.py b/examples/community/stable_diffusion_controlnet_inpaint.py index 175c47d01523..fe7b808b6beb 100644 --- a/examples/community/stable_diffusion_controlnet_inpaint.py +++ b/examples/community/stable_diffusion_controlnet_inpaint.py @@ -810,7 +810,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_controlnet_inpaint_img2img.py b/examples/community/stable_diffusion_controlnet_inpaint_img2img.py index 51e7ac38dd54..2b5dc77fe5aa 100644 --- a/examples/community/stable_diffusion_controlnet_inpaint_img2img.py +++ b/examples/community/stable_diffusion_controlnet_inpaint_img2img.py @@ -804,7 +804,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_controlnet_reference.py b/examples/community/stable_diffusion_controlnet_reference.py index aa9ab1b24211..e5dd249e0424 100644 --- a/examples/community/stable_diffusion_controlnet_reference.py +++ b/examples/community/stable_diffusion_controlnet_reference.py @@ -179,7 +179,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_ipex.py b/examples/community/stable_diffusion_ipex.py index 18d5e8feaa43..7d1cd4f5d09e 100644 --- a/examples/community/stable_diffusion_ipex.py +++ b/examples/community/stable_diffusion_ipex.py @@ -615,7 +615,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_reference.py b/examples/community/stable_diffusion_reference.py index 69fa0722cf8a..6f7dce982339 100644 --- a/examples/community/stable_diffusion_reference.py +++ b/examples/community/stable_diffusion_reference.py @@ -885,7 +885,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_repaint.py b/examples/community/stable_diffusion_repaint.py index 9f6172f3b838..94b9f8b01b51 100644 --- a/examples/community/stable_diffusion_repaint.py +++ b/examples/community/stable_diffusion_repaint.py @@ -678,7 +678,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/stable_diffusion_xl_reference.py b/examples/community/stable_diffusion_xl_reference.py index 11926a5d9ac9..eb055574966d 100644 --- a/examples/community/stable_diffusion_xl_reference.py +++ b/examples/community/stable_diffusion_xl_reference.py @@ -380,7 +380,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/community/text_inpainting.py b/examples/community/text_inpainting.py index 2908388029dd..f262cf2cac6d 100644 --- a/examples/community/text_inpainting.py +++ b/examples/community/text_inpainting.py @@ -180,7 +180,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/examples/community/tiled_upscaling.py b/examples/community/tiled_upscaling.py index 56eb3e89b5d0..7a5e77155cd0 100644 --- a/examples/community/tiled_upscaling.py +++ b/examples/community/tiled_upscaling.py @@ -231,7 +231,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. tile_size (`int`, *optional*): The size of the tiles. Too big can result in an OOM-error. tile_border (`int`, *optional*): diff --git a/examples/community/wildcard_stable_diffusion.py b/examples/community/wildcard_stable_diffusion.py index c750610ca34f..d40221e5b1cf 100644 --- a/examples/community/wildcard_stable_diffusion.py +++ b/examples/community/wildcard_stable_diffusion.py @@ -209,7 +209,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 
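Note on the community-pipeline hunks above: they all correct the same docstring typo for the `latents` argument ("will ge generated" -> "will be generated"). For reviewers unfamiliar with that argument, a minimal sketch of the behavior the corrected text describes; the model id and the 512x512 latent shape below are illustrative assumptions, not part of this patch:

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Pre-generate noisy latents once (shape: batch, in_channels, H/8, W/8)...
generator = torch.Generator(device="cuda").manual_seed(0)
latents = torch.randn(
    (1, pipe.unet.config.in_channels, 64, 64),
    generator=generator, device="cuda", dtype=torch.float16,
)
# ...then reuse them to tweak the same generation with different prompts.
image_a = pipe("a photo of a cat", latents=latents.clone()).images[0]
image_b = pipe("an oil painting of a cat", latents=latents.clone()).images[0]
# If `latents` is omitted, the pipeline samples the tensor itself using the
# supplied random `generator` -- exactly what the fixed docstrings say.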
diff --git a/examples/dreambooth/README_qwen.md b/examples/dreambooth/README_qwen.md index 0f0b640c8b5c..68c546a25df9 100644 --- a/examples/dreambooth/README_qwen.md +++ b/examples/dreambooth/README_qwen.md @@ -77,7 +77,7 @@ export MODEL_NAME="Qwen/Qwen-Image" export INSTANCE_DIR="dog" export OUTPUT_DIR="trained-qwenimage-lora" -accelerate launch train_dreambooth_lora_qwenimage.py \ +accelerate launch train_dreambooth_lora_qwen_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ diff --git a/examples/dreambooth/train_dreambooth_flux.py b/examples/dreambooth/train_dreambooth_flux.py index b803babdc827..c24d16c6005a 100644 --- a/examples/dreambooth/train_dreambooth_flux.py +++ b/examples/dreambooth/train_dreambooth_flux.py @@ -642,6 +642,7 @@ def parse_args(input_args=None): ], help="The image interpolation method to use for resizing images.", ) + parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enable Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1182,6 +1183,13 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + transformer.set_attention_backend("_native_npu") + else: + raise ValueError("NPU flash attention requires the torch_npu extension and is supported only on NPU devices.") + # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 diff --git a/examples/dreambooth/train_dreambooth_lora_flux.py b/examples/dreambooth/train_dreambooth_lora_flux.py index a8a76097f3c3..2353625c3878 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux.py +++ b/examples/dreambooth/train_dreambooth_lora_flux.py @@ -80,6 +80,7 @@ is_wandb_available, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card +from diffusers.utils.import_utils import is_torch_npu_available from diffusers.utils.torch_utils import is_compiled_module @@ -686,6 +687,7 @@ def parse_args(input_args=None): ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enable Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1213,6 +1215,13 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + transformer.set_attention_backend("_native_npu") + else: + raise ValueError("NPU flash attention requires the torch_npu extension and is supported only on NPU devices.") + # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32 diff --git a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py index 6aa165ed20b3..ffeef7b4b34b 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py +++ b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py @@ -706,6 +706,7 @@ def parse_args(input_args=None): ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enable Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1354,6 +1355,13 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + transformer.set_attention_backend("_native_npu") + else: + raise ValueError("NPU flash attention requires the torch_npu extension and is supported only on NPU devices.") + # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 diff --git a/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py b/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py index 148b2e7f3147..89228983d4d8 100644 --- a/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py +++ b/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py @@ -860,7 +860,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/examples/research_projects/rdm/pipeline_rdm.py b/examples/research_projects/rdm/pipeline_rdm.py index 7e2095b7245c..9b696874c5d1 100644 --- a/examples/research_projects/rdm/pipeline_rdm.py +++ b/examples/research_projects/rdm/pipeline_rdm.py @@ -202,7 +202,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
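Note on the three dreambooth training scripts above: they all gain the same opt-in `--enable_npu_flash_attention` flag with an identical guard. A minimal sketch of that guard, factored into a helper (the helper name is hypothetical; `is_torch_npu_available` and `set_attention_backend` are the calls the hunks actually use):

from diffusers.utils.import_utils import is_torch_npu_available

def maybe_enable_npu_flash_attention(args, transformer):
    # Hypothetical helper: a no-op unless the user opts in, and a hard failure
    # when torch_npu is missing, so a silent fallback cannot hide a
    # misconfigured NPU run.
    if not args.enable_npu_flash_attention:
        return
    if not is_torch_npu_available():
        raise ValueError("NPU flash attention requires the torch_npu extension and is supported only on NPU devices.")
    transformer.set_attention_backend("_native_npu")

Invocation then mirrors the README example, e.g. `accelerate launch train_dreambooth_lora_flux.py ... --enable_npu_flash_attention`.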
diff --git a/src/diffusers/models/attention_flax.py b/src/diffusers/models/attention_flax.py index 17e6f33df051..1bde62e5c666 100644 --- a/src/diffusers/models/attention_flax.py +++ b/src/diffusers/models/attention_flax.py @@ -19,6 +19,11 @@ import jax import jax.numpy as jnp +from ..utils import logging + + +logger = logging.get_logger(__name__) + def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096): """Multi-head dot product attention with a limited number of queries.""" @@ -151,6 +156,11 @@ class FlaxAttention(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + inner_dim = self.dim_head * self.heads self.scale = self.dim_head**-0.5 @@ -277,6 +287,11 @@ class FlaxBasicTransformerBlock(nn.Module): split_head_dim: bool = False def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + # self attention (or cross_attention if only_cross_attention is True) self.attn1 = FlaxAttention( self.dim, @@ -365,6 +380,11 @@ class FlaxTransformer2DModel(nn.Module): split_head_dim: bool = False def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5) inner_dim = self.n_heads * self.d_head @@ -454,6 +474,11 @@ class FlaxFeedForward(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + # The second linear layer needs to be called # net_2 for now to match the index of the Sequential layer self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype) @@ -484,6 +509,11 @@ class FlaxGEGLU(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + inner_dim = self.dim * 4 self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.dropout) diff --git a/src/diffusers/models/controlnets/controlnet_flax.py b/src/diffusers/models/controlnets/controlnet_flax.py index 4b2148666ebf..f7a8b98fa2f0 100644 --- a/src/diffusers/models/controlnets/controlnet_flax.py +++ b/src/diffusers/models/controlnets/controlnet_flax.py @@ -20,7 +20,7 @@ from flax.core.frozen_dict import FrozenDict from ...configuration_utils import ConfigMixin, flax_register_to_config -from ...utils import BaseOutput +from ...utils import BaseOutput, logging from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from ..modeling_flax_utils import FlaxModelMixin from ..unets.unet_2d_blocks_flax import ( @@ -30,6 +30,9 @@ ) +logger = logging.get_logger(__name__) + + @flax.struct.dataclass class FlaxControlNetOutput(BaseOutput): """ @@ -50,6 +53,11 @@ class FlaxControlNetConditioningEmbedding(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self) -> None: + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. 
We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + self.conv_in = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), @@ -184,6 +192,11 @@ def init_weights(self, rng: jax.Array) -> FrozenDict: return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"] def setup(self) -> None: + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 diff --git a/src/diffusers/models/embeddings_flax.py b/src/diffusers/models/embeddings_flax.py index 1e7e84edeaeb..3790905e583c 100644 --- a/src/diffusers/models/embeddings_flax.py +++ b/src/diffusers/models/embeddings_flax.py @@ -16,6 +16,11 @@ import flax.linen as nn import jax.numpy as jnp +from ..utils import logging + + +logger = logging.get_logger(__name__) + def get_sinusoidal_embeddings( timesteps: jnp.ndarray, @@ -76,6 +81,11 @@ class FlaxTimestepEmbedding(nn.Module): The data type for the embedding parameters. """ + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + time_embed_dim: int = 32 dtype: jnp.dtype = jnp.float32 @@ -104,6 +114,11 @@ class FlaxTimesteps(nn.Module): flip_sin_to_cos: bool = False freq_shift: float = 1 + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + @nn.compact def __call__(self, timesteps): return get_sinusoidal_embeddings( diff --git a/src/diffusers/models/modeling_flax_utils.py b/src/diffusers/models/modeling_flax_utils.py index 010b7377451c..573828dc4b03 100644 --- a/src/diffusers/models/modeling_flax_utils.py +++ b/src/diffusers/models/modeling_flax_utils.py @@ -290,6 +290,10 @@ def from_pretrained( You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` """ + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) diff --git a/src/diffusers/models/resnet_flax.py b/src/diffusers/models/resnet_flax.py index 9c80932c5c5d..9bedaa9a36b6 100644 --- a/src/diffusers/models/resnet_flax.py +++ b/src/diffusers/models/resnet_flax.py @@ -15,12 +15,22 @@ import jax import jax.numpy as jnp +from ..utils import logging + + +logger = logging.get_logger(__name__) + class FlaxUpsample2D(nn.Module): out_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + self.conv = nn.Conv( self.out_channels, kernel_size=(3, 3), @@ -45,6 +55,11 @@ class FlaxDownsample2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." 
+ ) + self.conv = nn.Conv( self.out_channels, kernel_size=(3, 3), @@ -68,6 +83,11 @@ class FlaxResnetBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5) diff --git a/src/diffusers/models/transformers/transformer_flux.py b/src/diffusers/models/transformers/transformer_flux.py index 60c7eb1dbabe..7ab371a1a18e 100644 --- a/src/diffusers/models/transformers/transformer_flux.py +++ b/src/diffusers/models/transformers/transformer_flux.py @@ -22,8 +22,7 @@ from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin -from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers -from ...utils.import_utils import is_torch_npu_available +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn @@ -354,25 +353,13 @@ def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) - if is_torch_npu_available(): - from ..attention_processor import FluxAttnProcessor2_0_NPU - - deprecation_message = ( - "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. Attention processors " - "should be set explicitly using the `set_attn_processor` method." - ) - deprecate("npu_processor", "0.34.0", deprecation_message) - processor = FluxAttnProcessor2_0_NPU() - else: - processor = FluxAttnProcessor() - self.attn = FluxAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, - processor=processor, + processor=FluxAttnProcessor(), eps=1e-6, pre_only=True, ) diff --git a/src/diffusers/models/transformers/transformer_skyreels_v2.py b/src/diffusers/models/transformers/transformer_skyreels_v2.py index 236fca690a90..358759164b9e 100644 --- a/src/diffusers/models/transformers/transformer_skyreels_v2.py +++ b/src/diffusers/models/transformers/transformer_skyreels_v2.py @@ -1,4 +1,4 @@ -# Copyright 2025 The SkyReels-V2 Team, The Wan Team and The HuggingFace Team. All rights reserved. +# Copyright 2025 The SkyReels Team, The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -21,9 +21,10 @@ from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin -from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers -from ..attention import FeedForward -from ..attention_processor import Attention +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import maybe_allow_in_graph +from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward +from ..attention_dispatch import dispatch_attention_fn from ..cache_utils import CacheMixin from ..embeddings import ( PixArtAlphaTextProjection, @@ -39,20 +40,53 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name -class SkyReelsV2AttnProcessor2_0: +def _get_qkv_projections( + attn: "SkyReelsV2Attention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor +): + # encoder_hidden_states is only passed for cross-attention + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + + if attn.fused_projections: + if attn.cross_attention_dim_head is None: + # In self-attention layers, we can fuse the entire QKV projection into a single linear + query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) + else: + # In cross-attention layers, we can only fuse the KV projections into a single linear + query = attn.to_q(hidden_states) + key, value = attn.to_kv(encoder_hidden_states).chunk(2, dim=-1) + else: + query = attn.to_q(hidden_states) + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + return query, key, value + + +def _get_added_kv_projections(attn: "SkyReelsV2Attention", encoder_hidden_states_img: torch.Tensor): + if attn.fused_projections: + key_img, value_img = attn.to_added_kv(encoder_hidden_states_img).chunk(2, dim=-1) + else: + key_img = attn.add_k_proj(encoder_hidden_states_img) + value_img = attn.add_v_proj(encoder_hidden_states_img) + return key_img, value_img + + +class SkyReelsV2AttnProcessor: + _attention_backend = None + def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( - "SkyReelsV2AttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0." + "SkyReelsV2AttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0." 
) def __call__( self, - attn: Attention, + attn: "SkyReelsV2Attention", hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, - rotary_emb: Optional[torch.Tensor] = None, + rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, ) -> torch.Tensor: encoder_hidden_states_img = None if attn.add_k_proj is not None: @@ -60,58 +94,66 @@ def __call__( image_context_length = encoder_hidden_states.shape[1] - 512 encoder_hidden_states_img = encoder_hidden_states[:, :image_context_length] encoder_hidden_states = encoder_hidden_states[:, image_context_length:] - if encoder_hidden_states is None: - encoder_hidden_states = hidden_states - query = attn.to_q(hidden_states) - key = attn.to_k(encoder_hidden_states) - value = attn.to_v(encoder_hidden_states) + query, key, value = _get_qkv_projections(attn, hidden_states, encoder_hidden_states) - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) + query = attn.norm_q(query) + key = attn.norm_k(key) - query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2) - key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2) - value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2) + query = query.unflatten(2, (attn.heads, -1)) + key = key.unflatten(2, (attn.heads, -1)) + value = value.unflatten(2, (attn.heads, -1)) if rotary_emb is not None: - def apply_rotary_emb(hidden_states: torch.Tensor, freqs: torch.Tensor): - x_rotated = torch.view_as_complex(hidden_states.to(torch.float32).unflatten(3, (-1, 2))) - x_out = torch.view_as_real(x_rotated * freqs).flatten(3, 4) - return x_out.type_as(hidden_states) - - query = apply_rotary_emb(query, rotary_emb) - key = apply_rotary_emb(key, rotary_emb) + def apply_rotary_emb( + hidden_states: torch.Tensor, + freqs_cos: torch.Tensor, + freqs_sin: torch.Tensor, + ): + x1, x2 = hidden_states.unflatten(-1, (-1, 2)).unbind(-1) + cos = freqs_cos[..., 0::2] + sin = freqs_sin[..., 1::2] + out = torch.empty_like(hidden_states) + out[..., 0::2] = x1 * cos - x2 * sin + out[..., 1::2] = x1 * sin + x2 * cos + return out.type_as(hidden_states) + + query = apply_rotary_emb(query, *rotary_emb) + key = apply_rotary_emb(key, *rotary_emb) # I2V task hidden_states_img = None if encoder_hidden_states_img is not None: - key_img = attn.add_k_proj(encoder_hidden_states_img) + key_img, value_img = _get_added_kv_projections(attn, encoder_hidden_states_img) key_img = attn.norm_added_k(key_img) - value_img = attn.add_v_proj(encoder_hidden_states_img) - - key_img = key_img.unflatten(2, (attn.heads, -1)).transpose(1, 2) - value_img = value_img.unflatten(2, (attn.heads, -1)).transpose(1, 2) - hidden_states_img = F.scaled_dot_product_attention( - query, key_img, value_img, attn_mask=None, dropout_p=0.0, is_causal=False + key_img = key_img.unflatten(2, (attn.heads, -1)) + value_img = value_img.unflatten(2, (attn.heads, -1)) + + hidden_states_img = dispatch_attention_fn( + query, + key_img, + value_img, + attn_mask=None, + dropout_p=0.0, + is_causal=False, + backend=self._attention_backend, ) - hidden_states_img = hidden_states_img.transpose(1, 2).flatten(2, 3) + hidden_states_img = hidden_states_img.flatten(2, 3) hidden_states_img = hidden_states_img.type_as(query) - hidden_states = F.scaled_dot_product_attention( + hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, + backend=self._attention_backend, ) - hidden_states = hidden_states.transpose(1, 
2).flatten(2, 3) + hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.type_as(query) if hidden_states_img is not None: @@ -122,7 +164,122 @@ def apply_rotary_emb(hidden_states: torch.Tensor, freqs: torch.Tensor): return hidden_states -# Copied from diffusers.models.transformers.transformer_wan.WanImageEmbedding with WanImageEmbedding -> SkyReelsV2ImageEmbedding +class SkyReelsV2AttnProcessor2_0: + def __new__(cls, *args, **kwargs): + deprecation_message = ( + "The SkyReelsV2AttnProcessor2_0 class is deprecated and will be removed in a future version. " + "Please use SkyReelsV2AttnProcessor instead. " + ) + deprecate("SkyReelsV2AttnProcessor2_0", "1.0.0", deprecation_message, standard_warn=False) + return SkyReelsV2AttnProcessor(*args, **kwargs) + + +class SkyReelsV2Attention(torch.nn.Module, AttentionModuleMixin): + _default_processor_cls = SkyReelsV2AttnProcessor + _available_processors = [SkyReelsV2AttnProcessor] + + def __init__( + self, + dim: int, + heads: int = 8, + dim_head: int = 64, + eps: float = 1e-5, + dropout: float = 0.0, + added_kv_proj_dim: Optional[int] = None, + cross_attention_dim_head: Optional[int] = None, + processor=None, + is_cross_attention=None, + ): + super().__init__() + + self.inner_dim = dim_head * heads + self.heads = heads + self.added_kv_proj_dim = added_kv_proj_dim + self.cross_attention_dim_head = cross_attention_dim_head + self.kv_inner_dim = self.inner_dim if cross_attention_dim_head is None else cross_attention_dim_head * heads + + self.to_q = torch.nn.Linear(dim, self.inner_dim, bias=True) + self.to_k = torch.nn.Linear(dim, self.kv_inner_dim, bias=True) + self.to_v = torch.nn.Linear(dim, self.kv_inner_dim, bias=True) + self.to_out = torch.nn.ModuleList( + [ + torch.nn.Linear(self.inner_dim, dim, bias=True), + torch.nn.Dropout(dropout), + ] + ) + self.norm_q = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True) + self.norm_k = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True) + + self.add_k_proj = self.add_v_proj = None + if added_kv_proj_dim is not None: + self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True) + self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True) + self.norm_added_k = torch.nn.RMSNorm(dim_head * heads, eps=eps) + + self.is_cross_attention = cross_attention_dim_head is not None + + self.set_processor(processor) + + def fuse_projections(self): + if getattr(self, "fused_projections", False): + return + + if self.cross_attention_dim_head is None: + concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) + concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) + out_features, in_features = concatenated_weights.shape + with torch.device("meta"): + self.to_qkv = nn.Linear(in_features, out_features, bias=True) + self.to_qkv.load_state_dict( + {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True + ) + else: + concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) + concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) + out_features, in_features = concatenated_weights.shape + with torch.device("meta"): + self.to_kv = nn.Linear(in_features, out_features, bias=True) + self.to_kv.load_state_dict( + {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True + ) + + if self.added_kv_proj_dim is not None: + concatenated_weights = 
torch.cat([self.add_k_proj.weight.data, self.add_v_proj.weight.data]) + concatenated_bias = torch.cat([self.add_k_proj.bias.data, self.add_v_proj.bias.data]) + out_features, in_features = concatenated_weights.shape + with torch.device("meta"): + self.to_added_kv = nn.Linear(in_features, out_features, bias=True) + self.to_added_kv.load_state_dict( + {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True + ) + + self.fused_projections = True + + @torch.no_grad() + def unfuse_projections(self): + if not getattr(self, "fused_projections", False): + return + + if hasattr(self, "to_qkv"): + delattr(self, "to_qkv") + if hasattr(self, "to_kv"): + delattr(self, "to_kv") + if hasattr(self, "to_added_kv"): + delattr(self, "to_added_kv") + + self.fused_projections = False + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + **kwargs, + ) -> torch.Tensor: + return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, rotary_emb, **kwargs) + + class SkyReelsV2ImageEmbedding(torch.nn.Module): def __init__(self, in_features: int, out_features: int, pos_embed_seq_len=None): super().__init__() @@ -213,7 +370,11 @@ def forward( class SkyReelsV2RotaryPosEmbed(nn.Module): def __init__( - self, attention_head_dim: int, patch_size: Tuple[int, int, int], max_seq_len: int, theta: float = 10000.0 + self, + attention_head_dim: int, + patch_size: Tuple[int, int, int], + max_seq_len: int, + theta: float = 10000.0, ): super().__init__() @@ -223,37 +384,55 @@ def __init__( h_dim = w_dim = 2 * (attention_head_dim // 6) t_dim = attention_head_dim - h_dim - w_dim + freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64 + + freqs_cos = [] + freqs_sin = [] - freqs = [] for dim in [t_dim, h_dim, w_dim]: - freq = get_1d_rotary_pos_embed( - dim, max_seq_len, theta, use_real=False, repeat_interleave_real=False, freqs_dtype=torch.float32 + freq_cos, freq_sin = get_1d_rotary_pos_embed( + dim, + max_seq_len, + theta, + use_real=True, + repeat_interleave_real=True, + freqs_dtype=freqs_dtype, ) - freqs.append(freq) - self.freqs = torch.cat(freqs, dim=1) + freqs_cos.append(freq_cos) + freqs_sin.append(freq_sin) + + self.register_buffer("freqs_cos", torch.cat(freqs_cos, dim=1), persistent=False) + self.register_buffer("freqs_sin", torch.cat(freqs_sin, dim=1), persistent=False) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.patch_size ppf, pph, ppw = num_frames // p_t, height // p_h, width // p_w - freqs = self.freqs.to(hidden_states.device) - freqs = freqs.split_with_sizes( - [ - self.attention_head_dim // 2 - 2 * (self.attention_head_dim // 6), - self.attention_head_dim // 6, - self.attention_head_dim // 6, - ], - dim=1, - ) + split_sizes = [ + self.attention_head_dim - 2 * (self.attention_head_dim // 3), + self.attention_head_dim // 3, + self.attention_head_dim // 3, + ] + + freqs_cos = self.freqs_cos.split(split_sizes, dim=1) + freqs_sin = self.freqs_sin.split(split_sizes, dim=1) - freqs_f = freqs[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) - freqs_h = freqs[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) - freqs_w = freqs[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) - freqs = torch.cat([freqs_f, freqs_h, freqs_w], dim=-1).reshape(1, 1, ppf * 
pph * ppw, -1) - return freqs + freqs_cos_f = freqs_cos[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) + freqs_cos_h = freqs_cos[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) + freqs_cos_w = freqs_cos[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) + freqs_sin_f = freqs_sin[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) + freqs_sin_h = freqs_sin[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) + freqs_sin_w = freqs_sin[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) + freqs_cos = torch.cat([freqs_cos_f, freqs_cos_h, freqs_cos_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1) + freqs_sin = torch.cat([freqs_sin_f, freqs_sin_h, freqs_sin_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1) + + return freqs_cos, freqs_sin + + +@maybe_allow_in_graph class SkyReelsV2TransformerBlock(nn.Module): def __init__( self, @@ -269,33 +448,24 @@ def __init__( # 1. Self-attention self.norm1 = FP32LayerNorm(dim, eps, elementwise_affine=False) - self.attn1 = Attention( - query_dim=dim, + self.attn1 = SkyReelsV2Attention( + dim=dim, heads=num_heads, - kv_heads=num_heads, dim_head=dim // num_heads, - qk_norm=qk_norm, eps=eps, - bias=True, - cross_attention_dim=None, - out_bias=True, - processor=SkyReelsV2AttnProcessor2_0(), + cross_attention_dim_head=None, + processor=SkyReelsV2AttnProcessor(), ) # 2. Cross-attention - self.attn2 = Attention( - query_dim=dim, + self.attn2 = SkyReelsV2Attention( + dim=dim, heads=num_heads, - kv_heads=num_heads, dim_head=dim // num_heads, - qk_norm=qk_norm, eps=eps, - bias=True, - cross_attention_dim=None, - out_bias=True, added_kv_proj_dim=added_kv_proj_dim, - added_proj_bias=True, - processor=SkyReelsV2AttnProcessor2_0(), + cross_attention_dim_head=dim // num_heads, + processor=SkyReelsV2AttnProcessor(), ) self.norm2 = FP32LayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity() @@ -321,15 +491,15 @@ def forward( # For 4D temb in Diffusion Forcing framework, we assume the shape is (b, 6, f * pp_h * pp_w, inner_dim) e = (self.scale_shift_table.unsqueeze(2) + temb.float()).chunk(6, dim=1) shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = [ei.squeeze(1) for ei in e] + # 1. Self-attention norm_hidden_states = (self.norm1(hidden_states.float()) * (1 + scale_msa) + shift_msa).type_as(hidden_states) - attn_output = self.attn1( - hidden_states=norm_hidden_states, rotary_emb=rotary_emb, attention_mask=attention_mask - ) + attn_output = self.attn1(norm_hidden_states, None, attention_mask, rotary_emb) hidden_states = (hidden_states.float() + attn_output * gate_msa).type_as(hidden_states) + # 2. Cross-attention norm_hidden_states = self.norm2(hidden_states.float()).type_as(hidden_states) - attn_output = self.attn2(hidden_states=norm_hidden_states, encoder_hidden_states=encoder_hidden_states) + attn_output = self.attn2(norm_hidden_states, encoder_hidden_states, None, None) hidden_states = hidden_states + attn_output # 3. Feed-forward @@ -338,10 +508,13 @@ def forward( ) ff_output = self.ffn(norm_hidden_states) hidden_states = (hidden_states.float() + ff_output.float() * c_gate_msa).type_as(hidden_states) + return hidden_states -class SkyReelsV2Transformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): +class SkyReelsV2Transformer3DModel( + ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin +): r""" A Transformer model for video-like data used in the Wan-based SkyReels-V2 model. 
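For reference, the interleaved real-valued rotation used by the new `apply_rotary_emb` above is numerically equivalent to the complex-number formulation it replaces. A minimal, self-contained sketch of that equivalence; the helper names, shapes, and the `(batch, seq, heads, head_dim)` layout here are illustrative, not part of the patch:

    import torch

    def rope_real(x, freqs_cos, freqs_sin):
        # rotate interleaved pairs (x1, x2) by a per-pair angle, as in the patch
        x1, x2 = x.unflatten(-1, (-1, 2)).unbind(-1)
        cos, sin = freqs_cos[..., 0::2], freqs_sin[..., 1::2]
        out = torch.empty_like(x)
        out[..., 0::2] = x1 * cos - x2 * sin
        out[..., 1::2] = x1 * sin + x2 * cos
        return out.type_as(x)

    def rope_complex(x, freqs):
        # the formulation the patch removes: rotate each pair as a complex number
        x_c = torch.view_as_complex(x.float().unflatten(-1, (-1, 2)))
        return torch.view_as_real(x_c * freqs).flatten(-2).type_as(x)

    x = torch.randn(1, 4, 2, 8)  # (batch, seq, heads, head_dim)
    theta = torch.randn(4, 4)    # one angle per rotated pair
    freqs_cos = theta.cos().repeat_interleave(2, dim=-1).view(1, 4, 1, 8)
    freqs_sin = theta.sin().repeat_interleave(2, dim=-1).view(1, 4, 1, 8)
    freqs = torch.polar(torch.ones_like(theta), theta).view(1, 4, 1, 4)
    assert torch.allclose(rope_real(x, freqs_cos, freqs_sin), rope_complex(x, freqs), atol=1e-5)

Registering `freqs_cos`/`freqs_sin` as non-persistent buffers, as `SkyReelsV2RotaryPosEmbed` now does, also avoids complex tensors at runtime, which presumably helps on MPS (hence the `freqs_dtype` switch) and with graph capture via `@maybe_allow_in_graph`.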
@@ -389,6 +562,7 @@ class SkyReelsV2Transformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, Fr _no_split_modules = ["SkyReelsV2TransformerBlock"] _keep_in_fp32_modules = ["time_embedder", "scale_shift_table", "norm1", "norm2", "norm3"] _keys_to_ignore_on_load_unexpected = ["norm_added_q"] + _repeated_blocks = ["SkyReelsV2TransformerBlock"] @register_to_config def __init__( diff --git a/src/diffusers/models/unets/unet_2d_blocks_flax.py b/src/diffusers/models/unets/unet_2d_blocks_flax.py index abd025165ecf..6e6005afdc31 100644 --- a/src/diffusers/models/unets/unet_2d_blocks_flax.py +++ b/src/diffusers/models/unets/unet_2d_blocks_flax.py @@ -15,10 +15,14 @@ import flax.linen as nn import jax.numpy as jnp +from ...utils import logging from ..attention_flax import FlaxTransformer2DModel from ..resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D +logger = logging.get_logger(__name__) + + class FlaxCrossAttnDownBlock2D(nn.Module): r""" Cross Attention 2D Downsizing block - original architecture from Unet transformers: @@ -60,6 +64,11 @@ class FlaxCrossAttnDownBlock2D(nn.Module): transformer_layers_per_block: int = 1 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + resnets = [] attentions = [] @@ -135,6 +144,11 @@ class FlaxDownBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + resnets = [] for i in range(self.num_layers): @@ -208,6 +222,11 @@ class FlaxCrossAttnUpBlock2D(nn.Module): transformer_layers_per_block: int = 1 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + resnets = [] attentions = [] @@ -288,6 +307,11 @@ class FlaxUpBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + resnets = [] for i in range(self.num_layers): @@ -356,6 +380,11 @@ class FlaxUNetMidBlock2DCrossAttn(nn.Module): transformer_layers_per_block: int = 1 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." 
+ ) + # there is always at least one resnet resnets = [ FlaxResnetBlock2D( diff --git a/src/diffusers/models/unets/unet_2d_condition_flax.py b/src/diffusers/models/unets/unet_2d_condition_flax.py index 7c21ddb690ae..8d9a309afbcc 100644 --- a/src/diffusers/models/unets/unet_2d_condition_flax.py +++ b/src/diffusers/models/unets/unet_2d_condition_flax.py @@ -20,7 +20,7 @@ from flax.core.frozen_dict import FrozenDict from ...configuration_utils import ConfigMixin, flax_register_to_config -from ...utils import BaseOutput +from ...utils import BaseOutput, logging from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from ..modeling_flax_utils import FlaxModelMixin from .unet_2d_blocks_flax import ( @@ -32,6 +32,9 @@ ) +logger = logging.get_logger(__name__) + + @flax.struct.dataclass class FlaxUNet2DConditionOutput(BaseOutput): """ @@ -163,6 +166,11 @@ def init_weights(self, rng: jax.Array) -> FrozenDict: return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)["params"] def setup(self) -> None: + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 diff --git a/src/diffusers/models/vae_flax.py b/src/diffusers/models/vae_flax.py index 93398a51eac7..13653b90372a 100644 --- a/src/diffusers/models/vae_flax.py +++ b/src/diffusers/models/vae_flax.py @@ -25,10 +25,13 @@ from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config -from ..utils import BaseOutput +from ..utils import BaseOutput, logging from .modeling_flax_utils import FlaxModelMixin +logger = logging.get_logger(__name__) + + @flax.struct.dataclass class FlaxDecoderOutput(BaseOutput): """ @@ -73,6 +76,10 @@ class FlaxUpsample2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), @@ -107,6 +114,11 @@ class FlaxDownsample2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), @@ -149,6 +161,11 @@ class FlaxResnetBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) @@ -221,6 +238,11 @@ class FlaxAttentionBlock(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." 
+ ) + self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1 dense = partial(nn.Dense, self.channels, dtype=self.dtype) @@ -302,6 +324,11 @@ class FlaxDownEncoderBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels @@ -359,6 +386,11 @@ class FlaxUpDecoderBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels @@ -413,6 +445,11 @@ class FlaxUNetMidBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32) # there is always at least one resnet @@ -504,6 +541,11 @@ class FlaxEncoder(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + block_out_channels = self.block_out_channels # in self.conv_in = nn.Conv( @@ -616,6 +658,11 @@ class FlaxDecoder(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + block_out_channels = self.block_out_channels # z to block_in @@ -788,6 +835,11 @@ class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin): dtype: jnp.dtype = jnp.float32 def setup(self): + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + self.encoder = FlaxEncoder( in_channels=self.config.in_channels, out_channels=self.config.latent_channels, diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py index 8a05cce209c5..c53fa81d5684 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/modular_pipeline.py @@ -128,6 +128,15 @@ def to_dict(self) -> Dict[str, Any]: """ return {**self.__dict__} + def __getattr__(self, name): + """ + Allow attribute access to intermediate values. If an attribute is not found in the object, look for it in the + intermediates dict. 
+        """
+        if name in self.intermediates:
+            return self.intermediates[name]
+        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
+
     def __repr__(self):
         def format_value(v):
             if hasattr(v, "shape") and hasattr(v, "dtype"):
@@ -638,7 +647,7 @@ def __call__(self, pipeline, state: PipelineState) -> PipelineState:
                 break
 
         if block is None:
-            logger.warning(f"skipping auto block: {self.__class__.__name__}")
+            logger.info(f"skipping auto block: {self.__class__.__name__}")
             return pipeline, state
 
         try:
@@ -1450,9 +1459,10 @@ def __init__(
         Args:
             blocks: `ModularPipelineBlocks` instance. If None, will attempt to load default blocks based on the
                 pipeline class name.
-            pretrained_model_name_or_path: Path to a pretrained pipeline configuration. If provided,
-                will load component specs (only for from_pretrained components) and config values from the saved
-                modular_model_index.json file.
+            pretrained_model_name_or_path: Path to a pretrained pipeline configuration. Can be None if the pipeline
+                does not require any additional loading config. If provided, will first try to load component specs
+                (only for from_pretrained components) and config values from `modular_model_index.json`, then
+                fall back to `model_index.json` for compatibility with standard non-modular repositories.
             components_manager: Optional ComponentsManager for managing multiple component cross different pipelines
                 and apply offloading strategies.
 
@@ -1501,18 +1511,70 @@ def __init__(
 
         # update component_specs and config_specs from modular_repo
         if pretrained_model_name_or_path is not None:
-            config_dict = self.load_config(pretrained_model_name_or_path, **kwargs)
-
-            for name, value in config_dict.items():
-                # all the components in modular_model_index.json are from_pretrained components
-                if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 3:
-                    library, class_name, component_spec_dict = value
-                    component_spec = self._dict_to_component_spec(name, component_spec_dict)
-                    component_spec.default_creation_method = "from_pretrained"
-                    self._component_specs[name] = component_spec
+            cache_dir = kwargs.pop("cache_dir", None)
+            force_download = kwargs.pop("force_download", False)
+            proxies = kwargs.pop("proxies", None)
+            token = kwargs.pop("token", None)
+            local_files_only = kwargs.pop("local_files_only", False)
+            revision = kwargs.pop("revision", None)
+
+            load_config_kwargs = {
+                "cache_dir": cache_dir,
+                "force_download": force_download,
+                "proxies": proxies,
+                "token": token,
+                "local_files_only": local_files_only,
+                "revision": revision,
+            }
+            # try to load modular_model_index.json
+            try:
+                config_dict = self.load_config(pretrained_model_name_or_path, **load_config_kwargs)
+            except EnvironmentError as e:
+                logger.debug(f"modular_model_index.json not found: {e}")
+                config_dict = None
+
+            # update component_specs and config_specs based on modular_model_index.json
+            if config_dict is not None:
+                for name, value in config_dict.items():
+                    # all the components in modular_model_index.json are from_pretrained components
+                    if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 3:
+                        library, class_name, component_spec_dict = value
+                        component_spec = self._dict_to_component_spec(name, component_spec_dict)
+                        component_spec.default_creation_method = "from_pretrained"
+                        self._component_specs[name] = component_spec
+
+                    elif name in self._config_specs:
+                        self._config_specs[name].default = value
+
+            # if modular_model_index.json is not found, try to load model_index.json
+            else:
+                logger.debug(" loading config from model_index.json")
+                try:
+                    from diffusers import DiffusionPipeline
+
+                    config_dict = DiffusionPipeline.load_config(pretrained_model_name_or_path, **load_config_kwargs)
+                except EnvironmentError as e:
+                    logger.debug(f" model_index.json not found in the repo: {e}")
+                    config_dict = None
+
+                # update component_specs and config_specs based on model_index.json
+                if config_dict is not None:
+                    for name, value in config_dict.items():
+                        if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 2:
+                            library, class_name = value
+                            component_spec_dict = {
+                                "repo": pretrained_model_name_or_path,
+                                "subfolder": name,
+                                "type_hint": (library, class_name),
+                            }
+                            component_spec = self._dict_to_component_spec(name, component_spec_dict)
+                            component_spec.default_creation_method = "from_pretrained"
+                            self._component_specs[name] = component_spec
+                        elif name in self._config_specs:
+                            self._config_specs[name].default = value
 
-                elif name in self._config_specs:
-                    self._config_specs[name].default = value
+        if len(kwargs) > 0:
+            logger.warning(f"Unexpected keyword arguments {list(kwargs.keys())} provided. These inputs will be ignored.")
 
         register_components_dict = {}
         for name, component_spec in self._component_specs.items():
@@ -1570,8 +1632,10 @@ def from_pretrained(
         Args:
             pretrained_model_name_or_path (`str` or `os.PathLike`, optional):
-                Path to a pretrained pipeline configuration. If provided, will load component specs (only for
-                from_pretrained components) and config values from the modular_model_index.json file.
+                Path to a pretrained pipeline configuration. It will first try to load the config from
+                `modular_model_index.json`, then fall back to `model_index.json` for compatibility with standard
+                non-modular repositories. If the repo does not contain any pipeline config, it will be set to None
+                during initialization.
         trust_remote_code (`bool`, optional):
             Whether to trust remote code when loading the pipeline, need to be set to True if you want to create
             pipeline blocks based on the custom code in `pretrained_model_name_or_path`
@@ -1607,11 +1671,35 @@ def from_pretrained(
         }
 
         try:
+            # try to load modular_model_index.json
             config_dict = cls.load_config(pretrained_model_name_or_path, **load_config_kwargs)
+        except EnvironmentError as e:
+            logger.debug(f" modular_model_index.json not found in the repo: {e}")
+            config_dict = None
+
+        if config_dict is not None:
             pipeline_class = _get_pipeline_class(cls, config=config_dict)
-        except EnvironmentError:
-            pipeline_class = cls
-            pretrained_model_name_or_path = None
+        else:
+            try:
+                logger.debug(" trying to load model_index.json")
+                from diffusers import DiffusionPipeline
+                from diffusers.pipelines.auto_pipeline import _get_model
+
+                config_dict = DiffusionPipeline.load_config(pretrained_model_name_or_path, **load_config_kwargs)
+            except EnvironmentError as e:
+                logger.debug(f" model_index.json not found in the repo: {e}")
+
+            if config_dict is not None:
+                logger.debug(" trying to determine the modular pipeline class from model_index.json")
+                standard_pipeline_class = _get_pipeline_class(cls, config=config_dict)
+                model_name = _get_model(standard_pipeline_class.__name__)
+                pipeline_class_name = MODULAR_PIPELINE_MAPPING.get(model_name, ModularPipeline.__name__)
+                diffusers_module = importlib.import_module("diffusers")
+                pipeline_class = getattr(diffusers_module, pipeline_class_name)
+            else:
+                # there is no config for the modular pipeline; assume the pipeline blocks do not need any from_pretrained components
+                pipeline_class = cls
+                pretrained_model_name_or_path = None
 
         pipeline = pipeline_class(
             blocks=blocks,
@@ -1949,17 +2037,31 @@ def update_components(self, **kwargs):
 
         for name, component in passed_components.items():
             current_component_spec = self._component_specs[name]
-            # warn if type changed
+            # log if type changed
             if current_component_spec.type_hint is not None and not isinstance(
                 component, current_component_spec.type_hint
             ):
-                logger.warning(
+                logger.info(
                     f"ModularPipeline.update_components: adding {name} with new type: {component.__class__.__name__}, previous type: {current_component_spec.type_hint.__name__}"
                 )
             # update _component_specs based on the new component
-            new_component_spec = ComponentSpec.from_component(name, component)
-            if new_component_spec.default_creation_method != current_component_spec.default_creation_method:
+            if component is None:
+                new_component_spec = current_component_spec
+                if hasattr(self, name) and getattr(self, name) is not None:
+                    logger.warning(f"ModularPipeline.update_components: setting {name} to None (spec unchanged)")
+            elif current_component_spec.default_creation_method == "from_pretrained" and not (
+                hasattr(component, "_diffusers_load_id") and component._diffusers_load_id is not None
+            ):
                 logger.warning(
+                    f"ModularPipeline.update_components: {name} has no valid _diffusers_load_id. "
+                    f"This will result in an empty loading spec; use ComponentSpec.load() to create a proper spec."
+                )
+                new_component_spec = ComponentSpec(name=name, type_hint=type(component))
+            else:
+                new_component_spec = ComponentSpec.from_component(name, component)
+
+            if new_component_spec.default_creation_method != current_component_spec.default_creation_method:
+                logger.info(
                     f"ModularPipeline.update_components: changing the default_creation_method of {name} from {current_component_spec.default_creation_method} to {new_component_spec.default_creation_method}."
                )
 
@@ -1980,7 +2082,7 @@ def update_components(self, **kwargs):
             if current_component_spec.type_hint is not None and not isinstance(
                 created_components[name], current_component_spec.type_hint
             ):
-                logger.warning(
+                logger.info(
                     f"ModularPipeline.update_components: adding {name} with new type: {created_components[name].__class__.__name__}, previous type: {current_component_spec.type_hint.__name__}"
                 )
             # update _component_specs based on the user passed component_spec
diff --git a/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py b/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py
index fbe0d22a52f9..fefa622f1a61 100644
--- a/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py
+++ b/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py
@@ -22,7 +22,7 @@
 from ...guiders import ClassifierFreeGuidance
 from ...image_processor import VaeImageProcessor
 from ...models import AutoencoderKL, ControlNetModel, ControlNetUnionModel, UNet2DConditionModel
-from ...pipelines.controlnet.multicontrolnet import MultiControlNetModel
+from ...models.controlnets.multicontrolnet import MultiControlNetModel
 from ...schedulers import EulerDiscreteScheduler
 from ...utils import logging
 from ...utils.torch_utils import randn_tensor, unwrap_module
diff --git a/src/diffusers/pipelines/allegro/pipeline_allegro.py b/src/diffusers/pipelines/allegro/pipeline_allegro.py
index 0993c8b912b0..2c9548706ecb 100644
--- a/src/diffusers/pipelines/allegro/pipeline_allegro.py
+++ b/src/diffusers/pipelines/allegro/pipeline_allegro.py
@@ -760,7 +760,7 @@ def __call__(
         latents (`torch.Tensor`, *optional*):
-            generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
             Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
-            tensor will ge generated by sampling using the supplied random `generator`.
+            generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+            tensor will be generated by sampling using the supplied random `generator`.
         prompt_embeds (`torch.Tensor`, *optional*):
             Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
             not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py
index 260669ddaf51..56d319027595 100644
--- a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py
+++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py
@@ -971,7 +971,7 @@ def __call__(
         latents (`torch.Tensor`, *optional*):
             Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
             generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-            tensor will ge generated by sampling using the supplied random `generator`.
+            tensor will be generated by sampling using the supplied random `generator`.
         prompt_embeds (`torch.Tensor`, *optional*):
             Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
             not provided, text embeddings will be generated from `prompt` input argument.
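The two-step config resolution the modular_pipeline.py hunks above implement can be condensed as follows. A sketch only, assuming `load_config` raises `EnvironmentError` when the config file is absent (as the diff does); the function name is illustrative:

    from diffusers import DiffusionPipeline

    def resolve_pipeline_config(cls, repo_id, **load_config_kwargs):
        # 1. prefer the modular layout
        try:
            return cls.load_config(repo_id, **load_config_kwargs)  # modular_model_index.json
        except EnvironmentError:
            pass
        # 2. fall back to a standard (non-modular) repository layout
        try:
            return DiffusionPipeline.load_config(repo_id, **load_config_kwargs)  # model_index.json
        except EnvironmentError:
            # no pretrained config at all; the blocks need no from_pretrained components
            return None

In the model_index.json case the entries carry only `(library, class_name)`, so the diff synthesizes the loading spec from the repo id, using the component name as the subfolder.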
diff --git a/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py b/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py index 7ff9925c452d..6251ca443533 100644 --- a/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py +++ b/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py @@ -497,7 +497,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py index 439dc511a0c9..8cd463c9709f 100644 --- a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +++ b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py @@ -228,7 +228,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by random sampling. + tensor will be generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/bria/pipeline_bria.py b/src/diffusers/pipelines/bria/pipeline_bria.py index 39ed484793d5..ebddfb0c0eee 100644 --- a/src/diffusers/pipelines/bria/pipeline_bria.py +++ b/src/diffusers/pipelines/bria/pipeline_bria.py @@ -506,7 +506,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 3a34ec2a4218..a3dd1422b876 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -676,7 +676,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py index e169db4a4d3e..233f4c43a1c2 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py @@ -744,7 +744,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py index 3c5994172c79..4ac33b24bbe1 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py @@ -571,7 +571,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py index cf6ccebc476d..c1335839f848 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py @@ -616,7 +616,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. control_video_latents (`torch.Tensor`, *optional*): Pre-generated control latents, sampled from a Gaussian distribution, to be used as inputs for controlled video generation. If not provided, `control_video` must be provided. 
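The `latents` docstrings corrected throughout this stretch all describe the same contract: the caller may pre-sample the initial noise tensor and reuse it to hold the noise fixed across prompts. A minimal sketch of that pattern, shown with Stable Diffusion for brevity; the checkpoint id and the `(1, 4, 64, 64)` latent shape for 512x512 outputs are assumptions about that pipeline, not taken from this patch:

    import torch
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    generator = torch.Generator("cpu").manual_seed(0)
    # SD v1 latents: 4 channels at 1/8 of the pixel resolution
    latents = torch.randn((1, 4, 64, 64), generator=generator, dtype=torch.float16)
    image_a = pipe("a red panda", latents=latents).images[0]  # fixed noise
    image_b = pipe("a snow fox", latents=latents).images[0]   # same noise, new prompt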
diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py index d1f02ca9c95e..225240927fad 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py @@ -671,7 +671,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py index 230c8ca296ba..897dc6d1b70a 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py @@ -641,7 +641,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py index f2f852c213ad..304a5c5ad00b 100644 --- a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py +++ b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py @@ -466,7 +466,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview4/pipeline_cogview4.py b/src/diffusers/pipelines/cogview4/pipeline_cogview4.py index d8374b694f0e..22510f5d9d50 100644 --- a/src/diffusers/pipelines/cogview4/pipeline_cogview4.py +++ b/src/diffusers/pipelines/cogview4/pipeline_cogview4.py @@ -466,7 +466,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py b/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py index ac8d786f04f7..e26b7ba415de 100644 --- a/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py +++ b/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py @@ -499,7 +499,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/consisid/pipeline_consisid.py b/src/diffusers/pipelines/consisid/pipeline_consisid.py index 644bd811f6c7..3e6c149d7f80 100644 --- a/src/diffusers/pipelines/consisid/pipeline_consisid.py +++ b/src/diffusers/pipelines/consisid/pipeline_consisid.py @@ -733,7 +733,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py index 598e3b5b6d16..c2ae408778b3 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py @@ -279,7 +279,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by random sampling. + tensor will be generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. 
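Several docstrings above also cite `guidance_scale` as `w` of equation 2 in the classifier-free guidance paper. The combination rule itself is a one-liner; the names here are illustrative:

    import torch

    def classifier_free_guidance(noise_uncond: torch.Tensor, noise_text: torch.Tensor, w: float) -> torch.Tensor:
        # w == 1.0 disables guidance; w > 1.0 pushes the estimate toward the prompt
        return noise_uncond + w * (noise_text - noise_uncond)

In practice the two predictions usually come from one batched denoiser call with the unconditional and text embeddings concatenated.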
diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py index 4aa2a62a53ac..397ab15715c2 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py @@ -1326,7 +1326,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py index 526e1ffcb2cc..4d4845c5a0a3 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py @@ -1197,7 +1197,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py index 7fa59395a8f1..fb58b222112a 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py @@ -1310,7 +1310,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py index 65e2fe661797..8fedb6d8609a 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py @@ -1185,7 +1185,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py index e31e3a017872..c763411ab5f7 100644 --- a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +++ b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py @@ -918,7 +918,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py index 000e080d3aea..c33cf979c6d8 100644 --- a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +++ b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py @@ -973,7 +973,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index f9034a58441c..d000d87e6a7b 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -880,7 +880,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. 
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. @@ -1151,7 +1151,7 @@ def invert( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control.py b/src/diffusers/pipelines/flux/pipeline_flux_control.py index 51d6ecbe3171..cc9ebb4754f7 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control.py @@ -674,7 +674,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py b/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py index c61d46daefa2..262345c75afc 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py @@ -712,7 +712,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py index 3de636361bc3..5acc5080f56d 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py @@ -838,7 +838,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. 
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -870,7 +870,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py index a39b9c9ce25c..507ec687347c 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py @@ -764,7 +764,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_fill.py b/src/diffusers/pipelines/flux/pipeline_flux_fill.py index d50db407a87d..956f6fb10652 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_fill.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_fill.py @@ -775,7 +775,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -807,7 +807,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
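[Note] The `latents` / `generator` contract that the typo fixes above keep re-stating is easiest to see in code. The sketch below is illustrative only (the checkpoint ID is the same one used in the `parameters` docstring example later in this patch, and the 4-channel, 64x64 latent shape is the usual Stable Diffusion default for 512x512 images, not something this patch changes): pre-sampling the noise yourself is what lets you "tweak the same generation with different prompts".

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")

# Sample the starting noise once so several prompts share identical latents.
latents = torch.randn(
    (1, pipe.unet.config.in_channels, 64, 64),  # 64 = 512 // vae_scale_factor (8)
    generator=torch.Generator().manual_seed(0),
)

# Same noise, different prompts: only the text conditioning changes.
image_a = pipe("a red bicycle", latents=latents.clone()).images[0]
image_b = pipe("a blue bicycle", latents=latents.clone()).images[0]

# Equivalently, omit `latents`: the pipeline samples the latents tensor
# internally using the supplied random `generator`, as the docstrings say.
image_c = pipe("a red bicycle", generator=torch.Generator().manual_seed(0)).images[0]
```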
diff --git a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py index 08e2f1277844..4a9f2bad6a34 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py @@ -787,7 +787,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py index 049414669390..3bfe82cf4382 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py @@ -834,7 +834,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -873,7 +873,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py index ce2941f3ddf4..87011299c425 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py @@ -808,7 +808,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
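[Note] The `mask_image_latent` wording fixed in the Fill and Inpaint hunks above describes a fallback: when only `mask_image` is given, the pipeline derives the mask latents itself. A minimal, hedged sketch (the checkpoint name follows the published FLUX.1 Fill weights; the file paths are placeholders, and the white-means-repaint mask convention is the usual diffusers inpainting one):

```py
import torch
from PIL import Image
from diffusers import FluxFillPipeline

pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
).to("cuda")

image = Image.open("input.png").convert("RGB")  # image to edit (placeholder path)
mask = Image.open("mask.png").convert("L")      # white = repaint, black = keep

# No `mask_image_latent` is supplied, so the mask latents tensor is
# generated from `mask_image` internally, as the corrected docstring states.
result = pipe(prompt="a wooden table", image=image, mask_image=mask).images[0]
result.save("output.png")
```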
diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py index 56a5e934a4e3..3cdb8caea2ff 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py @@ -1029,7 +1029,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py index 695f54f3d9db..bf36ca2fa3e2 100644 --- a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py +++ b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py @@ -789,7 +789,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py index 89fea8933752..92f612f54116 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -291,7 +291,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index 90d4042ae2a1..7286bcbee17b 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -271,7 +271,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. 
+ tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -502,7 +502,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -742,7 +742,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index 5645d2a56edd..cde0b8fd0a9d 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -469,7 +469,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py index 8781d706edf5..10ea8005c90d 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -212,7 +212,7 @@ def interpolate( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). @@ -437,7 +437,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index 3ecc0ebd5b25..429253e99898 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -175,7 +175,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index e0b88b41e8c5..fc2083247bb0 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -262,7 +262,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -512,7 +512,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -749,7 +749,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. 
Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py index b9f98f5458e2..c5faae82796b 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -211,7 +211,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 22171849bbf6..a61673293e1f 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -356,7 +356,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index 68954c2dc886..0e7e16f9dd5f 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -171,7 +171,7 @@ def interpolate( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). @@ -412,7 +412,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. 
guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py index 13ea2ad6af63..1a7198b9683a 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -195,7 +195,7 @@ def interpolate( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). diff --git a/src/diffusers/pipelines/kolors/pipeline_kolors.py b/src/diffusers/pipelines/kolors/pipeline_kolors.py index 1fa9f6ce1d43..948f73ed91eb 100644 --- a/src/diffusers/pipelines/kolors/pipeline_kolors.py +++ b/src/diffusers/pipelines/kolors/pipeline_kolors.py @@ -749,7 +749,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py b/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py index e3cf4f227624..67d49b9a8c5e 100644 --- a/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py +++ b/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py @@ -900,7 +900,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/latte/pipeline_latte.py b/src/diffusers/pipelines/latte/pipeline_latte.py index 0e60d5c7acbe..4d42a7049ec9 100644 --- a/src/diffusers/pipelines/latte/pipeline_latte.py +++ b/src/diffusers/pipelines/latte/pipeline_latte.py @@ -679,7 +679,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx.py b/src/diffusers/pipelines/ltx/pipeline_ltx.py index 77ba75170037..bd23e657c408 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx.py @@ -601,7 +601,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py b/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py index 217478f418ed..537588f67c95 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py @@ -938,7 +938,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py index 8793d81377cc..694378b4f040 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py @@ -665,7 +665,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
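[Note] The sibling `prompt_embeds` argument documented in each of these hunks supports the same reuse pattern: encode the text once, then feed the embeddings back in. A sketch against the Stable Diffusion `encode_prompt` helper (argument names follow the SD pipelines; treat the exact signature as an assumption for the video pipelines patched here):

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Encode the text once; reusing the result skips the text encoder on later calls.
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    "an astronaut riding a horse",
    device="cuda",
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt="blurry, low quality",
)

image = pipe(
    prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds
).images[0]
```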
diff --git a/src/diffusers/pipelines/lumina/pipeline_lumina.py b/src/diffusers/pipelines/lumina/pipeline_lumina.py index 2067444fa0df..b59c265646cd 100644 --- a/src/diffusers/pipelines/lumina/pipeline_lumina.py +++ b/src/diffusers/pipelines/lumina/pipeline_lumina.py @@ -697,7 +697,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py index 0fa0fe97734c..c4df7ba1c342 100644 --- a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py +++ b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py @@ -564,7 +564,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/mochi/pipeline_mochi.py b/src/diffusers/pipelines/mochi/pipeline_mochi.py index 3c0f908296df..5581529b2337 100644 --- a/src/diffusers/pipelines/mochi/pipeline_mochi.py +++ b/src/diffusers/pipelines/mochi/pipeline_mochi.py @@ -534,7 +534,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py index 1254b6725fef..f5a535b2dabd 100644 --- a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py +++ b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py @@ -366,7 +366,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. 
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py index 913a647fae3e..a6df1b22c8b9 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py @@ -1199,7 +1199,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_kolors.py b/src/diffusers/pipelines/pag/pipeline_pag_kolors.py index ed8e33e2ba8b..1368358db6ba 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_kolors.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_kolors.py @@ -769,7 +769,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py b/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py index d9d6d14a38d9..9031877b5b8d 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py @@ -644,7 +644,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sana.py b/src/diffusers/pipelines/pag/pipeline_pag_sana.py index 8dbae13a3f16..5857eeeb0443 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sana.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sana.py @@ -703,7 +703,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py index 96796f53b0bc..acb4e52340a6 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py @@ -761,7 +761,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py index 202120dc2c2b..e1819a79fb30 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py @@ -822,7 +822,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py index 450468413380..6b62ddcc7ca5 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py @@ -948,7 +948,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
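[Note] Since the hunks above and below touch the PAG (perturbed-attention guidance) pipelines, a short reminder of how these classes are normally reached may help reviewers. This follows the documented `enable_pag` routing through `AutoPipelineForText2Image`; the checkpoint and scale values are illustrative:

```py
import torch
from diffusers import AutoPipelineForText2Image

# `enable_pag=True` selects the PAG variant of the pipeline (for SDXL, the
# pag_sd_xl pipeline patched above) instead of the plain text-to-image class.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    enable_pag=True,
    torch_dtype=torch.float16,
).to("cuda")

# `pag_scale` sets the strength of perturbed-attention guidance at call time.
image = pipe("a photo of a corgi", pag_scale=3.0, guidance_scale=7.0).images[0]
```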
diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py index 8c355a5fb129..b6422b23648c 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py @@ -1111,7 +1111,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py index 7d42d1876a82..2e12a4a97fbe 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py @@ -1251,7 +1251,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/pipeline_flax_utils.py b/src/diffusers/pipelines/pipeline_flax_utils.py index ea2c0763d93a..f69968022ed7 100644 --- a/src/diffusers/pipelines/pipeline_flax_utils.py +++ b/src/diffusers/pipelines/pipeline_flax_utils.py @@ -312,6 +312,11 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P >>> dpm_params["scheduler"] = dpmpp_state ``` """ + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) + cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index d231989973e4..023feae4dd27 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -1709,6 +1709,36 @@ def _get_signature_types(cls): logger.warning(f"cannot get type annotation for Parameter {k} of {cls}.") return signature_types + @property + def parameters(self) -> Dict[str, Any]: + r""" + The `self.parameters` property can be useful to run different pipelines with the same weights and + configurations without reallocating additional memory. + + Returns (`dict`): + A dictionary containing all the optional parameters needed to initialize the pipeline. + + Examples: + + ```py + >>> from diffusers import ( + ... StableDiffusionPipeline, + ... StableDiffusionImg2ImgPipeline, + ... StableDiffusionInpaintPipeline, + ... 
) + + >>> text2img = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components, **text2img.parameters) + >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components, **text2img.parameters) + ``` + """ + expected_modules, optional_parameters = self._get_signature_keys(self) + pipeline_parameters = { + k: self.config[k] for k in self.config.keys() if not k.startswith("_") and k in optional_parameters + } + + return pipeline_parameters + @property def components(self) -> Dict[str, Any]: r""" diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py index bd69746be38c..1d718a4852a4 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -755,7 +755,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py index c14036cf94f3..bb169ac5c443 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py @@ -700,7 +700,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py index 8a2ee7b88e94..807910dfb1d6 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py @@ -435,7 +435,7 @@ def __call__( width: Optional[int] = None, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: float = 1.0, + guidance_scale: Optional[float] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -462,7 +462,12 @@ def __call__( `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). true_cfg_scale (`float`, *optional*, defaults to 1.0): - When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. 
+ Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by + setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to + generate images that are closely linked to the text `prompt`, usually at the expense of lower image + quality. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -474,17 +479,16 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to 3.5): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to - the text `prompt`, usually at the expense of lower image quality. - - This parameter in the pipeline is there to support future guidance-distilled models when they come up. - Note that passing `guidance_scale` to the pipeline is ineffective. To enable classifier-free guidance, - please pass `true_cfg_scale` and `negative_prompt` (even an empty negative prompt like " ") should - enable classifier-free guidance computations. + guidance_scale (`float`, *optional*, defaults to None): + A guidance scale value for guidance-distilled models. Unlike the traditional classifier-free guidance + where the guidance scale is applied during inference through noise prediction rescaling, + guidance-distilled models take the guidance scale directly as an input parameter during the forward + pass. Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to + generate images that are closely linked to the text `prompt`, usually at the expense of lower image + quality. This parameter in the pipeline is there to support future guidance-distilled models when they + come up. It is ignored when not using guidance-distilled models. To enable traditional classifier-free + guidance, please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like + " " enables classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -564,6 +568,16 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) + + if true_cfg_scale > 1 and not has_neg_prompt: + logger.warning( + f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided."
+ ) + elif true_cfg_scale <= 1 and has_neg_prompt: + logger.warning( + "negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -618,10 +632,17 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds and guidance_scale is None: + raise ValueError("guidance_scale is required for a guidance-distilled model.") + elif self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) - else: + elif not self.transformer.config.guidance_embeds and guidance_scale is not None: + logger.warning( + f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." + ) + guidance = None + elif not self.transformer.config.guidance_embeds and guidance_scale is None: + guidance = None if self.attention_kwargs is None: diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py index 6b383fa173bb..322b1d9d3a08 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py @@ -535,7 +535,7 @@ def __call__( width: Optional[int] = None, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: float = 1.0, + guidance_scale: Optional[float] = None, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, control_image: PipelineImageInput = None, @@ -566,7 +566,7 @@ def __call__( `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). true_cfg_scale (`float`, *optional*, defaults to 1.0): - When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by + setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to + generate images that are closely linked to the text `prompt`, usually at the expense of lower image + quality. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -578,12 +583,16 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to 3.5): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`.
Higher guidance scale encourages to generate images that are closely linked to - the text `prompt`, usually at the expense of lower image quality. + guidance_scale (`float`, *optional*, defaults to None): + A guidance scale value for guidance-distilled models. Unlike the traditional classifier-free guidance + where the guidance scale is applied during inference through noise prediction rescaling, + guidance-distilled models take the guidance scale directly as an input parameter during the forward + pass. Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to + generate images that are closely linked to the text `prompt`, usually at the expense of lower image + quality. This parameter in the pipeline is there to support future guidance-distilled models when they + come up. It is ignored when not using guidance-distilled models. To enable traditional classifier-free + guidance, please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like + " " enables classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -674,6 +683,16 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) + + if true_cfg_scale > 1 and not has_neg_prompt: + logger.warning( + f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." + ) + elif true_cfg_scale <= 1 and has_neg_prompt: + logger.warning( + "negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -822,10 +841,17 @@ def __call__( controlnet_keep.append(keeps[0] if isinstance(self.controlnet, QwenImageControlNetModel) else keeps) # handle guidance - if self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds and guidance_scale is None: + raise ValueError("guidance_scale is required for a guidance-distilled model.") + elif self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) - else: + elif not self.transformer.config.guidance_embeds and guidance_scale is not None: + logger.warning( + f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." + ) + guidance = None + elif not self.transformer.config.guidance_embeds and guidance_scale is None: + guidance = None if self.attention_kwargs is None: diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py index 45af11fc3950..ceb5492fab56 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py @@ -532,7 +532,7 @@ def __call__( width: Optional[int] = None, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: float = 1.0, + guidance_scale: Optional[float] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -559,7 +559,7 @@ def __call__( `negative_prompt_embeds` instead.
Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). true_cfg_scale (`float`, *optional*, defaults to 1.0): - When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by + setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to + generate images that are closely linked to the text `prompt`, usually at the expense of lower image + quality. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -571,17 +576,16 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to 3.5): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to - the text `prompt`, usually at the expense of lower image quality. - - This parameter in the pipeline is there to support future guidance-distilled models when they come up. - Note that passing `guidance_scale` to the pipeline is ineffective. To enable classifier-free guidance, - please pass `true_cfg_scale` and `negative_prompt` (even an empty negative prompt like " ") should - enable classifier-free guidance computations. + guidance_scale (`float`, *optional*, defaults to None): + A guidance scale value for guidance-distilled models. Unlike the traditional classifier-free guidance + where the guidance scale is applied during inference through noise prediction rescaling, + guidance-distilled models take the guidance scale directly as an input parameter during the forward + pass. Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to + generate images that are closely linked to the text `prompt`, usually at the expense of lower image + quality. This parameter in the pipeline is there to support future guidance-distilled models when they + come up. It is ignored when not using guidance-distilled models. To enable traditional classifier-free + guidance, please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like + " " enables classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -672,6 +676,16 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) + + if true_cfg_scale > 1 and not has_neg_prompt: + logger.warning( + f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." + ) + elif true_cfg_scale <= 1 and has_neg_prompt: + logger.warning( + "negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1." + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( image=prompt_image, @@ -734,10 +748,17 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds and guidance_scale is None: + raise ValueError("guidance_scale is required for guidance-distilled models.") + elif self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - else: + elif not self.transformer.config.guidance_embeds and guidance_scale is not None: + logger.warning( + f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." + ) + guidance = None + elif not self.transformer.config.guidance_embeds and guidance_scale is None: guidance = None if self.attention_kwargs is None: diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py index 43cbac78e156..8040852e53b4 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py @@ -511,7 +511,7 @@ def __call__( strength: float = 0.6, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: float = 1.0, + guidance_scale: Optional[float] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -544,7 +544,12 @@ def __call__( list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. true_cfg_scale (`float`, *optional*, defaults to 1.0): - When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by + setting `true_cfg_scale > 1` and providing a `negative_prompt`. A higher guidance scale encourages the + model to generate images closely linked to the text `prompt`, usually at the expense of lower image + quality. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -562,17 +567,16 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method.
If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to 3.5): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to - the text `prompt`, usually at the expense of lower image quality. - - This parameter in the pipeline is there to support future guidance-distilled models when they come up. - Note that passing `guidance_scale` to the pipeline is ineffective. To enable classifier-free guidance, - please pass `true_cfg_scale` and `negative_prompt` (even an empty negative prompt like " ") should - enable classifier-free guidance computations. + guidance_scale (`float`, *optional*, defaults to None): + A guidance scale value for guidance-distilled models. Unlike traditional classifier-free guidance, + where the guidance scale rescales the noise prediction at inference time, guidance-distilled models + take the guidance scale directly as an input to the forward pass. Guidance scale is enabled by setting + `guidance_scale > 1`. A higher guidance scale encourages the model to generate images closely linked to + the text `prompt`, usually at the expense of lower image quality. This parameter exists to support + future guidance-distilled models and is ignored when the loaded model is not guidance-distilled. To + enable traditional classifier-free guidance, pass `true_cfg_scale > 1.0` together with a + `negative_prompt` (even an empty negative prompt like " " enables classifier-free guidance + computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -657,6 +661,16 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) + + if true_cfg_scale > 1 and not has_neg_prompt: + logger.warning( + f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." + ) + elif true_cfg_scale <= 1 and has_neg_prompt: + logger.warning( + "negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1." + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -721,10 +735,17 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds and guidance_scale is None: + raise ValueError("guidance_scale is required for guidance-distilled models.") + elif self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - else: + elif not self.transformer.config.guidance_embeds and guidance_scale is not None: + logger.warning( + f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled."
+ ) + guidance = None + elif not self.transformer.config.guidance_embeds and guidance_scale is None: guidance = None if self.attention_kwargs is None: diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py index c2766baf8b08..4d502569a070 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py @@ -624,7 +624,7 @@ def __call__( strength: float = 0.6, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: float = 1.0, + guidance_scale: Optional[float] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -657,7 +657,12 @@ def __call__( list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. true_cfg_scale (`float`, *optional*, defaults to 1.0): - When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by + setting `true_cfg_scale > 1` and providing a `negative_prompt`. A higher guidance scale encourages the + model to generate images closely linked to the text `prompt`, usually at the expense of lower image + quality. mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -692,17 +697,16 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to 3.5): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to - the text `prompt`, usually at the expense of lower image quality. - - This parameter in the pipeline is there to support future guidance-distilled models when they come up. - Note that passing `guidance_scale` to the pipeline is ineffective.
To enable classifier-free guidance, - please pass `true_cfg_scale` and `negative_prompt` (even an empty negative prompt like " ") should - enable classifier-free guidance computations. + guidance_scale (`float`, *optional*, defaults to None): + A guidance scale value for guidance-distilled models. Unlike traditional classifier-free guidance, + where the guidance scale rescales the noise prediction at inference time, guidance-distilled models + take the guidance scale directly as an input to the forward pass. Guidance scale is enabled by setting + `guidance_scale > 1`. A higher guidance scale encourages the model to generate images closely linked to + the text `prompt`, usually at the expense of lower image quality. This parameter exists to support + future guidance-distilled models and is ignored when the loaded model is not guidance-distilled. To + enable traditional classifier-free guidance, pass `true_cfg_scale > 1.0` together with a + `negative_prompt` (even an empty negative prompt like " " enables classifier-free guidance + computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -801,6 +805,16 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) + + if true_cfg_scale > 1 and not has_neg_prompt: + logger.warning( + f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." + ) + elif true_cfg_scale <= 1 and has_neg_prompt: + logger.warning( + "negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1." + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -890,10 +904,17 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds and guidance_scale is None: + raise ValueError("guidance_scale is required for guidance-distilled models.") + elif self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - else: + elif not self.transformer.config.guidance_embeds and guidance_scale is not None: + logger.warning( + f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." + ) + guidance = None + elif not self.transformer.config.guidance_embeds and guidance_scale is None: guidance = None if self.attention_kwargs is None: diff --git a/src/diffusers/pipelines/sana/pipeline_sana.py b/src/diffusers/pipelines/sana/pipeline_sana.py index 103f57a23640..c54fec5b3a18 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana.py +++ b/src/diffusers/pipelines/sana/pipeline_sana.py @@ -781,7 +781,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py index cdc602b964cf..17d6dfd83e08 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py @@ -844,7 +844,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py index e8f9d8368f2a..a140cc16724b 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py @@ -663,7 +663,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py index bf290c3ced56..34d3b9d17e40 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py @@ -736,7 +736,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py index 6130a9873cb0..aa39983c4e43 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py @@ -362,7 +362,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py index b705c7e6e5f6..b3dc23f2e571 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py @@ -237,7 +237,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py index b3b46af206ed..9e63b3489ccd 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py @@ -442,7 +442,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py index 06c20768160b..6ebe0986a1ab 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py @@ -313,7 +313,7 @@ def __call__( latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
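Taken together, the QwenImage hunks above change the calling convention: `true_cfg_scale > 1` plus a `negative_prompt` turns on true classifier-free guidance, while `guidance_scale` is consumed only by guidance-distilled checkpoints (and is now mandatory for them). A minimal usage sketch of that convention, assuming the public `QwenImagePipeline` API patched above; the model id and scale values are illustrative, not prescribed by this patch:

```python
import torch
from diffusers import QwenImagePipeline

# Illustrative checkpoint; any QwenImage checkpoint behaves the same with the patched pipelines.
pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16).to("cuda")

# True classifier-free guidance: needs true_cfg_scale > 1 *and* a negative prompt
# (even " " counts). guidance_scale stays None for non-distilled checkpoints;
# passing it anyway now only triggers a warning and is ignored.
image = pipe(
    prompt="a cup of coffee on a wooden table",
    negative_prompt=" ",
    true_cfg_scale=4.0,
    num_inference_steps=50,
).images[0]

# For a guidance-distilled checkpoint (transformer.config.guidance_embeds is True),
# guidance_scale must now be passed explicitly or the pipeline raises ValueError:
# image = pipe(prompt="...", guidance_scale=3.5).images[0]
```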
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py index 141d849ec3d4..158bcabbebfd 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py @@ -378,7 +378,7 @@ def __call__( latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py index 882fa98b0762..a765163175a2 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py @@ -398,7 +398,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py index afee3f61e972..1618f89a49e3 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py @@ -854,7 +854,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
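The same four-branch `# handle guidance` block recurs in every QwenImage pipeline patched above. Condensed into a standalone function to make the decision table explicit (a sketch only; the helper name is hypothetical, and the pipelines inline this logic rather than share a helper):

```python
import torch

def resolve_guidance(transformer, guidance_scale, latents, device):
    # Hypothetical helper mirroring the branching added by this patch.
    distilled = transformer.config.guidance_embeds
    if distilled and guidance_scale is None:
        # Distilled models embed the scale in the forward pass, so it is mandatory.
        raise ValueError("guidance_scale is required for guidance-distilled models.")
    if distilled:
        guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
        return guidance.expand(latents.shape[0])
    if guidance_scale is not None:
        # Non-distilled models take no guidance input; the pipelines warn and drop it.
        print(f"guidance_scale={guidance_scale} ignored: model is not guidance-distilled.")
    return None
```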
diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py index fa1e0a4f3270..7e97909f42ca 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py @@ -909,7 +909,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py index 937f7195b21d..bed596e57c34 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py @@ -984,7 +984,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): @@ -1033,7 +1033,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py index 350a49282693..df2564a89b1d 100755 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py @@ -539,7 +539,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. 
+ tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py index 3b57555071f3..766ca37d8142 100644 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py @@ -652,7 +652,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py index 9ac64a0d8420..b97cf6f1f6f8 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py @@ -937,7 +937,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py index e63c7a55ce7b..44e8f4fe4b54 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py @@ -1097,7 +1097,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
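Aside from the recurring "ge generated" typo fix, the `latents` docstring these hunks touch describes a handy pattern: sample one noise tensor up front and reuse it so only the prompt changes between runs. A sketch against the SDXL pipeline (the model id is illustrative; the latent shape follows from the UNet's `in_channels` and the 8x VAE scale factor at 1024px):

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# One fixed noise tensor: batch 1, UNet latent channels, 1024 / 8 = 128 per side.
shape = (1, pipe.unet.config.in_channels, 128, 128)
latents = torch.randn(shape, generator=torch.Generator().manual_seed(0), dtype=torch.float16)

# Reusing identical latents isolates the effect of the prompt change.
image_a = pipe(prompt="a watercolor fox", latents=latents.clone()).images[0]
image_b = pipe(prompt="an oil painting of a fox", latents=latents.clone()).images[0]
```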
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py index f0bc9b9bb3e2..18f8536a7510 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py @@ -1251,7 +1251,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py index b1379d1b2955..58b008361782 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py @@ -695,7 +695,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py index 5c561721fcc7..1ce6987114a7 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -760,7 +760,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index 13183df47d4b..2802d690f3cc 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -971,7 +971,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py index a9fa43c1f5c5..288aae6c0d44 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +++ b/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -1051,7 +1051,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. motion_field_strength_x (`float`, *optional*, defaults to 12): Strength of motion in generated video along x-axis. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py index 68130baad709..4e5b32c10c8c 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py @@ -319,7 +319,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py index e7a1d4a4b248..8571211cd027 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py @@ -736,7 +736,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py index b9b02a6dd38a..bbdb60471fd1 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py @@ -263,7 +263,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py index 00a88ce34ed2..c54c1fefe8fe 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py @@ -222,7 +222,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py index a32f09204d27..e138b6e805c8 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py @@ -348,7 +348,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/schedulers/scheduling_utils_flax.py b/src/diffusers/schedulers/scheduling_utils_flax.py index e6ac78f63ee7..ffbe3b90207b 100644 --- a/src/diffusers/schedulers/scheduling_utils_flax.py +++ b/src/diffusers/schedulers/scheduling_utils_flax.py @@ -22,9 +22,11 @@ import jax.numpy as jnp from huggingface_hub.utils import validate_hf_hub_args -from ..utils import BaseOutput, PushToHubMixin +from ..utils import BaseOutput, PushToHubMixin, logging +logger = logging.get_logger(__name__) + SCHEDULER_CONFIG_NAME = "scheduler_config.json" @@ -133,6 +135,10 @@ def from_pretrained(
""" + logger.warning( + "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " + "recommend migrating to PyTorch classes or pinning your version of Diffusers." + ) config, kwargs = cls.load_config( pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, diff --git a/src/diffusers/utils/import_utils.py b/src/diffusers/utils/import_utils.py index ac209afb74a6..153be057381d 100644 --- a/src/diffusers/utils/import_utils.py +++ b/src/diffusers/utils/import_utils.py @@ -70,10 +70,11 @@ def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[b # Fallback for Python < 3.10 for dist in importlib_metadata.distributions(): _top_level_declared = (dist.read_text("top_level.txt") or "").split() - _infered_opt_names = { + # Infer top-level package names from file structure + _inferred_opt_names = { f.parts[0] if len(f.parts) > 1 else inspect.getmodulename(f) for f in (dist.files or []) } - {None} - _top_level_inferred = filter(lambda name: "." not in name, _infered_opt_names) + _top_level_inferred = filter(lambda name: "." not in name, _inferred_opt_names) for pkg in _top_level_declared or _top_level_inferred: _package_map[pkg].append(dist.metadata["Name"]) except Exception as _: @@ -119,7 +120,7 @@ def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[b _safetensors_available, _safetensors_version = _is_package_available("safetensors") else: - logger.info("Disabling Safetensors because USE_TF is set") + logger.info("Disabling Safetensors because USE_SAFETENSORS is set") _safetensors_available = False _onnxruntime_version = "N/A" diff --git a/tests/hooks/test_hooks.py b/tests/hooks/test_hooks.py index 1e845cc40c7d..8a83f60ff278 100644 --- a/tests/hooks/test_hooks.py +++ b/tests/hooks/test_hooks.py @@ -220,6 +220,7 @@ def test_inference(self): self.assertAlmostEqual(output1, output2, places=5) self.assertAlmostEqual(output1, output3, places=5) + self.assertAlmostEqual(output2, output3, places=5) def test_skip_layer_hook(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) diff --git a/tests/models/autoencoders/test_models_vae_flax.py b/tests/models/autoencoders/test_models_vae_flax.py deleted file mode 100644 index 3023a7c32c0d..000000000000 --- a/tests/models/autoencoders/test_models_vae_flax.py +++ /dev/null @@ -1,39 +0,0 @@ -import unittest - -from diffusers import FlaxAutoencoderKL -from diffusers.utils import is_flax_available - -from ...testing_utils import require_flax -from ..test_modeling_common_flax import FlaxModelTesterMixin - - -if is_flax_available(): - import jax - - -@require_flax -class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase): - model_class = FlaxAutoencoderKL - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - prng_key = jax.random.PRNGKey(0) - image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes)) - - return {"sample": image, "prng_key": prng_key} - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_out_channels": [32, 64], - "in_channels": 3, - "out_channels": 3, - "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], - "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], - "latent_channels": 4, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict diff --git a/tests/models/test_modeling_common_flax.py b/tests/models/test_modeling_common_flax.py deleted file mode 100644 index 41e970b56664..000000000000 --- 
a/tests/models/test_modeling_common_flax.py +++ /dev/null @@ -1,67 +0,0 @@ -import inspect - -from diffusers.utils import is_flax_available - -from ..testing_utils import require_flax - - -if is_flax_available(): - import jax - - -@require_flax -class FlaxModelTesterMixin: - def test_output(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**init_dict) - variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"]) - jax.lax.stop_gradient(variables) - - output = model.apply(variables, inputs_dict["sample"]) - - if isinstance(output, dict): - output = output.sample - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_forward_with_norm_groups(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["norm_num_groups"] = 16 - init_dict["block_out_channels"] = (16, 32) - - model = self.model_class(**init_dict) - variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"]) - jax.lax.stop_gradient(variables) - - output = model.apply(variables, inputs_dict["sample"]) - - if isinstance(output, dict): - output = output.sample - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_deprecated_kwargs(self): - has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters - has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0 - - if has_kwarg_in_model_class and not has_deprecated_kwarg: - raise ValueError( - f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs" - " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are" - " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" - " []`" - ) - - if not has_kwarg_in_model_class and has_deprecated_kwarg: - raise ValueError( - f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs" - " under the `_deprecated_kwargs` class attribute. 
Make sure to either add the `**kwargs` argument to" - f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument" - " from `_deprecated_kwargs = []`" - ) diff --git a/tests/models/unets/test_models_unet_2d_flax.py b/tests/models/unets/test_models_unet_2d_flax.py deleted file mode 100644 index 3bc9a04b3c04..000000000000 --- a/tests/models/unets/test_models_unet_2d_flax.py +++ /dev/null @@ -1,105 +0,0 @@ -import gc -import unittest - -from parameterized import parameterized - -from diffusers import FlaxUNet2DConditionModel -from diffusers.utils import is_flax_available - -from ...testing_utils import load_hf_numpy, require_flax, slow - - -if is_flax_available(): - import jax - import jax.numpy as jnp - - -@slow -@require_flax -class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase): - def get_file_format(self, seed, shape): - return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" - - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - - def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): - dtype = jnp.bfloat16 if fp16 else jnp.float32 - image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) - return image - - def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): - dtype = jnp.bfloat16 if fp16 else jnp.float32 - revision = "bf16" if fp16 else None - - model, params = FlaxUNet2DConditionModel.from_pretrained( - model_id, subfolder="unet", dtype=dtype, revision=revision - ) - return model, params - - def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): - dtype = jnp.bfloat16 if fp16 else jnp.float32 - hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) - return hidden_states - - @parameterized.expand( - [ - # fmt: off - [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], - [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], - [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], - [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], - # fmt: on - ] - ) - def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice): - model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) - latents = self.get_latents(seed, fp16=True) - encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) - - sample = model.apply( - {"params": params}, - latents, - jnp.array(timestep, dtype=jnp.int32), - encoder_hidden_states=encoder_hidden_states, - ).sample - - assert sample.shape == latents.shape - - output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) - expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32) - - # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware - assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) - - @parameterized.expand( - [ - # fmt: off - [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], - [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], - [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], - [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], - # fmt: on - ] - ) - def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, 
expected_slice): - model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) - latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) - encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) - - sample = model.apply( - {"params": params}, - latents, - jnp.array(timestep, dtype=jnp.int32), - encoder_hidden_states=encoder_hidden_states, - ).sample - - assert sample.shape == latents.shape - - output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) - expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32) - - # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware - assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) diff --git a/tests/pipelines/controlnet/test_flax_controlnet.py b/tests/pipelines/controlnet/test_flax_controlnet.py deleted file mode 100644 index e9cff4c9571e..000000000000 --- a/tests/pipelines/controlnet/test_flax_controlnet.py +++ /dev/null @@ -1,128 +0,0 @@ -# coding=utf-8 -# Copyright 2025 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import unittest - -from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline -from diffusers.utils import is_flax_available, load_image - -from ...testing_utils import require_flax, slow - - -if is_flax_available(): - import jax - import jax.numpy as jnp - from flax.jax_utils import replicate - from flax.training.common_utils import shard - - -@slow -@require_flax -class FlaxControlNetPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - - def test_canny(self): - controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( - "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16 - ) - pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 - ) - params["controlnet"] = controlnet_params - - prompts = "bird" - num_samples = jax.device_count() - prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) - - canny_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ) - processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) - - rng = jax.random.PRNGKey(0) - rng = jax.random.split(rng, jax.device_count()) - - p_params = replicate(params) - prompt_ids = shard(prompt_ids) - processed_image = shard(processed_image) - - images = pipe( - prompt_ids=prompt_ids, - image=processed_image, - params=p_params, - prng_seed=rng, - num_inference_steps=50, - jit=True, - ).images - assert images.shape == (jax.device_count(), 1, 768, 512, 3) - - images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) - image_slice = images[0, 253:256, 253:256, -1] - - output_slice = 
jnp.asarray(jax.device_get(image_slice.flatten())) - expected_slice = jnp.array( - [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] - ) - - assert jnp.abs(output_slice - expected_slice).max() < 1e-2 - - def test_pose(self): - controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( - "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16 - ) - pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( - "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 - ) - params["controlnet"] = controlnet_params - - prompts = "Chef in the kitchen" - num_samples = jax.device_count() - prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) - - pose_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" - ) - processed_image = pipe.prepare_image_inputs([pose_image] * num_samples) - - rng = jax.random.PRNGKey(0) - rng = jax.random.split(rng, jax.device_count()) - - p_params = replicate(params) - prompt_ids = shard(prompt_ids) - processed_image = shard(processed_image) - - images = pipe( - prompt_ids=prompt_ids, - image=processed_image, - params=p_params, - prng_seed=rng, - num_inference_steps=50, - jit=True, - ).images - assert images.shape == (jax.device_count(), 1, 768, 512, 3) - - images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) - image_slice = images[0, 253:256, 253:256, -1] - - output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) - expected_slice = jnp.array( - [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] - ) - - assert jnp.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/qwenimage/test_qwenimage_controlnet.py b/tests/pipelines/qwenimage/test_qwenimage_controlnet.py new file mode 100644 index 000000000000..c78e5cb233d3 --- /dev/null +++ b/tests/pipelines/qwenimage/test_qwenimage_controlnet.py @@ -0,0 +1,339 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import torch +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImageControlNetModel, + QwenImageControlNetPipeline, + QwenImageMultiControlNetModel, + QwenImageTransformer2DModel, +) +from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from diffusers.utils.torch_utils import randn_tensor + +from ..pipeline_params import TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class QwenControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = QwenImageControlNetPipeline + params = (TEXT_TO_IMAGE_PARAMS | frozenset(["control_image", "controlnet_conditioning_scale"])) - { + "cross_attention_kwargs" + } + batch_params = frozenset(["prompt", "negative_prompt", "control_image"]) + image_params = frozenset(["control_image"]) + image_latents_params = frozenset(["latents"]) + + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "control_image", + "controlnet_conditioning_scale", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + supports_dduf = False + test_xformers_attention = True + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = QwenImageTransformer2DModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + guidance_embeds=False, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + controlnet = QwenImageControlNetModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + z_dim = 4 + vae = AutoencoderKLQwenImage( + base_dim=z_dim * 6, + z_dim=z_dim, + dim_mult=[1, 2, 4], + num_res_blocks=1, + temperal_downsample=[False, True], + latents_mean=[0.0] * z_dim, + latents_std=[1.0] * z_dim, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1_000_000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, + ) + + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "controlnet": controlnet, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + control_image = randn_tensor( + (1, 3, 32, 32), + generator=generator, + 
device=torch.device(device), + dtype=torch.float32, + ) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "true_cfg_scale": 1.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "control_image": control_image, + "controlnet_conditioning_scale": 0.5, + "output_type": "pt", + } + + return inputs + + def test_qwen_controlnet(self): + device = "cpu" + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + # Expected slice from the generated image + expected_slice = torch.tensor( + [ + 0.4726, + 0.5549, + 0.6324, + 0.6548, + 0.4968, + 0.4639, + 0.4749, + 0.4898, + 0.4725, + 0.4645, + 0.4435, + 0.3339, + 0.3400, + 0.4630, + 0.3879, + 0.4406, + ] + ) + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_qwen_controlnet_multicondition(self): + device = "cpu" + components = self.get_dummy_components() + + components["controlnet"] = QwenImageMultiControlNetModel([components["controlnet"]]) + + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + control_image = inputs["control_image"] + inputs["control_image"] = [control_image, control_image] + inputs["controlnet_conditioning_scale"] = [0.5, 0.5] + + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + # Expected slice from the generated image + expected_slice = torch.tensor( + [ + 0.6239, + 0.6642, + 0.5768, + 0.6039, + 0.5270, + 0.5070, + 0.5006, + 0.5271, + 0.4506, + 0.3085, + 0.3435, + 0.5152, + 0.5096, + 0.5422, + 0.4286, + 0.5752, + ] + ) + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def 
test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + inputs["control_image"] = randn_tensor( + (1, 3, 128, 128), + generator=inputs["generator"], + device=torch.device(generator_device), + dtype=torch.float32, + ) + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + inputs["control_image"] = randn_tensor( + (1, 3, 128, 128), + generator=inputs["generator"], + device=torch.device(generator_device), + dtype=torch.float32, + ) + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py deleted file mode 100644 index 92effcacadb5..000000000000 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py +++ /dev/null @@ -1,109 +0,0 @@ -# coding=utf-8 -# Copyright 2025 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
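All of the deleted Flax pipeline tests in this patch lean on the same data-parallel idiom: replicate() copies the parameters to every device, shard() adds a leading device axis to the inputs, and jax.random.split() gives each device its own RNG key. A condensed sketch of the idiom, assuming only toy shapes and a toy pmapped function rather than a real pipeline:

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()

# one RNG key per device, so each device samples different noise
rng = jax.random.split(jax.random.PRNGKey(0), num_devices)

# one example per device; shard() reshapes the leading axis to (devices, per-device)
batch = jnp.ones((num_devices, 8))
sharded_batch = shard(batch)  # shape (num_devices, 1, 8)

params = {"w": jnp.ones((8,))}
p_params = replicate(params)  # every leaf gains a leading device axis

@jax.pmap
def apply(params, x, key):
    noise = jax.random.normal(key, x.shape)
    return x * params["w"] + 0.01 * noise

out = apply(p_params, sharded_batch, rng)
print(out.shape)  # (num_devices, 1, 8)

The leading jax.device_count() dimension asserted on images.shape throughout the tests below comes directly from this sharding.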
- -import gc -import unittest - -from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline -from diffusers.utils import is_flax_available - -from ...testing_utils import nightly, require_flax - - -if is_flax_available(): - import jax - import jax.numpy as jnp - from flax.jax_utils import replicate - from flax.training.common_utils import shard - - -@nightly -@require_flax -class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - - def test_stable_diffusion_flax(self): - sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-2", - variant="bf16", - dtype=jnp.bfloat16, - ) - - prompt = "A painting of a squirrel eating a burger" - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prompt_ids = sd_pipe.prepare_inputs(prompt) - - params = replicate(params) - prompt_ids = shard(prompt_ids) - - prng_seed = jax.random.PRNGKey(0) - prng_seed = jax.random.split(prng_seed, jax.device_count()) - - images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] - assert images.shape == (jax.device_count(), 1, 768, 768, 3) - - images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) - image_slice = images[0, 253:256, 253:256, -1] - - output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) - expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]) - - assert jnp.abs(output_slice - expected_slice).max() < 1e-2 - - -@nightly -@require_flax -class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - - def test_stable_diffusion_dpm_flax(self): - model_id = "stabilityai/stable-diffusion-2" - scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( - model_id, - scheduler=scheduler, - variant="bf16", - dtype=jnp.bfloat16, - ) - params["scheduler"] = scheduler_params - - prompt = "A painting of a squirrel eating a burger" - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prompt_ids = sd_pipe.prepare_inputs(prompt) - - params = replicate(params) - prompt_ids = shard(prompt_ids) - - prng_seed = jax.random.PRNGKey(0) - prng_seed = jax.random.split(prng_seed, jax.device_count()) - - images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] - assert images.shape == (jax.device_count(), 1, 768, 768, 3) - - images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) - image_slice = images[0, 253:256, 253:256, -1] - - output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) - expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]) - - assert jnp.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py deleted file mode 100644 index cdd088b531b8..000000000000 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding=utf-8 -# Copyright 2025 HuggingFace Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import unittest - -from diffusers import FlaxStableDiffusionInpaintPipeline -from diffusers.utils import is_flax_available, load_image - -from ...testing_utils import require_flax, slow - - -if is_flax_available(): - import jax - import jax.numpy as jnp - from flax.jax_utils import replicate - from flax.training.common_utils import shard - - -@slow -@require_flax -class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - - def test_stable_diffusion_inpaint_pipeline(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/sd2-inpaint/init_image.png" - ) - mask_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" - ) - - model_id = "xvjiarui/stable-diffusion-2-inpainting" - pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) - - prompt = "Face of a yellow cat, high resolution, sitting on a park bench" - - prng_seed = jax.random.PRNGKey(0) - num_inference_steps = 50 - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - init_image = num_samples * [init_image] - mask_image = num_samples * [mask_image] - prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image) - - # shard inputs and rng - params = replicate(params) - prng_seed = jax.random.split(prng_seed, jax.device_count()) - prompt_ids = shard(prompt_ids) - processed_masked_images = shard(processed_masked_images) - processed_masks = shard(processed_masks) - - output = pipeline( - prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True - ) - - images = output.images.reshape(num_samples, 512, 512, 3) - - image_slice = images[0, 253:256, 253:256, -1] - - output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) - expected_slice = jnp.array( - [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] - ) - - assert jnp.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/test_pipelines_flax.py b/tests/pipelines/test_pipelines_flax.py deleted file mode 100644 index dbb5c7bfed1d..000000000000 --- a/tests/pipelines/test_pipelines_flax.py +++ /dev/null @@ -1,261 +0,0 @@ -# coding=utf-8 -# Copyright 2025 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import tempfile -import unittest - -import numpy as np - -from diffusers.utils import is_flax_available - -from ..testing_utils import require_flax, slow - - -if is_flax_available(): - import jax - import jax.numpy as jnp - from flax.jax_utils import replicate - from flax.training.common_utils import shard - - from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline - - -@require_flax -class DownloadTests(unittest.TestCase): - def test_download_only_pytorch(self): - with tempfile.TemporaryDirectory() as tmpdirname: - # pipeline has Flax weights - _ = FlaxDiffusionPipeline.from_pretrained( - "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname - ) - - all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))] - files = [item for sublist in all_root_files for item in sublist] - - # None of the downloaded files should be a PyTorch file even if we have some here: - # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin - assert not any(f.endswith(".bin") for f in files) - - -@slow -@require_flax -class FlaxPipelineTests(unittest.TestCase): - def test_dummy_all_tpus(self): - pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None - ) - - prompt = ( - "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" - " field, close up, split lighting, cinematic" - ) - - prng_seed = jax.random.PRNGKey(0) - num_inference_steps = 4 - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prompt_ids = pipeline.prepare_inputs(prompt) - - # shard inputs and rng - params = replicate(params) - prng_seed = jax.random.split(prng_seed, num_samples) - prompt_ids = shard(prompt_ids) - - images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images - - assert images.shape == (num_samples, 1, 64, 64, 3) - if jax.device_count() == 8: - assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3 - assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1 - - images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) - assert len(images_pil) == num_samples - - def test_stable_diffusion_v1_4(self): - pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None - ) - - prompt = ( - "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" - " field, close up, split lighting, cinematic" - ) - - prng_seed = jax.random.PRNGKey(0) - num_inference_steps = 50 - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prompt_ids = pipeline.prepare_inputs(prompt) - - # shard inputs and rng - params = replicate(params) - prng_seed = jax.random.split(prng_seed, num_samples) - prompt_ids = shard(prompt_ids) - - images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images - - assert images.shape == (num_samples, 1, 512, 512, 3) - if jax.device_count() == 8: - assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-2 - assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 
5e-1 - - def test_stable_diffusion_v1_4_bfloat_16(self): - pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", variant="bf16", dtype=jnp.bfloat16, safety_checker=None - ) - - prompt = ( - "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" - " field, close up, split lighting, cinematic" - ) - - prng_seed = jax.random.PRNGKey(0) - num_inference_steps = 50 - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prompt_ids = pipeline.prepare_inputs(prompt) - - # shard inputs and rng - params = replicate(params) - prng_seed = jax.random.split(prng_seed, num_samples) - prompt_ids = shard(prompt_ids) - - images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images - - assert images.shape == (num_samples, 1, 512, 512, 3) - if jax.device_count() == 8: - assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 - assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 - - def test_stable_diffusion_v1_4_bfloat_16_with_safety(self): - pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", variant="bf16", dtype=jnp.bfloat16 - ) - - prompt = ( - "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" - " field, close up, split lighting, cinematic" - ) - - prng_seed = jax.random.PRNGKey(0) - num_inference_steps = 50 - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prompt_ids = pipeline.prepare_inputs(prompt) - - # shard inputs and rng - params = replicate(params) - prng_seed = jax.random.split(prng_seed, num_samples) - prompt_ids = shard(prompt_ids) - - images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images - - assert images.shape == (num_samples, 1, 512, 512, 3) - if jax.device_count() == 8: - assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 - assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 - - def test_stable_diffusion_v1_4_bfloat_16_ddim(self): - scheduler = FlaxDDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - set_alpha_to_one=False, - steps_offset=1, - ) - - pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - variant="bf16", - dtype=jnp.bfloat16, - scheduler=scheduler, - safety_checker=None, - ) - scheduler_state = scheduler.create_state() - - params["scheduler"] = scheduler_state - - prompt = ( - "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" - " field, close up, split lighting, cinematic" - ) - - prng_seed = jax.random.PRNGKey(0) - num_inference_steps = 50 - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prompt_ids = pipeline.prepare_inputs(prompt) - - # shard inputs and rng - params = replicate(params) - prng_seed = jax.random.split(prng_seed, num_samples) - prompt_ids = shard(prompt_ids) - - images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images - - assert images.shape == (num_samples, 1, 512, 512, 3) - if jax.device_count() == 8: - assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 5e-2 - assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1 - - def test_jax_memory_efficient_attention(self): - prompt = ( - "A 
cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" - " field, close up, split lighting, cinematic" - ) - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples) - - pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - variant="bf16", - dtype=jnp.bfloat16, - safety_checker=None, - ) - - params = replicate(params) - prompt_ids = pipeline.prepare_inputs(prompt) - prompt_ids = shard(prompt_ids) - images = pipeline(prompt_ids, params, prng_seed, jit=True).images - assert images.shape == (num_samples, 1, 512, 512, 3) - slice = images[2, 0, 256, 10:17, 1] - - # With memory efficient attention - pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - variant="bf16", - dtype=jnp.bfloat16, - safety_checker=None, - use_memory_efficient_attention=True, - ) - - params = replicate(params) - prompt_ids = pipeline.prepare_inputs(prompt) - prompt_ids = shard(prompt_ids) - images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images - assert images_eff.shape == (num_samples, 1, 512, 512, 3) - slice_eff = images_eff[2, 0, 256, 10:17, 1] - - # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` - # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. - assert abs(slice_eff - slice).max() < 1e-2 diff --git a/tests/schedulers/test_scheduler_flax.py b/tests/schedulers/test_scheduler_flax.py deleted file mode 100644 index e6e4fd7d7631..000000000000 --- a/tests/schedulers/test_scheduler_flax.py +++ /dev/null @@ -1,921 +0,0 @@ -# coding=utf-8 -# Copyright 2025 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
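The scheduler tests deleted below revolve around the functional-state API that set the Flax schedulers apart from their PyTorch counterparts: create_state(), set_timesteps(), and step() never mutate the scheduler itself; they return fresh state that must be threaded through the loop. A condensed sketch of that loop, assuming a diffusers version from before this removal (the toy model is a stand-in for a UNet):

import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()            # all bookkeeping lives here, not on `scheduler`
state = scheduler.set_timesteps(state, 10)  # returns a new state

def model(sample, t):
    # stand-in denoiser: any array -> array function works for this sketch
    return sample * t / (t + 1)

key = jax.random.PRNGKey(0)
sample = jnp.zeros((1, 3, 8, 8))

for t in state.timesteps:
    key, step_key = jax.random.split(key)
    residual = model(sample, t)
    output = scheduler.step(state, residual, t, sample, step_key)
    sample, state = output.prev_sample, output.state  # thread the new state forward

Forgetting to reassign the returned state is the characteristic bug of this API, which is why the tests below end every step with state = output.state.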
-import inspect -import tempfile -import unittest -from typing import Dict, List, Tuple - -from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler -from diffusers.utils import is_flax_available - -from ..testing_utils import require_flax - - -if is_flax_available(): - import jax - import jax.numpy as jnp - from jax import random - - jax_device = jax.default_backend() - - -@require_flax -class FlaxSchedulerCommonTest(unittest.TestCase): - scheduler_classes = () - forward_default_kwargs = () - - @property - def dummy_sample(self): - batch_size = 4 - num_channels = 3 - height = 8 - width = 8 - - key1, key2 = random.split(random.PRNGKey(0)) - sample = random.uniform(key1, (batch_size, num_channels, height, width)) - - return sample, key2 - - @property - def dummy_sample_deter(self): - batch_size = 4 - num_channels = 3 - height = 8 - width = 8 - - num_elems = batch_size * num_channels * height * width - sample = jnp.arange(num_elems) - sample = sample.reshape(num_channels, height, width, batch_size) - sample = sample / num_elems - return jnp.transpose(sample, (3, 0, 1, 2)) - - def get_scheduler_config(self): - raise NotImplementedError - - def dummy_model(self): - def model(sample, t, *args): - return sample * t / (t + 1) - - return model - - def check_over_configs(self, time_step=0, **config): - kwargs = dict(self.forward_default_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - sample, key = self.dummy_sample - residual = 0.1 * sample - - scheduler_config = self.get_scheduler_config(**config) - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample - new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample - - assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def check_over_forward(self, time_step=0, **forward_kwargs): - kwargs = dict(self.forward_default_kwargs) - kwargs.update(forward_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - sample, key = self.dummy_sample - residual = 0.1 * sample - - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output = scheduler.step(state, residual, time_step, sample, key, 
**kwargs).prev_sample - new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample - - assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def test_from_save_pretrained(self): - kwargs = dict(self.forward_default_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - sample, key = self.dummy_sample - residual = 0.1 * sample - - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample - new_output = new_scheduler.step(new_state, residual, 1, sample, key, **kwargs).prev_sample - - assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def test_step_shape(self): - kwargs = dict(self.forward_default_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - sample, key = self.dummy_sample - residual = 0.1 * sample - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output_0 = scheduler.step(state, residual, 0, sample, key, **kwargs).prev_sample - output_1 = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample - - self.assertEqual(output_0.shape, sample.shape) - self.assertEqual(output_0.shape, output_1.shape) - - def test_scheduler_outputs_equivalence(self): - def set_nan_tensor_to_zero(t): - return t.at[t != t].set(0) - - def recursive_check(tuple_object, dict_object): - if isinstance(tuple_object, (List, Tuple)): - for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): - recursive_check(tuple_iterable_value, dict_iterable_value) - elif isinstance(tuple_object, Dict): - for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): - recursive_check(tuple_iterable_value, dict_iterable_value) - elif tuple_object is None: - return - else: - self.assertTrue( - jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), - msg=( - "Tuple and dict output are not equal. Difference:" - f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" - f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" - f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." 
- ), - ) - - kwargs = dict(self.forward_default_kwargs) - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - sample, key = self.dummy_sample - residual = 0.1 * sample - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - outputs_dict = scheduler.step(state, residual, 0, sample, key, **kwargs) - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - outputs_tuple = scheduler.step(state, residual, 0, sample, key, return_dict=False, **kwargs) - - recursive_check(outputs_tuple[0], outputs_dict.prev_sample) - - def test_deprecated_kwargs(self): - for scheduler_class in self.scheduler_classes: - has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters - has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0 - - if has_kwarg_in_model_class and not has_deprecated_kwarg: - raise ValueError( - f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated" - " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if" - " there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" - " []`" - ) - - if not has_kwarg_in_model_class and has_deprecated_kwarg: - raise ValueError( - f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated" - " kwargs under the `_deprecated_kwargs` class attribute. 
Make sure to either add the `**kwargs`" - f" argument to {scheduler_class}.__init__ if there are deprecated arguments or remove the" - " deprecated argument from `_deprecated_kwargs = []`" - ) - - -@require_flax -class FlaxDDPMSchedulerTest(FlaxSchedulerCommonTest): - scheduler_classes = (FlaxDDPMScheduler,) - - def get_scheduler_config(self, **kwargs): - config = { - "num_train_timesteps": 1000, - "beta_start": 0.0001, - "beta_end": 0.02, - "beta_schedule": "linear", - "variance_type": "fixed_small", - "clip_sample": True, - } - - config.update(**kwargs) - return config - - def test_timesteps(self): - for timesteps in [1, 5, 100, 1000]: - self.check_over_configs(num_train_timesteps=timesteps) - - def test_betas(self): - for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): - self.check_over_configs(beta_start=beta_start, beta_end=beta_end) - - def test_schedules(self): - for schedule in ["linear", "squaredcos_cap_v2"]: - self.check_over_configs(beta_schedule=schedule) - - def test_variance_type(self): - for variance in ["fixed_small", "fixed_large", "other"]: - self.check_over_configs(variance_type=variance) - - def test_clip_sample(self): - for clip_sample in [True, False]: - self.check_over_configs(clip_sample=clip_sample) - - def test_time_indices(self): - for t in [0, 500, 999]: - self.check_over_forward(time_step=t) - - def test_variance(self): - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0) - 0.0)) < 1e-5 - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487) - 0.00979)) < 1e-5 - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999) - 0.02)) < 1e-5 - - def test_full_loop_no_noise(self): - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - num_trained_timesteps = len(scheduler) - - model = self.dummy_model() - sample = self.dummy_sample_deter - key1, key2 = random.split(random.PRNGKey(0)) - - for t in reversed(range(num_trained_timesteps)): - # 1. predict noise residual - residual = model(sample, t) - - # 2. 
predict previous mean of sample x_t-1 - output = scheduler.step(state, residual, t, sample, key1) - pred_prev_sample = output.prev_sample - state = output.state - key1, key2 = random.split(key2) - - # if t > 0: - # noise = self.dummy_sample_deter - # variance = scheduler.get_variance(t) ** (0.5) * noise - # - # sample = pred_prev_sample + variance - sample = pred_prev_sample - - result_sum = jnp.sum(jnp.abs(sample)) - result_mean = jnp.mean(jnp.abs(sample)) - - if jax_device == "tpu": - assert abs(result_sum - 255.0714) < 1e-2 - assert abs(result_mean - 0.332124) < 1e-3 - else: - assert abs(result_sum - 270.2) < 1e-1 - assert abs(result_mean - 0.3519494) < 1e-3 - - -@require_flax -class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest): - scheduler_classes = (FlaxDDIMScheduler,) - forward_default_kwargs = (("num_inference_steps", 50),) - - def get_scheduler_config(self, **kwargs): - config = { - "num_train_timesteps": 1000, - "beta_start": 0.0001, - "beta_end": 0.02, - "beta_schedule": "linear", - } - - config.update(**kwargs) - return config - - def full_loop(self, **config): - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config(**config) - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - key1, key2 = random.split(random.PRNGKey(0)) - - num_inference_steps = 10 - - model = self.dummy_model() - sample = self.dummy_sample_deter - - state = scheduler.set_timesteps(state, num_inference_steps) - - for t in state.timesteps: - residual = model(sample, t) - output = scheduler.step(state, residual, t, sample) - sample = output.prev_sample - state = output.state - key1, key2 = random.split(key2) - - return sample - - def check_over_configs(self, time_step=0, **config): - kwargs = dict(self.forward_default_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - sample, _ = self.dummy_sample - residual = 0.1 * sample - - scheduler_config = self.get_scheduler_config(**config) - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample - new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample - - assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def test_from_save_pretrained(self): - kwargs = dict(self.forward_default_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - sample, _ = self.dummy_sample - residual = 0.1 * sample - - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) - - if num_inference_steps is not None and hasattr(scheduler, 
"set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample - new_output = new_scheduler.step(new_state, residual, 1, sample, **kwargs).prev_sample - - assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def check_over_forward(self, time_step=0, **forward_kwargs): - kwargs = dict(self.forward_default_kwargs) - kwargs.update(forward_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - sample, _ = self.dummy_sample - residual = 0.1 * sample - - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample - new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample - - assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def test_scheduler_outputs_equivalence(self): - def set_nan_tensor_to_zero(t): - return t.at[t != t].set(0) - - def recursive_check(tuple_object, dict_object): - if isinstance(tuple_object, (List, Tuple)): - for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): - recursive_check(tuple_iterable_value, dict_iterable_value) - elif isinstance(tuple_object, Dict): - for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): - recursive_check(tuple_iterable_value, dict_iterable_value) - elif tuple_object is None: - return - else: - self.assertTrue( - jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), - msg=( - "Tuple and dict output are not equal. Difference:" - f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" - f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" - f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." 
- ), - ) - - kwargs = dict(self.forward_default_kwargs) - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - sample, _ = self.dummy_sample - residual = 0.1 * sample - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs) - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs) - - recursive_check(outputs_tuple[0], outputs_dict.prev_sample) - - def test_step_shape(self): - kwargs = dict(self.forward_default_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - sample, _ = self.dummy_sample - residual = 0.1 * sample - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - state = scheduler.set_timesteps(state, num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - output_0 = scheduler.step(state, residual, 0, sample, **kwargs).prev_sample - output_1 = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample - - self.assertEqual(output_0.shape, sample.shape) - self.assertEqual(output_0.shape, output_1.shape) - - def test_timesteps(self): - for timesteps in [100, 500, 1000]: - self.check_over_configs(num_train_timesteps=timesteps) - - def test_steps_offset(self): - for steps_offset in [0, 1]: - self.check_over_configs(steps_offset=steps_offset) - - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config(steps_offset=1) - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - state = scheduler.set_timesteps(state, 5) - assert jnp.equal(state.timesteps, jnp.array([801, 601, 401, 201, 1])).all() - - def test_betas(self): - for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): - self.check_over_configs(beta_start=beta_start, beta_end=beta_end) - - def test_schedules(self): - for schedule in ["linear", "squaredcos_cap_v2"]: - self.check_over_configs(beta_schedule=schedule) - - def test_time_indices(self): - for t in [1, 10, 49]: - self.check_over_forward(time_step=t) - - def test_inference_steps(self): - for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): - self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) - - def test_variance(self): - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5 - assert 
jnp.sum(jnp.abs(scheduler._get_variance(state, 420, 400) - 0.14771)) < 1e-5 - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 980, 960) - 0.32460)) < 1e-5 - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5 - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487, 486) - 0.00979)) < 1e-5 - assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999, 998) - 0.02)) < 1e-5 - - def test_full_loop_no_noise(self): - sample = self.full_loop() - - result_sum = jnp.sum(jnp.abs(sample)) - result_mean = jnp.mean(jnp.abs(sample)) - - assert abs(result_sum - 172.0067) < 1e-2 - assert abs(result_mean - 0.223967) < 1e-3 - - def test_full_loop_with_set_alpha_to_one(self): - # We specify different beta, so that the first alpha is 0.99 - sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) - result_sum = jnp.sum(jnp.abs(sample)) - result_mean = jnp.mean(jnp.abs(sample)) - - if jax_device == "tpu": - assert abs(result_sum - 149.8409) < 1e-2 - assert abs(result_mean - 0.1951) < 1e-3 - else: - assert abs(result_sum - 149.8295) < 1e-2 - assert abs(result_mean - 0.1951) < 1e-3 - - def test_full_loop_with_no_set_alpha_to_one(self): - # We specify different beta, so that the first alpha is 0.99 - sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) - result_sum = jnp.sum(jnp.abs(sample)) - result_mean = jnp.mean(jnp.abs(sample)) - - if jax_device == "tpu": - pass - # FIXME: both result_sum and result_mean are nan on TPU - # assert jnp.isnan(result_sum) - # assert jnp.isnan(result_mean) - else: - assert abs(result_sum - 149.0784) < 1e-2 - assert abs(result_mean - 0.1941) < 1e-3 - - def test_prediction_type(self): - for prediction_type in ["epsilon", "sample", "v_prediction"]: - self.check_over_configs(prediction_type=prediction_type) - - -@require_flax -class FlaxPNDMSchedulerTest(FlaxSchedulerCommonTest): - scheduler_classes = (FlaxPNDMScheduler,) - forward_default_kwargs = (("num_inference_steps", 50),) - - def get_scheduler_config(self, **kwargs): - config = { - "num_train_timesteps": 1000, - "beta_start": 0.0001, - "beta_end": 0.02, - "beta_schedule": "linear", - } - - config.update(**kwargs) - return config - - def check_over_configs(self, time_step=0, **config): - kwargs = dict(self.forward_default_kwargs) - num_inference_steps = kwargs.pop("num_inference_steps", None) - sample, _ = self.dummy_sample - residual = 0.1 * sample - dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]) - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config(**config) - scheduler = scheduler_class(**scheduler_config) - state = scheduler.create_state() - state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) - # copy over dummy past residuals - state = state.replace(ets=dummy_past_residuals[:]) - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) - new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape) - # copy over dummy past residuals - new_state = new_state.replace(ets=dummy_past_residuals[:]) - - (prev_sample, state) = scheduler.step_prk(state, residual, time_step, sample, **kwargs) - (new_prev_sample, new_state) = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs) - - assert jnp.sum(jnp.abs(prev_sample - new_prev_sample)) < 1e-5, "Scheduler outputs are not identical" - - output, _ = 
scheduler.step_plms(state, residual, time_step, sample, **kwargs)
-        new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
-
-        assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-    @unittest.skip("Test not supported.")
-    def test_from_save_pretrained(self):
-        pass
-
-    def test_scheduler_outputs_equivalence(self):
-        def set_nan_tensor_to_zero(t):
-            return t.at[t != t].set(0)
-
-        def recursive_check(tuple_object, dict_object):
-            if isinstance(tuple_object, (List, Tuple)):
-                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
-                    recursive_check(tuple_iterable_value, dict_iterable_value)
-            elif isinstance(tuple_object, Dict):
-                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
-                    recursive_check(tuple_iterable_value, dict_iterable_value)
-            elif tuple_object is None:
-                return
-            else:
-                self.assertTrue(
-                    jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
-                    msg=(
-                        "Tuple and dict output are not equal. Difference:"
-                        f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
-                        f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has"
-                        f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}."
-                    ),
-                )
-
-        kwargs = dict(self.forward_default_kwargs)
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-            state = scheduler.create_state()
-
-            sample, _ = self.dummy_sample
-            residual = 0.1 * sample
-
-            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
-                state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
-            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
-                kwargs["num_inference_steps"] = num_inference_steps
-
-            outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs)
-
-            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
-                state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
-            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
-                kwargs["num_inference_steps"] = num_inference_steps
-
-            outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs)
-
-            recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
-
-    def check_over_forward(self, time_step=0, **forward_kwargs):
-        kwargs = dict(self.forward_default_kwargs)
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-        sample, _ = self.dummy_sample
-        residual = 0.1 * sample
-        dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-            state = scheduler.create_state()
-            state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
-
-            # copy over dummy past residuals (must be after setting timesteps)
-            scheduler.ets = dummy_past_residuals[:]
-
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                scheduler.save_config(tmpdirname)
-                new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
-                # copy over dummy past residuals
-                new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape)
-
-                # copy over dummy past residual (must be after setting timesteps)
-                new_state.replace(ets=dummy_past_residuals[:])
-
-                output, state = scheduler.step_prk(state, residual, time_step, sample, **kwargs)
-                new_output, new_state = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs)
-
-                assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-                output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs)
-                new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
-
-                assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-    def full_loop(self, **config):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config(**config)
-        scheduler = scheduler_class(**scheduler_config)
-        state = scheduler.create_state()
-
-        num_inference_steps = 10
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter
-        state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
-
-        for i, t in enumerate(state.prk_timesteps):
-            residual = model(sample, t)
-            sample, state = scheduler.step_prk(state, residual, t, sample)
-
-        for i, t in enumerate(state.plms_timesteps):
-            residual = model(sample, t)
-            sample, state = scheduler.step_plms(state, residual, t, sample)
-
-        return sample
-
-    def test_step_shape(self):
-        kwargs = dict(self.forward_default_kwargs)
-
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-            state = scheduler.create_state()
-
-            sample, _ = self.dummy_sample
-            residual = 0.1 * sample
-
-            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
-                state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
-            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
-                kwargs["num_inference_steps"] = num_inference_steps
-
-            # copy over dummy past residuals (must be done after set_timesteps)
-            dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
-            state = state.replace(ets=dummy_past_residuals[:])
-
-            output_0, state = scheduler.step_prk(state, residual, 0, sample, **kwargs)
-            output_1, state = scheduler.step_prk(state, residual, 1, sample, **kwargs)
-
-            self.assertEqual(output_0.shape, sample.shape)
-            self.assertEqual(output_0.shape, output_1.shape)
-
-            output_0, state = scheduler.step_plms(state, residual, 0, sample, **kwargs)
-            output_1, state = scheduler.step_plms(state, residual, 1, sample, **kwargs)
-
-            self.assertEqual(output_0.shape, sample.shape)
-            self.assertEqual(output_0.shape, output_1.shape)
-
-    def test_timesteps(self):
-        for timesteps in [100, 1000]:
-            self.check_over_configs(num_train_timesteps=timesteps)
-
-    def test_steps_offset(self):
-        for steps_offset in [0, 1]:
-            self.check_over_configs(steps_offset=steps_offset)
-
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config(steps_offset=1)
-        scheduler = scheduler_class(**scheduler_config)
-        state = scheduler.create_state()
-        state = scheduler.set_timesteps(state, 10, shape=())
-        assert jnp.equal(
-            state.timesteps,
-            jnp.array([901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]),
-        ).all()
-
-    def test_betas(self):
-        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
-            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
-    def test_schedules(self):
-        for schedule in ["linear", "squaredcos_cap_v2"]:
-            self.check_over_configs(beta_schedule=schedule)
-
-    def test_time_indices(self):
-        for t in [1, 5, 10]:
-            self.check_over_forward(time_step=t)
-
-    def test_inference_steps(self):
-        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
-            self.check_over_forward(num_inference_steps=num_inference_steps)
-
-    def test_pow_of_3_inference_steps(self):
-        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
-        num_inference_steps = 27
-
-        for scheduler_class in self.scheduler_classes:
-            sample, _ = self.dummy_sample
-            residual = 0.1 * sample
-
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-            state = scheduler.create_state()
-
-            state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
-
-            # before power of 3 fix, would error on first step, so we only need to do two
-            for i, t in enumerate(state.prk_timesteps[:2]):
-                sample, state = scheduler.step_prk(state, residual, t, sample)
-
-    def test_inference_plms_no_past_residuals(self):
-        with self.assertRaises(ValueError):
-            scheduler_class = self.scheduler_classes[0]
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-            state = scheduler.create_state()
-
-            scheduler.step_plms(state, self.dummy_sample, 1, self.dummy_sample).prev_sample
-
-    def test_full_loop_no_noise(self):
-        sample = self.full_loop()
-        result_sum = jnp.sum(jnp.abs(sample))
-        result_mean = jnp.mean(jnp.abs(sample))
-
-        if jax_device == "tpu":
-            assert abs(result_sum - 198.1275) < 1e-2
-            assert abs(result_mean - 0.2580) < 1e-3
-        else:
-            assert abs(result_sum - 198.1318) < 1e-2
-            assert abs(result_mean - 0.2580) < 1e-3
-
-    def test_full_loop_with_set_alpha_to_one(self):
-        # We specify different beta, so that the first alpha is 0.99
-        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
-        result_sum = jnp.sum(jnp.abs(sample))
-        result_mean = jnp.mean(jnp.abs(sample))
-
-        if jax_device == "tpu":
-            assert abs(result_sum - 186.83226) < 1e-2
-            assert abs(result_mean - 0.24327) < 1e-3
-        else:
-            assert abs(result_sum - 186.9466) < 1e-2
-            assert abs(result_mean - 0.24342) < 1e-3
-
-    def test_full_loop_with_no_set_alpha_to_one(self):
-        # We specify different beta, so that the first alpha is 0.99
-        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
-        result_sum = jnp.sum(jnp.abs(sample))
-        result_mean = jnp.mean(jnp.abs(sample))
-
-        if jax_device == "tpu":
-            assert abs(result_sum - 186.83226) < 1e-2
-            assert abs(result_mean - 0.24327) < 1e-3
-        else:
-            assert abs(result_sum - 186.9482) < 1e-2
-            assert abs(result_mean - 0.2434) < 1e-3

From 1a917d3ac567b67cc2dba8cd30f47cd3c62483fe Mon Sep 17 00:00:00 2001
From: DN6
Date: Thu, 28 Aug 2025 15:02:28 +0530
Subject: [PATCH 7/7] Revert "merge main"

This reverts commit 65efbcead58644b31596ed2d714f7cee0e0238d3.

---
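Note on the Flax scheduler tests removed above: they all exercise a fully functional API in which scheduler state is created once, updated by `set_timesteps`, and threaded explicitly through every `step_prk`/`step_plms` call instead of being mutated in place. A minimal sketch of that pattern, mirroring the deleted `full_loop` helper (the lambda stands in for the tests' dummy denoising model):

```python
import jax.numpy as jnp
from diffusers import FlaxPNDMScheduler

scheduler = FlaxPNDMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()

sample = jnp.zeros((4, 3, 8, 8))
state = scheduler.set_timesteps(state, 10, shape=sample.shape)

model = lambda sample, t: 0.1 * sample  # stand-in for a real denoising model

# PNDM warms up with Runge-Kutta (PRK) steps, then switches to linear multistep
# (PLMS) steps; each call returns the new sample and the updated immutable state
for t in state.prk_timesteps:
    residual = model(sample, t)
    sample, state = scheduler.step_prk(state, residual, t, sample)

for t in state.plms_timesteps:
    residual = model(sample, t)
    sample, state = scheduler.step_plms(state, residual, t, sample)
```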
 .github/workflows/pr_flax_dependency_test.yml | 38 +
 README.md | 10 +-
 docker/diffusers-flax-cpu/Dockerfile | 49 +
 docker/diffusers-flax-tpu/Dockerfile | 51 +
 docs/source/en/_toctree.yml | 12 +-
 docs/source/en/api/models/autoencoderkl.md | 12 +
 docs/source/en/api/models/controlnet.md | 8 +
 docs/source/en/api/models/overview.md | 4 +
 docs/source/en/api/models/unet2d-cond.md | 6 +
 docs/source/en/api/outputs.md | 4 +
 docs/source/en/api/pipelines/controlnet.md | 8 +
 docs/source/en/api/pipelines/overview.md | 4 +
 docs/source/en/api/pipelines/skyreels_v2.md | 275 +++---
 .../api/pipelines/stable_diffusion/img2img.md | 10 +
 .../api/pipelines/stable_diffusion/inpaint.md | 10 +
 .../pipelines/stable_diffusion/text2img.md | 10 +
 docs/source/en/api/pipelines/wan.md | 4 +-
 docs/source/en/installation.md | 25 +-
 docs/source/en/optimization/fp16.md | 2 +-
 .../en/optimization/speed-memory-optims.md | 5 +-
 docs/source/en/training/controlnet.md | 83 +-
 docs/source/en/training/dreambooth.md | 80 +-
 docs/source/en/training/kandinsky.md | 2 +-
 docs/source/en/training/lora.md | 14 +
 docs/source/en/training/overview.md | 26 +-
 docs/source/en/training/sdxl.md | 2 +-
 docs/source/en/training/text2image.md | 85 +-
 docs/source/en/training/text_inversion.md | 84 +-
 .../en/tutorials/using_peft_for_inference.md | 4 +-
 docs/source/en/using-diffusers/loading.md | 641 +++++++++--
 .../en/using-diffusers/other-formats.md | 3 +-
 .../en/using-diffusers/reusing_seeds.md | 119 ++-
 docs/source/en/using-diffusers/schedulers.md | 47 +
 .../stable_diffusion_jax_how_to.md | 225 +++++
 .../source/en/using-diffusers/text-img2vid.md | 2 +-
 examples/community/README.md | 2 -
 .../community/composable_stable_diffusion.py | 2 +-
 examples/community/imagic_stable_diffusion.py | 2 +-
 examples/community/img2img_inpainting.py | 2 +-
 .../community/interpolate_stable_diffusion.py | 2 +-
 examples/community/lpw_stable_diffusion.py | 4 +-
 .../community/lpw_stable_diffusion_onnx.py | 4 +-
 examples/community/lpw_stable_diffusion_xl.py | 2 +-
 .../multilingual_stable_diffusion.py | 2 +-
 .../pipeline_controlnet_xl_kolors.py | 2 +-
 .../pipeline_controlnet_xl_kolors_img2img.py | 2 +-
 .../pipeline_controlnet_xl_kolors_inpaint.py | 2 +-
 .../community/pipeline_demofusion_sdxl.py | 2 +-
 .../pipeline_faithdiff_stable_diffusion_xl.py | 2 +-
 .../pipeline_flux_differential_img2img.py | 4 +-
 .../pipeline_flux_kontext_multiple_images.py | 2 +-
 .../community/pipeline_flux_rf_inversion.py | 2 +-
 .../pipeline_flux_semantic_guidance.py | 2 +-
 examples/community/pipeline_flux_with_cfg.py | 2 +-
 .../pipeline_kolors_differential_img2img.py | 2 +-
 .../community/pipeline_kolors_inpainting.py | 2 +-
 examples/community/pipeline_prompt2prompt.py | 2 +-
 .../community/pipeline_sdxl_style_aligned.py | 2 +-
 ...stable_diffusion_3_differential_img2img.py | 2 +-
 ...ine_stable_diffusion_3_instruct_pix2pix.py | 2 +-
 ...ne_stable_diffusion_xl_attentive_eraser.py | 2 +-
 ..._stable_diffusion_xl_controlnet_adapter.py | 2 +-
 ...diffusion_xl_controlnet_adapter_inpaint.py | 2 +-
 ...table_diffusion_xl_differential_img2img.py | 2 +-
 .../pipeline_stable_diffusion_xl_ipex.py | 2 +-
 examples/community/pipeline_stg_cogvideox.py | 2 +-
 examples/community/pipeline_stg_ltx.py | 2 +-
 .../community/pipeline_stg_ltx_image2video.py | 2 +-
 examples/community/pipeline_stg_mochi.py | 2 +-
 examples/community/pipeline_zero1to3.py | 2 +-
 examples/community/rerender_a_video.py | 2 +-
 examples/community/run_onnx_controlnet.py | 2 +-
 examples/community/run_tensorrt_controlnet.py | 2 +-
 examples/community/sd_text2img_k_diffusion.py | 2 +-
 .../community/seed_resize_stable_diffusion.py | 2 +-
 .../community/stable_diffusion_comparison.py | 2 +-
 .../stable_diffusion_controlnet_img2img.py | 2 +-
 .../stable_diffusion_controlnet_inpaint.py | 2 +-
 ...le_diffusion_controlnet_inpaint_img2img.py | 2 +-
 .../stable_diffusion_controlnet_reference.py | 2 +-
 examples/community/stable_diffusion_ipex.py | 2 +-
 .../community/stable_diffusion_reference.py | 2 +-
 .../community/stable_diffusion_repaint.py | 2 +-
 .../stable_diffusion_xl_reference.py | 2 +-
 examples/community/text_inpainting.py | 2 +-
 examples/community/tiled_upscaling.py | 2 +-
 .../community/wildcard_stable_diffusion.py | 2 +-
 examples/dreambooth/README_qwen.md | 2 +-
 examples/dreambooth/train_dreambooth_flux.py | 8 -
 .../dreambooth/train_dreambooth_lora_flux.py | 9 -
 .../train_dreambooth_lora_flux_kontext.py | 8 -
 .../pipeline_pixart_alpha_controlnet.py | 2 +-
 .../research_projects/rdm/pipeline_rdm.py | 2 +-
 src/diffusers/models/attention_flax.py | 30 -
 .../models/controlnets/controlnet_flax.py | 15 +-
 src/diffusers/models/embeddings_flax.py | 15 -
 src/diffusers/models/modeling_flax_utils.py | 4 -
 src/diffusers/models/resnet_flax.py | 20 -
 .../models/transformers/transformer_flux.py | 17 +-
 .../transformers/transformer_skyreels_v2.py | 334 ++---
 .../models/unets/unet_2d_blocks_flax.py | 29 -
 .../models/unets/unet_2d_condition_flax.py | 10 +-
 src/diffusers/models/vae_flax.py | 54 +-
 .../modular_pipelines/modular_pipeline.py | 152 +--
 .../stable_diffusion_xl/before_denoise.py | 2 +-
 .../pipelines/allegro/pipeline_allegro.py | 2 +-
 .../animatediff/pipeline_animatediff_sdxl.py | 2 +-
 .../pipelines/aura_flow/pipeline_aura_flow.py | 2 +-
 .../blip_diffusion/pipeline_blip_diffusion.py | 2 +-
 src/diffusers/pipelines/bria/pipeline_bria.py | 2 +-
 .../pipelines/chroma/pipeline_chroma.py | 2 +-
 .../chroma/pipeline_chroma_img2img.py | 2 +-
 .../pipelines/cogvideo/pipeline_cogvideox.py | 2 +-
 .../pipeline_cogvideox_fun_control.py | 2 +-
 .../pipeline_cogvideox_image2video.py | 2 +-
 .../pipeline_cogvideox_video2video.py | 2 +-
 .../cogview3/pipeline_cogview3plus.py | 2 +-
 .../pipelines/cogview4/pipeline_cogview4.py | 2 +-
 .../cogview4/pipeline_cogview4_control.py | 2 +-
 .../pipelines/consisid/pipeline_consisid.py | 2 +-
 .../pipeline_controlnet_blip_diffusion.py | 2 +-
 .../pipeline_controlnet_inpaint_sd_xl.py | 2 +-
 .../pipeline_controlnet_sd_xl_img2img.py | 2 +-
 ...pipeline_controlnet_union_inpaint_sd_xl.py | 2 +-
 ...pipeline_controlnet_union_sd_xl_img2img.py | 2 +-
 .../pipeline_stable_diffusion_3_controlnet.py | 2 +-
 ...table_diffusion_3_controlnet_inpainting.py | 2 +-
 .../pipeline_stable_diffusion_pix2pix_zero.py | 4 +-
 .../pipelines/flux/pipeline_flux_control.py | 2 +-
 .../flux/pipeline_flux_control_img2img.py | 2 +-
 .../flux/pipeline_flux_control_inpaint.py | 4 +-
 .../flux/pipeline_flux_controlnet.py | 2 +-
 .../pipelines/flux/pipeline_flux_fill.py | 4 +-
 .../pipelines/flux/pipeline_flux_img2img.py | 2 +-
 .../pipelines/flux/pipeline_flux_inpaint.py | 4 +-
 .../pipelines/flux/pipeline_flux_kontext.py | 2 +-
 .../flux/pipeline_flux_kontext_inpaint.py | 2 +-
 .../hidream_image/pipeline_hidream_image.py | 2 +-
 .../pipelines/kandinsky/pipeline_kandinsky.py | 2 +-
 .../kandinsky/pipeline_kandinsky_combined.py | 6 +-
 .../kandinsky/pipeline_kandinsky_inpaint.py | 2 +-
 .../kandinsky/pipeline_kandinsky_prior.py | 4 +-
 .../kandinsky2_2/pipeline_kandinsky2_2.py | 2 +-
 .../pipeline_kandinsky2_2_combined.py | 6 +-
 .../pipeline_kandinsky2_2_controlnet.py | 2 +-
 .../pipeline_kandinsky2_2_inpainting.py | 2 +-
 .../pipeline_kandinsky2_2_prior.py | 4 +-
 .../pipeline_kandinsky2_2_prior_emb2emb.py | 2 +-
 .../pipelines/kolors/pipeline_kolors.py | 2 +-
 .../kolors/pipeline_kolors_img2img.py | 2 +-
 .../pipelines/latte/pipeline_latte.py | 2 +-
 src/diffusers/pipelines/ltx/pipeline_ltx.py | 2 +-
 .../pipelines/ltx/pipeline_ltx_condition.py | 2 +-
 .../pipelines/ltx/pipeline_ltx_image2video.py | 2 +-
 .../pipelines/lumina/pipeline_lumina.py | 2 +-
 .../pipelines/lumina2/pipeline_lumina2.py | 2 +-
 .../pipelines/mochi/pipeline_mochi.py | 2 +-
 .../pipelines/omnigen/pipeline_omnigen.py | 2 +-
 .../pipeline_pag_controlnet_sd_xl_img2img.py | 2 +-
 .../pipelines/pag/pipeline_pag_kolors.py | 2 +-
 .../pag/pipeline_pag_pixart_sigma.py | 2 +-
 .../pipelines/pag/pipeline_pag_sana.py | 2 +-
 .../pipelines/pag/pipeline_pag_sd_3.py | 2 +-
 .../pag/pipeline_pag_sd_3_img2img.py | 2 +-
 .../pipelines/pag/pipeline_pag_sd_xl.py | 2 +-
 .../pag/pipeline_pag_sd_xl_img2img.py | 2 +-
 .../pag/pipeline_pag_sd_xl_inpaint.py | 2 +-
 .../pipelines/pipeline_flax_utils.py | 5 -
 src/diffusers/pipelines/pipeline_utils.py | 30 -
 .../pixart_alpha/pipeline_pixart_alpha.py | 2 +-
 .../pixart_alpha/pipeline_pixart_sigma.py | 2 +-
 .../pipelines/qwenimage/pipeline_qwenimage.py | 51 +-
 .../pipeline_qwenimage_controlnet.py | 46 +-
 .../qwenimage/pipeline_qwenimage_edit.py | 51 +-
 .../qwenimage/pipeline_qwenimage_img2img.py | 51 +-
 .../qwenimage/pipeline_qwenimage_inpaint.py | 53 +-
 src/diffusers/pipelines/sana/pipeline_sana.py | 2 +-
 .../sana/pipeline_sana_controlnet.py | 2 +-
 .../pipelines/sana/pipeline_sana_sprint.py | 2 +-
 .../sana/pipeline_sana_sprint_img2img.py | 2 +-
 .../stable_cascade/pipeline_stable_cascade.py | 2 +-
 .../pipeline_stable_cascade_combined.py | 2 +-
 .../pipeline_stable_cascade_prior.py | 2 +-
 .../pipeline_onnx_stable_diffusion.py | 2 +-
 .../pipeline_onnx_stable_diffusion_inpaint.py | 2 +-
 .../pipeline_onnx_stable_diffusion_upscale.py | 2 +-
 .../pipeline_stable_diffusion_3.py | 2 +-
 .../pipeline_stable_diffusion_3_img2img.py | 2 +-
 .../pipeline_stable_diffusion_3_inpaint.py | 4 +-
 .../pipeline_stable_diffusion_k_diffusion.py | 2 +-
 ...ipeline_stable_diffusion_xl_k_diffusion.py | 2 +-
 .../pipeline_stable_diffusion_xl.py | 2 +-
 .../pipeline_stable_diffusion_xl_img2img.py | 2 +-
 .../pipeline_stable_diffusion_xl_inpaint.py | 2 +-
 ...ne_stable_diffusion_xl_instruct_pix2pix.py | 2 +-
 .../pipeline_stable_diffusion_adapter.py | 2 +-
 .../pipeline_stable_diffusion_xl_adapter.py | 2 +-
 .../pipeline_text_to_video_zero_sdxl.py | 2 +-
 .../pipeline_visualcloze_combined.py | 2 +-
 .../pipeline_visualcloze_generation.py | 2 +-
 .../wuerstchen/pipeline_wuerstchen.py | 2 +-
 .../pipeline_wuerstchen_combined.py | 2 +-
 .../wuerstchen/pipeline_wuerstchen_prior.py | 2 +-
 .../schedulers/scheduling_utils_flax.py | 8 +-
 src/diffusers/utils/import_utils.py | 7 +-
 tests/hooks/test_hooks.py | 1 -
 .../autoencoders/test_models_vae_flax.py | 39 +
 tests/models/test_modeling_common_flax.py | 67 ++
 .../models/unets/test_models_unet_2d_flax.py | 105 ++
 .../controlnet/test_flax_controlnet.py | 128 +++
 .../qwenimage/test_qwenimage_controlnet.py | 339 -------
 .../test_stable_diffusion_flax.py | 109 +++
 .../test_stable_diffusion_flax_inpaint.py | 83 ++
 tests/pipelines/test_pipelines_flax.py | 261 +++++
 tests/schedulers/test_scheduler_flax.py | 921 ++++++++++++++++++
 215 files changed, 3675 insertions(+), 1671 deletions(-)
 create mode 100644 .github/workflows/pr_flax_dependency_test.yml
 create mode 100644 docker/diffusers-flax-cpu/Dockerfile
 create mode 100644 docker/diffusers-flax-tpu/Dockerfile
 create mode 100644 docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md
 create mode 100644 tests/models/autoencoders/test_models_vae_flax.py
 create mode 100644 tests/models/test_modeling_common_flax.py
 create mode 100644 tests/models/unets/test_models_unet_2d_flax.py
 create mode 100644 tests/pipelines/controlnet/test_flax_controlnet.py
 delete mode 100644 tests/pipelines/qwenimage/test_qwenimage_controlnet.py
 create mode 100644 tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
 create mode 100644 tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
 create mode 100644 tests/pipelines/test_pipelines_flax.py
 create mode 100644 tests/schedulers/test_scheduler_flax.py

diff --git a/.github/workflows/pr_flax_dependency_test.yml b/.github/workflows/pr_flax_dependency_test.yml
new file mode 100644
index 000000000000..e091b5f2d7b3
--- /dev/null
+++ b/.github/workflows/pr_flax_dependency_test.yml
@@ -0,0 +1,38 @@
+name: Run Flax dependency tests
+
+on:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - "src/diffusers/**.py"
+  push:
+    branches:
+      - main
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  check_flax_dependencies:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.8"
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m pip install --upgrade pip uv
+          python -m uv pip install -e .
+          python -m uv pip install "jax[cpu]>=0.2.16,!=0.3.2"
+          python -m uv pip install "flax>=0.4.1"
+          python -m uv pip install "jaxlib>=0.1.65"
+          python -m uv pip install pytest
+      - name: Check for soft dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          pytest tests/others/test_dependencies.py
diff --git a/README.md b/README.md
index 68202ba095ee..dac3b3598aaf 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ limitations under the License.
 
 ## Installation
 
-We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/), please refer to their official documentation.
+We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation.
 
 ### PyTorch
 
@@ -53,6 +53,14 @@ With `conda` (maintained by the community):
 conda install -c conda-forge diffusers
 ```
 
+### Flax
+
+With `pip` (official package):
+
+```bash
+pip install --upgrade diffusers[flax]
+```
+
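+You can then sanity-check the install by loading a pipeline. A minimal sketch, assuming the checkpoint publishes a `bf16` branch with Flax weights on the Hub:
+
+```python
+import jax.numpy as jnp
+from diffusers import FlaxStableDiffusionPipeline
+
+# Flax pipelines return the pipeline object and its parameters separately
+pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
+    "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
+)
+```
+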
 ### Apple Silicon (M1/M2) support
 
 Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide.
diff --git a/docker/diffusers-flax-cpu/Dockerfile b/docker/diffusers-flax-cpu/Dockerfile
new file mode 100644
index 000000000000..051008aa9a2e
--- /dev/null
+++ b/docker/diffusers-flax-cpu/Dockerfile
@@ -0,0 +1,49 @@
+FROM ubuntu:20.04
+LABEL maintainer="Hugging Face"
+LABEL repository="diffusers"
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
+    build-essential \
+    git \
+    git-lfs \
+    curl \
+    ca-certificates \
+    libsndfile1-dev \
+    libgl1 \
+    python3.10 \
+    python3-pip \
+    python3.10-venv && \
+    rm -rf /var/lib/apt/lists
+
+# make sure to use venv
+RUN python3.10 -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+
+# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
+# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
+RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
+    python3 -m uv pip install --upgrade --no-cache-dir \
+        clu \
+        "jax[cpu]>=0.2.16,!=0.3.2" \
+        "flax>=0.4.1" \
+        "jaxlib>=0.1.65" && \
+    python3 -m uv pip install --no-cache-dir \
+        accelerate \
+        datasets \
+        hf-doc-builder \
+        huggingface-hub \
+        Jinja2 \
+        librosa \
+        numpy==1.26.4 \
+        scipy \
+        tensorboard \
+        transformers \
+        hf_transfer
+
+CMD ["/bin/bash"]
\ No newline at end of file
diff --git a/docker/diffusers-flax-tpu/Dockerfile b/docker/diffusers-flax-tpu/Dockerfile
new file mode 100644
index 000000000000..405f068923b7
--- /dev/null
+++ b/docker/diffusers-flax-tpu/Dockerfile
@@ -0,0 +1,51 @@
+FROM ubuntu:20.04
+LABEL maintainer="Hugging Face"
+LABEL repository="diffusers"
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get -y update \
+    && apt-get install -y software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
+RUN apt install -y bash \
+    build-essential \
+    git \
+    git-lfs \
+    curl \
+    ca-certificates \
+    libsndfile1-dev \
+    libgl1 \
+    python3.10 \
+    python3-pip \
+    python3.10-venv && \
+    rm -rf /var/lib/apt/lists
+
+# make sure to use venv
+RUN python3.10 -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+
+# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
+# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
+RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
+    python3 -m pip install --no-cache-dir \
+        "jax[tpu]>=0.2.16,!=0.3.2" \
+        -f https://storage.googleapis.com/jax-releases/libtpu_releases.html && \
+    python3 -m uv pip install --upgrade --no-cache-dir \
+        clu \
+        "flax>=0.4.1" \
+        "jaxlib>=0.1.65" && \
+    python3 -m uv pip install --no-cache-dir \
+        accelerate \
+        datasets \
+        hf-doc-builder \
+        huggingface-hub \
+        Jinja2 \
+        librosa \
+        numpy==1.26.4 \
+        scipy \
+        tensorboard \
+        transformers \
+        hf_transfer
+
+CMD ["/bin/bash"]
\ No newline at end of file
diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index a0ddf8f25654..42558b636cd2 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -9,11 +9,11 @@
   - local: stable_diffusion
     title: Basic performance
 
-- title: Pipelines
+- title: DiffusionPipeline
   isExpanded: false
   sections:
   - local: using-diffusers/loading
-    title: DiffusionPipeline
+    title: Load pipelines
   - local: tutorials/autopipeline
     title: AutoPipeline
   - local: using-diffusers/custom_pipeline_overview
@@ -21,7 +21,7 @@
   - local: using-diffusers/callback
     title: Pipeline callbacks
   - local: using-diffusers/reusing_seeds
-    title: Reproducibility
+    title: Reproducible pipelines
   - local: using-diffusers/schedulers
     title: Load schedulers and models
   - local: using-diffusers/scheduler_features
@@ -62,6 +62,8 @@
     title: Scheduler features
   - local: using-diffusers/callback
     title: Pipeline callbacks
+  - local: using-diffusers/reusing_seeds
+    title: Reproducible pipelines
   - local: using-diffusers/image_quality
     title: Controlling image quality
 
@@ -75,7 +77,7 @@
   - local: optimization/memory
     title: Reduce memory usage
   - local: optimization/speed-memory-optims
-    title: Compiling and offloading quantized models
+    title: Compile and offloading quantized models
 
 - title: Community optimizations
   sections:
   - local: optimization/pruna
@@ -192,6 +194,8 @@
 - title: Model accelerators and hardware
   isExpanded: false
   sections:
+  - local: using-diffusers/stable_diffusion_jax_how_to
+    title: JAX/Flax
   - local: optimization/onnx
     title: ONNX
   - local: optimization/open_vino
diff --git a/docs/source/en/api/models/autoencoderkl.md b/docs/source/en/api/models/autoencoderkl.md
index 3d949e9bb06c..baeab4017be3 100644
--- a/docs/source/en/api/models/autoencoderkl.md
+++ b/docs/source/en/api/models/autoencoderkl.md
@@ -44,3 +44,15 @@ model = AutoencoderKL.from_single_file(url)
 ## DecoderOutput
 
 [[autodoc]] models.autoencoders.vae.DecoderOutput
+
+## FlaxAutoencoderKL
+
+[[autodoc]] FlaxAutoencoderKL
+
+## FlaxAutoencoderKLOutput
+
+[[autodoc]] models.vae_flax.FlaxAutoencoderKLOutput
+
+## FlaxDecoderOutput
+
+[[autodoc]] models.vae_flax.FlaxDecoderOutput
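+A minimal usage sketch, assuming a checkpoint that ships Flax VAE weights in its `vae` subfolder:
+
+```python
+import jax.numpy as jnp
+from diffusers import FlaxAutoencoderKL
+
+# from_pretrained returns the Flax module and its parameters separately
+vae, params = FlaxAutoencoderKL.from_pretrained(
+    "CompVis/stable-diffusion-v1-4", subfolder="vae"
+)
+
+# encode an NCHW batch into a latent distribution (flax modules are applied functionally)
+sample = jnp.zeros((1, 3, 512, 512))
+latent_dist = vae.apply({"params": params}, sample, method=vae.encode).latent_dist
+```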
diff --git a/docs/source/en/api/models/controlnet.md b/docs/source/en/api/models/controlnet.md
index f56b7383a0d7..7ce14f17d56a 100644
--- a/docs/source/en/api/models/controlnet.md
+++ b/docs/source/en/api/models/controlnet.md
@@ -40,3 +40,11 @@ pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=contro
 ## ControlNetOutput
 
 [[autodoc]] models.controlnets.controlnet.ControlNetOutput
+
+## FlaxControlNetModel
+
+[[autodoc]] FlaxControlNetModel
+
+## FlaxControlNetOutput
+
+[[autodoc]] models.controlnets.controlnet_flax.FlaxControlNetOutput
diff --git a/docs/source/en/api/models/overview.md b/docs/source/en/api/models/overview.md
index eb9722739f99..1c6a2092e684 100644
--- a/docs/source/en/api/models/overview.md
+++ b/docs/source/en/api/models/overview.md
@@ -19,6 +19,10 @@ All models are built from the base [`ModelMixin`] class which is a [`torch.nn.Mo
 ## ModelMixin
 [[autodoc]] ModelMixin
 
+## FlaxModelMixin
+
+[[autodoc]] FlaxModelMixin
+
 ## PushToHubMixin
 
 [[autodoc]] utils.PushToHubMixin
diff --git a/docs/source/en/api/models/unet2d-cond.md b/docs/source/en/api/models/unet2d-cond.md
index 99a7c41ab286..175fb1122019 100644
--- a/docs/source/en/api/models/unet2d-cond.md
+++ b/docs/source/en/api/models/unet2d-cond.md
@@ -23,3 +23,9 @@ The abstract from the paper is:
 ## UNet2DConditionOutput
 
 [[autodoc]] models.unets.unet_2d_condition.UNet2DConditionOutput
+
+## FlaxUNet2DConditionModel
+[[autodoc]] models.unets.unet_2d_condition_flax.FlaxUNet2DConditionModel
+
+## FlaxUNet2DConditionOutput
+[[autodoc]] models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput
diff --git a/docs/source/en/api/outputs.md b/docs/source/en/api/outputs.md
index a13bc89f2bea..bed92f10f94a 100644
--- a/docs/source/en/api/outputs.md
+++ b/docs/source/en/api/outputs.md
@@ -54,6 +54,10 @@ To check a specific pipeline or model output, refer to its corresponding API doc
 
 [[autodoc]] pipelines.ImagePipelineOutput
 
+## FlaxImagePipelineOutput
+
+[[autodoc]] pipelines.pipeline_flax_utils.FlaxImagePipelineOutput
+
 ## AudioPipelineOutput
 
 [[autodoc]] pipelines.AudioPipelineOutput
diff --git a/docs/source/en/api/pipelines/controlnet.md b/docs/source/en/api/pipelines/controlnet.md
index 2a654a37357f..eea3473d3609 100644
--- a/docs/source/en/api/pipelines/controlnet.md
+++ b/docs/source/en/api/pipelines/controlnet.md
@@ -72,3 +72,11 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
 ## StableDiffusionPipelineOutput
 
 [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
+
+## FlaxStableDiffusionControlNetPipeline
+[[autodoc]] FlaxStableDiffusionControlNetPipeline
+  - all
+  - __call__
+
+## FlaxStableDiffusionControlNetPipelineOutput
+[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput
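+A minimal sketch of pairing the Flax ControlNet with the Flax Stable Diffusion pipeline; the model ids below are examples, and `from_pt=True` assumes the repos only ship PyTorch weights:
+
+```python
+import jax.numpy as jnp
+from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
+
+controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
+    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
+)
+pipeline, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
+    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.float32
+)
+# the ControlNet parameters are carried alongside the pipeline parameters
+params["controlnet"] = controlnet_params
+```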
diff --git a/docs/source/en/api/pipelines/overview.md b/docs/source/en/api/pipelines/overview.md
index d3cc318a5459..b5e3825fef6d 100644
--- a/docs/source/en/api/pipelines/overview.md
+++ b/docs/source/en/api/pipelines/overview.md
@@ -106,6 +106,10 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an
 
 [[autodoc]] pipelines.StableDiffusionMixin.disable_freeu
 
+## FlaxDiffusionPipeline
+
+[[autodoc]] pipelines.pipeline_flax_utils.FlaxDiffusionPipeline
+
 ## PushToHubMixin
 
 [[autodoc]] utils.PushToHubMixin
diff --git a/docs/source/en/api/pipelines/skyreels_v2.md b/docs/source/en/api/pipelines/skyreels_v2.md
index 6730f1551607..cd94f2a75c08 100644
--- a/docs/source/en/api/pipelines/skyreels_v2.md
+++ b/docs/source/en/api/pipelines/skyreels_v2.md
@@ -1,4 +1,4 @@
-
-# Compile and offloading quantized models
+# Compile and offloading quantized models
 
 Optimizing models often involves trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it also increases memory consumption since it needs to store the outputs of intermediate attention layers. A more balanced optimization strategy combines quantizing a model, [torch.compile](./fp16#torchcompile) and various [offloading methods](./memory#offloading).
 
@@ -28,8 +28,7 @@ The table below provides a comparison of optimization strategy combinations and
 | quantization | 32.602 | 14.9453 |
 | quantization, torch.compile | 25.847 | 14.9448 |
 | quantization, torch.compile, model CPU offloading | 32.312 | 12.2369 |
-
-These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the benchmarking script if you're interested in evaluating your own model.
+These results are benchmarked on Flux with a RTX 4090. The transformer and text_encoder components are quantized. Refer to the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) if you're interested in evaluating your own model.
 
 This guide will show you how to compile and offload a quantized model with [bitsandbytes](../quantization/bitsandbytes#torchcompile). Make sure you are using [PyTorch nightly](https://pytorch.org/get-started/locally/) and the latest version of bitsandbytes.
diff --git a/docs/source/en/training/controlnet.md b/docs/source/en/training/controlnet.md
index 17da819db84b..0170ff3da9ea 100644
--- a/docs/source/en/training/controlnet.md
+++ b/docs/source/en/training/controlnet.md
@@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License.
 
 [ControlNet](https://hf.co/papers/2302.05543) models are adapters trained on top of another pretrained model. It allows for a greater degree of control over image generation by conditioning the model with an additional input image. The input image can be a canny edge, depth map, human pose, and many more.
 
-If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing`, `gradient_accumulation_steps`, and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers).
+If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing`, `gradient_accumulation_steps`, and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. You should have a GPU with >30GB of memory if you want to train faster with Flax.
 
 This guide will explore the [train_controlnet.py](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py) training script to help you become familiar with it, and how you can adapt it for your own use-case.
 
@@ -28,10 +28,45 @@ pip install .
 
 Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:
 
+<hfoptions id="installation">
+<hfoption id="PyTorch">
+
 ```bash
 cd examples/controlnet
 pip install -r requirements.txt
 ```
+
+</hfoption>
+<hfoption id="Flax">
+
+If you have access to a TPU, the Flax training script runs even faster! Let's run the training script on the [Google Cloud TPU VM](https://cloud.google.com/tpu/docs/run-calculation-jax). Create a single TPU v4-8 VM and connect to it:
+
+```bash
+ZONE=us-central2-b
+TPU_TYPE=v4-8
+VM_NAME=hg_flax
+
+gcloud alpha compute tpus tpu-vm create $VM_NAME \
+    --zone $ZONE \
+    --accelerator-type $TPU_TYPE \
+    --version tpu-vm-v4-base
+
+gcloud alpha compute tpus tpu-vm ssh $VM_NAME --zone $ZONE -- \
+```
+
+Install JAX 0.4.5:
+
+```bash
+pip install "jax[tpu]==0.4.5" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
+```
+
+Then install the required dependencies for the Flax script:
+
+```bash
+cd examples/controlnet
+pip install -r requirements_flax.txt
+```
+
+</hfoption>
+</hfoptions>
 
@@ -85,7 +120,7 @@ Many of the basic and important parameters are described in the [Text-to-image](
 
 ### Min-SNR weighting
 
-The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch.
+The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.
 
 Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:
 
@@ -237,6 +272,9 @@ That's it! You don't need to add any additional parameters to your training comm
 
+<hfoptions id="training-script">
+<hfoption id="PyTorch">
+
 ```bash
 export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
 export OUTPUT_DIR="path/to/save/model"
@@ -254,6 +292,47 @@ accelerate launch train_controlnet.py \
   --push_to_hub
 ```
 
+</hfoption>
+<hfoption id="Flax">
+
+With Flax, you can [profile your code](https://jax.readthedocs.io/en/latest/profiling.html) by adding the `--profile_steps=5` parameter to your training command. Install the Tensorboard profile plugin:
+
+```bash
+pip install tensorflow tensorboard-plugin-profile
+tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
+```
+
+Then you can inspect the profile at [http://localhost:6006/#profile](http://localhost:6006/#profile).
+
+<Tip warning={true}>
+
+If you run into version conflicts with the plugin, try uninstalling and reinstalling all versions of TensorFlow and Tensorboard. The debugging functionality of the profile plugin is still experimental, and not all views are fully functional. The `trace_viewer` cuts off events after 1M, which can result in all your device traces getting lost if for example, you profile the compilation step by accident.
+
+</Tip>
+
+```bash
+python3 train_controlnet_flax.py \
+  --pretrained_model_name_or_path=$MODEL_DIR \
+  --output_dir=$OUTPUT_DIR \
+  --dataset_name=fusing/fill50k \
+  --resolution=512 \
+  --learning_rate=1e-5 \
+  --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
+  --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
+  --validation_steps=1000 \
+  --train_batch_size=2 \
+  --revision="non-ema" \
+  --from_pt \
+  --report_to="wandb" \
+  --tracker_project_name=$HUB_MODEL_ID \
+  --num_train_epochs=11 \
+  --push_to_hub \
+  --hub_model_id=$HUB_MODEL_ID
+```
+
+</hfoption>
+</hfoptions>
+
 Once training is complete, you can use your newly trained model for inference!
 
 ```py
diff --git a/docs/source/en/training/dreambooth.md b/docs/source/en/training/dreambooth.md
index 3a5ba5aa39c3..cff2bb500dab 100644
--- a/docs/source/en/training/dreambooth.md
+++ b/docs/source/en/training/dreambooth.md
@@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License.
 
 [DreamBooth](https://huggingface.co/papers/2208.12242) is a training technique that updates the entire diffusion model by training on just a few images of a subject or style. It works by associating a special word in the prompt with the example images.
 
-If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers).
+If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. You should have a GPU with >30GB of memory if you want to train faster with Flax.
 
 This guide will explore the [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) script to help you become more familiar with it, and how you can adapt it for your own use-case.
 
@@ -28,11 +28,25 @@ pip install .
 
 Navigate to the example folder with the training script and install the required dependencies for the script you're using:
 
+<hfoptions id="installation">
+<hfoption id="PyTorch">
+
 ```bash
 cd examples/dreambooth
 pip install -r requirements.txt
 ```
+
+</hfoption>
+<hfoption id="Flax">
+
+```bash
+cd examples/dreambooth
+pip install -r requirements_flax.txt
+```
+
+</hfoption>
+</hfoptions>
 
 <Tip>
 
 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
 
@@ -96,7 +110,7 @@ Some basic and important parameters to know and specify are:
 
 ### Min-SNR weighting
 
-The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch.
+The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.
 
 Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:
 
@@ -297,6 +311,9 @@ That's it! You don't need to add any additional parameters to your training comm
 
+<hfoptions id="training-script">
+<hfoption id="PyTorch">
+
 ```bash
 export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
 export INSTANCE_DIR="./dog"
 export OUTPUT_DIR="path/to/save/model"
@@ -317,6 +334,29 @@ accelerate launch train_dreambooth.py \
   --push_to_hub
 ```
 
+</hfoption>
+<hfoption id="Flax">
+
+```bash
+export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
+export INSTANCE_DIR="./dog"
+export OUTPUT_DIR="path-to-save-model"
+
+python train_dreambooth_flax.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --instance_data_dir=$INSTANCE_DIR \
+  --output_dir=$OUTPUT_DIR \
+  --instance_prompt="a photo of sks dog" \
+  --resolution=512 \
+  --train_batch_size=1 \
+  --learning_rate=5e-6 \
+  --max_train_steps=400 \
+  --push_to_hub
+```
+
+</hfoption>
+</hfoptions>
+
 Once training is complete, you can use your newly trained model for inference!
@@ -343,6 +383,9 @@
 
+<hfoptions id="inference">
+<hfoption id="PyTorch">
+
 ```py
 from diffusers import DiffusionPipeline
 import torch
@@ -352,6 +395,39 @@ image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guida
 image.save("dog-bucket.png")
 ```
 
+</hfoption>
+<hfoption id="Flax">
+
+```py
+import jax
+import numpy as np
+from flax.jax_utils import replicate
+from flax.training.common_utils import shard
+from diffusers import FlaxStableDiffusionPipeline
+
+pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path-to-your-trained-model", dtype=jax.numpy.bfloat16)
+
+prompt = "A photo of sks dog in a bucket"
+prng_seed = jax.random.PRNGKey(0)
+num_inference_steps = 50
+
+num_samples = jax.device_count()
+prompt = num_samples * [prompt]
+prompt_ids = pipeline.prepare_inputs(prompt)
+
+# shard inputs and rng
+params = replicate(params)
+prng_seed = jax.random.split(prng_seed, jax.device_count())
+prompt_ids = shard(prompt_ids)
+
+images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
+images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
+images[0].save("dog-bucket.png")
+```
+
+</hfoption>
+</hfoptions>
+
 ## LoRA
 
 LoRA is a training technique for significantly reducing the number of trainable parameters. As a result, training is faster and it is easier to store the resulting weights because they are a lot smaller (~100MBs). Use the [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) script to train with LoRA.
diff --git a/docs/source/en/training/kandinsky.md b/docs/source/en/training/kandinsky.md
index 561bc1c351b7..77f7af03b801 100644
--- a/docs/source/en/training/kandinsky.md
+++ b/docs/source/en/training/kandinsky.md
@@ -88,7 +88,7 @@ Most of the parameters are identical to the parameters in the [Text-to-image](te
 
 ### Min-SNR weighting
 
-The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch.
+The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.
 
 Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:
 
diff --git a/docs/source/en/training/lora.md b/docs/source/en/training/lora.md
index e97d8acdac46..9a3512dd76df 100644
--- a/docs/source/en/training/lora.md
+++ b/docs/source/en/training/lora.md
@@ -38,11 +38,25 @@ pip install .
 
 Navigate to the example folder with the training script and install the required dependencies for the script you're using:
 
+<hfoptions id="installation">
+<hfoption id="PyTorch">
+
 ```bash
 cd examples/text_to_image
 pip install -r requirements.txt
 ```
+
+</hfoption>
+<hfoption id="Flax">
+
+```bash
+cd examples/text_to_image
+pip install -r requirements_flax.txt
+```
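+
+You can confirm that JAX actually sees your accelerator before launching a run:
+
+```python
+import jax
+
+# lists the TPU/GPU devices JAX will use (CPU-only if none are found)
+print(jax.devices())
+```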
+
+</hfoption>
+</hfoptions>
 
 <Tip>
 
 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
 
diff --git a/docs/source/en/training/overview.md b/docs/source/en/training/overview.md
index 55d6b1966137..032900d9ac20 100644
--- a/docs/source/en/training/overview.md
+++ b/docs/source/en/training/overview.md
@@ -23,18 +23,18 @@ Each training script is:
 
 Our current collection of training scripts include:
 
-| Training | SDXL-support | LoRA-support |
-|---|---|---|
-| [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | |
-| [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 |
-| [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | |
-| [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 |
-| [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | |
-| [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | |
-| [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | |
-| [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | |
-| [Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 |
-| [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 |
+| Training | SDXL-support | LoRA-support | Flax-support |
+|---|---|---|---|
+| [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | | |
+| [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 | 👍 |
+| [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | | 👍 |
+| [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 | 👍 |
+| [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | | 👍 |
+| [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | | |
+| [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | | |
+| [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | | |
+| [Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 | |
+| [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 | |
 
 These examples are **actively** maintained, so please feel free to open an issue if they aren't working as expected. If you feel like another training example should be included, you're more than welcome to start a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) to discuss your feature idea with us and whether it meets our criteria of being self-contained, easy-to-tweak, beginner-friendly, and single-purpose.
 
@@ -48,7 +48,7 @@ cd diffusers
 pip install .
 ```
 
-Then navigate to the folder of the training script (for example, [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)) and install the `requirements.txt` file. Some training scripts have a specific requirement file for SDXL or LoRA. If you're using one of these scripts, make sure you install its corresponding requirements file.
+Then navigate to the folder of the training script (for example, [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)) and install the `requirements.txt` file. Some training scripts have a specific requirement file for SDXL, LoRA or Flax. If you're using one of these scripts, make sure you install its corresponding requirements file.
 
 ```bash
 cd examples/dreambooth
diff --git a/docs/source/en/training/sdxl.md b/docs/source/en/training/sdxl.md
index 12051b7c2d11..da8b93b6d690 100644
--- a/docs/source/en/training/sdxl.md
+++ b/docs/source/en/training/sdxl.md
@@ -96,7 +96,7 @@ Most of the parameters are identical to the parameters in the [Text-to-image](te
 
 ### Min-SNR weighting
 
-The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting either `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch.
+The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting either `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.
 
 Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:
 
diff --git a/docs/source/en/training/text2image.md b/docs/source/en/training/text2image.md
index 5212fe8393bc..182621e89bdf 100644
--- a/docs/source/en/training/text2image.md
+++ b/docs/source/en/training/text2image.md
@@ -20,7 +20,7 @@ The text-to-image script is experimental, and it's easy to overfit and run into
 
 Text-to-image models like Stable Diffusion are conditioned to generate images given a text prompt.
 
-Training a model can be taxing on your hardware, but if you enable `gradient_checkpointing` and `mixed_precision`, it is possible to train a model on a single 24GB GPU. If you're training with larger batch sizes or want to train faster, it's better to use GPUs with more than 30GB of memory. You can reduce your memory footprint by enabling memory-efficient attention with [xFormers](../optimization/xformers).
+Training a model can be taxing on your hardware, but if you enable `gradient_checkpointing` and `mixed_precision`, it is possible to train a model on a single 24GB GPU. If you're training with larger batch sizes or want to train faster, it's better to use GPUs with more than 30GB of memory. You can reduce your memory footprint by enabling memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing, gradient accumulation or xFormers. A GPU with at least 30GB of memory or a TPU v3 is recommended for training with Flax.
 
 This guide will explore the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) training script to help you become familiar with it, and how you can adapt it for your own use-case.
 
@@ -34,10 +34,20 @@ pip install .
 
 Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:
 
+<hfoptions id="installation">
+<hfoption id="PyTorch">
 ```bash
 cd examples/text_to_image
 pip install -r requirements.txt
 ```
+</hfoption>
+<hfoption id="Flax">
+```bash
+cd examples/text_to_image
+pip install -r requirements_flax.txt
+```
+</hfoption>
+</hfoptions>
 
@@ -96,7 +106,7 @@ Some basic and important parameters include:
 
 ### Min-SNR weighting
 
-The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch.
+The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.
 
 Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:
 
@@ -145,6 +155,9 @@ Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/8959c5
 
 Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀
 
+<hfoptions id="training-script">
+<hfoption id="PyTorch">
+
 Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
 
@@ -174,8 +187,43 @@ accelerate launch --mixed_precision="fp16" train_text_to_image.py \
   --push_to_hub
 ```
 
+</hfoption>
+<hfoption id="Flax">
+
+Training with Flax can be faster on TPUs and GPUs thanks to [@duongna21](https://github.com/duongna21). Flax is more efficient on a TPU, but GPU performance is also great.
+
+Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path).
+
+<Tip>
+
+To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model to.
+
+</Tip>
+
+```bash
+export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
+export dataset_name="lambdalabs/naruto-blip-captions"
+
+python train_text_to_image_flax.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --dataset_name=$dataset_name \
+  --resolution=512 --center_crop --random_flip \
+  --train_batch_size=1 \
+  --max_train_steps=15000 \
+  --learning_rate=1e-05 \
+  --max_grad_norm=1 \
+  --output_dir="sd-naruto-model" \
+  --push_to_hub
+```
+
+</hfoption>
+</hfoptions>
+
 Once training is complete, you can use your newly trained model for inference:
 
+<hfoptions id="inference">
+<hfoption id="PyTorch">
+
 ```py
 from diffusers import StableDiffusionPipeline
 import torch
@@ -186,6 +234,39 @@ image = pipeline(prompt="yoda").images[0]
 image.save("yoda-naruto.png")
 ```
 
+</hfoption>
+<hfoption id="Flax">
+
+```py
+import jax
+import numpy as np
+from flax.jax_utils import replicate
+from flax.training.common_utils import shard
+from diffusers import FlaxStableDiffusionPipeline
+
+pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16)
+
+prompt = "yoda naruto"
+prng_seed = jax.random.PRNGKey(0)
+num_inference_steps = 50
+
+num_samples = jax.device_count()
+prompt = num_samples * [prompt]
+prompt_ids = pipeline.prepare_inputs(prompt)
+
+# shard inputs and rng
+params = replicate(params)
+prng_seed = jax.random.split(prng_seed, jax.device_count())
+prompt_ids = shard(prompt_ids)
+
+images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
+images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
+images[0].save("yoda-naruto.png")
+```
+
+</hfoption>
+</hfoptions>
+
 ## Next steps
 
 Congratulations on training your own text-to-image model! To learn more about how to use your new model, the following guides may be helpful:
diff --git a/docs/source/en/training/text_inversion.md b/docs/source/en/training/text_inversion.md
index 91af2f6afb81..b7083ae589ed 100644
--- a/docs/source/en/training/text_inversion.md
+++ b/docs/source/en/training/text_inversion.md
@@ -14,7 +14,7 @@ specific language governing permissions and limitations under the License.
 
 [Textual Inversion](https://hf.co/papers/2208.01618) is a training technique for personalizing image generation models with just a few example images of what you want it to learn. This technique works by learning and updating the text embeddings (the new embeddings are tied to a special word you must use in the prompt) to match the example images you provide.
 
-If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers).
+If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. With the same configuration and setup as PyTorch, the Flax training script should be at least ~70% faster!
 
 This guide will explore the [textual_inversion.py](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py) script to help you become more familiar with it, and how you can adapt it for your own use-case.
 
@@ -28,10 +28,25 @@ pip install .
 
 Navigate to the example folder with the training script and install the required dependencies for the script you're using:
 
+<hfoptions id="installation">
+<hfoption id="PyTorch">
+
 ```bash
 cd examples/textual_inversion
 pip install -r requirements.txt
 ```
+
+</hfoption>
+<hfoption id="Flax">
+
+```bash
+cd examples/textual_inversion
+pip install -r requirements_flax.txt
+```
+
+</hfoption>
+</hfoptions>
 
 <Tip>
 
 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
 
@@ -174,6 +189,9 @@ One more thing before you launch the script. If you're interested in following a
   --validation_steps=100
 ```
 
+<hfoptions id="training-script">
+<hfoption id="PyTorch">
+
 ```bash
 export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
 export DATA_DIR="./cat"
 
 accelerate launch textual_inversion.py \
   --push_to_hub
 ```
 
+</hfoption>
+<hfoption id="Flax">
+
+```bash
+export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
+export DATA_DIR="./cat"
+
+python textual_inversion_flax.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --train_data_dir=$DATA_DIR \
+  --learnable_property="object" \
+  --placeholder_token="<cat-toy>" \
+  --initializer_token="toy" \
+  --resolution=512 \
+  --train_batch_size=1 \
+  --max_train_steps=3000 \
+  --learning_rate=5.0e-04 \
+  --scale_lr \
+  --output_dir="textual_inversion_cat" \
+  --push_to_hub
+```
+
+</hfoption>
+</hfoptions>
+
 After training is complete, you can use your newly trained model for inference like:
 
+<hfoptions id="inference">
+<hfoption id="PyTorch">
+
 ```py
 from diffusers import StableDiffusionPipeline
 import torch
@@ -208,6 +254,42 @@ image = pipeline("A <cat-toy> train", num_inference_steps=50).images[0]
 image.save("cat-train.png")
 ```
 
+</hfoption>
+<hfoption id="Flax">
+
+Flax doesn't support the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] method, but the textual_inversion_flax.py script [saves](https://github.com/huggingface/diffusers/blob/c0f058265161178f2a88849e92b37ffdc81f1dcc/examples/textual_inversion/textual_inversion_flax.py#L636C2-L636C2) the learned embeddings as a part of the model after training. This means you can use the model for inference like any other Flax model:
+
+```py
+import jax
+import numpy as np
+from flax.jax_utils import replicate
+from flax.training.common_utils import shard
+from diffusers import FlaxStableDiffusionPipeline
+
+model_path = "path-to-your-trained-model"
+pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)
+
+prompt = "A <cat-toy> train"
+prng_seed = jax.random.PRNGKey(0)
+num_inference_steps = 50
+
+num_samples = jax.device_count()
+prompt = num_samples * [prompt]
+prompt_ids = pipeline.prepare_inputs(prompt)
+
+# shard inputs and rng
+params = replicate(params)
+prng_seed = jax.random.split(prng_seed, jax.device_count())
+prompt_ids = shard(prompt_ids)
+
+images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
+images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
+images[0].save("cat-train.png")
+```
+
+</hfoption>
+</hfoptions>
+
 ## Next steps
 
 Congratulations on training your own Textual Inversion model! 🎉 To learn more about how to use your new model, the following guides may be helpful:
diff --git a/docs/source/en/tutorials/using_peft_for_inference.md b/docs/source/en/tutorials/using_peft_for_inference.md
index 7bdd2a1ee969..5cd47f8674e1 100644
--- a/docs/source/en/tutorials/using_peft_for_inference.md
+++ b/docs/source/en/tutorials/using_peft_for_inference.md
@@ -94,7 +94,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained(
 pipeline.unet.load_lora_adapter(
     "jbilcke-hf/sdxl-cinematic-1",
     weight_name="pytorch_lora_weights.safetensors",
-    adapter_name="cinematic",
+    adapter_name="cinematic",
     prefix="unet"
 )
 # use cnmt in the prompt to trigger the LoRA
@@ -688,4 +688,4 @@ Browse the [LoRA Studio](https://lorastudio.co/models) for different LoRAs to us
 
 You can find additional LoRAs in the [FLUX LoRA the Explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer) and [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer) Spaces.
 
-Check out the [Fast LoRA inference for Flux with Diffusers and PEFT](https://huggingface.co/blog/lora-fast) blog post to learn how to optimize LoRA inference with methods like FlashAttention-3 and fp8 quantization.
+Check out the [Fast LoRA inference for Flux with Diffusers and PEFT](https://huggingface.co/blog/lora-fast) blog post to learn how to optimize LoRA inference with methods like FlashAttention-3 and fp8 quantization.
\ No newline at end of file
diff --git a/docs/source/en/using-diffusers/loading.md b/docs/source/en/using-diffusers/loading.md
index f86ea104cf69..20f0cc51e0af 100644
--- a/docs/source/en/using-diffusers/loading.md
+++ b/docs/source/en/using-diffusers/loading.md
@@ -10,267 +10,598 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
 specific language governing permissions and limitations under the License.
 -->
 
+# Load pipelines
+
 [[open-in-colab]]
 
-# DiffusionPipeline
+Diffusion systems consist of multiple components like parameterized models and schedulers that interact in complex ways. That is why we designed the [`DiffusionPipeline`] to wrap the complexity of the entire diffusion system into an easy-to-use API. At the same time, the [`DiffusionPipeline`] is entirely customizable so you can modify each component to build a diffusion system for your use case.
 
-Diffusion models consists of multiple components like UNets or diffusion transformers (DiTs), text encoders, variational autoencoders (VAEs), and schedulers. The [`DiffusionPipeline`] wraps all of these components into a single easy-to-use API without giving up the flexibility to modify it's components.
+This guide will show you how to load:
 
-This guide will show you how to load a [`DiffusionPipeline`].
+- pipelines from the Hub and locally
+- different components into a pipeline
+- multiple pipelines without increasing memory usage
+- checkpoint variants such as different floating point types or non-exponential mean averaged (EMA) weights
 
-## Loading a pipeline
+## Load a pipeline
 
-[`DiffusionPipeline`] is a base pipeline class that automatically selects and returns an instance of a model's pipeline subclass, like [`QwenImagePipeline`], by scanning the `model_index.json` file for the class name.
+> [!TIP]
+> Skip to the [DiffusionPipeline explained](#diffusionpipeline-explained) section if you're interested in an explanation about how the [`DiffusionPipeline`] class works.
 
-Pass a model id to [`~DiffusionPipeline.from_pretrained`] to load a pipeline.
+There are two ways to load a pipeline for a task:

-```py
-import torch
-from diffusers import DiffusionPipeline
+1. Load the generic [`DiffusionPipeline`] class and allow it to automatically detect the correct pipeline class from the checkpoint.
+2. Load a specific pipeline class for a specific task.

-pipeline = DiffusionPipeline.from_pretrained(
-    "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda"
-)
-```
+
+

-Every model has a specific pipeline subclass that inherits from [`DiffusionPipeline`]. A subclass usually has a narrow focus and are task-specific. See the table below for an example.
+The [`DiffusionPipeline`] class is a simple and generic way to load the latest trending diffusion model from the [Hub](https://huggingface.co/models?library=diffusers&sort=trending). It uses the [`~DiffusionPipeline.from_pretrained`] method to automatically detect the correct pipeline class for a task from the checkpoint, downloads and caches all the required configuration and weight files, and returns a pipeline ready for inference.

-| pipeline subclass | task |
-|---|---|
-| [`QwenImagePipeline`] | text-to-image |
-| [`QwenImageImg2ImgPipeline`] | image-to-image |
-| [`QwenImageInpaintPipeline`] | inpaint |
+```python
+from diffusers import DiffusionPipeline

-You could use the subclass directly by passing a model id to [`~QwenImagePipeline.from_pretrained`].
+pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True)
+```
+
+This same checkpoint can also be used for an image-to-image task. The [`DiffusionPipeline`] class can handle any task as long as you provide the appropriate inputs. For example, for an image-to-image task, you need to pass an initial image to the pipeline.

```py
-import torch
-from diffusers import QwenImagePipeline
+from diffusers import DiffusionPipeline
+from diffusers.utils import load_image

-pipeline = QwenImagePipeline.from_pretrained(
-    "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda"
-)
+pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True)
+
+init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png")
+prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
+image = pipeline(prompt, image=init_image).images[0]
```

-### Local pipelines
+
+

-Pipelines can also be run locally. Use [`~huggingface_hub.snapshot_download`] to download a model repository.
+Checkpoints can be loaded by their specific pipeline class if you already know it. For example, to load a Stable Diffusion model, use the [`StableDiffusionPipeline`] class.

-```py
-from huggingface_hub import snapshot_download
+```python
+from diffusers import StableDiffusionPipeline

-snapshot_download(repo_id="Qwen/Qwen-Image")
+pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True)
```

-The model is downloaded to your [cache](../installation#cache). Pass the folder path to [`~QwenImagePipeline.from_pretrained`] to load it.
+This same checkpoint may also be used for another task like image-to-image. To differentiate what task you want to use the checkpoint for, you have to use the corresponding task-specific pipeline class. For example, to use the same checkpoint for image-to-image, use the [`StableDiffusionImg2ImgPipeline`] class.
```py +from diffusers import StableDiffusionImg2ImgPipeline + +pipeline = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) +``` + + + + +Use the Space below to gauge a pipeline's memory requirements before you download and load it to see if it runs on your hardware. + +
+ +
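If you'd rather estimate this locally, you can approximate a pipeline's weight memory from its parameter count after loading it. The sketch below is illustrative rather than a Diffusers API; the `estimate_weight_memory_gb` helper name and the assumption of 2 bytes per parameter (fp16/bf16 weights) are ours.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True)

def estimate_weight_memory_gb(pipeline, bytes_per_param=2):
    # 2 bytes per parameter assumes fp16/bf16 weights; use 4 for fp32
    total_params = sum(
        p.numel()
        for component in pipeline.components.values()
        if isinstance(component, torch.nn.Module)
        for p in component.parameters()
    )
    return total_params * bytes_per_param / 1024**3

print(f"~{estimate_weight_memory_gb(pipeline):.2f} GB for weights alone")
```

Keep in mind this only counts the weights; inference also needs additional memory for activations and intermediate latents.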
+
+
+### Specify component-specific data types
+
+You can customize the data types for individual sub-models by passing a dictionary to the `torch_dtype` parameter. This allows you to load different components of a pipeline in different floating point precisions. For instance, if you want to load the transformer with `torch.bfloat16` and all other components with `torch.float16`, you can pass a dictionary mapping:
+
+```python
+from diffusers import HunyuanVideoPipeline
import torch
-from diffusers import QwenImagePipeline

-pipeline = QwenImagePipeline.from_pretrained(
-    "path/to/your/cache", torch_dtype=torch.bfloat16, device_map="cuda"
+pipe = HunyuanVideoPipeline.from_pretrained(
+    "hunyuanvideo-community/HunyuanVideo",
+    torch_dtype={"transformer": torch.bfloat16, "default": torch.float16},
)
+print(pipe.transformer.dtype, pipe.vae.dtype) # (torch.bfloat16, torch.float16)
```

-The [`~QwenImagePipeline.from_pretrained`] method won't download files from the Hub when it detects a local path. But this also means it won't download and cache any updates that have been made to the model either.
+If a component is not explicitly specified in the dictionary and no `default` is provided, it will be loaded with `torch.float32`.
+
+### Parallel loading

-## Pipeline data types
+Large models are often [sharded](../training/distributed_inference#model-sharding) into smaller files so that they are easier to load. Diffusers supports loading shards in parallel to speed up the loading process.
+
+Set the environment variables below to enable parallel loading.

-Use the `torch_dtype` argument in [`~DiffusionPipeline.from_pretrained`] to load a model with a specific data type. This allows you to load different models in different precisions. For example, loading a large transformer model in half-precision reduces the memory required.
+- Set `HF_ENABLE_PARALLEL_LOADING` to `"YES"` to enable parallel loading of shards.
+- Set `HF_PARALLEL_LOADING_WORKERS` to configure the number of parallel threads to use when loading shards. More workers load a model faster but use more memory.

-Pass the data type for each model as a dictionary to `torch_dtype`. Use the `default` key to set the default data type. If a model isn't in the dictionary and `default` isn't provided, it is loaded in full precision (`torch.float32`).
+The `device_map` argument should be set to `"cuda"` to pre-allocate a large chunk of memory based on the model size. This substantially reduces model load time because warming up the memory allocator now avoids many smaller calls to the allocator later.

```py
+import os
import torch
-from diffusers import QwenImagePipeline
+from diffusers import DiffusionPipeline

-pipeline = QwenImagePipeline.from_pretrained(
-    "Qwen/Qwen-Image",
-    torch_dtype={"transformer": torch.bfloat16, "default": torch.float16},
+os.environ["HF_ENABLE_PARALLEL_LOADING"] = "YES"
+pipeline = DiffusionPipeline.from_pretrained(
+    "Wan-AI/Wan2.2-I2V-A14B-Diffusers",
+    torch_dtype=torch.bfloat16,
+    device_map="cuda"
)
-print(pipeline.transformer.dtype, pipeline.vae.dtype)
```

-You don't need to use a dictionary if you're loading all the models in the same data type.
+### Local pipeline

-```py
-import torch
-from diffusers import QwenImagePipeline
+To load a pipeline locally, use [git-lfs](https://git-lfs.github.com/) to manually download a checkpoint to your local disk.
-pipeline = QwenImagePipeline.from_pretrained(
-    "Qwen/Qwen-Image", torch_dtype=torch.bfloat16
-)
-print(pipeline.transformer.dtype, pipeline.vae.dtype)
+```bash
+git lfs install
+git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5
```

-## Device placement
+This creates a local folder, `./stable-diffusion-v1-5`, on your disk and you should pass its path to [`~DiffusionPipeline.from_pretrained`].

-The `device_map` argument determines individual model or pipeline placement on an accelerator like a GPU. It is especially helpful when there are multiple GPUs.
+```python
+from diffusers import DiffusionPipeline
+
+stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True)
+```

-Diffusers currently provides three options to `device_map`, `"cuda"`, `"balanced"` and `"auto"`. Refer to the table below to compare the three placement strategies.
+The [`~DiffusionPipeline.from_pretrained`] method won't download files from the Hub when it detects a local path, but this also means it won't download and cache the latest changes to a checkpoint.

-| parameter | description |
-|---|---|
-| `"cuda"` | places model or pipeline on CUDA device |
-| `"balanced"` | evenly distributes model or pipeline on all GPUs |
-| `"auto"` | distribute model from fastest device first to slowest |
+## Customize a pipeline

-Use the `max_memory` argument in [`~DiffusionPipeline.from_pretrained`] to allocate a maximum amount of memory to use on each device. By default, Diffusers uses the maximum amount available.
+You can customize a pipeline by loading different components into it. This is important because you can:

-
-
+- change to a scheduler with faster generation speed or higher generation quality depending on your needs (check your pipeline's `scheduler.compatibles` attribute to see compatible schedulers)
+- change a default pipeline component to a newer and better performing one
+
+For example, let's customize the default [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0) checkpoint with:
+
+- The [`HeunDiscreteScheduler`] to generate higher quality images at the expense of slower generation speed. You must pass the `subfolder="scheduler"` parameter in [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler configuration from the correct [subfolder](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/scheduler) of the pipeline repository.
+- A more stable VAE that runs in fp16.

```py
+from diffusers import StableDiffusionXLPipeline, HeunDiscreteScheduler, AutoencoderKL
import torch

-from diffusers import DiffusionPipeline

-pipeline = DiffusionPipeline.from_pretrained(
-    "Qwen/Qwen-Image",
-    torch_dtype=torch.bfloat16,
-    device_map="cuda",
-)
+scheduler = HeunDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler")
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True)
```

-
-
+Now pass the new scheduler and VAE to the [`StableDiffusionXLPipeline`].
```py
+pipeline = StableDiffusionXLPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    scheduler=scheduler,
+    vae=vae,
+    torch_dtype=torch.float16,
+    variant="fp16",
+    use_safetensors=True
+).to("cuda")
+```
+
+## Reuse a pipeline
+
+When you load multiple pipelines that share the same model components, it makes sense to reuse the shared components instead of reloading everything into memory again, especially if your hardware is memory-constrained. For example:
+
+1. You generated an image with the [`StableDiffusionPipeline`] but you want to improve its quality with the [`StableDiffusionSAGPipeline`]. Both of these pipelines share the same pretrained model, so it'd be a waste of memory to load the same model twice.
+2. You want to add a model component, like a [`MotionAdapter`](../api/pipelines/animatediff#animatediffpipeline), to [`AnimateDiffPipeline`] which was instantiated from an existing [`StableDiffusionPipeline`]. Again, both pipelines share the same pretrained model, so it'd be a waste of memory to load an entirely new pipeline again.
+
+With the [`DiffusionPipeline.from_pipe`] API, you can switch between multiple pipelines to take advantage of their different features without increasing memory usage. It is similar to turning on and off a feature in your pipeline.
+
+> [!TIP]
+> To switch between tasks (rather than features), use the [`~DiffusionPipeline.from_pipe`] method with the [AutoPipeline](../api/pipelines/auto_pipeline) class, which automatically identifies the pipeline class based on the task (learn more in the [AutoPipeline](../tutorials/autopipeline) tutorial).
+
+Let's start with a [`StableDiffusionPipeline`] and then reuse the loaded model components to create a [`StableDiffusionSAGPipeline`] to increase generation quality. You'll use the [`StableDiffusionPipeline`] with an [IP-Adapter](./ip_adapter) to generate a bear eating pizza.
+
+```python
+from diffusers import DiffusionPipeline, StableDiffusionSAGPipeline
import torch
-from diffusers import AutoModel
-
-max_memory = {0: "16GB", 1: "16GB"}
-transformer = AutoModel.from_pretrained(
-    "Qwen/Qwen-Image",
-    subfolder="transformer",
-    torch_dtype=torch.bfloat16
-    device_map="cuda",
-    max_memory=max_memory
+from diffusers.utils import load_image
+
+image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png")
+
+pipe_sd = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", torch_dtype=torch.float16)
+pipe_sd.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
+pipe_sd.set_ip_adapter_scale(0.6)
+pipe_sd.to("cuda")
+
+generator = torch.Generator(device="cpu").manual_seed(33)
+out_sd = pipe_sd(
+    prompt="bear eats pizza",
+    negative_prompt="wrong white balance, dark, sketches,worst quality,low quality",
+    ip_adapter_image=image,
+    num_inference_steps=50,
+    generator=generator,
+).images[0]
+out_sd
+```
+
+
+ +
+ +For reference, you can check how much memory this process consumed. + +```python +def bytes_to_giga_bytes(bytes): + return bytes / 1024 / 1024 / 1024 +print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") +"Max memory allocated: 4.406213283538818 GB" +``` + +Now, reuse the same pipeline components from [`StableDiffusionPipeline`] in [`StableDiffusionSAGPipeline`] with the [`~DiffusionPipeline.from_pipe`] method. + +> [!WARNING] +> Some pipeline methods may not function properly on new pipelines created with [`~DiffusionPipeline.from_pipe`]. For instance, the [`~DiffusionPipeline.enable_model_cpu_offload`] method installs hooks on the model components based on a unique offloading sequence for each pipeline. If the models are executed in a different order in the new pipeline, the CPU offloading may not work correctly. +> +> To ensure everything works as expected, we recommend re-applying a pipeline method on a new pipeline created with [`~DiffusionPipeline.from_pipe`]. + +```python +pipe_sag = StableDiffusionSAGPipeline.from_pipe( + pipe_sd ) + +generator = torch.Generator(device="cpu").manual_seed(33) +out_sag = pipe_sag( + prompt="bear eats pizza", + negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", + ip_adapter_image=image, + num_inference_steps=50, + generator=generator, + guidance_scale=1.0, + sag_scale=0.75 +).images[0] +out_sag ``` -
-
+
+ +
-The `hf_device_map` attribute allows you to access and view the `device_map`. +If you check the memory usage, you'll see it remains the same as before because [`StableDiffusionPipeline`] and [`StableDiffusionSAGPipeline`] are sharing the same pipeline components. This allows you to use them interchangeably without any additional memory overhead. ```py -print(pipeline.hf_device_map) -# {'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0} +print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") +"Max memory allocated: 4.406213283538818 GB" ``` -Reset a pipeline's `device_map` with the [`~DiffusionPipeline.reset_device_map`] method. This is necessary if you want to use methods such as `.to()`, [`~DiffusionPipeline.enable_sequential_cpu_offload`], and [`~DiffusionPipeline.enable_model_cpu_offload`]. +Let's animate the image with the [`AnimateDiffPipeline`] and also add a [`MotionAdapter`] module to the pipeline. For the [`AnimateDiffPipeline`], you need to unload the IP-Adapter first and reload it *after* you've created your new pipeline (this only applies to the [`AnimateDiffPipeline`]). ```py -pipeline.reset_device_map() +from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler +from diffusers.utils import export_to_gif + +pipe_sag.unload_ip_adapter() +adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) + +pipe_animate = AnimateDiffPipeline.from_pipe(pipe_sd, motion_adapter=adapter) +pipe_animate.scheduler = DDIMScheduler.from_config(pipe_animate.scheduler.config, beta_schedule="linear") +# load IP-Adapter and LoRA weights again +pipe_animate.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") +pipe_animate.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out") +pipe_animate.to("cuda") + +generator = torch.Generator(device="cpu").manual_seed(33) +pipe_animate.set_adapters("zoom-out", adapter_weights=0.75) +out = pipe_animate( + prompt="bear eats pizza", + num_frames=16, + num_inference_steps=50, + ip_adapter_image=image, + generator=generator, +).frames[0] +export_to_gif(out, "out_animate.gif") ``` -## Parallel loading +
+ +
-Large models are often [sharded](../training/distributed_inference#model-sharding) into smaller files so that they are easier to load. Diffusers supports loading shards in parallel to speed up the loading process. +The [`AnimateDiffPipeline`] is more memory-intensive and consumes 15GB of memory (see the [Memory-usage of from_pipe](#memory-usage-of-from_pipe) section to learn what this means for your memory-usage). -Set `HF_ENABLE_PARALLEL_LOADING` to `"YES"` to enable parallel loading of shards. +```py +print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") +"Max memory allocated: 15.178664207458496 GB" +``` -The `device_map` argument should be set to `"cuda"` to pre-allocate a large chunk of memory based on the model size. This substantially reduces model load time because warming up the memory allocator now avoids many smaller calls to the allocator later. +### Modify from_pipe components + +Pipelines loaded with [`~DiffusionPipeline.from_pipe`] can be customized with different model components or methods. However, whenever you modify the *state* of the model components, it affects all the other pipelines that share the same components. For example, if you call [`~diffusers.loaders.IPAdapterMixin.unload_ip_adapter`] on the [`StableDiffusionSAGPipeline`], you won't be able to use IP-Adapter with the [`StableDiffusionPipeline`] because it's been removed from their shared components. ```py -import os -import torch -from diffusers import DiffusionPipeline +pipe.sag_unload_ip_adapter() + +generator = torch.Generator(device="cpu").manual_seed(33) +out_sd = pipe_sd( + prompt="bear eats pizza", + negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", + ip_adapter_image=image, + num_inference_steps=50, + generator=generator, +).images[0] +"AttributeError: 'NoneType' object has no attribute 'image_projection_layers'" +``` -os.environ["HF_ENABLE_PARALLEL_LOADING"] = "YES" +### Memory usage of from_pipe -pipeline = DiffusionPipeline.from_pretrained( - "Wan-AI/Wan2.2-I2V-A14B-Diffusers", torch_dtype=torch.bfloat16, device_map="cuda" -) -``` +The memory requirement of loading multiple pipelines with [`~DiffusionPipeline.from_pipe`] is determined by the pipeline with the highest memory-usage regardless of the number of pipelines you create. -## Replacing models in a pipeline +| Pipeline | Memory usage (GB) | +|---|---| +| StableDiffusionPipeline | 4.400 | +| StableDiffusionSAGPipeline | 4.400 | +| AnimateDiffPipeline | 15.178 | -[`DiffusionPipeline`] is flexible and accommodates loading different models or schedulers. You can experiment with different schedulers to optimize for generation speed or quality, and you can replace models with more performant ones. +The [`AnimateDiffPipeline`] has the highest memory requirement, so the *total memory-usage* is based only on the [`AnimateDiffPipeline`]. Your memory-usage will not increase if you create additional pipelines as long as their memory requirements doesn't exceed that of the [`AnimateDiffPipeline`]. Each pipeline can be used interchangeably without any additional memory overhead. -The example below swaps the default scheduler to generate higher quality images and a more stable VAE version. Pass the `subfolder` argument in [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler to the correct subfolder. 
+## Safety checker -```py -import torch -from diffusers import DiffusionPipeline, HeunDiscreteScheduler, AutoModel +Diffusers implements a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) for Stable Diffusion models which can generate harmful content. The safety checker screens the generated output against known hardcoded not-safe-for-work (NSFW) content. If for whatever reason you'd like to disable the safety checker, pass `safety_checker=None` to the [`~DiffusionPipeline.from_pretrained`] method. -scheduler = HeunDiscreteScheduler.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler" -) -vae = AutoModel.from_pretrained( - "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 -) +```python +from diffusers import DiffusionPipeline -pipeline = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - scheduler=scheduler, - vae=vae, - torch_dtype=torch.float16, - device_map="cuda" -) +pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, use_safetensors=True) +""" +You have disabled the safety checker for by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling it only for use cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 . +""" ``` -## Reusing models in multiple pipelines +## Checkpoint variants -When working with multiple pipelines that use the same model, the [`~DiffusionPipeline.from_pipe`] method enables reusing a model instead of reloading it each time. This allows you to use multiple pipelines without increasing memory usage. +A checkpoint variant is usually a checkpoint whose weights are: -Memory usage is determined by the pipeline with the highest memory requirement regardless of the number of pipelines. +- Stored in a different floating point type, such as [torch.float16](https://pytorch.org/docs/stable/tensors.html#data-types), because it only requires half the bandwidth and storage to download. You can't use this variant if you're continuing training or using a CPU. +- Non-exponential mean averaged (EMA) weights which shouldn't be used for inference. You should use this variant to continue finetuning a model. -The example below loads a pipeline and then loads a second pipeline with [`~DiffusionPipeline.from_pipe`] to use [perturbed-attention guidance (PAG)](../api/pipelines/pag) to improve generation quality. +> [!TIP] +> When the checkpoints have identical model structures, but they were trained on different datasets and with a different training setup, they should be stored in separate repositories. For example, [stabilityai/stable-diffusion-2](https://hf.co/stabilityai/stable-diffusion-2) and [stabilityai/stable-diffusion-2-1](https://hf.co/stabilityai/stable-diffusion-2-1) are stored in separate repositories. -> [!WARNING] -> Use [`AutoPipelineForText2Image`] because [`DiffusionPipeline`] doesn't support PAG. Refer to the [AutoPipeline](../tutorials/autopipeline) docs to learn more. +Otherwise, a variant is **identical** to the original checkpoint. 
They have exactly the same serialization format (like [safetensors](./using_safetensors)), model structure, and their weights have identical tensor shapes. + +| **checkpoint type** | **weight name** | **argument for loading weights** | +|---------------------|---------------------------------------------|----------------------------------| +| original | diffusion_pytorch_model.safetensors | | +| floating point | diffusion_pytorch_model.fp16.safetensors | `variant`, `torch_dtype` | +| non-EMA | diffusion_pytorch_model.non_ema.safetensors | `variant` | + +There are two important arguments for loading variants: + +- `torch_dtype` specifies the floating point precision of the loaded checkpoint. For example, if you want to save bandwidth by loading a fp16 variant, you should set `variant="fp16"` and `torch_dtype=torch.float16` to *convert the weights* to fp16. Otherwise, the fp16 weights are converted to the default fp32 precision. + + If you only set `torch_dtype=torch.float16`, the default fp32 weights are downloaded first and then converted to fp16. + +- `variant` specifies which files should be loaded from the repository. For example, if you want to load a non-EMA variant of a UNet from [stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main/unet), set `variant="non_ema"` to download the `non_ema` file. + + + ```py +from diffusers import DiffusionPipeline import torch -from diffusers import AutoPipelineForText2Image -pipeline_sdxl = AutoPipelineForText2Image.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, device_map="cuda" +pipeline = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True ) -prompt = """ -cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California -highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain -""" -image = pipeline_sdxl(prompt).images[0] -print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") -# Max memory reserved: 10.47 GB ``` -Set `enable_pag=True` in the second pipeline to enable PAG. The second pipeline uses the same amount of memory because it shares model weights with the first one. + + ```py -pipeline = AutoPipelineForText2Image.from_pipe( - pipeline_sdxl, enable_pag=True +pipeline = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema", use_safetensors=True ) -prompt = """ -cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California -highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain -""" -image = pipeline(prompt).images[0] -print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB") -# Max memory reserved: 10.47 GB ``` -> [!WARNING] -> Pipelines created by [`~DiffusionPipeline.from_pipe`] share the same models and *state*. Modifying the state of a model in one pipeline affects all the other pipelines that share the same model. + + -Some methods may not work correctly on pipelines created with [`~DiffusionPipeline.from_pipe`]. For example, [`~DiffusionPipeline.enable_model_cpu_offload`] relies on a unique model execution order, which may differ in the new pipeline. To ensure proper functionality, reapply these methods on the new pipeline. 
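The `variant` argument also works when loading an individual model rather than a full pipeline. A minimal sketch, assuming you only want the non-EMA UNet listed in the table above:

```py
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    subfolder="unet",
    variant="non_ema",
    use_safetensors=True,
)
```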
+Use the `variant` parameter in the [`DiffusionPipeline.save_pretrained`] method to save a checkpoint as a different floating point type or as a non-EMA variant. You should try to save a variant to the same folder as the original checkpoint, so you have the option of loading both from the same folder.

+
+

+```python
+from diffusers import DiffusionPipeline
+
+pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16")
+```

+
+

```py
+pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema")
+```

+
+
+
+If you don't save the variant to an existing folder, you must specify the `variant` argument otherwise it'll throw an `Exception` because it can't find the original checkpoint.

+```python
+# 👎 this won't work
+pipeline = DiffusionPipeline.from_pretrained(
+    "./stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
+)
+# 👍 this works
+pipeline = DiffusionPipeline.from_pretrained(
+    "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True
+)
+```
+
+## DiffusionPipeline explained
+
+As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things:
+
+- Download the latest version of the folder structure required for inference and cache it. If the latest folder structure is available in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cache and won't redownload the files.
+- Load the cached weights into the correct pipeline [class](../api/pipelines/overview#diffusers-summary) - retrieved from the `model_index.json` file - and return an instance of it.
+
+The pipelines' underlying folder structure corresponds directly with their class instances. For example, the [`StableDiffusionPipeline`] corresponds to the folder structure in [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5).
+
+```python
+from diffusers import DiffusionPipeline
+
+repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
+pipeline = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True)
+print(pipeline)
+```
+
+You'll see pipeline is an instance of [`StableDiffusionPipeline`], which consists of seven components:
+
+- `"feature_extractor"`: a [`~transformers.CLIPImageProcessor`] from 🤗 Transformers.
+- `"safety_checker"`: a [component](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) for screening against harmful content. +- `"scheduler"`: an instance of [`PNDMScheduler`]. +- `"text_encoder"`: a [`~transformers.CLIPTextModel`] from 🤗 Transformers. +- `"tokenizer"`: a [`~transformers.CLIPTokenizer`] from 🤗 Transformers. +- `"unet"`: an instance of [`UNet2DConditionModel`]. +- `"vae"`: an instance of [`AutoencoderKL`]. + +```json +StableDiffusionPipeline { + "feature_extractor": [ + "transformers", + "CLIPImageProcessor" + ], + "safety_checker": [ + "stable_diffusion", + "StableDiffusionSafetyChecker" + ], + "scheduler": [ + "diffusers", + "PNDMScheduler" + ], + "text_encoder": [ + "transformers", + "CLIPTextModel" + ], + "tokenizer": [ + "transformers", + "CLIPTokenizer" + ], + "unet": [ + "diffusers", + "UNet2DConditionModel" + ], + "vae": [ + "diffusers", + "AutoencoderKL" + ] +} +``` + +Compare the components of the pipeline instance to the [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main) folder structure, and you'll see there is a separate folder for each of the components in the repository: + +``` +. +├── feature_extractor +│   └── preprocessor_config.json +├── model_index.json +├── safety_checker +│   ├── config.json +| ├── model.fp16.safetensors +│ ├── model.safetensors +│ ├── pytorch_model.bin +| └── pytorch_model.fp16.bin +├── scheduler +│   └── scheduler_config.json +├── text_encoder +│   ├── config.json +| ├── model.fp16.safetensors +│ ├── model.safetensors +│ |── pytorch_model.bin +| └── pytorch_model.fp16.bin +├── tokenizer +│   ├── merges.txt +│   ├── special_tokens_map.json +│   ├── tokenizer_config.json +│   └── vocab.json +├── unet +│   ├── config.json +│   ├── diffusion_pytorch_model.bin +| |── diffusion_pytorch_model.fp16.bin +│ |── diffusion_pytorch_model.f16.safetensors +│ |── diffusion_pytorch_model.non_ema.bin +│ |── diffusion_pytorch_model.non_ema.safetensors +│ └── diffusion_pytorch_model.safetensors +|── vae +. ├── config.json +. 
    ├── diffusion_pytorch_model.bin
    ├── diffusion_pytorch_model.fp16.bin
    ├── diffusion_pytorch_model.fp16.safetensors
    └── diffusion_pytorch_model.safetensors
```

You can access each of the components of the pipeline as an attribute to view its configuration:

```py
pipeline.tokenizer
CLIPTokenizer(
    name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer",
    vocab_size=49408,
    model_max_length=77,
    is_fast=False,
    padding_side="right",
    truncation_side="right",
    special_tokens={
        "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "pad_token": "<|endoftext|>",
    },
    clean_up_tokenization_spaces=True
)
```

Every pipeline expects a [`model_index.json`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/model_index.json) file that tells the [`DiffusionPipeline`]:

- which pipeline class to load from `_class_name`
- which version of 🧨 Diffusers was used to create the model in `_diffusers_version`
- what components from which library are stored in the subfolders (`name` corresponds to the component and subfolder name, `library` corresponds to the name of the library to load the class from, and `class` corresponds to the class name)

```json
{
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.6.0",
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```
diff --git a/docs/source/en/using-diffusers/other-formats.md b/docs/source/en/using-diffusers/other-formats.md
index 59835bbf2622..11afbf29d3f2 100644
--- a/docs/source/en/using-diffusers/other-formats.md
+++ b/docs/source/en/using-diffusers/other-formats.md
@@ -176,7 +176,7 @@ Benefits of using the Diffusers-multifolder layout include:
 ).to("cuda")
 turbo_pipeline.scheduler = EulerDiscreteScheduler.from_config(
     turbo_pipeline.scheduler.config,
-    timestep+spacing="trailing"
+    timestep_spacing="trailing"
 )
 image = turbo_pipeline(
     "an astronaut riding a unicorn on mars",
@@ -267,7 +267,6 @@ pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_d
 save_folder = "flux-dev"
 pipe.save_pretrained("flux-dev")
 export_folder_as_dduf("flux-dev.dduf", folder_path=save_folder)
 ```
 
 > [!TIP]
 > Packaging and loading quantized checkpoints in the DDUF format is supported as long as they respect the multi-folder structure.
diff --git a/docs/source/en/using-diffusers/reusing_seeds.md b/docs/source/en/using-diffusers/reusing_seeds.md
index b4aed0aa6354..ac9350f24caa 100644
--- a/docs/source/en/using-diffusers/reusing_seeds.md
+++ b/docs/source/en/using-diffusers/reusing_seeds.md
@@ -10,86 +10,129 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
 specific language governing permissions and limitations under the License.
 -->

-# Reproducibility
+# Reproducible pipelines

-Diffusion is a random process that generates a different output every time. For certain situations like testing and replicating results, you want to generate the same result each time, across releases and platforms within a certain tolerance range.
+Diffusion models are inherently random which is what allows them to generate different outputs every time they are run. But there are certain times when you want to generate the same output every time, like when you're testing, replicating results, and even [improving image quality](#deterministic-batch-generation). While you can't expect to get identical results across platforms, you can expect reproducible results across releases and platforms within a certain tolerance range (though even this may vary).

-This guide will show you how to control sources of randomness and enable deterministic algorithms.
+This guide will show you how to control randomness for deterministic generation on a CPU and GPU.

-## Generator
+> [!TIP]
+> We strongly recommend reading PyTorch's [statement about reproducibility](https://pytorch.org/docs/stable/notes/randomness.html):
+>
+> "Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds."

-Pipelines rely on [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html), which uses a different random seed each time, to create the initial noisy tensors. To generate the same output on a CPU or GPU, use a [Generator](https://docs.pytorch.org/docs/stable/generated/torch.Generator.html) to manage how random values are generated.
+## Control randomness

-> [!TIP]
-> If reproducibility is important to your use case, we recommend always using a CPU `Generator`. The performance loss is often negligible and you'll generate more similar values.
+During inference, pipelines rely heavily on random sampling operations which include creating the
+Gaussian noise tensors to denoise and adding noise to the scheduling step.

+Take a look at the tensor values in the [`DDIMPipeline`] after two inference steps.
+
+```python
+from diffusers import DDIMPipeline
+import numpy as np

-The GPU uses a different random number generator than the CPU. Diffusers solves this issue with the [`~utils.torch_utils.randn_tensor`] function to create the random tensor on a CPU and then moving it to the GPU. This function is used everywhere inside the pipeline and you don't need to explicitly call it.
+ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
+image = ddim(num_inference_steps=2, output_type="np").images
+print(np.abs(image).sum())
+```

-Use [manual_seed](https://docs.pytorch.org/docs/stable/generated/torch.manual_seed.html) as shown below to set a seed.
+Running the code above prints one value, but if you run it again you get a different value.

-```py
+Each time the pipeline is run, [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html) uses a different random seed to create the Gaussian noise tensors. This leads to a different result each time it is run and enables the diffusion pipeline to generate a different random image each time.
+
+But if you need to reliably generate the same image, that depends on whether you're running the pipeline on a CPU or GPU.
+ +> [!TIP] +> It might seem unintuitive to pass `Generator` objects to a pipeline instead of the integer value representing the seed. However, this is the recommended design when working with probabilistic models in PyTorch because a `Generator` is a *random state* that can be passed to multiple pipelines in a sequence. As soon as the `Generator` is consumed, the *state* is changed in place which means even if you passed the same `Generator` to a different pipeline, it won't produce the same result because the state is already changed. + + + + +To generate reproducible results on a CPU, you'll need to use a PyTorch [Generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed. Now when you run the code, it always prints a value of `1491.1711` because the `Generator` object with the seed is passed to all the random functions in the pipeline. You should get a similar, if not the same, result on whatever hardware and PyTorch version you're using. + +```python import torch import numpy as np from diffusers import DDIMPipeline -ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", device_map="cuda") -generator = torch.manual_seed(0) +ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) +generator = torch.Generator(device="cpu").manual_seed(0) image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` - + -Set `device="cpu"` in the `Generator` and use [manual_seed](https://docs.pytorch.org/docs/stable/generated/torch.manual_seed.html) to set a seed for generating random numbers. +Writing a reproducible pipeline on a GPU is a bit trickier, and full reproducibility across different hardware is not guaranteed because matrix multiplication - which diffusion pipelines require a lot of - is less deterministic on a GPU than a CPU. For example, if you run the same code example from the CPU example, you'll get a different result even though the seed is identical. This is because the GPU uses a different random number generator than the CPU. -```py +```python import torch import numpy as np from diffusers import DDIMPipeline -ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32") -generator = torch.Generator(device="cpu").manual_seed(0) +ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) +ddim.to("cuda") +generator = torch.Generator(device="cuda").manual_seed(0) image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` - - +To avoid this issue, Diffusers has a [`~utils.torch_utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The [`~utils.torch_utils.randn_tensor`] function is used everywhere inside the pipeline. Now you can call [torch.manual_seed](https://pytorch.org/docs/stable/generated/torch.manual_seed.html) which automatically creates a CPU `Generator` that can be passed to the pipeline even if it is being run on a GPU. -The `Generator` object should be passed to the pipeline instead of an integer seed. `Generator` maintains a *random state* that is consumed and modified when used. Once consumed, the same `Generator` object produces different results in subsequent calls, even across different pipelines, because it's *state* has changed. 
+```python
+import torch
+import numpy as np
+from diffusers import DDIMPipeline

-```py
+ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
+ddim.to("cuda")
 generator = torch.manual_seed(0)
-
-for _ in range(5):
--    image = pipeline(prompt, generator=generator)
-+    image = pipeline(prompt, generator=torch.manual_seed(0))
+image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
+print(np.abs(image).sum())
```

+> [!TIP]
+> If reproducibility is important to your use case, we recommend always passing a CPU `Generator`. The performance loss is often negligible and you'll generate more similar values than if the pipeline had been run on a GPU.
+
+Finally, more complex pipelines such as [`UnCLIPPipeline`] are often extremely
+susceptible to precision error propagation. You'll need to use
+exactly the same hardware and PyTorch version for full reproducibility.

+
+

## Deterministic algorithms

-PyTorch supports [deterministic algorithms](https://docs.pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms) - where available - for certain operations so they produce the same results. Deterministic algorithms may be slower and decrease performance.
+You can also configure PyTorch to use deterministic algorithms to create a reproducible pipeline. The downside is that deterministic algorithms may be slower than non-deterministic ones and you may observe a decrease in performance.

-Use Diffusers' [enable_full_determinism](https://github.com/huggingface/diffusers/blob/142f353e1c638ff1d20bd798402b68f72c1ebbdd/src/diffusers/utils/testing_utils.py#L861) function to enable deterministic algorithms.
+Non-deterministic behavior occurs when operations are launched in more than one CUDA stream. To avoid this, set the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during runtime.

-```py
-import torch
-from diffusers_utils import enable_full_determinism
+PyTorch typically benchmarks multiple algorithms to select the fastest one, but if you want reproducibility, you should disable this feature because the benchmark may select different algorithms each time. Call Diffusers' [enable_full_determinism](https://github.com/huggingface/diffusers/blob/142f353e1c638ff1d20bd798402b68f72c1ebbdd/src/diffusers/utils/testing_utils.py#L861) function to enable deterministic algorithms.

+```py
+from diffusers.utils.testing_utils import enable_full_determinism
+
 enable_full_determinism()
```

-Under the hood, `enable_full_determinism` works by:
+Now when you run the same pipeline twice, you'll get identical results.

-- Setting the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during rntime. Non-deterministic behavior occurs when operations are used in more than one CUDA stream.
-- Disabling benchmarking to find the fastest convolution operation by setting `torch.backends.cudnn.benchmark=False`. Non-deterministic behavior occurs because the benchmark may select different algorithms each time depending on hardware or benchmarking noise.
-- Disabling TensorFloat32 (TF32) operations in favor of more precise and consistent full-precision operations.
+```py +import torch +from diffusers import DDIMScheduler, StableDiffusionPipeline + +pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True).to("cuda") +pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) +g = torch.Generator(device="cuda") +prompt = "A bear is playing a guitar on Times Square" -## Resources +g.manual_seed(0) +result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images -We strongly recommend reading PyTorch's developer notes about [Reproducibility](https://docs.pytorch.org/docs/stable/notes/randomness.html). You can try to limit randomness, but it is not *guaranteed* even with an identical seed. \ No newline at end of file +g.manual_seed(0) +result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images + +print("L_inf dist =", abs(result1 - result2).max()) +"L_inf dist = tensor(0., device='cuda:0')" +``` diff --git a/docs/source/en/using-diffusers/schedulers.md b/docs/source/en/using-diffusers/schedulers.md index 6d928f8037c4..aabb9dd31c96 100644 --- a/docs/source/en/using-diffusers/schedulers.md +++ b/docs/source/en/using-diffusers/schedulers.md @@ -165,6 +165,53 @@ image Most images look very similar and are comparable in quality. Again, it often comes down to your specific use case so a good approach is to run multiple different schedulers and compare the results. +### Flax schedulers + +To compare Flax schedulers, you need to additionally load the scheduler state into the model parameters. For example, let's change the default scheduler in [`FlaxStableDiffusionPipeline`] to use the super fast [`FlaxDPMSolverMultistepScheduler`]. + +> [!WARNING] +> The [`FlaxLMSDiscreteScheduler`] and [`FlaxDDPMScheduler`] are not compatible with the [`FlaxStableDiffusionPipeline`] yet. + +```py +import jax +import numpy as np +from flax.jax_utils import replicate +from flax.training.common_utils import shard +from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler + +scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + subfolder="scheduler" +) +pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + scheduler=scheduler, + variant="bf16", + dtype=jax.numpy.bfloat16, +) +params["scheduler"] = scheduler_state +``` + +Then you can take advantage of Flax's compatibility with TPUs to generate a number of images in parallel. You'll need to make a copy of the model parameters for each available device and then split the inputs across them to generate your desired number of images. + +```py +# Generate 1 image per parallel device (8 on TPUv2-8 or TPUv3-8) +prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition." 
+num_samples = jax.device_count()
+prompt_ids = pipeline.prepare_inputs([prompt] * num_samples)
+
+prng_seed = jax.random.PRNGKey(0)
+num_inference_steps = 25
+
+# shard inputs and rng
+params = replicate(params)
+prng_seed = jax.random.split(prng_seed, jax.device_count())
+prompt_ids = shard(prompt_ids)
+
+images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
+images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
+```
+
 ## Models
 
 Models are loaded from the [`ModelMixin.from_pretrained`] method, which downloads and caches the latest version of the model weights and configurations. If the latest files are available in the local cache, [`~ModelMixin.from_pretrained`] reuses files in the cache instead of re-downloading them.
diff --git a/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md b/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md
new file mode 100644
index 000000000000..ac9ffe0dfc11
--- /dev/null
+++ b/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md
@@ -0,0 +1,225 @@
+
+
+# JAX/Flax
+
+[[open-in-colab]]
+
+🤗 Diffusers supports Flax for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform. This guide shows you how to run inference with Stable Diffusion using JAX/Flax.
+
+Before you begin, make sure you have the necessary libraries installed:
+
+```py
+# uncomment to install the necessary libraries in Colab
+#!pip install -q jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
+#!pip install -q diffusers
+```
+
+You should also make sure you're using a TPU backend. While JAX does not run exclusively on TPUs, you'll get the best performance on a TPU because each server has 8 TPU accelerators working in parallel.
+
+If you are running this guide in Colab, select *Runtime* in the menu above, select the option *Change runtime type*, and then select *TPU* under the *Hardware accelerator* setting. Import JAX and quickly check whether you're using a TPU:
+
+```python
+import jax
+import jax.tools.colab_tpu
+jax.tools.colab_tpu.setup_tpu()
+
+num_devices = jax.device_count()
+device_type = jax.devices()[0].device_kind
+
+print(f"Found {num_devices} JAX devices of type {device_type}.")
+assert "TPU" in device_type, (
+    "Available device is not a TPU, please select TPU from Runtime > Change runtime type > Hardware accelerator"
+)
+# Found 8 JAX devices of type Cloud TPU.
+```
+
+Great, now you can import the rest of the dependencies you'll need:
+
+```python
+import jax.numpy as jnp
+from jax import pmap
+from flax.jax_utils import replicate
+from flax.training.common_utils import shard
+
+from diffusers import FlaxStableDiffusionPipeline
+```
+
+## Load a model
+
+Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading a pretrained Flax pipeline returns *both* the pipeline and the model weights (or parameters). In this guide, you'll use `bfloat16`, a more efficient half-float type that is supported by TPUs (you can also use `float32` for full precision if you want).
+
+```python
+dtype = jnp.bfloat16
+pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
+    "CompVis/stable-diffusion-v1-4",
+    variant="bf16",
+    dtype=dtype,
+)
+```
+
+## Inference
+
+TPUs usually have 8 devices working in parallel, so let's use the same prompt for each device. This means you can perform inference on 8 devices at once, with each device generating one image.
As a result, you'll get 8 images in the same amount of time it takes for one chip to generate a single image!

+
+Learn more details in the [How does parallelization work?](#how-does-parallelization-work) section.
+

+After replicating the prompt, get the tokenized text ids by calling the `prepare_inputs` function on the pipeline. The length of the tokenized text is set to 77 tokens as required by the configuration of the underlying CLIP text model.
+
+```python
+prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
+prompt = [prompt] * jax.device_count()
+prompt_ids = pipeline.prepare_inputs(prompt)
+prompt_ids.shape
+# (8, 77)
+```
+
+Model parameters and inputs have to be replicated across the 8 parallel devices. The parameters dictionary is replicated with [`flax.jax_utils.replicate`](https://flax.readthedocs.io/en/latest/api_reference/flax.jax_utils.html#flax.jax_utils.replicate) which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
+
+```python
+# parameters
+p_params = replicate(params)
+
+# arrays
+prompt_ids = shard(prompt_ids)
+prompt_ids.shape
+# (8, 1, 77)
+```
+
+This shape means each one of the 8 devices receives as an input a `jnp` array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, you could have a batch size larger than `1` if you want to generate multiple images (per chip) at once.
+
+Next, create a random number generator to pass to the generation function. This is standard procedure in Flax, which is very serious and opinionated about random numbers. All functions that deal with random numbers are expected to receive a generator to ensure reproducibility, even when you're training across multiple distributed devices.
+
+The helper function below uses a seed to initialize a random number generator. As long as you use the same seed, you'll get the exact same results. Feel free to use different seeds when exploring results later in the guide.
+
+```python
+def create_key(seed=0):
+    return jax.random.PRNGKey(seed)
+```
+
+The key returned by the helper function, `rng`, is split 8 times so each device receives a different generator and generates a different image.
+
+```python
+rng = create_key(0)
+rng = jax.random.split(rng, jax.device_count())
+```
+
+To take advantage of JAX's optimized speed on a TPU, pass `jit=True` to the pipeline to compile the JAX code into an efficient representation and to ensure the model runs in parallel across the 8 devices.
+
+You need to ensure all your inputs have the same shape in subsequent calls, otherwise JAX will need to recompile the code which is slower.
+
+The first inference run takes more time because it needs to compile the code, but subsequent calls (even with different inputs) are much faster. For example, it took more than a minute to compile on a TPU v2-8, but then it takes about **7s** on a future inference run!
+
+```py
+%%time
+images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
+
+# CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
+# Wall time: 1min 29s
+```
+
+The returned array has shape `(8, 1, 512, 512, 3)` which should be reshaped to remove the second dimension and get 8 images of `512 × 512 × 3`. Then you can use the [`~utils.numpy_to_pil`] function to convert the arrays into images.
+
+```python
+from diffusers.utils import make_image_grid
+
+images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
+images = pipeline.numpy_to_pil(images)
+make_image_grid(images, rows=2, cols=4)
+```
+
+![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_38_output_0.jpeg)
+
+## Using different prompts
+
+You don't necessarily have to use the same prompt on all devices. For example, to generate 8 different prompts:
+
+```python
+prompts = [
+    "Labrador in the style of Hokusai",
+    "Painting of a squirrel skating in New York",
+    "HAL-9000 in the style of Van Gogh",
+    "Times Square under water, with fish and a dolphin swimming around",
+    "Ancient Roman fresco showing a man working on his laptop",
+    "Close-up photograph of young black woman against urban background, high quality, bokeh",
+    "Armchair in the shape of an avocado",
+    "Clown astronaut in space, with Earth in the background",
+]
+
+prompt_ids = pipeline.prepare_inputs(prompts)
+prompt_ids = shard(prompt_ids)
+
+images = pipeline(prompt_ids, p_params, rng, jit=True).images
+images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
+images = pipeline.numpy_to_pil(images)
+
+make_image_grid(images, 2, 4)
+```
+
+![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_43_output_0.jpeg)
+
+## How does parallelization work?
+
+The Flax pipeline in 🤗 Diffusers automatically compiles the model and runs it in parallel on all available devices. Let's take a closer look at how that process works.
+
+JAX parallelization can be done in multiple ways. The easiest one revolves around using the [`jax.pmap`](https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html) function to achieve single-program multiple-data (SPMD) parallelization. It means running several copies of the same code, each on different data inputs. More sophisticated approaches are possible, and you can go over to the JAX [documentation](https://jax.readthedocs.io/en/latest/index.html) to explore this topic in more detail if you are interested!
+
+`jax.pmap` does two things:
+
+1. Compiles (or "`jit`s") the code, similar to `jax.jit()`. This happens not when you call `pmap`, but only the first time the `pmap`ped function is called.
+2. Ensures the compiled code runs in parallel on all available devices.
+
+To demonstrate, call `pmap` on the pipeline's `_generate` method (this is a private method that generates images and may be renamed or removed in future releases of 🤗 Diffusers):
+
+```python
+p_generate = pmap(pipeline._generate)
+```
+
+After calling `pmap`, the prepared function `p_generate` will:
+
+1. Make a copy of the underlying function, `pipeline._generate`, on each device.
+2. Send each device a different portion of the input arguments (this is why it's necessary to call the *shard* function). In this case, `prompt_ids` has shape `(8, 1, 77)` so the array is split into 8 and each copy of `_generate` receives an input with shape `(1, 77)`.
+
+The most important thing to pay attention to here is the batch size (1 in this example), and the input dimensions that make sense for your code. You don't have to change anything else to make the code work in parallel.
+
+The first time you call the pipeline takes more time, but the calls afterward are much faster.
The `block_until_ready` function is used to correctly measure inference time because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use it in your code; blocking occurs automatically when you want to use the result of a computation that has not yet been materialized.
+
+```py
+%%time
+images = p_generate(prompt_ids, p_params, rng)
+images = images.block_until_ready()
+
+# CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
+# Wall time: 1min 15s
+```
+
+Check your image dimensions to see if they're correct:
+
+```python
+images.shape
+# (8, 1, 512, 512, 3)
+```
+
+## Resources
+
+To learn more about how JAX works with Stable Diffusion, you may be interested in reading:
+
+* [Accelerating Stable Diffusion XL Inference with JAX on Cloud TPU v5e](https://hf.co/blog/sdxl_jax)
diff --git a/docs/source/en/using-diffusers/text-img2vid.md b/docs/source/en/using-diffusers/text-img2vid.md
index ade3e0de329f..67d1fd118e4d 100644
--- a/docs/source/en/using-diffusers/text-img2vid.md
+++ b/docs/source/en/using-diffusers/text-img2vid.md
@@ -287,7 +287,7 @@ export_to_video(output, "output.mp4", fps=16)
 
 ## Reduce memory usage
 
-Recent video models like [`HunyuanVideoPipeline`] and [`WanPipeline`], which have 10B+ parameters, require a lot of memory and it often exceeds the memory availabe on consumer hardware. Diffusers offers several techniques for reducing the memory requirements of these large models.
+Recent video models like [`HunyuanVideoPipeline`] and [`WanPipeline`], which have 10B+ parameters, require a lot of memory and it often exceeds the memory available on consumer hardware. Diffusers offers several techniques for reducing the memory requirements of these large models.
 
 > [!TIP]
 > Refer to the [Reduce memory usage](../optimization/memory) guide for more details about other memory saving techniques.
diff --git a/examples/community/README.md b/examples/community/README.md
index e314463077f0..e4fbd7936686 100644
--- a/examples/community/README.md
+++ b/examples/community/README.md
@@ -88,8 +88,6 @@ PIXART-α Controlnet pipeline | Implementation of the controlnet model for pixar
 | FaithDiff Stable Diffusion XL Pipeline | Implementation of [(CVPR 2025) FaithDiff: Unleashing Diffusion Priors for Faithful Image Super-resolution](https://huggingface.co/papers/2411.18824) - FaithDiff is a faithful image super-resolution method that leverages latent diffusion models by actively adapting the diffusion prior and jointly fine-tuning its components (encoder and diffusion model) with an alignment module to ensure high fidelity and structural consistency. | [FaithDiff Stable Diffusion XL Pipeline](#faithdiff-stable-diffusion-xl-pipeline) | [![Hugging Face Models](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-blue)](https://huggingface.co/jychen9811/FaithDiff) | [Junyang Chen, Jinshan Pan, Jiangxin Dong, IMAG Lab, (Adapted by Eliseu Silva)](https://github.com/JyChen9811/FaithDiff) |
 | Stable Diffusion 3 InstructPix2Pix Pipeline | Implementation of Stable Diffusion 3 InstructPix2Pix Pipeline | [Stable Diffusion 3 InstructPix2Pix Pipeline](#stable-diffusion-3-instructpix2pix-pipeline) | [![Hugging Face Models](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-blue)](https://huggingface.co/BleachNick/SD3_UltraEdit_freeform) [![Hugging Face Models](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-blue)](https://huggingface.co/CaptainZZZ/sd3-instructpix2pix) | [Jiayu Zhang](https://github.com/xduzhangjiayu) and [Haozhe Zhao](https://github.com/HaozheZhao)|
 | Flux Kontext multiple images | A modified version of the `FluxKontextPipeline` that supports calling Flux Kontext with multiple reference images.| [Flux Kontext multiple input Pipeline](#flux-kontext-multiple-images) | - | [Net-Mist](https://github.com/Net-Mist) |
-
-
 To load a custom pipeline you just need to pass the `custom_pipeline` argument to `DiffusionPipeline`, as one of the files in `diffusers/examples/community`. Feel free to send a PR with your own pipelines, we will merge them quickly.
 
 ```py
diff --git a/examples/community/composable_stable_diffusion.py b/examples/community/composable_stable_diffusion.py
index a7c540ceb984..ec653bcdb4c6 100644
--- a/examples/community/composable_stable_diffusion.py
+++ b/examples/community/composable_stable_diffusion.py
@@ -398,7 +398,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/imagic_stable_diffusion.py b/examples/community/imagic_stable_diffusion.py
index 091d0fbf8d3a..a2561c919858 100644
--- a/examples/community/imagic_stable_diffusion.py
+++ b/examples/community/imagic_stable_diffusion.py
@@ -147,7 +147,7 @@ def train(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
diff --git a/examples/community/img2img_inpainting.py b/examples/community/img2img_inpainting.py
index 499230b1e2cd..7b9bd043d099 100644
--- a/examples/community/img2img_inpainting.py
+++ b/examples/community/img2img_inpainting.py
@@ -197,7 +197,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/interpolate_stable_diffusion.py b/examples/community/interpolate_stable_diffusion.py
index 5b96c14d6367..460bb464f3b1 100644
--- a/examples/community/interpolate_stable_diffusion.py
+++ b/examples/community/interpolate_stable_diffusion.py
@@ -173,7 +173,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/lpw_stable_diffusion.py b/examples/community/lpw_stable_diffusion.py
index cb017c0bbe29..ccb17a51e615 100644
--- a/examples/community/lpw_stable_diffusion.py
+++ b/examples/community/lpw_stable_diffusion.py
@@ -888,7 +888,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
@@ -1131,7 +1131,7 @@ def text2img(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/lpw_stable_diffusion_onnx.py b/examples/community/lpw_stable_diffusion_onnx.py
index 92effc193329..ab1462b81b39 100644
--- a/examples/community/lpw_stable_diffusion_onnx.py
+++ b/examples/community/lpw_stable_diffusion_onnx.py
@@ -721,7 +721,7 @@ def __call__(
             latents (`np.ndarray`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                 The max multiple length of prompt embeddings compared to the max output length of text encoder.
             output_type (`str`, *optional*, defaults to `"pil"`):
@@ -918,7 +918,7 @@ def text2img(
             latents (`np.ndarray`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                 The max multiple length of prompt embeddings compared to the max output length of text encoder.
             output_type (`str`, *optional*, defaults to `"pil"`):
diff --git a/examples/community/lpw_stable_diffusion_xl.py b/examples/community/lpw_stable_diffusion_xl.py
index 272c5d5652c5..ea67738ab74c 100644
--- a/examples/community/lpw_stable_diffusion_xl.py
+++ b/examples/community/lpw_stable_diffusion_xl.py
@@ -1519,7 +1519,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             ip_adapter_image: (`PipelineImageInput`, *optional*):
                 Optional image input to work with IP Adapters.
             prompt_embeds (`torch.Tensor`, *optional*):
diff --git a/examples/community/multilingual_stable_diffusion.py b/examples/community/multilingual_stable_diffusion.py
index afef4e9e9719..5e7453ed1201 100644
--- a/examples/community/multilingual_stable_diffusion.py
+++ b/examples/community/multilingual_stable_diffusion.py
@@ -187,7 +187,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/pipeline_controlnet_xl_kolors.py b/examples/community/pipeline_controlnet_xl_kolors.py
index dc90aacdbc6b..af5586990e2e 100644
--- a/examples/community/pipeline_controlnet_xl_kolors.py
+++ b/examples/community/pipeline_controlnet_xl_kolors.py
@@ -888,7 +888,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_controlnet_xl_kolors_img2img.py b/examples/community/pipeline_controlnet_xl_kolors_img2img.py
index 189d0312143f..c0831945ed8e 100644
--- a/examples/community/pipeline_controlnet_xl_kolors_img2img.py
+++ b/examples/community/pipeline_controlnet_xl_kolors_img2img.py
@@ -1066,7 +1066,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_controlnet_xl_kolors_inpaint.py b/examples/community/pipeline_controlnet_xl_kolors_inpaint.py
index 4b6123cc1f8b..db15d99ac3ea 100644
--- a/examples/community/pipeline_controlnet_xl_kolors_inpaint.py
+++ b/examples/community/pipeline_controlnet_xl_kolors_inpaint.py
@@ -1298,7 +1298,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/pipeline_demofusion_sdxl.py b/examples/community/pipeline_demofusion_sdxl.py
index 119b39cefe68..c9b57a6ece8c 100644
--- a/examples/community/pipeline_demofusion_sdxl.py
+++ b/examples/community/pipeline_demofusion_sdxl.py
@@ -724,7 +724,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py
index aa95d2ec719e..43ef55d32c3d 100644
--- a/examples/community/pipeline_faithdiff_stable_diffusion_xl.py
+++ b/examples/community/pipeline_faithdiff_stable_diffusion_xl.py
@@ -1906,7 +1906,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_flux_differential_img2img.py b/examples/community/pipeline_flux_differential_img2img.py
index 3677e73136f7..7d6358cb3258 100644
--- a/examples/community/pipeline_flux_differential_img2img.py
+++ b/examples/community/pipeline_flux_differential_img2img.py
@@ -730,7 +730,7 @@ def __call__(
                 1)`, or `(H, W)`.
             mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`):
                 `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask
-                latents tensor will ge generated by `mask_image`.
+                latents tensor will be generated by `mask_image`.
             height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                 The height in pixels of the generated image. This is set to 1024 by default for the best results.
             width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
@@ -769,7 +769,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_flux_kontext_multiple_images.py b/examples/community/pipeline_flux_kontext_multiple_images.py
index 7e4a9ed0fadc..ef0c643a405e 100644
--- a/examples/community/pipeline_flux_kontext_multiple_images.py
+++ b/examples/community/pipeline_flux_kontext_multiple_images.py
@@ -885,7 +885,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_flux_rf_inversion.py b/examples/community/pipeline_flux_rf_inversion.py
index 8f8b4817acf2..631d04b762d4 100644
--- a/examples/community/pipeline_flux_rf_inversion.py
+++ b/examples/community/pipeline_flux_rf_inversion.py
@@ -711,7 +711,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_flux_semantic_guidance.py b/examples/community/pipeline_flux_semantic_guidance.py
index b3d2b3a4b4e1..93bcd3af75e6 100644
--- a/examples/community/pipeline_flux_semantic_guidance.py
+++ b/examples/community/pipeline_flux_semantic_guidance.py
@@ -853,7 +853,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_flux_with_cfg.py b/examples/community/pipeline_flux_with_cfg.py
index 3916aff257f0..1b8dc9ecb85e 100644
--- a/examples/community/pipeline_flux_with_cfg.py
+++ b/examples/community/pipeline_flux_with_cfg.py
@@ -639,7 +639,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_kolors_differential_img2img.py b/examples/community/pipeline_kolors_differential_img2img.py
index d299c839815e..9491447409e2 100644
--- a/examples/community/pipeline_kolors_differential_img2img.py
+++ b/examples/community/pipeline_kolors_differential_img2img.py
@@ -904,7 +904,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_kolors_inpainting.py b/examples/community/pipeline_kolors_inpainting.py
index 3cab8ecac002..cce9f10ded3d 100644
--- a/examples/community/pipeline_kolors_inpainting.py
+++ b/examples/community/pipeline_kolors_inpainting.py
@@ -1246,7 +1246,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/pipeline_prompt2prompt.py b/examples/community/pipeline_prompt2prompt.py
index 8d94dc9248c1..065edc0cfbe8 100644
--- a/examples/community/pipeline_prompt2prompt.py
+++ b/examples/community/pipeline_prompt2prompt.py
@@ -611,7 +611,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/pipeline_sdxl_style_aligned.py b/examples/community/pipeline_sdxl_style_aligned.py
index 10438af365f9..ea168036c196 100644
--- a/examples/community/pipeline_sdxl_style_aligned.py
+++ b/examples/community/pipeline_sdxl_style_aligned.py
@@ -1480,7 +1480,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py
index 643386232bc3..693485d1758d 100644
--- a/examples/community/pipeline_stable_diffusion_3_differential_img2img.py
+++ b/examples/community/pipeline_stable_diffusion_3_differential_img2img.py
@@ -748,7 +748,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py b/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py
index d9cee800e8ad..6923db23a6d3 100644
--- a/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py
+++ b/examples/community/pipeline_stable_diffusion_3_instruct_pix2pix.py
@@ -945,7 +945,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py b/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py
index a881814c2a91..ab8064c6e378 100644
--- a/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py
+++ b/examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py
@@ -1786,7 +1786,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py
index 564a19e923d2..ccf1098c614c 100644
--- a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py
+++ b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py
@@ -973,7 +973,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
index c73433b20f88..38db19148d43 100644
--- a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
+++ b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
@@ -1329,7 +1329,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py b/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py
index 89388e10cb19..b9f00cb82d83 100644
--- a/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py
+++ b/examples/community/pipeline_stable_diffusion_xl_differential_img2img.py
@@ -1053,7 +1053,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stable_diffusion_xl_ipex.py b/examples/community/pipeline_stable_diffusion_xl_ipex.py
index aa2b24f3965a..eda6089f594f 100644
--- a/examples/community/pipeline_stable_diffusion_xl_ipex.py
+++ b/examples/community/pipeline_stable_diffusion_xl_ipex.py
@@ -832,7 +832,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stg_cogvideox.py b/examples/community/pipeline_stg_cogvideox.py
index bdb6aecc30c3..1c98ae0f6d8e 100644
--- a/examples/community/pipeline_stg_cogvideox.py
+++ b/examples/community/pipeline_stg_cogvideox.py
@@ -632,7 +632,7 @@ def __call__(
             latents (`torch.FloatTensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stg_ltx.py b/examples/community/pipeline_stg_ltx.py
index 70069a33f5d9..f7ccf99e96ae 100644
--- a/examples/community/pipeline_stg_ltx.py
+++ b/examples/community/pipeline_stg_ltx.py
@@ -620,7 +620,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stg_ltx_image2video.py b/examples/community/pipeline_stg_ltx_image2video.py
index c32805e1419f..3b3d2333805d 100644
--- a/examples/community/pipeline_stg_ltx_image2video.py
+++ b/examples/community/pipeline_stg_ltx_image2video.py
@@ -682,7 +682,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_stg_mochi.py b/examples/community/pipeline_stg_mochi.py
index dbe5d2525ad3..b6ab1b192c1e 100644
--- a/examples/community/pipeline_stg_mochi.py
+++ b/examples/community/pipeline_stg_mochi.py
@@ -603,7 +603,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/pipeline_zero1to3.py b/examples/community/pipeline_zero1to3.py
index 9e29566978e8..0db543b1697c 100644
--- a/examples/community/pipeline_zero1to3.py
+++ b/examples/community/pipeline_zero1to3.py
@@ -657,7 +657,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/rerender_a_video.py b/examples/community/rerender_a_video.py
index 78a15a03b099..133c23294395 100644
--- a/examples/community/rerender_a_video.py
+++ b/examples/community/rerender_a_video.py
@@ -656,7 +656,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/run_onnx_controlnet.py b/examples/community/run_onnx_controlnet.py
index f0ab2a2b9643..2221fc09dbde 100644
--- a/examples/community/run_onnx_controlnet.py
+++ b/examples/community/run_onnx_controlnet.py
@@ -591,7 +591,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/run_tensorrt_controlnet.py b/examples/community/run_tensorrt_controlnet.py
index e4f1abc83b0b..b9e71724c046 100644
--- a/examples/community/run_tensorrt_controlnet.py
+++ b/examples/community/run_tensorrt_controlnet.py
@@ -695,7 +695,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/sd_text2img_k_diffusion.py b/examples/community/sd_text2img_k_diffusion.py
index 4d5cea497f8c..ab6cf2d9cd3f 100755
--- a/examples/community/sd_text2img_k_diffusion.py
+++ b/examples/community/sd_text2img_k_diffusion.py
@@ -326,7 +326,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/seed_resize_stable_diffusion.py b/examples/community/seed_resize_stable_diffusion.py
index eafe7572aab5..3c823012c102 100644
--- a/examples/community/seed_resize_stable_diffusion.py
+++ b/examples/community/seed_resize_stable_diffusion.py
@@ -122,7 +122,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/stable_diffusion_comparison.py b/examples/community/stable_diffusion_comparison.py
index 22f3b3e0c385..36e7dba2de62 100644
--- a/examples/community/stable_diffusion_comparison.py
+++ b/examples/community/stable_diffusion_comparison.py
@@ -279,7 +279,7 @@ def _call_(
             latents (`torch.Tensor`, optional):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, optional, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/stable_diffusion_controlnet_img2img.py b/examples/community/stable_diffusion_controlnet_img2img.py
index 6d8038cfd4ae..877464454a61 100644
--- a/examples/community/stable_diffusion_controlnet_img2img.py
+++ b/examples/community/stable_diffusion_controlnet_img2img.py
@@ -670,7 +670,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/stable_diffusion_controlnet_inpaint.py b/examples/community/stable_diffusion_controlnet_inpaint.py
index fe7b808b6beb..175c47d01523 100644
--- a/examples/community/stable_diffusion_controlnet_inpaint.py
+++ b/examples/community/stable_diffusion_controlnet_inpaint.py
@@ -810,7 +810,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/stable_diffusion_controlnet_inpaint_img2img.py b/examples/community/stable_diffusion_controlnet_inpaint_img2img.py
index 2b5dc77fe5aa..51e7ac38dd54 100644
--- a/examples/community/stable_diffusion_controlnet_inpaint_img2img.py
+++ b/examples/community/stable_diffusion_controlnet_inpaint_img2img.py
@@ -804,7 +804,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/stable_diffusion_controlnet_reference.py b/examples/community/stable_diffusion_controlnet_reference.py
index e5dd249e0424..aa9ab1b24211 100644
--- a/examples/community/stable_diffusion_controlnet_reference.py
+++ b/examples/community/stable_diffusion_controlnet_reference.py
@@ -179,7 +179,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/stable_diffusion_ipex.py b/examples/community/stable_diffusion_ipex.py
index 7d1cd4f5d09e..18d5e8feaa43 100644
--- a/examples/community/stable_diffusion_ipex.py
+++ b/examples/community/stable_diffusion_ipex.py
@@ -615,7 +615,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/stable_diffusion_reference.py b/examples/community/stable_diffusion_reference.py
index 6f7dce982339..69fa0722cf8a 100644
--- a/examples/community/stable_diffusion_reference.py
+++ b/examples/community/stable_diffusion_reference.py
@@ -885,7 +885,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/stable_diffusion_repaint.py b/examples/community/stable_diffusion_repaint.py
index 94b9f8b01b51..9f6172f3b838 100644
--- a/examples/community/stable_diffusion_repaint.py
+++ b/examples/community/stable_diffusion_repaint.py
@@ -678,7 +678,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/stable_diffusion_xl_reference.py b/examples/community/stable_diffusion_xl_reference.py
index eb055574966d..11926a5d9ac9 100644
--- a/examples/community/stable_diffusion_xl_reference.py
+++ b/examples/community/stable_diffusion_xl_reference.py
@@ -380,7 +380,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/community/text_inpainting.py b/examples/community/text_inpainting.py
index f262cf2cac6d..2908388029dd 100644
--- a/examples/community/text_inpainting.py
+++ b/examples/community/text_inpainting.py
@@ -180,7 +180,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/community/tiled_upscaling.py b/examples/community/tiled_upscaling.py
index 7a5e77155cd0..56eb3e89b5d0 100644
--- a/examples/community/tiled_upscaling.py
+++ b/examples/community/tiled_upscaling.py
@@ -231,7 +231,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             tile_size (`int`, *optional*):
                 The size of the tiles. Too big can result in an OOM-error.
             tile_border (`int`, *optional*):
diff --git a/examples/community/wildcard_stable_diffusion.py b/examples/community/wildcard_stable_diffusion.py
index d40221e5b1cf..c750610ca34f 100644
--- a/examples/community/wildcard_stable_diffusion.py
+++ b/examples/community/wildcard_stable_diffusion.py
@@ -209,7 +209,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/examples/dreambooth/README_qwen.md b/examples/dreambooth/README_qwen.md index 68c546a25df9..0f0b640c8b5c 100644 --- a/examples/dreambooth/README_qwen.md +++ b/examples/dreambooth/README_qwen.md @@ -77,7 +77,7 @@ export MODEL_NAME="Qwen/Qwen-Image" export INSTANCE_DIR="dog" export OUTPUT_DIR="trained-qwenimage-lora" -accelerate launch train_dreambooth_lora_qwen_image.py \ +accelerate launch train_dreambooth_lora_qwenimage.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ diff --git a/examples/dreambooth/train_dreambooth_flux.py b/examples/dreambooth/train_dreambooth_flux.py index c24d16c6005a..b803babdc827 100644 --- a/examples/dreambooth/train_dreambooth_flux.py +++ b/examples/dreambooth/train_dreambooth_flux.py @@ -642,7 +642,6 @@ def parse_args(input_args=None): ], help="The image interpolation method to use for resizing images.", ) - parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1183,13 +1182,6 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) - if args.enable_npu_flash_attention: - if is_torch_npu_available(): - logger.info("npu flash attention enabled.") - transformer.set_attention_backend("_native_npu") - else: - raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") - # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 diff --git a/examples/dreambooth/train_dreambooth_lora_flux.py b/examples/dreambooth/train_dreambooth_lora_flux.py index 2353625c3878..a8a76097f3c3 100644 --- a/examples/dreambooth/train_dreambooth_lora_flux.py +++ b/examples/dreambooth/train_dreambooth_lora_flux.py @@ -80,7 +80,6 @@ is_wandb_available, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card -from diffusers.utils.import_utils import is_torch_npu_available from diffusers.utils.torch_utils import is_compiled_module @@ -687,7 +686,6 @@ def parse_args(input_args=None): ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) @@ -1215,13 +1213,6 @@ def main(args): text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) - if args.enable_npu_flash_attention: - if is_torch_npu_available(): - logger.info("npu flash attention enabled.") - transformer.set_attention_backend("_native_npu") - else: - raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") - # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
 weight_dtype = torch.float32
diff --git a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py
index ffeef7b4b34b..6aa165ed20b3 100644
--- a/examples/dreambooth/train_dreambooth_lora_flux_kontext.py
+++ b/examples/dreambooth/train_dreambooth_lora_flux_kontext.py
@@ -706,7 +706,6 @@ def parse_args(input_args=None):
         ),
     )
     parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-    parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU")
 
     if input_args is not None:
         args = parser.parse_args(input_args)
@@ -1355,13 +1354,6 @@ def main(args):
     text_encoder_one.requires_grad_(False)
     text_encoder_two.requires_grad_(False)
 
-    if args.enable_npu_flash_attention:
-        if is_torch_npu_available():
-            logger.info("npu flash attention enabled.")
-            transformer.set_attention_backend("_native_npu")
-        else:
-            raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ")
-
     # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision
     # as these weights are only used for inference, keeping weights in full precision is not required.
     weight_dtype = torch.float32
diff --git a/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py b/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py
index 89228983d4d8..148b2e7f3147 100644
--- a/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py
+++ b/examples/research_projects/pixart/pipeline_pixart_alpha_controlnet.py
@@ -860,7 +860,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/examples/research_projects/rdm/pipeline_rdm.py b/examples/research_projects/rdm/pipeline_rdm.py
index 9b696874c5d1..7e2095b7245c 100644
--- a/examples/research_projects/rdm/pipeline_rdm.py
+++ b/examples/research_projects/rdm/pipeline_rdm.py
@@ -202,7 +202,7 @@ def __call__(
             latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will ge generated by sampling using the supplied random `generator`.
+                tensor will be generated by sampling using the supplied random `generator`.
             prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/models/attention_flax.py b/src/diffusers/models/attention_flax.py index 1bde62e5c666..17e6f33df051 100644 --- a/src/diffusers/models/attention_flax.py +++ b/src/diffusers/models/attention_flax.py @@ -19,11 +19,6 @@ import jax import jax.numpy as jnp -from ..utils import logging - - -logger = logging.get_logger(__name__) - def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096): """Multi-head dot product attention with a limited number of queries.""" @@ -156,11 +151,6 @@ class FlaxAttention(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - inner_dim = self.dim_head * self.heads self.scale = self.dim_head**-0.5 @@ -287,11 +277,6 @@ class FlaxBasicTransformerBlock(nn.Module): split_head_dim: bool = False def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - # self attention (or cross_attention if only_cross_attention is True) self.attn1 = FlaxAttention( self.dim, @@ -380,11 +365,6 @@ class FlaxTransformer2DModel(nn.Module): split_head_dim: bool = False def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5) inner_dim = self.n_heads * self.d_head @@ -474,11 +454,6 @@ class FlaxFeedForward(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - # The second linear layer needs to be called # net_2 for now to match the index of the Sequential layer self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype) @@ -509,11 +484,6 @@ class FlaxGEGLU(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - inner_dim = self.dim * 4 self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.dropout) diff --git a/src/diffusers/models/controlnets/controlnet_flax.py b/src/diffusers/models/controlnets/controlnet_flax.py index f7a8b98fa2f0..4b2148666ebf 100644 --- a/src/diffusers/models/controlnets/controlnet_flax.py +++ b/src/diffusers/models/controlnets/controlnet_flax.py @@ -20,7 +20,7 @@ from flax.core.frozen_dict import FrozenDict from ...configuration_utils import ConfigMixin, flax_register_to_config -from ...utils import BaseOutput, logging +from ...utils import BaseOutput from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from ..modeling_flax_utils import FlaxModelMixin from ..unets.unet_2d_blocks_flax import ( @@ -30,9 +30,6 @@ ) -logger = logging.get_logger(__name__) - - @flax.struct.dataclass class FlaxControlNetOutput(BaseOutput): """ @@ -53,11 +50,6 @@ class FlaxControlNetConditioningEmbedding(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self) -> None: - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. 
We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - self.conv_in = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), @@ -192,11 +184,6 @@ def init_weights(self, rng: jax.Array) -> FrozenDict: return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"] def setup(self) -> None: - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 diff --git a/src/diffusers/models/embeddings_flax.py b/src/diffusers/models/embeddings_flax.py index 3790905e583c..1e7e84edeaeb 100644 --- a/src/diffusers/models/embeddings_flax.py +++ b/src/diffusers/models/embeddings_flax.py @@ -16,11 +16,6 @@ import flax.linen as nn import jax.numpy as jnp -from ..utils import logging - - -logger = logging.get_logger(__name__) - def get_sinusoidal_embeddings( timesteps: jnp.ndarray, @@ -81,11 +76,6 @@ class FlaxTimestepEmbedding(nn.Module): The data type for the embedding parameters. """ - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - time_embed_dim: int = 32 dtype: jnp.dtype = jnp.float32 @@ -114,11 +104,6 @@ class FlaxTimesteps(nn.Module): flip_sin_to_cos: bool = False freq_shift: float = 1 - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - @nn.compact def __call__(self, timesteps): return get_sinusoidal_embeddings( diff --git a/src/diffusers/models/modeling_flax_utils.py b/src/diffusers/models/modeling_flax_utils.py index 573828dc4b03..010b7377451c 100644 --- a/src/diffusers/models/modeling_flax_utils.py +++ b/src/diffusers/models/modeling_flax_utils.py @@ -290,10 +290,6 @@ def from_pretrained( You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` """ - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) diff --git a/src/diffusers/models/resnet_flax.py b/src/diffusers/models/resnet_flax.py index 9bedaa9a36b6..9c80932c5c5d 100644 --- a/src/diffusers/models/resnet_flax.py +++ b/src/diffusers/models/resnet_flax.py @@ -15,22 +15,12 @@ import jax import jax.numpy as jnp -from ..utils import logging - - -logger = logging.get_logger(__name__) - class FlaxUpsample2D(nn.Module): out_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - self.conv = nn.Conv( self.out_channels, kernel_size=(3, 3), @@ -55,11 +45,6 @@ class FlaxDownsample2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." 
- ) - self.conv = nn.Conv( self.out_channels, kernel_size=(3, 3), @@ -83,11 +68,6 @@ class FlaxResnetBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5) diff --git a/src/diffusers/models/transformers/transformer_flux.py b/src/diffusers/models/transformers/transformer_flux.py index 7ab371a1a18e..60c7eb1dbabe 100644 --- a/src/diffusers/models/transformers/transformer_flux.py +++ b/src/diffusers/models/transformers/transformer_flux.py @@ -22,7 +22,8 @@ from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin -from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.import_utils import is_torch_npu_available from ...utils.torch_utils import maybe_allow_in_graph from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn @@ -353,13 +354,25 @@ def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) + if is_torch_npu_available(): + from ..attention_processor import FluxAttnProcessor2_0_NPU + + deprecation_message = ( + "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. Attention processors " + "should be set explicitly using the `set_attn_processor` method." + ) + deprecate("npu_processor", "0.34.0", deprecation_message) + processor = FluxAttnProcessor2_0_NPU() + else: + processor = FluxAttnProcessor() + self.attn = FluxAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, - processor=FluxAttnProcessor(), + processor=processor, eps=1e-6, pre_only=True, ) diff --git a/src/diffusers/models/transformers/transformer_skyreels_v2.py b/src/diffusers/models/transformers/transformer_skyreels_v2.py index 358759164b9e..236fca690a90 100644 --- a/src/diffusers/models/transformers/transformer_skyreels_v2.py +++ b/src/diffusers/models/transformers/transformer_skyreels_v2.py @@ -1,4 +1,4 @@ -# Copyright 2025 The SkyReels Team, The Wan Team and The HuggingFace Team. All rights reserved. +# Copyright 2025 The SkyReels-V2 Team, The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
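Stepping back to the `transformer_flux.py` hunk above: it restores the NPU-specific attention processor as a deprecated default. In isolation, the selection logic looks roughly like the sketch below; the helper name and the `FluxAttnProcessor` import path are assumptions for illustration.

```python
# Sketch of the processor selection restored in transformer_flux.py. On NPU the
# old default still works but emits a deprecation, so processors should be set
# explicitly via `set_attn_processor` going forward.
from diffusers.utils import deprecate
from diffusers.utils.import_utils import is_torch_npu_available


def pick_attn_processor():  # hypothetical helper name
    if is_torch_npu_available():
        from diffusers.models.attention_processor import FluxAttnProcessor2_0_NPU

        deprecate(
            "npu_processor",
            "0.34.0",
            "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. "
            "Attention processors should be set explicitly using the `set_attn_processor` method.",
        )
        return FluxAttnProcessor2_0_NPU()

    # Import path assumed; the hunk instantiates FluxAttnProcessor() directly.
    from diffusers.models.transformers.transformer_flux import FluxAttnProcessor

    return FluxAttnProcessor()
```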
@@ -21,10 +21,9 @@ from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin -from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers -from ...utils.torch_utils import maybe_allow_in_graph -from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward -from ..attention_dispatch import dispatch_attention_fn +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers +from ..attention import FeedForward +from ..attention_processor import Attention from ..cache_utils import CacheMixin from ..embeddings import ( PixArtAlphaTextProjection, @@ -40,53 +39,20 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name -def _get_qkv_projections( - attn: "SkyReelsV2Attention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor -): - # encoder_hidden_states is only passed for cross-attention - if encoder_hidden_states is None: - encoder_hidden_states = hidden_states - - if attn.fused_projections: - if attn.cross_attention_dim_head is None: - # In self-attention layers, we can fuse the entire QKV projection into a single linear - query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) - else: - # In cross-attention layers, we can only fuse the KV projections into a single linear - query = attn.to_q(hidden_states) - key, value = attn.to_kv(encoder_hidden_states).chunk(2, dim=-1) - else: - query = attn.to_q(hidden_states) - key = attn.to_k(encoder_hidden_states) - value = attn.to_v(encoder_hidden_states) - return query, key, value - - -def _get_added_kv_projections(attn: "SkyReelsV2Attention", encoder_hidden_states_img: torch.Tensor): - if attn.fused_projections: - key_img, value_img = attn.to_added_kv(encoder_hidden_states_img).chunk(2, dim=-1) - else: - key_img = attn.add_k_proj(encoder_hidden_states_img) - value_img = attn.add_v_proj(encoder_hidden_states_img) - return key_img, value_img - - -class SkyReelsV2AttnProcessor: - _attention_backend = None - +class SkyReelsV2AttnProcessor2_0: def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( - "SkyReelsV2AttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0." + "SkyReelsV2AttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0." 
) def __call__( self, - attn: "SkyReelsV2Attention", + attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, - rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: encoder_hidden_states_img = None if attn.add_k_proj is not None: @@ -94,66 +60,58 @@ def __call__( image_context_length = encoder_hidden_states.shape[1] - 512 encoder_hidden_states_img = encoder_hidden_states[:, :image_context_length] encoder_hidden_states = encoder_hidden_states[:, image_context_length:] + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states - query, key, value = _get_qkv_projections(attn, hidden_states, encoder_hidden_states) + query = attn.to_q(hidden_states) + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) - query = attn.norm_q(query) - key = attn.norm_k(key) + if attn.norm_q is not None: + query = attn.norm_q(query) + if attn.norm_k is not None: + key = attn.norm_k(key) - query = query.unflatten(2, (attn.heads, -1)) - key = key.unflatten(2, (attn.heads, -1)) - value = value.unflatten(2, (attn.heads, -1)) + query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2) + key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2) + value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2) if rotary_emb is not None: - def apply_rotary_emb( - hidden_states: torch.Tensor, - freqs_cos: torch.Tensor, - freqs_sin: torch.Tensor, - ): - x1, x2 = hidden_states.unflatten(-1, (-1, 2)).unbind(-1) - cos = freqs_cos[..., 0::2] - sin = freqs_sin[..., 1::2] - out = torch.empty_like(hidden_states) - out[..., 0::2] = x1 * cos - x2 * sin - out[..., 1::2] = x1 * sin + x2 * cos - return out.type_as(hidden_states) - - query = apply_rotary_emb(query, *rotary_emb) - key = apply_rotary_emb(key, *rotary_emb) + def apply_rotary_emb(hidden_states: torch.Tensor, freqs: torch.Tensor): + x_rotated = torch.view_as_complex(hidden_states.to(torch.float32).unflatten(3, (-1, 2))) + x_out = torch.view_as_real(x_rotated * freqs).flatten(3, 4) + return x_out.type_as(hidden_states) + + query = apply_rotary_emb(query, rotary_emb) + key = apply_rotary_emb(key, rotary_emb) # I2V task hidden_states_img = None if encoder_hidden_states_img is not None: - key_img, value_img = _get_added_kv_projections(attn, encoder_hidden_states_img) + key_img = attn.add_k_proj(encoder_hidden_states_img) key_img = attn.norm_added_k(key_img) + value_img = attn.add_v_proj(encoder_hidden_states_img) + + key_img = key_img.unflatten(2, (attn.heads, -1)).transpose(1, 2) + value_img = value_img.unflatten(2, (attn.heads, -1)).transpose(1, 2) - key_img = key_img.unflatten(2, (attn.heads, -1)) - value_img = value_img.unflatten(2, (attn.heads, -1)) - - hidden_states_img = dispatch_attention_fn( - query, - key_img, - value_img, - attn_mask=None, - dropout_p=0.0, - is_causal=False, - backend=self._attention_backend, + hidden_states_img = F.scaled_dot_product_attention( + query, key_img, value_img, attn_mask=None, dropout_p=0.0, is_causal=False ) - hidden_states_img = hidden_states_img.flatten(2, 3) + hidden_states_img = hidden_states_img.transpose(1, 2).flatten(2, 3) hidden_states_img = hidden_states_img.type_as(query) - hidden_states = dispatch_attention_fn( + hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, - backend=self._attention_backend, ) - hidden_states = hidden_states.flatten(2, 3) 
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3) hidden_states = hidden_states.type_as(query) if hidden_states_img is not None: @@ -164,122 +122,7 @@ def apply_rotary_emb( return hidden_states -class SkyReelsV2AttnProcessor2_0: - def __new__(cls, *args, **kwargs): - deprecation_message = ( - "The SkyReelsV2AttnProcessor2_0 class is deprecated and will be removed in a future version. " - "Please use SkyReelsV2AttnProcessor instead. " - ) - deprecate("SkyReelsV2AttnProcessor2_0", "1.0.0", deprecation_message, standard_warn=False) - return SkyReelsV2AttnProcessor(*args, **kwargs) - - -class SkyReelsV2Attention(torch.nn.Module, AttentionModuleMixin): - _default_processor_cls = SkyReelsV2AttnProcessor - _available_processors = [SkyReelsV2AttnProcessor] - - def __init__( - self, - dim: int, - heads: int = 8, - dim_head: int = 64, - eps: float = 1e-5, - dropout: float = 0.0, - added_kv_proj_dim: Optional[int] = None, - cross_attention_dim_head: Optional[int] = None, - processor=None, - is_cross_attention=None, - ): - super().__init__() - - self.inner_dim = dim_head * heads - self.heads = heads - self.added_kv_proj_dim = added_kv_proj_dim - self.cross_attention_dim_head = cross_attention_dim_head - self.kv_inner_dim = self.inner_dim if cross_attention_dim_head is None else cross_attention_dim_head * heads - - self.to_q = torch.nn.Linear(dim, self.inner_dim, bias=True) - self.to_k = torch.nn.Linear(dim, self.kv_inner_dim, bias=True) - self.to_v = torch.nn.Linear(dim, self.kv_inner_dim, bias=True) - self.to_out = torch.nn.ModuleList( - [ - torch.nn.Linear(self.inner_dim, dim, bias=True), - torch.nn.Dropout(dropout), - ] - ) - self.norm_q = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True) - self.norm_k = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True) - - self.add_k_proj = self.add_v_proj = None - if added_kv_proj_dim is not None: - self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True) - self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True) - self.norm_added_k = torch.nn.RMSNorm(dim_head * heads, eps=eps) - - self.is_cross_attention = cross_attention_dim_head is not None - - self.set_processor(processor) - - def fuse_projections(self): - if getattr(self, "fused_projections", False): - return - - if self.cross_attention_dim_head is None: - concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) - concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) - out_features, in_features = concatenated_weights.shape - with torch.device("meta"): - self.to_qkv = nn.Linear(in_features, out_features, bias=True) - self.to_qkv.load_state_dict( - {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True - ) - else: - concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) - concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) - out_features, in_features = concatenated_weights.shape - with torch.device("meta"): - self.to_kv = nn.Linear(in_features, out_features, bias=True) - self.to_kv.load_state_dict( - {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True - ) - - if self.added_kv_proj_dim is not None: - concatenated_weights = torch.cat([self.add_k_proj.weight.data, self.add_v_proj.weight.data]) - concatenated_bias = torch.cat([self.add_k_proj.bias.data, self.add_v_proj.bias.data]) - out_features, in_features = 
concatenated_weights.shape - with torch.device("meta"): - self.to_added_kv = nn.Linear(in_features, out_features, bias=True) - self.to_added_kv.load_state_dict( - {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True - ) - - self.fused_projections = True - - @torch.no_grad() - def unfuse_projections(self): - if not getattr(self, "fused_projections", False): - return - - if hasattr(self, "to_qkv"): - delattr(self, "to_qkv") - if hasattr(self, "to_kv"): - delattr(self, "to_kv") - if hasattr(self, "to_added_kv"): - delattr(self, "to_added_kv") - - self.fused_projections = False - - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - **kwargs, - ) -> torch.Tensor: - return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, rotary_emb, **kwargs) - - +# Copied from diffusers.models.transformers.transformer_wan.WanImageEmbedding with WanImageEmbedding -> SkyReelsV2ImageEmbedding class SkyReelsV2ImageEmbedding(torch.nn.Module): def __init__(self, in_features: int, out_features: int, pos_embed_seq_len=None): super().__init__() @@ -370,11 +213,7 @@ def forward( class SkyReelsV2RotaryPosEmbed(nn.Module): def __init__( - self, - attention_head_dim: int, - patch_size: Tuple[int, int, int], - max_seq_len: int, - theta: float = 10000.0, + self, attention_head_dim: int, patch_size: Tuple[int, int, int], max_seq_len: int, theta: float = 10000.0 ): super().__init__() @@ -384,55 +223,37 @@ def __init__( h_dim = w_dim = 2 * (attention_head_dim // 6) t_dim = attention_head_dim - h_dim - w_dim - freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64 - - freqs_cos = [] - freqs_sin = [] + freqs = [] for dim in [t_dim, h_dim, w_dim]: - freq_cos, freq_sin = get_1d_rotary_pos_embed( - dim, - max_seq_len, - theta, - use_real=True, - repeat_interleave_real=True, - freqs_dtype=freqs_dtype, + freq = get_1d_rotary_pos_embed( + dim, max_seq_len, theta, use_real=False, repeat_interleave_real=False, freqs_dtype=torch.float32 ) - freqs_cos.append(freq_cos) - freqs_sin.append(freq_sin) - - self.register_buffer("freqs_cos", torch.cat(freqs_cos, dim=1), persistent=False) - self.register_buffer("freqs_sin", torch.cat(freqs_sin, dim=1), persistent=False) + freqs.append(freq) + self.freqs = torch.cat(freqs, dim=1) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.patch_size ppf, pph, ppw = num_frames // p_t, height // p_h, width // p_w - split_sizes = [ - self.attention_head_dim - 2 * (self.attention_head_dim // 3), - self.attention_head_dim // 3, - self.attention_head_dim // 3, - ] - - freqs_cos = self.freqs_cos.split(split_sizes, dim=1) - freqs_sin = self.freqs_sin.split(split_sizes, dim=1) - - freqs_cos_f = freqs_cos[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) - freqs_cos_h = freqs_cos[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) - freqs_cos_w = freqs_cos[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) - - freqs_sin_f = freqs_sin[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) - freqs_sin_h = freqs_sin[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) - freqs_sin_w = freqs_sin[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) - - freqs_cos = torch.cat([freqs_cos_f, freqs_cos_h, freqs_cos_w], dim=-1).reshape(1, ppf * pph * 
ppw, 1, -1) - freqs_sin = torch.cat([freqs_sin_f, freqs_sin_h, freqs_sin_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1) + freqs = self.freqs.to(hidden_states.device) + freqs = freqs.split_with_sizes( + [ + self.attention_head_dim // 2 - 2 * (self.attention_head_dim // 6), + self.attention_head_dim // 6, + self.attention_head_dim // 6, + ], + dim=1, + ) - return freqs_cos, freqs_sin + freqs_f = freqs[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) + freqs_h = freqs[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) + freqs_w = freqs[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) + freqs = torch.cat([freqs_f, freqs_h, freqs_w], dim=-1).reshape(1, 1, ppf * pph * ppw, -1) + return freqs -@maybe_allow_in_graph class SkyReelsV2TransformerBlock(nn.Module): def __init__( self, @@ -448,24 +269,33 @@ def __init__( # 1. Self-attention self.norm1 = FP32LayerNorm(dim, eps, elementwise_affine=False) - self.attn1 = SkyReelsV2Attention( - dim=dim, + self.attn1 = Attention( + query_dim=dim, heads=num_heads, + kv_heads=num_heads, dim_head=dim // num_heads, + qk_norm=qk_norm, eps=eps, - cross_attention_dim_head=None, - processor=SkyReelsV2AttnProcessor(), + bias=True, + cross_attention_dim=None, + out_bias=True, + processor=SkyReelsV2AttnProcessor2_0(), ) # 2. Cross-attention - self.attn2 = SkyReelsV2Attention( - dim=dim, + self.attn2 = Attention( + query_dim=dim, heads=num_heads, + kv_heads=num_heads, dim_head=dim // num_heads, + qk_norm=qk_norm, eps=eps, + bias=True, + cross_attention_dim=None, + out_bias=True, added_kv_proj_dim=added_kv_proj_dim, - cross_attention_dim_head=dim // num_heads, - processor=SkyReelsV2AttnProcessor(), + added_proj_bias=True, + processor=SkyReelsV2AttnProcessor2_0(), ) self.norm2 = FP32LayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity() @@ -491,15 +321,15 @@ def forward( # For 4D temb in Diffusion Forcing framework, we assume the shape is (b, 6, f * pp_h * pp_w, inner_dim) e = (self.scale_shift_table.unsqueeze(2) + temb.float()).chunk(6, dim=1) shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = [ei.squeeze(1) for ei in e] - # 1. Self-attention norm_hidden_states = (self.norm1(hidden_states.float()) * (1 + scale_msa) + shift_msa).type_as(hidden_states) - attn_output = self.attn1(norm_hidden_states, None, attention_mask, rotary_emb) + attn_output = self.attn1( + hidden_states=norm_hidden_states, rotary_emb=rotary_emb, attention_mask=attention_mask + ) hidden_states = (hidden_states.float() + attn_output * gate_msa).type_as(hidden_states) - # 2. Cross-attention norm_hidden_states = self.norm2(hidden_states.float()).type_as(hidden_states) - attn_output = self.attn2(norm_hidden_states, encoder_hidden_states, None, None) + attn_output = self.attn2(hidden_states=norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = hidden_states + attn_output # 3. Feed-forward @@ -508,13 +338,10 @@ def forward( ) ff_output = self.ffn(norm_hidden_states) hidden_states = (hidden_states.float() + ff_output.float() * c_gate_msa).type_as(hidden_states) - return hidden_states -class SkyReelsV2Transformer3DModel( - ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin -): +class SkyReelsV2Transformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): r""" A Transformer model for video-like data used in the Wan-based SkyReels-V2 model. 
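For reference, the two rotary-embedding formulations that the SkyReelsV2 hunks trade between (explicit cos/sin on interleaved pairs vs. complex multiplication) are numerically equivalent. A self-contained sketch; the shapes and the interleaved pairing are illustrative assumptions, not this file's exact tensor layout.

```python
import torch


def rope_complex(x: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
    # freqs: complex tensor e^{i*theta}, broadcastable to (..., seq, head_dim // 2)
    x_c = torch.view_as_complex(x.to(torch.float32).unflatten(-1, (-1, 2)))
    return torch.view_as_real(x_c * freqs).flatten(-2).type_as(x)


def rope_cos_sin(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # Treat consecutive feature pairs (x1, x2) as 2D points and rotate them.
    x1, x2 = x.unflatten(-1, (-1, 2)).unbind(-1)
    out = torch.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin
    out[..., 1::2] = x1 * sin + x2 * cos
    return out.type_as(x)


# (batch, heads, seq, head_dim) with interleaved pairs; both paths agree.
x = torch.randn(1, 8, 16, 64)
angles = torch.rand(16, 32)
freqs = torch.polar(torch.ones_like(angles), angles)
assert torch.allclose(rope_complex(x, freqs), rope_cos_sin(x, angles.cos(), angles.sin()), atol=1e-5)
```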
@@ -562,7 +389,6 @@ class SkyReelsV2Transformer3DModel( _no_split_modules = ["SkyReelsV2TransformerBlock"] _keep_in_fp32_modules = ["time_embedder", "scale_shift_table", "norm1", "norm2", "norm3"] _keys_to_ignore_on_load_unexpected = ["norm_added_q"] - _repeated_blocks = ["SkyReelsV2TransformerBlock"] @register_to_config def __init__( diff --git a/src/diffusers/models/unets/unet_2d_blocks_flax.py b/src/diffusers/models/unets/unet_2d_blocks_flax.py index 6e6005afdc31..abd025165ecf 100644 --- a/src/diffusers/models/unets/unet_2d_blocks_flax.py +++ b/src/diffusers/models/unets/unet_2d_blocks_flax.py @@ -15,14 +15,10 @@ import flax.linen as nn import jax.numpy as jnp -from ...utils import logging from ..attention_flax import FlaxTransformer2DModel from ..resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D -logger = logging.get_logger(__name__) - - class FlaxCrossAttnDownBlock2D(nn.Module): r""" Cross Attention 2D Downsizing block - original architecture from Unet transformers: @@ -64,11 +60,6 @@ class FlaxCrossAttnDownBlock2D(nn.Module): transformer_layers_per_block: int = 1 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - resnets = [] attentions = [] @@ -144,11 +135,6 @@ class FlaxDownBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - resnets = [] for i in range(self.num_layers): @@ -222,11 +208,6 @@ class FlaxCrossAttnUpBlock2D(nn.Module): transformer_layers_per_block: int = 1 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - resnets = [] attentions = [] @@ -307,11 +288,6 @@ class FlaxUpBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - resnets = [] for i in range(self.num_layers): @@ -380,11 +356,6 @@ class FlaxUNetMidBlock2DCrossAttn(nn.Module): transformer_layers_per_block: int = 1 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." 
- ) - # there is always at least one resnet resnets = [ FlaxResnetBlock2D( diff --git a/src/diffusers/models/unets/unet_2d_condition_flax.py b/src/diffusers/models/unets/unet_2d_condition_flax.py index 8d9a309afbcc..7c21ddb690ae 100644 --- a/src/diffusers/models/unets/unet_2d_condition_flax.py +++ b/src/diffusers/models/unets/unet_2d_condition_flax.py @@ -20,7 +20,7 @@ from flax.core.frozen_dict import FrozenDict from ...configuration_utils import ConfigMixin, flax_register_to_config -from ...utils import BaseOutput, logging +from ...utils import BaseOutput from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from ..modeling_flax_utils import FlaxModelMixin from .unet_2d_blocks_flax import ( @@ -32,9 +32,6 @@ ) -logger = logging.get_logger(__name__) - - @flax.struct.dataclass class FlaxUNet2DConditionOutput(BaseOutput): """ @@ -166,11 +163,6 @@ def init_weights(self, rng: jax.Array) -> FrozenDict: return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)["params"] def setup(self) -> None: - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 diff --git a/src/diffusers/models/vae_flax.py b/src/diffusers/models/vae_flax.py index 13653b90372a..93398a51eac7 100644 --- a/src/diffusers/models/vae_flax.py +++ b/src/diffusers/models/vae_flax.py @@ -25,13 +25,10 @@ from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config -from ..utils import BaseOutput, logging +from ..utils import BaseOutput from .modeling_flax_utils import FlaxModelMixin -logger = logging.get_logger(__name__) - - @flax.struct.dataclass class FlaxDecoderOutput(BaseOutput): """ @@ -76,10 +73,6 @@ class FlaxUpsample2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), @@ -114,11 +107,6 @@ class FlaxDownsample2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), @@ -161,11 +149,6 @@ class FlaxResnetBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) @@ -238,11 +221,6 @@ class FlaxAttentionBlock(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." 
- ) - self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1 dense = partial(nn.Dense, self.channels, dtype=self.dtype) @@ -324,11 +302,6 @@ class FlaxDownEncoderBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels @@ -386,11 +359,6 @@ class FlaxUpDecoderBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels @@ -445,11 +413,6 @@ class FlaxUNetMidBlock2D(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32) # there is always at least one resnet @@ -541,11 +504,6 @@ class FlaxEncoder(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - block_out_channels = self.block_out_channels # in self.conv_in = nn.Conv( @@ -658,11 +616,6 @@ class FlaxDecoder(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - block_out_channels = self.block_out_channels # z to block_in @@ -835,11 +788,6 @@ class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin): dtype: jnp.dtype = jnp.float32 def setup(self): - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - self.encoder = FlaxEncoder( in_channels=self.config.in_channels, out_channels=self.config.latent_channels, diff --git a/src/diffusers/modular_pipelines/modular_pipeline.py b/src/diffusers/modular_pipelines/modular_pipeline.py index c53fa81d5684..8a05cce209c5 100644 --- a/src/diffusers/modular_pipelines/modular_pipeline.py +++ b/src/diffusers/modular_pipelines/modular_pipeline.py @@ -128,15 +128,6 @@ def to_dict(self) -> Dict[str, Any]: """ return {**self.__dict__} - def __getattr__(self, name): - """ - Allow attribute access to intermediate values. If an attribute is not found in the object, look for it in the - intermediates dict. 
- """ - if name in self.intermediates: - return self.intermediates[name] - raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") - def __repr__(self): def format_value(v): if hasattr(v, "shape") and hasattr(v, "dtype"): @@ -647,7 +638,7 @@ def __call__(self, pipeline, state: PipelineState) -> PipelineState: break if block is None: - logger.info(f"skipping auto block: {self.__class__.__name__}") + logger.warning(f"skipping auto block: {self.__class__.__name__}") return pipeline, state try: @@ -1459,10 +1450,9 @@ def __init__( Args: blocks: `ModularPipelineBlocks` instance. If None, will attempt to load default blocks based on the pipeline class name. - pretrained_model_name_or_path: Path to a pretrained pipeline configuration. Can be None if the pipeline - does not require any additional loading config. If provided, will first try to load component specs - (only for from_pretrained components) and config values from `modular_model_index.json`, then - fallback to `model_index.json` for compatibility with standard non-modular repositories. + pretrained_model_name_or_path: Path to a pretrained pipeline configuration. If provided, + will load component specs (only for from_pretrained components) and config values from the saved + modular_model_index.json file. components_manager: Optional ComponentsManager for managing multiple component cross different pipelines and apply offloading strategies. @@ -1511,70 +1501,18 @@ def __init__( # update component_specs and config_specs from modular_repo if pretrained_model_name_or_path is not None: - cache_dir = kwargs.pop("cache_dir", None) - force_download = kwargs.pop("force_download", False) - proxies = kwargs.pop("proxies", None) - token = kwargs.pop("token", None) - local_files_only = kwargs.pop("local_files_only", False) - revision = kwargs.pop("revision", None) - - load_config_kwargs = { - "cache_dir": cache_dir, - "force_download": force_download, - "proxies": proxies, - "token": token, - "local_files_only": local_files_only, - "revision": revision, - } - # try to load modular_model_index.json - try: - config_dict = self.load_config(pretrained_model_name_or_path, **load_config_kwargs) - except EnvironmentError as e: - logger.debug(f"modular_model_index.json not found: {e}") - config_dict = None - - # update component_specs and config_specs based on modular_model_index.json - if config_dict is not None: - for name, value in config_dict.items(): - # all the components in modular_model_index.json are from_pretrained components - if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 3: - library, class_name, component_spec_dict = value - component_spec = self._dict_to_component_spec(name, component_spec_dict) - component_spec.default_creation_method = "from_pretrained" - self._component_specs[name] = component_spec - - elif name in self._config_specs: - self._config_specs[name].default = value - - # if modular_model_index.json is not found, try to load model_index.json - else: - logger.debug(" loading config from model_index.json") - try: - from diffusers import DiffusionPipeline - - config_dict = DiffusionPipeline.load_config(pretrained_model_name_or_path, **load_config_kwargs) - except EnvironmentError as e: - logger.debug(f" model_index.json not found in the repo: {e}") - config_dict = None - - # update component_specs and config_specs based on model_index.json - if config_dict is not None: - for name, value in config_dict.items(): - if name in self._component_specs and 
isinstance(value, (tuple, list)) and len(value) == 2: - library, class_name = value - component_spec_dict = { - "repo": pretrained_model_name_or_path, - "subfolder": name, - "type_hint": (library, class_name), - } - component_spec = self._dict_to_component_spec(name, component_spec_dict) - component_spec.default_creation_method = "from_pretrained" - self._component_specs[name] = component_spec - elif name in self._config_specs: - self._config_specs[name].default = value + config_dict = self.load_config(pretrained_model_name_or_path, **kwargs) - if len(kwargs) > 0: - logger.warning(f"Unexpected input '{kwargs.keys()}' provided. This input will be ignored.") + for name, value in config_dict.items(): + # all the components in modular_model_index.json are from_pretrained components + if name in self._component_specs and isinstance(value, (tuple, list)) and len(value) == 3: + library, class_name, component_spec_dict = value + component_spec = self._dict_to_component_spec(name, component_spec_dict) + component_spec.default_creation_method = "from_pretrained" + self._component_specs[name] = component_spec + + elif name in self._config_specs: + self._config_specs[name].default = value register_components_dict = {} for name, component_spec in self._component_specs.items(): @@ -1632,10 +1570,8 @@ def from_pretrained( Args: pretrained_model_name_or_path (`str` or `os.PathLike`, optional): - Path to a pretrained pipeline configuration. It will first try to load config from - `modular_model_index.json`, then fallback to `model_index.json` for compatibility with standard - non-modular repositories. If the repo does not contain any pipeline config, it will be set to None - during initialization. + Path to a pretrained pipeline configuration. If provided, will load component specs (only for + from_pretrained components) and config values from the modular_model_index.json file. 
trust_remote_code (`bool`, optional): Whether to trust remote code when loading the pipeline, need to be set to True if you want to create pipeline blocks based on the custom code in `pretrained_model_name_or_path` @@ -1671,35 +1607,11 @@ def from_pretrained( } try: - # try to load modular_model_index.json config_dict = cls.load_config(pretrained_model_name_or_path, **load_config_kwargs) - except EnvironmentError as e: - logger.debug(f" modular_model_index.json not found in the repo: {e}") - config_dict = None - - if config_dict is not None: pipeline_class = _get_pipeline_class(cls, config=config_dict) - else: - try: - logger.debug(" try to load model_index.json") - from diffusers import DiffusionPipeline - from diffusers.pipelines.auto_pipeline import _get_model - - config_dict = DiffusionPipeline.load_config(pretrained_model_name_or_path, **load_config_kwargs) - except EnvironmentError as e: - logger.debug(f" model_index.json not found in the repo: {e}") - - if config_dict is not None: - logger.debug(" try to determine the modular pipeline class from model_index.json") - standard_pipeline_class = _get_pipeline_class(cls, config=config_dict) - model_name = _get_model(standard_pipeline_class.__name__) - pipeline_class_name = MODULAR_PIPELINE_MAPPING.get(model_name, ModularPipeline.__name__) - diffusers_module = importlib.import_module("diffusers") - pipeline_class = getattr(diffusers_module, pipeline_class_name) - else: - # there is no config for modular pipeline, assuming that the pipeline block does not need any from_pretrained components - pipeline_class = cls - pretrained_model_name_or_path = None + except EnvironmentError: + pipeline_class = cls + pretrained_model_name_or_path = None pipeline = pipeline_class( blocks=blocks, @@ -2037,31 +1949,17 @@ def update_components(self, **kwargs): for name, component in passed_components.items(): current_component_spec = self._component_specs[name] - # log if type changed + # warn if type changed if current_component_spec.type_hint is not None and not isinstance( component, current_component_spec.type_hint ): - logger.info( + logger.warning( f"ModularPipeline.update_components: adding {name} with new type: {component.__class__.__name__}, previous type: {current_component_spec.type_hint.__name__}" ) # update _component_specs based on the new component - if component is None: - new_component_spec = current_component_spec - if hasattr(self, name) and getattr(self, name) is not None: - logger.warning(f"ModularPipeline.update_components: setting {name} to None (spec unchanged)") - elif current_component_spec.default_creation_method == "from_pretrained" and not ( - hasattr(component, "_diffusers_load_id") and component._diffusers_load_id is not None - ): - logger.warning( - f"ModularPipeline.update_components: {name} has no valid _diffusers_load_id. " - f"This will result in empty loading spec, use ComponentSpec.load() for proper specs" - ) - new_component_spec = ComponentSpec(name=name, type_hint=type(component)) - else: - new_component_spec = ComponentSpec.from_component(name, component) - + new_component_spec = ComponentSpec.from_component(name, component) if new_component_spec.default_creation_method != current_component_spec.default_creation_method: - logger.info( + logger.warning( f"ModularPipeline.update_components: changing the default_creation_method of {name} from {current_component_spec.default_creation_method} to {new_component_spec.default_creation_method}." 
) @@ -2082,7 +1980,7 @@ def update_components(self, **kwargs): if current_component_spec.type_hint is not None and not isinstance( created_components[name], current_component_spec.type_hint ): - logger.info( + logger.warning( f"ModularPipeline.update_components: adding {name} with new type: {created_components[name].__class__.__name__}, previous type: {current_component_spec.type_hint.__name__}" ) # update _component_specs based on the user passed component_spec diff --git a/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py b/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py index fefa622f1a61..fbe0d22a52f9 100644 --- a/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +++ b/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py @@ -22,7 +22,7 @@ from ...guiders import ClassifierFreeGuidance from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, ControlNetModel, ControlNetUnionModel, UNet2DConditionModel -from ...models.controlnets.multicontrolnet import MultiControlNetModel +from ...pipelines.controlnet.multicontrolnet import MultiControlNetModel from ...schedulers import EulerDiscreteScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor, unwrap_module diff --git a/src/diffusers/pipelines/allegro/pipeline_allegro.py b/src/diffusers/pipelines/allegro/pipeline_allegro.py index 2c9548706ecb..0993c8b912b0 100644 --- a/src/diffusers/pipelines/allegro/pipeline_allegro.py +++ b/src/diffusers/pipelines/allegro/pipeline_allegro.py @@ -760,7 +760,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py index 56d319027595..260669ddaf51 100644 --- a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py @@ -971,7 +971,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
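For readers following the `ModularPipeline.__init__` hunk earlier in this file: entries in `modular_model_index.json` are 3-tuples of `(library, class_name, loading_spec)`, while plain values override config defaults. A small sketch of that mapping; the example entry is invented for illustration, not taken from a real repo.

```python
# Sketch of how modular_model_index.json entries map onto component specs vs.
# plain config values, as in the restored __init__ loop above.
config_dict = {
    "unet": ("diffusers", "UNet2DConditionModel", {"repo": "some/repo", "subfolder": "unet"}),
    "requires_safety_checker": False,
}

for name, value in config_dict.items():
    # Components are stored as 3-tuples: (library, class_name, loading spec).
    if isinstance(value, (tuple, list)) and len(value) == 3:
        library, class_name, component_spec_dict = value
        print(f"component {name}: {library}.{class_name} <- {component_spec_dict}")
    else:
        # Everything else is a plain config value whose default gets overridden.
        print(f"config {name} = {value}")
```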
diff --git a/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py b/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py index 6251ca443533..7ff9925c452d 100644 --- a/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py +++ b/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py @@ -497,7 +497,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py index 8cd463c9709f..439dc511a0c9 100644 --- a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +++ b/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py @@ -228,7 +228,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by random sampling. + tensor will be generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/bria/pipeline_bria.py b/src/diffusers/pipelines/bria/pipeline_bria.py index ebddfb0c0eee..39ed484793d5 100644 --- a/src/diffusers/pipelines/bria/pipeline_bria.py +++ b/src/diffusers/pipelines/bria/pipeline_bria.py @@ -506,7 +506,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index a3dd1422b876..3a34ec2a4218 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -676,7 +676,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py index 233f4c43a1c2..e169db4a4d3e 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py @@ -744,7 +744,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py index 4ac33b24bbe1..3c5994172c79 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py @@ -571,7 +571,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py index c1335839f848..cf6ccebc476d 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py @@ -616,7 +616,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. control_video_latents (`torch.Tensor`, *optional*): Pre-generated control latents, sampled from a Gaussian distribution, to be used as inputs for controlled video generation. If not provided, `control_video` must be provided.
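All of these `latents` docstrings describe the same pattern: sample the initial noise once, then reuse it so that only the prompt changes between runs. A minimal sketch; the model id and latent shape are illustrative, and it assumes a UNet-based pipeline.

```python
# Minimal sketch of the "reuse pre-generated latents" pattern the docstrings
# describe: fixed noise, varying prompt.
import torch
from diffusers import DiffusionPipeline

# Model id is an illustrative assumption.
pipe = DiffusionPipeline.from_pretrained("some/model-id", torch_dtype=torch.float16).to("cuda")

# Shape assumes a UNet pipeline producing 512x512 images (latents are 64x64).
shape = (1, pipe.unet.config.in_channels, 64, 64)
generator = torch.Generator("cuda").manual_seed(0)
latents = torch.randn(shape, generator=generator, device="cuda", dtype=torch.float16)

# Same starting noise, different prompts -> comparable compositions.
image_a = pipe("a photo of a dog", latents=latents.clone()).images[0]
image_b = pipe("a photo of a cat", latents=latents.clone()).images[0]
```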
diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py index 225240927fad..d1f02ca9c95e 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py @@ -671,7 +671,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py index 897dc6d1b70a..230c8ca296ba 100644 --- a/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +++ b/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py @@ -641,7 +641,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py index 304a5c5ad00b..f2f852c213ad 100644 --- a/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py +++ b/src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py @@ -466,7 +466,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview4/pipeline_cogview4.py b/src/diffusers/pipelines/cogview4/pipeline_cogview4.py index 22510f5d9d50..d8374b694f0e 100644 --- a/src/diffusers/pipelines/cogview4/pipeline_cogview4.py +++ b/src/diffusers/pipelines/cogview4/pipeline_cogview4.py @@ -466,7 +466,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts.
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py b/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py index e26b7ba415de..ac8d786f04f7 100644 --- a/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py +++ b/src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py @@ -499,7 +499,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/consisid/pipeline_consisid.py b/src/diffusers/pipelines/consisid/pipeline_consisid.py index 3e6c149d7f80..644bd811f6c7 100644 --- a/src/diffusers/pipelines/consisid/pipeline_consisid.py +++ b/src/diffusers/pipelines/consisid/pipeline_consisid.py @@ -733,7 +733,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py index c2ae408778b3..598e3b5b6d16 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py @@ -279,7 +279,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by random sampling. + tensor will be generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py index 397ab15715c2..4aa2a62a53ac 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py @@ -1326,7 +1326,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py index 4d4845c5a0a3..526e1ffcb2cc 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py @@ -1197,7 +1197,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py index fb58b222112a..7fa59395a8f1 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py @@ -1310,7 +1310,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py index 8fedb6d8609a..65e2fe661797 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py @@ -1185,7 +1185,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation.
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py index c763411ab5f7..e31e3a017872 100644 --- a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +++ b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py @@ -918,7 +918,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py index c33cf979c6d8..000e080d3aea 100644 --- a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +++ b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py @@ -973,7 +973,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index d000d87e6a7b..f9034a58441c 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -880,7 +880,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. @@ -1151,7 +1151,7 @@ def invert( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control.py b/src/diffusers/pipelines/flux/pipeline_flux_control.py index cc9ebb4754f7..51d6ecbe3171 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control.py @@ -674,7 +674,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py b/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py index 262345c75afc..c61d46daefa2 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py @@ -712,7 +712,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py index 5acc5080f56d..3de636361bc3 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py @@ -838,7 +838,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -870,7 +870,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py index 507ec687347c..a39b9c9ce25c 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py @@ -764,7 +764,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_fill.py b/src/diffusers/pipelines/flux/pipeline_flux_fill.py index 956f6fb10652..d50db407a87d 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_fill.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_fill.py @@ -775,7 +775,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -807,7 +807,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py index 4a9f2bad6a34..08e2f1277844 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_img2img.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_img2img.py @@ -787,7 +787,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py index 3bfe82cf4382..049414669390 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py @@ -834,7 +834,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -873,7 +873,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py index 87011299c425..ce2941f3ddf4 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext.py @@ -808,7 +808,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
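The `mask_image_latent` wording fixed in the Fill and Inpaint hunks describes a fallback: when no pre-encoded mask latents are supplied, the pipeline derives them from `mask_image`. A minimal sketch of that common path, assuming the public `FluxInpaintPipeline` API with the FLUX.1-dev checkpoint and hypothetical input URLs:

```py
import torch
from diffusers import FluxInpaintPipeline
from diffusers.utils import load_image

pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Hypothetical inputs: the source image and a white-on-black mask of the
# region to repaint.
image = load_image("https://example.com/source.png")
mask_image = load_image("https://example.com/mask.png")

# No mask_image_latent is passed, so the mask latents tensor is generated
# from `mask_image`, which is the fallback the corrected docstring describes.
result = pipe(
    prompt="a small wooden cabin in a clearing",
    image=image,
    mask_image=mask_image,
    strength=0.85,
).images[0]
```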
diff --git a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py index 3cdb8caea2ff..56a5e934a4e3 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py @@ -1029,7 +1029,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py index bf36ca2fa3e2..695f54f3d9db 100644 --- a/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py +++ b/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py @@ -789,7 +789,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py index 92f612f54116..89fea8933752 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -291,7 +291,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py index 7286bcbee17b..90d4042ae2a1 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -271,7 +271,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -502,7 +502,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -742,7 +742,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index cde0b8fd0a9d..5645d2a56edd 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -469,7 +469,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py index 10ea8005c90d..8781d706edf5 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -212,7 +212,7 @@ def interpolate( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). @@ -437,7 +437,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation.
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index 429253e99898..3ecc0ebd5b25 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -175,7 +175,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py index fc2083247bb0..e0b88b41e8c5 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -262,7 +262,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -512,7 +512,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). @@ -749,7 +749,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image.
Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py index c5faae82796b..b9f98f5458e2 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -211,7 +211,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will be generated by sampling using the supplied random `generator`. + tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index a61673293e1f..22171849bbf6 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -356,7 +356,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will be generated by sampling using the supplied random `generator`. + tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index 0e7e16f9dd5f..68954c2dc886 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -171,7 +171,7 @@ def interpolate( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will be generated by sampling using the supplied random `generator`. + tensor will ge generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). @@ -412,7 +412,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will be generated by sampling using the supplied random `generator`. + tensor will ge generated by sampling using the supplied random `generator`. 
guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py index 1a7198b9683a..13ea2ad6af63 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -195,7 +195,7 @@ def interpolate( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. negative_prior_prompt (`str`, *optional*): The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). diff --git a/src/diffusers/pipelines/kolors/pipeline_kolors.py b/src/diffusers/pipelines/kolors/pipeline_kolors.py index 948f73ed91eb..1fa9f6ce1d43 100644 --- a/src/diffusers/pipelines/kolors/pipeline_kolors.py +++ b/src/diffusers/pipelines/kolors/pipeline_kolors.py @@ -749,7 +749,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py b/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py index 67d49b9a8c5e..e3cf4f227624 100644 --- a/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py +++ b/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py @@ -900,7 +900,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/latte/pipeline_latte.py b/src/diffusers/pipelines/latte/pipeline_latte.py index 4d42a7049ec9..0e60d5c7acbe 100644 --- a/src/diffusers/pipelines/latte/pipeline_latte.py +++ b/src/diffusers/pipelines/latte/pipeline_latte.py @@ -679,7 +679,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation.
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx.py b/src/diffusers/pipelines/ltx/pipeline_ltx.py index bd23e657c408..77ba75170037 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx.py @@ -601,7 +601,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py b/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py index 537588f67c95..217478f418ed 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py @@ -938,7 +938,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py index 694378b4f040..8793d81377cc 100644 --- a/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py +++ b/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py @@ -665,7 +665,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/lumina/pipeline_lumina.py b/src/diffusers/pipelines/lumina/pipeline_lumina.py index b59c265646cd..2067444fa0df 100644 --- a/src/diffusers/pipelines/lumina/pipeline_lumina.py +++ b/src/diffusers/pipelines/lumina/pipeline_lumina.py @@ -697,7 +697,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py index c4df7ba1c342..0fa0fe97734c 100644 --- a/src/diffusers/pipelines/lumina2/pipeline_lumina2.py +++ b/src/diffusers/pipelines/lumina2/pipeline_lumina2.py @@ -564,7 +564,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/mochi/pipeline_mochi.py b/src/diffusers/pipelines/mochi/pipeline_mochi.py index 5581529b2337..3c0f908296df 100644 --- a/src/diffusers/pipelines/mochi/pipeline_mochi.py +++ b/src/diffusers/pipelines/mochi/pipeline_mochi.py @@ -534,7 +534,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py index f5a535b2dabd..1254b6725fef 100644 --- a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py +++ b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py @@ -366,7 +366,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py index a6df1b22c8b9..913a647fae3e 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py @@ -1199,7 +1199,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_kolors.py b/src/diffusers/pipelines/pag/pipeline_pag_kolors.py index 1368358db6ba..ed8e33e2ba8b 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_kolors.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_kolors.py @@ -769,7 +769,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py b/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py index 9031877b5b8d..d9d6d14a38d9 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py @@ -644,7 +644,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sana.py b/src/diffusers/pipelines/pag/pipeline_pag_sana.py index 5857eeeb0443..8dbae13a3f16 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sana.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sana.py @@ -703,7 +703,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation.
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py index acb4e52340a6..96796f53b0bc 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py @@ -761,7 +761,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py index e1819a79fb30..202120dc2c2b 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py @@ -822,7 +822,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py index 6b62ddcc7ca5..450468413380 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py @@ -948,7 +948,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py index b6422b23648c..8c355a5fb129 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py @@ -1111,7 +1111,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py index 2e12a4a97fbe..7d42d1876a82 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py @@ -1251,7 +1251,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. diff --git a/src/diffusers/pipelines/pipeline_flax_utils.py b/src/diffusers/pipelines/pipeline_flax_utils.py index f69968022ed7..ea2c0763d93a 100644 --- a/src/diffusers/pipelines/pipeline_flax_utils.py +++ b/src/diffusers/pipelines/pipeline_flax_utils.py @@ -312,11 +312,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P >>> dpm_params["scheduler"] = dpmpp_state ``` """ - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) - cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 023feae4dd27..d231989973e4 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -1709,36 +1709,6 @@ def _get_signature_types(cls): logger.warning(f"cannot get type annotation for Parameter {k} of {cls}.") return signature_types - @property - def parameters(self) -> Dict[str, Any]: - r""" - The `self.parameters` property can be useful to run different pipelines with the same weights and - configurations without reallocating additional memory. - - Returns (`dict`): - A dictionary containing all the optional parameters needed to initialize the pipeline. - - Examples: - - ```py - >>> from diffusers import ( - ... StableDiffusionPipeline, - ... StableDiffusionImg2ImgPipeline, - ... StableDiffusionInpaintPipeline, - ... 
) - - >>> text2img = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") - >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components, **text2img.parameters) - >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components, **text2img.parameters) - ``` - """ - expected_modules, optional_parameters = self._get_signature_keys(self) - pipeline_parameters = { - k: self.config[k] for k in self.config.keys() if not k.startswith("_") and k in optional_parameters - } - - return pipeline_parameters - @property def components(self) -> Dict[str, Any]: r""" diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py index 1d718a4852a4..bd69746be38c 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -755,7 +755,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py index bb169ac5c443..c14036cf94f3 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py @@ -700,7 +700,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py index 807910dfb1d6..8a2ee7b88e94 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py @@ -435,7 +435,7 @@ def __call__( width: Optional[int] = None, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: Optional[float] = None, + guidance_scale: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -462,12 +462,7 @@ def __call__( `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). true_cfg_scale (`float`, *optional*, defaults to 1.0): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598).
`true_cfg_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by - setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to - generate images that are closely linked to the text `prompt`, usually at the expense of lower image - quality. + When greater than 1.0 and a `negative_prompt` is provided, enables true classifier-free guidance. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -479,16 +474,17 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to None): - A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance - where the guidance scale is applied during inference through noise prediction rescaling, guidance - distilled models take the guidance scale directly as an input parameter during forward pass. Guidance - scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images - that are closely linked to the text `prompt`, usually at the expense of lower image quality. This - parameter in the pipeline is there to support future guidance-distilled models when they come up. It is - ignored when not using guidance distilled models. To enable traditional classifier-free guidance, - please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should - enable classifier-free guidance computations). + guidance_scale (`float`, *optional*, defaults to 1.0): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + + This parameter exists to support future guidance-distilled models; passing `guidance_scale` is + ineffective unless the loaded transformer is guidance-distilled. To enable classifier-free guidance, + please pass `true_cfg_scale > 1.0` and a `negative_prompt` (even an empty negative prompt like " " + enables classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -568,16 +564,6 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) - - if true_cfg_scale > 1 and not has_neg_prompt: - logger.warning( - f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided."
- ) - elif true_cfg_scale <= 1 and has_neg_prompt: - logger.warning( - " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" - ) - do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -632,17 +618,10 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds and guidance_scale is None: - raise ValueError("guidance_scale is required for guidance-distilled model.") - elif self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - elif not self.transformer.config.guidance_embeds and guidance_scale is not None: - logger.warning( - f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." - ) - guidance = None - elif not self.transformer.config.guidance_embeds and guidance_scale is None: + else: guidance = None if self.attention_kwargs is None:
diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py index 322b1d9d3a08..6b383fa173bb 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py @@ -535,7 +535,7 @@ def __call__( width: Optional[int] = None, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: Optional[float] = None, + guidance_scale: float = 1.0, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, control_image: PipelineImageInput = None, @@ -566,12 +566,7 @@ def __call__( `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). true_cfg_scale (`float`, *optional*, defaults to 1.0): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by - setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to - generate images that are closely linked to the text `prompt`, usually at the expense of lower image - quality. + When greater than 1.0 and a `negative_prompt` is provided, enables true classifier-free guidance. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -583,16 +578,12 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to None): - A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance - where the guidance scale is applied during inference through noise prediction rescaling, guidance - distilled models take the guidance scale directly as an input parameter during forward pass.
Guidance - scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images - that are closely linked to the text `prompt`, usually at the expense of lower image quality. This - parameter in the pipeline is there to support future guidance-distilled models when they come up. It is - ignored when not using guidance distilled models. To enable traditional classifier-free guidance, - please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should - enable classifier-free guidance computations). + guidance_scale (`float`, *optional*, defaults to 1.0): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -683,16 +674,6 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) - - if true_cfg_scale > 1 and not has_neg_prompt: - logger.warning( - f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." - ) - elif true_cfg_scale <= 1 and has_neg_prompt: - logger.warning( - " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" - ) - do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -841,17 +822,10 @@ def __call__( controlnet_keep.append(keeps[0] if isinstance(self.controlnet, QwenImageControlNetModel) else keeps) # handle guidance - if self.transformer.config.guidance_embeds and guidance_scale is None: - raise ValueError("guidance_scale is required for guidance-distilled model.") - elif self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - elif not self.transformer.config.guidance_embeds and guidance_scale is not None: - logger.warning( - f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." - ) - guidance = None - elif not self.transformer.config.guidance_embeds and guidance_scale is None: + else: guidance = None if self.attention_kwargs is None:
diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py index ceb5492fab56..45af11fc3950 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py @@ -532,7 +532,7 @@ def __call__( width: Optional[int] = None, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: Optional[float] = None, + guidance_scale: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -559,12 +559,7 @@ def __call__( `negative_prompt_embeds` instead.
Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). true_cfg_scale (`float`, *optional*, defaults to 1.0): - true_cfg_scale (`float`, *optional*, defaults to 1.0): Guidance scale as defined in [Classifier-Free - Diffusion Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of - equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is - enabled by setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale - encourages to generate images that are closely linked to the text `prompt`, usually at the expense of - lower image quality. + When greater than 1.0 and a `negative_prompt` is provided, enables true classifier-free guidance. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -576,16 +571,17 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to None): - A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance - where the guidance scale is applied during inference through noise prediction rescaling, guidance - distilled models take the guidance scale directly as an input parameter during forward pass. Guidance - scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images - that are closely linked to the text `prompt`, usually at the expense of lower image quality. This - parameter in the pipeline is there to support future guidance-distilled models when they come up. It is - ignored when not using guidance distilled models. To enable traditional classifier-free guidance, - please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should - enable classifier-free guidance computations). + guidance_scale (`float`, *optional*, defaults to 1.0): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + + This parameter exists to support future guidance-distilled models; passing `guidance_scale` is + ineffective unless the loaded transformer is guidance-distilled. To enable classifier-free guidance, + please pass `true_cfg_scale > 1.0` and a `negative_prompt` (even an empty negative prompt like " " + enables classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -676,16 +672,6 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) - - if true_cfg_scale > 1 and not has_neg_prompt: - logger.warning( - f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." - ) - elif true_cfg_scale <= 1 and has_neg_prompt: - logger.warning( - " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" - ) - do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( image=prompt_image, @@ -748,17 +734,10 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds and guidance_scale is None: - raise ValueError("guidance_scale is required for guidance-distilled model.") - elif self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - elif not self.transformer.config.guidance_embeds and guidance_scale is not None: - logger.warning( - f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." - ) - guidance = None - elif not self.transformer.config.guidance_embeds and guidance_scale is None: + else: guidance = None if self.attention_kwargs is None:
diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py index 8040852e53b4..43cbac78e156 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py @@ -511,7 +511,7 @@ def __call__( strength: float = 0.6, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: Optional[float] = None, + guidance_scale: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -544,12 +544,7 @@ def __call__( list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. true_cfg_scale (`float`, *optional*, defaults to 1.0): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by - setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to - generate images that are closely linked to the text `prompt`, usually at the expense of lower image - quality. + When greater than 1.0 and a `negative_prompt` is provided, enables true classifier-free guidance. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -567,16 +562,17 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method.
If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to None): - A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance - where the guidance scale is applied during inference through noise prediction rescaling, guidance - distilled models take the guidance scale directly as an input parameter during forward pass. Guidance - scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images - that are closely linked to the text `prompt`, usually at the expense of lower image quality. This - parameter in the pipeline is there to support future guidance-distilled models when they come up. It is - ignored when not using guidance distilled models. To enable traditional classifier-free guidance, - please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should - enable classifier-free guidance computations). + guidance_scale (`float`, *optional*, defaults to 1.0): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + + This parameter exists to support future guidance-distilled models; passing `guidance_scale` is + ineffective unless the loaded transformer is guidance-distilled. To enable classifier-free guidance, + please pass `true_cfg_scale > 1.0` and a `negative_prompt` (even an empty negative prompt like " " + enables classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -661,16 +657,6 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) - - if true_cfg_scale > 1 and not has_neg_prompt: - logger.warning( - f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." - ) - elif true_cfg_scale <= 1 and has_neg_prompt: - logger.warning( - " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" - ) - do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -735,17 +721,10 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds and guidance_scale is None: - raise ValueError("guidance_scale is required for guidance-distilled model.") - elif self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - elif not self.transformer.config.guidance_embeds and guidance_scale is not None: - logger.warning( - f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled."
- ) - guidance = None - elif not self.transformer.config.guidance_embeds and guidance_scale is None: + else: guidance = None if self.attention_kwargs is None:
diff --git a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py index 4d502569a070..c2766baf8b08 100644 --- a/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +++ b/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py @@ -624,7 +624,7 @@ def __call__( strength: float = 0.6, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, - guidance_scale: Optional[float] = None, + guidance_scale: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -657,12 +657,7 @@ def __call__( list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. true_cfg_scale (`float`, *optional*, defaults to 1.0): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by - setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to - generate images that are closely linked to the text `prompt`, usually at the expense of lower image - quality. + When greater than 1.0 and a `negative_prompt` is provided, enables true classifier-free guidance. mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -697,16 +692,17 @@ def __call__( Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. - guidance_scale (`float`, *optional*, defaults to None): - A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance - where the guidance scale is applied during inference through noise prediction rescaling, guidance - distilled models take the guidance scale directly as an input parameter during forward pass. Guidance - scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images - that are closely linked to the text `prompt`, usually at the expense of lower image quality. This - parameter in the pipeline is there to support future guidance-distilled models when they come up.
It is - ignored when not using guidance distilled models. To enable traditional classifier-free guidance, - please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should - enable classifier-free guidance computations). + guidance_scale (`float`, *optional*, defaults to 1.0): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + + This parameter exists to support future guidance-distilled models; passing `guidance_scale` is + ineffective unless the loaded transformer is guidance-distilled. To enable classifier-free guidance, + please pass `true_cfg_scale > 1.0` and a `negative_prompt` (even an empty negative prompt like " " + enables classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -805,16 +801,6 @@ def __call__( has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) - - if true_cfg_scale > 1 and not has_neg_prompt: - logger.warning( - f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." - ) - elif true_cfg_scale <= 1 and has_neg_prompt: - logger.warning( - " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" - ) - do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, @@ -904,17 +890,10 @@ def __call__( self._num_timesteps = len(timesteps) # handle guidance - if self.transformer.config.guidance_embeds and guidance_scale is None: - raise ValueError("guidance_scale is required for guidance-distilled model.") - elif self.transformer.config.guidance_embeds: + if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) - elif not self.transformer.config.guidance_embeds and guidance_scale is not None: - logger.warning( - f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." - ) - guidance = None - elif not self.transformer.config.guidance_embeds and guidance_scale is None: + else: guidance = None if self.attention_kwargs is None:
diff --git a/src/diffusers/pipelines/sana/pipeline_sana.py b/src/diffusers/pipelines/sana/pipeline_sana.py index c54fec5b3a18..103f57a23640 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana.py +++ b/src/diffusers/pipelines/sana/pipeline_sana.py @@ -781,7 +781,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py index 17d6dfd83e08..cdc602b964cf 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py @@ -844,7 +844,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py index a140cc16724b..e8f9d8368f2a 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint.py @@ -663,7 +663,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py index 34d3b9d17e40..bf290c3ced56 100644 --- a/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py +++ b/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py @@ -736,7 +736,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py index aa39983c4e43..6130a9873cb0 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py @@ -362,7 +362,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation.
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`).
diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py index b3dc23f2e571..b705c7e6e5f6 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py @@ -237,7 +237,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`).
diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py index 9e63b3489ccd..b3b46af206ed 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py @@ -442,7 +442,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`).
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py index 6ebe0986a1ab..06c20768160b 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py @@ -313,7 +313,7 @@ def __call__( latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
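The QwenImage hunks above replace the old `None`-sentinel handling of `guidance_scale` with a plain default and route classifier-free guidance through `true_cfg_scale`. A minimal usage sketch of the two knobs; the `Qwen/Qwen-Image` checkpoint name and the concrete values are illustrative assumptions, not part of this patch:

```py
import torch
from diffusers import QwenImagePipeline

# Assumed checkpoint; any QwenImage checkpoint with the same API should behave alike.
pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16)
pipe.to("cuda")

image = pipe(
    prompt="a capybara wearing a suit, studio lighting",
    negative_prompt=" ",  # even an empty negative prompt enables the CFG path
    true_cfg_scale=4.0,   # > 1.0 plus a negative prompt -> true classifier-free guidance
    guidance_scale=1.0,   # only consumed when transformer.config.guidance_embeds is True
    num_inference_steps=50,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
```

With the simplified branch above, `guidance_scale` is silently unused on non-distilled checkpoints instead of triggering a warning or error.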
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py index 158bcabbebfd..141d849ec3d4 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py @@ -378,7 +378,7 @@ def __call__( latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py index a765163175a2..882fa98b0762 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py @@ -398,7 +398,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py index 1618f89a49e3..afee3f61e972 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py @@ -854,7 +854,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
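The `latents` docstring corrected throughout these hunks describes a useful pattern: sample the starting noise once and reuse it across prompts for comparable generations. A minimal sketch for an SD3-style pipeline; the checkpoint name and the latent shape (transformer `in_channels` channels, spatial size divided by `vae_scale_factor`) are assumptions inferred from the docstring, not something this patch defines:

```py
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
).to("cuda")

height = width = 1024
shape = (
    1,
    pipe.transformer.config.in_channels,
    height // pipe.vae_scale_factor,
    width // pipe.vae_scale_factor,
)
# Sample the starting noise once, with a fixed seed, instead of relying on `generator`.
latents = torch.randn(shape, generator=torch.Generator("cpu").manual_seed(0), dtype=torch.float16)

# Same starting noise, different prompts -> directly comparable outputs.
image_a = pipe("a watercolor fox", latents=latents, height=height, width=width).images[0]
image_b = pipe("an oil painting of a fox", latents=latents, height=height, width=width).images[0]
```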
diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py index 7e97909f42ca..fa1e0a4f3270 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py @@ -909,7 +909,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py index bed596e57c34..937f7195b21d 100644 --- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py @@ -984,7 +984,7 @@ def __call__( 1)`, or `(H, W)`. mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask - latents tensor will ge generated by `mask_image`. + latents tensor will be generated by `mask_image`. height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): @@ -1033,7 +1033,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py index df2564a89b1d..350a49282693 100755 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py @@ -539,7 +539,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`.
+ tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py index 766ca37d8142..3b57555071f3 100644 --- a/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py @@ -652,7 +652,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py index b97cf6f1f6f8..9ac64a0d8420 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py @@ -937,7 +937,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py index 44e8f4fe4b54..e63c7a55ce7b 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py @@ -1097,7 +1097,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
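The SDXL docstrings touched above also advertise pre-generated text embeddings for prompt tweaking. A minimal sketch of encoding once and reusing the embeddings across calls, assuming `StableDiffusionXLPipeline.encode_prompt` with its four-tuple return (a sketch under those assumptions, not prescribed by this patch):

```py
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Encode once; the returned tensors can be cached, edited, or batched.
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt(prompt="a red bicycle", negative_prompt="blurry, low quality", device="cuda")

image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
).images[0]
```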
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py index 18f8536a7510..f0bc9b9bb3e2 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py @@ -1251,7 +1251,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py index 58b008361782..b1379d1b2955 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py @@ -695,7 +695,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py index 1ce6987114a7..5c561721fcc7 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -760,7 +760,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py index 2802d690f3cc..13183df47d4b 100644 --- a/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +++ b/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -971,7 +971,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py index 288aae6c0d44..a9fa43c1f5c5 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +++ b/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -1051,7 +1051,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. motion_field_strength_x (`float`, *optional*, defaults to 12): Strength of motion in generated video along x-axis. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1.
diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py index 4e5b32c10c8c..68130baad709 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py @@ -319,7 +319,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
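For reference, the simplified `# handle guidance` branch introduced in the QwenImage hunks earlier builds a per-sample guidance tensor only when the transformer is guidance-distilled. A self-contained illustration of the tensor shapes involved (dummy sizes, no model required):

```py
import torch

guidance_scale = 1.0
latents = torch.randn(4, 16, 64, 64)  # stand-in for a batch of 4 latent samples

# One scalar, broadcast to one guidance value per sample in the batch.
guidance = torch.full([1], guidance_scale, dtype=torch.float32)
guidance = guidance.expand(latents.shape[0])
print(guidance.shape)  # torch.Size([4])
```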
diff --git a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py index 8571211cd027..e7a1d4a4b248 100644 --- a/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py +++ b/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py @@ -736,7 +736,7 @@ def __call__( latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py index bbdb60471fd1..b9b02a6dd38a 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py @@ -263,7 +263,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`).
diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py index c54c1fefe8fe..00a88ce34ed2 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py @@ -222,7 +222,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`).
diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py index e138b6e805c8..a32f09204d27 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +++ b/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py @@ -348,7 +348,7 @@ def __call__( latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts.
If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. + tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`).
diff --git a/src/diffusers/schedulers/scheduling_utils_flax.py b/src/diffusers/schedulers/scheduling_utils_flax.py index ffbe3b90207b..e6ac78f63ee7 100644 --- a/src/diffusers/schedulers/scheduling_utils_flax.py +++ b/src/diffusers/schedulers/scheduling_utils_flax.py @@ -22,11 +22,9 @@ import jax.numpy as jnp from huggingface_hub.utils import validate_hf_hub_args -from ..utils import BaseOutput, PushToHubMixin, logging +from ..utils import BaseOutput, PushToHubMixin -logger = logging.get_logger(__name__) - SCHEDULER_CONFIG_NAME = "scheduler_config.json" @@ -135,10 +135,6 @@ def from_pretrained(
""" - logger.warning( - "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We " - "recommend migrating to PyTorch classes or pinning your version of Diffusers." - ) config, kwargs = cls.load_config( pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, diff --git a/src/diffusers/utils/import_utils.py b/src/diffusers/utils/import_utils.py index 153be057381d..ac209afb74a6 100644 --- a/src/diffusers/utils/import_utils.py +++ b/src/diffusers/utils/import_utils.py @@ -70,11 +70,10 @@ def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[b # Fallback for Python < 3.10 for dist in importlib_metadata.distributions(): _top_level_declared = (dist.read_text("top_level.txt") or "").split() - # Infer top-level package names from file structure - _inferred_opt_names = { + _infered_opt_names = { f.parts[0] if len(f.parts) > 1 else inspect.getmodulename(f) for f in (dist.files or []) } - {None} - _top_level_inferred = filter(lambda name: "." not in name, _inferred_opt_names) + _top_level_inferred = filter(lambda name: "." not in name, _infered_opt_names) for pkg in _top_level_declared or _top_level_inferred: _package_map[pkg].append(dist.metadata["Name"]) except Exception as _: @@ -120,7 +119,7 @@ def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[b _safetensors_available, _safetensors_version = _is_package_available("safetensors") else: - logger.info("Disabling Safetensors because USE_SAFETENSORS is set") + logger.info("Disabling Safetensors because USE_TF is set") _safetensors_available = False _onnxruntime_version = "N/A" diff --git a/tests/hooks/test_hooks.py b/tests/hooks/test_hooks.py index 8a83f60ff278..1e845cc40c7d 100644 --- a/tests/hooks/test_hooks.py +++ b/tests/hooks/test_hooks.py @@ -220,7 +220,6 @@ def test_inference(self): self.assertAlmostEqual(output1, output2, places=5) self.assertAlmostEqual(output1, output3, places=5) - self.assertAlmostEqual(output2, output3, places=5) def test_skip_layer_hook(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) diff --git a/tests/models/autoencoders/test_models_vae_flax.py b/tests/models/autoencoders/test_models_vae_flax.py new file mode 100644 index 000000000000..3023a7c32c0d --- /dev/null +++ b/tests/models/autoencoders/test_models_vae_flax.py @@ -0,0 +1,39 @@ +import unittest + +from diffusers import FlaxAutoencoderKL +from diffusers.utils import is_flax_available + +from ...testing_utils import require_flax +from ..test_modeling_common_flax import FlaxModelTesterMixin + + +if is_flax_available(): + import jax + + +@require_flax +class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase): + model_class = FlaxAutoencoderKL + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + prng_key = jax.random.PRNGKey(0) + image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes)) + + return {"sample": image, "prng_key": prng_key} + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": [32, 64], + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], + "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], + "latent_channels": 4, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict diff --git a/tests/models/test_modeling_common_flax.py b/tests/models/test_modeling_common_flax.py new file mode 100644 index 000000000000..41e970b56664 --- /dev/null +++ 
@@ -0,0 +1,67 @@
+import inspect
+
+from diffusers.utils import is_flax_available
+
+from ..testing_utils import require_flax
+
+
+if is_flax_available():
+    import jax
+
+
+@require_flax
+class FlaxModelTesterMixin:
+    def test_output(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        model = self.model_class(**init_dict)
+        variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"])
+        jax.lax.stop_gradient(variables)
+
+        output = model.apply(variables, inputs_dict["sample"])
+
+        if isinstance(output, dict):
+            output = output.sample
+
+        self.assertIsNotNone(output)
+        expected_shape = inputs_dict["sample"].shape
+        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
+
+    def test_forward_with_norm_groups(self):
+        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+
+        init_dict["norm_num_groups"] = 16
+        init_dict["block_out_channels"] = (16, 32)
+
+        model = self.model_class(**init_dict)
+        variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"])
+        jax.lax.stop_gradient(variables)
+
+        output = model.apply(variables, inputs_dict["sample"])
+
+        if isinstance(output, dict):
+            output = output.sample
+
+        self.assertIsNotNone(output)
+        expected_shape = inputs_dict["sample"].shape
+        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
+
+    def test_deprecated_kwargs(self):
+        has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters
+        has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0
+
+        if has_kwarg_in_model_class and not has_deprecated_kwarg:
+            raise ValueError(
+                f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs"
+                " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are"
+                " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
+                " [<deprecated_argument>]`"
+            )
+
+        if not has_kwarg_in_model_class and has_deprecated_kwarg:
+            raise ValueError(
+                f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs"
+                " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to"
+                f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument"
+                " from `_deprecated_kwargs = [<deprecated_argument>]`"
+            )
diff --git a/tests/models/unets/test_models_unet_2d_flax.py b/tests/models/unets/test_models_unet_2d_flax.py
new file mode 100644
index 000000000000..3bc9a04b3c04
--- /dev/null
+++ b/tests/models/unets/test_models_unet_2d_flax.py
@@ -0,0 +1,105 @@
+import gc
+import unittest
+
+from parameterized import parameterized
+
+from diffusers import FlaxUNet2DConditionModel
+from diffusers.utils import is_flax_available
+
+from ...testing_utils import load_hf_numpy, require_flax, slow
+
+
+if is_flax_available():
+    import jax
+    import jax.numpy as jnp
+
+
+@slow
+@require_flax
+class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
+    def get_file_format(self, seed, shape):
+        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
+
+    def tearDown(self):
+        # clean up the VRAM after each test
+        super().tearDown()
+        gc.collect()
+
+    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
+        dtype = jnp.bfloat16 if fp16 else jnp.float32
+        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
+        return image
+
+    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
+        dtype = jnp.bfloat16 if fp16 else jnp.float32
+        revision = "bf16" if fp16 else None
+
+        model, params = FlaxUNet2DConditionModel.from_pretrained(
+            model_id, subfolder="unet", dtype=dtype, revision=revision
+        )
+        return model, params
+
+    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
+        dtype = jnp.bfloat16 if fp16 else jnp.float32
+        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
+        return hidden_states
+
+    @parameterized.expand(
+        [
+            # fmt: off
+            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
+            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
+            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
+            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
+            # fmt: on
+        ]
+    )
+    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
+        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
+        latents = self.get_latents(seed, fp16=True)
+        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
+
+        sample = model.apply(
+            {"params": params},
+            latents,
+            jnp.array(timestep, dtype=jnp.int32),
+            encoder_hidden_states=encoder_hidden_states,
+        ).sample
+
+        assert sample.shape == latents.shape
+
+        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
+        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
+
+        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
+        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
+
+    @parameterized.expand(
+        [
+            # fmt: off
+            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
+            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
+            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
+            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
+            # fmt: on
+        ]
+    )
+    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, 
expected_slice): + model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) + latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) + encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) + + sample = model.apply( + {"params": params}, + latents, + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=encoder_hidden_states, + ).sample + + assert sample.shape == latents.shape + + output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) + expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32) + + # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware + assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) diff --git a/tests/pipelines/controlnet/test_flax_controlnet.py b/tests/pipelines/controlnet/test_flax_controlnet.py new file mode 100644 index 000000000000..e9cff4c9571e --- /dev/null +++ b/tests/pipelines/controlnet/test_flax_controlnet.py @@ -0,0 +1,128 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline +from diffusers.utils import is_flax_available, load_image + +from ...testing_utils import require_flax, slow + + +if is_flax_available(): + import jax + import jax.numpy as jnp + from flax.jax_utils import replicate + from flax.training.common_utils import shard + + +@slow +@require_flax +class FlaxControlNetPipelineIntegrationTests(unittest.TestCase): + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + + def test_canny(self): + controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( + "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16 + ) + pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 + ) + params["controlnet"] = controlnet_params + + prompts = "bird" + num_samples = jax.device_count() + prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) + + canny_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) + + rng = jax.random.PRNGKey(0) + rng = jax.random.split(rng, jax.device_count()) + + p_params = replicate(params) + prompt_ids = shard(prompt_ids) + processed_image = shard(processed_image) + + images = pipe( + prompt_ids=prompt_ids, + image=processed_image, + params=p_params, + prng_seed=rng, + num_inference_steps=50, + jit=True, + ).images + assert images.shape == (jax.device_count(), 1, 768, 512, 3) + + images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) + image_slice = images[0, 253:256, 253:256, -1] + + output_slice = 
jnp.asarray(jax.device_get(image_slice.flatten())) + expected_slice = jnp.array( + [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] + ) + + assert jnp.abs(output_slice - expected_slice).max() < 1e-2 + + def test_pose(self): + controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( + "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16 + ) + pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 + ) + params["controlnet"] = controlnet_params + + prompts = "Chef in the kitchen" + num_samples = jax.device_count() + prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) + + pose_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" + ) + processed_image = pipe.prepare_image_inputs([pose_image] * num_samples) + + rng = jax.random.PRNGKey(0) + rng = jax.random.split(rng, jax.device_count()) + + p_params = replicate(params) + prompt_ids = shard(prompt_ids) + processed_image = shard(processed_image) + + images = pipe( + prompt_ids=prompt_ids, + image=processed_image, + params=p_params, + prng_seed=rng, + num_inference_steps=50, + jit=True, + ).images + assert images.shape == (jax.device_count(), 1, 768, 512, 3) + + images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) + image_slice = images[0, 253:256, 253:256, -1] + + output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) + expected_slice = jnp.array( + [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] + ) + + assert jnp.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/qwenimage/test_qwenimage_controlnet.py b/tests/pipelines/qwenimage/test_qwenimage_controlnet.py deleted file mode 100644 index c78e5cb233d3..000000000000 --- a/tests/pipelines/qwenimage/test_qwenimage_controlnet.py +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright 2025 The HuggingFace Team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -import torch -from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer - -from diffusers import ( - AutoencoderKLQwenImage, - FlowMatchEulerDiscreteScheduler, - QwenImageControlNetModel, - QwenImageControlNetPipeline, - QwenImageMultiControlNetModel, - QwenImageTransformer2DModel, -) -from diffusers.utils.testing_utils import enable_full_determinism, torch_device -from diffusers.utils.torch_utils import randn_tensor - -from ..pipeline_params import TEXT_TO_IMAGE_PARAMS -from ..test_pipelines_common import PipelineTesterMixin, to_np - - -enable_full_determinism() - - -class QwenControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = QwenImageControlNetPipeline - params = (TEXT_TO_IMAGE_PARAMS | frozenset(["control_image", "controlnet_conditioning_scale"])) - { - "cross_attention_kwargs" - } - batch_params = frozenset(["prompt", "negative_prompt", "control_image"]) - image_params = frozenset(["control_image"]) - image_latents_params = frozenset(["latents"]) - - required_optional_params = frozenset( - [ - "num_inference_steps", - "generator", - "latents", - "control_image", - "controlnet_conditioning_scale", - "return_dict", - "callback_on_step_end", - "callback_on_step_end_tensor_inputs", - ] - ) - - supports_dduf = False - test_xformers_attention = True - test_layerwise_casting = True - test_group_offloading = True - - def get_dummy_components(self): - torch.manual_seed(0) - transformer = QwenImageTransformer2DModel( - patch_size=2, - in_channels=16, - out_channels=4, - num_layers=2, - attention_head_dim=16, - num_attention_heads=3, - joint_attention_dim=16, - guidance_embeds=False, - axes_dims_rope=(8, 4, 4), - ) - - torch.manual_seed(0) - controlnet = QwenImageControlNetModel( - patch_size=2, - in_channels=16, - out_channels=4, - num_layers=2, - attention_head_dim=16, - num_attention_heads=3, - joint_attention_dim=16, - axes_dims_rope=(8, 4, 4), - ) - - torch.manual_seed(0) - z_dim = 4 - vae = AutoencoderKLQwenImage( - base_dim=z_dim * 6, - z_dim=z_dim, - dim_mult=[1, 2, 4], - num_res_blocks=1, - temperal_downsample=[False, True], - latents_mean=[0.0] * z_dim, - latents_std=[1.0] * z_dim, - ) - - torch.manual_seed(0) - scheduler = FlowMatchEulerDiscreteScheduler() - - torch.manual_seed(0) - config = Qwen2_5_VLConfig( - text_config={ - "hidden_size": 16, - "intermediate_size": 16, - "num_hidden_layers": 2, - "num_attention_heads": 2, - "num_key_value_heads": 2, - "rope_scaling": { - "mrope_section": [1, 1, 2], - "rope_type": "default", - "type": "default", - }, - "rope_theta": 1_000_000.0, - }, - vision_config={ - "depth": 2, - "hidden_size": 16, - "intermediate_size": 16, - "num_heads": 2, - "out_hidden_size": 16, - }, - hidden_size=16, - vocab_size=152064, - vision_end_token_id=151653, - vision_start_token_id=151652, - vision_token_id=151654, - ) - - text_encoder = Qwen2_5_VLForConditionalGeneration(config) - tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") - - components = { - "transformer": transformer, - "vae": vae, - "scheduler": scheduler, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "controlnet": controlnet, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - - control_image = randn_tensor( - (1, 3, 32, 32), - generator=generator, - 
device=torch.device(device), - dtype=torch.float32, - ) - - inputs = { - "prompt": "dance monkey", - "negative_prompt": "bad quality", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 3.0, - "true_cfg_scale": 1.0, - "height": 32, - "width": 32, - "max_sequence_length": 16, - "control_image": control_image, - "controlnet_conditioning_scale": 0.5, - "output_type": "pt", - } - - return inputs - - def test_qwen_controlnet(self): - device = "cpu" - components = self.get_dummy_components() - pipe = self.pipeline_class(**components) - pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = pipe(**inputs).images - generated_image = image[0] - self.assertEqual(generated_image.shape, (3, 32, 32)) - - # Expected slice from the generated image - expected_slice = torch.tensor( - [ - 0.4726, - 0.5549, - 0.6324, - 0.6548, - 0.4968, - 0.4639, - 0.4749, - 0.4898, - 0.4725, - 0.4645, - 0.4435, - 0.3339, - 0.3400, - 0.4630, - 0.3879, - 0.4406, - ] - ) - - generated_slice = generated_image.flatten() - generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) - self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) - - def test_qwen_controlnet_multicondition(self): - device = "cpu" - components = self.get_dummy_components() - - components["controlnet"] = QwenImageMultiControlNetModel([components["controlnet"]]) - - pipe = self.pipeline_class(**components) - pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - control_image = inputs["control_image"] - inputs["control_image"] = [control_image, control_image] - inputs["controlnet_conditioning_scale"] = [0.5, 0.5] - - image = pipe(**inputs).images - generated_image = image[0] - self.assertEqual(generated_image.shape, (3, 32, 32)) - # Expected slice from the generated image - expected_slice = torch.tensor( - [ - 0.6239, - 0.6642, - 0.5768, - 0.6039, - 0.5270, - 0.5070, - 0.5006, - 0.5271, - 0.4506, - 0.3085, - 0.3435, - 0.5152, - 0.5096, - 0.5422, - 0.4286, - 0.5752, - ] - ) - - generated_slice = generated_image.flatten() - generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) - self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) - - def test_attention_slicing_forward_pass( - self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 - ): - if not self.test_attention_slicing: - return - - components = self.get_dummy_components() - pipe = self.pipeline_class(**components) - for component in pipe.components.values(): - if hasattr(component, "set_default_attn_processor"): - component.set_default_attn_processor() - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - generator_device = "cpu" - inputs = self.get_dummy_inputs(generator_device) - output_without_slicing = pipe(**inputs)[0] - - pipe.enable_attention_slicing(slice_size=1) - inputs = self.get_dummy_inputs(generator_device) - output_with_slicing1 = pipe(**inputs)[0] - - pipe.enable_attention_slicing(slice_size=2) - inputs = self.get_dummy_inputs(generator_device) - output_with_slicing2 = pipe(**inputs)[0] - - if test_max_difference: - max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() - max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() - self.assertLess( - max(max_diff1, max_diff2), - expected_max_diff, - "Attention slicing should not affect the inference results", - ) - - def 
test_inference_batch_single_identical(self): - self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) - - def test_vae_tiling(self, expected_diff_max: float = 0.2): - generator_device = "cpu" - components = self.get_dummy_components() - - pipe = self.pipeline_class(**components) - pipe.to("cpu") - pipe.set_progress_bar_config(disable=None) - - # Without tiling - inputs = self.get_dummy_inputs(generator_device) - inputs["height"] = inputs["width"] = 128 - inputs["control_image"] = randn_tensor( - (1, 3, 128, 128), - generator=inputs["generator"], - device=torch.device(generator_device), - dtype=torch.float32, - ) - output_without_tiling = pipe(**inputs)[0] - - # With tiling - pipe.vae.enable_tiling( - tile_sample_min_height=96, - tile_sample_min_width=96, - tile_sample_stride_height=64, - tile_sample_stride_width=64, - ) - inputs = self.get_dummy_inputs(generator_device) - inputs["height"] = inputs["width"] = 128 - inputs["control_image"] = randn_tensor( - (1, 3, 128, 128), - generator=inputs["generator"], - device=torch.device(generator_device), - dtype=torch.float32, - ) - output_with_tiling = pipe(**inputs)[0] - - self.assertLess( - (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), - expected_diff_max, - "VAE tiling should not affect the inference results", - ) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py new file mode 100644 index 000000000000..92effcacadb5 --- /dev/null +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import unittest + +from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline +from diffusers.utils import is_flax_available + +from ...testing_utils import nightly, require_flax + + +if is_flax_available(): + import jax + import jax.numpy as jnp + from flax.jax_utils import replicate + from flax.training.common_utils import shard + + +@nightly +@require_flax +class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase): + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + + def test_stable_diffusion_flax(self): + sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-2", + variant="bf16", + dtype=jnp.bfloat16, + ) + + prompt = "A painting of a squirrel eating a burger" + num_samples = jax.device_count() + prompt = num_samples * [prompt] + prompt_ids = sd_pipe.prepare_inputs(prompt) + + params = replicate(params) + prompt_ids = shard(prompt_ids) + + prng_seed = jax.random.PRNGKey(0) + prng_seed = jax.random.split(prng_seed, jax.device_count()) + + images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] + assert images.shape == (jax.device_count(), 1, 768, 768, 3) + + images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) + image_slice = images[0, 253:256, 253:256, -1] + + output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) + expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]) + + assert jnp.abs(output_slice - expected_slice).max() < 1e-2 + + +@nightly +@require_flax +class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase): + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + + def test_stable_diffusion_dpm_flax(self): + model_id = "stabilityai/stable-diffusion-2" + scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") + sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( + model_id, + scheduler=scheduler, + variant="bf16", + dtype=jnp.bfloat16, + ) + params["scheduler"] = scheduler_params + + prompt = "A painting of a squirrel eating a burger" + num_samples = jax.device_count() + prompt = num_samples * [prompt] + prompt_ids = sd_pipe.prepare_inputs(prompt) + + params = replicate(params) + prompt_ids = shard(prompt_ids) + + prng_seed = jax.random.PRNGKey(0) + prng_seed = jax.random.split(prng_seed, jax.device_count()) + + images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] + assert images.shape == (jax.device_count(), 1, 768, 768, 3) + + images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) + image_slice = images[0, 253:256, 253:256, -1] + + output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) + expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]) + + assert jnp.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py new file mode 100644 index 000000000000..cdd088b531b8 --- /dev/null +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +from diffusers import FlaxStableDiffusionInpaintPipeline +from diffusers.utils import is_flax_available, load_image + +from ...testing_utils import require_flax, slow + + +if is_flax_available(): + import jax + import jax.numpy as jnp + from flax.jax_utils import replicate + from flax.training.common_utils import shard + + +@slow +@require_flax +class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + + def test_stable_diffusion_inpaint_pipeline(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-inpaint/init_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" + ) + + model_id = "xvjiarui/stable-diffusion-2-inpainting" + pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) + + prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + + prng_seed = jax.random.PRNGKey(0) + num_inference_steps = 50 + + num_samples = jax.device_count() + prompt = num_samples * [prompt] + init_image = num_samples * [init_image] + mask_image = num_samples * [mask_image] + prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image) + + # shard inputs and rng + params = replicate(params) + prng_seed = jax.random.split(prng_seed, jax.device_count()) + prompt_ids = shard(prompt_ids) + processed_masked_images = shard(processed_masked_images) + processed_masks = shard(processed_masks) + + output = pipeline( + prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True + ) + + images = output.images.reshape(num_samples, 512, 512, 3) + + image_slice = images[0, 253:256, 253:256, -1] + + output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) + expected_slice = jnp.array( + [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] + ) + + assert jnp.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/test_pipelines_flax.py b/tests/pipelines/test_pipelines_flax.py new file mode 100644 index 000000000000..dbb5c7bfed1d --- /dev/null +++ b/tests/pipelines/test_pipelines_flax.py @@ -0,0 +1,261 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import tempfile +import unittest + +import numpy as np + +from diffusers.utils import is_flax_available + +from ..testing_utils import require_flax, slow + + +if is_flax_available(): + import jax + import jax.numpy as jnp + from flax.jax_utils import replicate + from flax.training.common_utils import shard + + from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline + + +@require_flax +class DownloadTests(unittest.TestCase): + def test_download_only_pytorch(self): + with tempfile.TemporaryDirectory() as tmpdirname: + # pipeline has Flax weights + _ = FlaxDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))] + files = [item for sublist in all_root_files for item in sublist] + + # None of the downloaded files should be a PyTorch file even if we have some here: + # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin + assert not any(f.endswith(".bin") for f in files) + + +@slow +@require_flax +class FlaxPipelineTests(unittest.TestCase): + def test_dummy_all_tpus(self): + pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None + ) + + prompt = ( + "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" + " field, close up, split lighting, cinematic" + ) + + prng_seed = jax.random.PRNGKey(0) + num_inference_steps = 4 + + num_samples = jax.device_count() + prompt = num_samples * [prompt] + prompt_ids = pipeline.prepare_inputs(prompt) + + # shard inputs and rng + params = replicate(params) + prng_seed = jax.random.split(prng_seed, num_samples) + prompt_ids = shard(prompt_ids) + + images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images + + assert images.shape == (num_samples, 1, 64, 64, 3) + if jax.device_count() == 8: + assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3 + assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1 + + images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) + assert len(images_pil) == num_samples + + def test_stable_diffusion_v1_4(self): + pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None + ) + + prompt = ( + "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" + " field, close up, split lighting, cinematic" + ) + + prng_seed = jax.random.PRNGKey(0) + num_inference_steps = 50 + + num_samples = jax.device_count() + prompt = num_samples * [prompt] + prompt_ids = pipeline.prepare_inputs(prompt) + + # shard inputs and rng + params = replicate(params) + prng_seed = jax.random.split(prng_seed, num_samples) + prompt_ids = shard(prompt_ids) + + images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images + + assert images.shape == (num_samples, 1, 512, 512, 3) + if jax.device_count() == 8: + assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-2 + assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 
5e-1 + + def test_stable_diffusion_v1_4_bfloat_16(self): + pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", variant="bf16", dtype=jnp.bfloat16, safety_checker=None + ) + + prompt = ( + "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" + " field, close up, split lighting, cinematic" + ) + + prng_seed = jax.random.PRNGKey(0) + num_inference_steps = 50 + + num_samples = jax.device_count() + prompt = num_samples * [prompt] + prompt_ids = pipeline.prepare_inputs(prompt) + + # shard inputs and rng + params = replicate(params) + prng_seed = jax.random.split(prng_seed, num_samples) + prompt_ids = shard(prompt_ids) + + images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images + + assert images.shape == (num_samples, 1, 512, 512, 3) + if jax.device_count() == 8: + assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 + assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 + + def test_stable_diffusion_v1_4_bfloat_16_with_safety(self): + pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", variant="bf16", dtype=jnp.bfloat16 + ) + + prompt = ( + "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" + " field, close up, split lighting, cinematic" + ) + + prng_seed = jax.random.PRNGKey(0) + num_inference_steps = 50 + + num_samples = jax.device_count() + prompt = num_samples * [prompt] + prompt_ids = pipeline.prepare_inputs(prompt) + + # shard inputs and rng + params = replicate(params) + prng_seed = jax.random.split(prng_seed, num_samples) + prompt_ids = shard(prompt_ids) + + images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images + + assert images.shape == (num_samples, 1, 512, 512, 3) + if jax.device_count() == 8: + assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 + assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 + + def test_stable_diffusion_v1_4_bfloat_16_ddim(self): + scheduler = FlaxDDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + set_alpha_to_one=False, + steps_offset=1, + ) + + pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + variant="bf16", + dtype=jnp.bfloat16, + scheduler=scheduler, + safety_checker=None, + ) + scheduler_state = scheduler.create_state() + + params["scheduler"] = scheduler_state + + prompt = ( + "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" + " field, close up, split lighting, cinematic" + ) + + prng_seed = jax.random.PRNGKey(0) + num_inference_steps = 50 + + num_samples = jax.device_count() + prompt = num_samples * [prompt] + prompt_ids = pipeline.prepare_inputs(prompt) + + # shard inputs and rng + params = replicate(params) + prng_seed = jax.random.split(prng_seed, num_samples) + prompt_ids = shard(prompt_ids) + + images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images + + assert images.shape == (num_samples, 1, 512, 512, 3) + if jax.device_count() == 8: + assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 5e-2 + assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1 + + def test_jax_memory_efficient_attention(self): + prompt = ( + "A 
cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
+            " field, close up, split lighting, cinematic"
+        )
+
+        num_samples = jax.device_count()
+        prompt = num_samples * [prompt]
+        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
+
+        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
+            "CompVis/stable-diffusion-v1-4",
+            variant="bf16",
+            dtype=jnp.bfloat16,
+            safety_checker=None,
+        )
+
+        params = replicate(params)
+        prompt_ids = pipeline.prepare_inputs(prompt)
+        prompt_ids = shard(prompt_ids)
+        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
+        assert images.shape == (num_samples, 1, 512, 512, 3)
+        slice = images[2, 0, 256, 10:17, 1]
+
+        # With memory efficient attention
+        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
+            "CompVis/stable-diffusion-v1-4",
+            variant="bf16",
+            dtype=jnp.bfloat16,
+            safety_checker=None,
+            use_memory_efficient_attention=True,
+        )
+
+        params = replicate(params)
+        prompt_ids = pipeline.prepare_inputs(prompt)
+        prompt_ids = shard(prompt_ids)
+        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
+        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
+        slice_eff = images_eff[2, 0, 256, 10:17, 1]
+
+        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
+        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
+        assert abs(slice_eff - slice).max() < 1e-2
diff --git a/tests/schedulers/test_scheduler_flax.py b/tests/schedulers/test_scheduler_flax.py
new file mode 100644
index 000000000000..e6e4fd7d7631
--- /dev/null
+++ b/tests/schedulers/test_scheduler_flax.py
@@ -0,0 +1,928 @@
+# coding=utf-8
+# Copyright 2025 HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
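+
+# Flax schedulers are stateless: `create_state()` returns an immutable state object
+# that has to be threaded through the API explicitly, e.g.
+#     state = scheduler.create_state()
+#     state = scheduler.set_timesteps(state, num_inference_steps)
+#     output = scheduler.step(state, residual, t, sample)  # output.prev_sample / output.state
+# The common mixin below exercises this pattern for the DDPM, DDIM, and PNDM schedulers.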
+import inspect +import tempfile +import unittest +from typing import Dict, List, Tuple + +from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler +from diffusers.utils import is_flax_available + +from ..testing_utils import require_flax + + +if is_flax_available(): + import jax + import jax.numpy as jnp + from jax import random + + jax_device = jax.default_backend() + + +@require_flax +class FlaxSchedulerCommonTest(unittest.TestCase): + scheduler_classes = () + forward_default_kwargs = () + + @property + def dummy_sample(self): + batch_size = 4 + num_channels = 3 + height = 8 + width = 8 + + key1, key2 = random.split(random.PRNGKey(0)) + sample = random.uniform(key1, (batch_size, num_channels, height, width)) + + return sample, key2 + + @property + def dummy_sample_deter(self): + batch_size = 4 + num_channels = 3 + height = 8 + width = 8 + + num_elems = batch_size * num_channels * height * width + sample = jnp.arange(num_elems) + sample = sample.reshape(num_channels, height, width, batch_size) + sample = sample / num_elems + return jnp.transpose(sample, (3, 0, 1, 2)) + + def get_scheduler_config(self): + raise NotImplementedError + + def dummy_model(self): + def model(sample, t, *args): + return sample * t / (t + 1) + + return model + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + sample, key = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample + new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample + + assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + kwargs.update(forward_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + sample, key = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output = scheduler.step(state, residual, time_step, sample, key, 
**kwargs).prev_sample + new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample + + assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def test_from_save_pretrained(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + sample, key = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample + new_output = new_scheduler.step(new_state, residual, 1, sample, key, **kwargs).prev_sample + + assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + sample, key = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output_0 = scheduler.step(state, residual, 0, sample, key, **kwargs).prev_sample + output_1 = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_scheduler_outputs_equivalence(self): + def set_nan_tensor_to_zero(t): + return t.at[t != t].set(0) + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif isinstance(tuple_object, Dict): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" + f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" + f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." 
+                    ),
+                )
+
+        kwargs = dict(self.forward_default_kwargs)
+        num_inference_steps = kwargs.pop("num_inference_steps", None)
+
+        for scheduler_class in self.scheduler_classes:
+            scheduler_config = self.get_scheduler_config()
+            scheduler = scheduler_class(**scheduler_config)
+            state = scheduler.create_state()
+
+            sample, key = self.dummy_sample
+            residual = 0.1 * sample
+
+            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
+                state = scheduler.set_timesteps(state, num_inference_steps)
+            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
+                kwargs["num_inference_steps"] = num_inference_steps
+
+            outputs_dict = scheduler.step(state, residual, 0, sample, key, **kwargs)
+
+            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
+                state = scheduler.set_timesteps(state, num_inference_steps)
+            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
+                kwargs["num_inference_steps"] = num_inference_steps
+
+            outputs_tuple = scheduler.step(state, residual, 0, sample, key, return_dict=False, **kwargs)
+
+            recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
+
+    def test_deprecated_kwargs(self):
+        for scheduler_class in self.scheduler_classes:
+            has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters
+            has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0
+
+            if has_kwarg_in_model_class and not has_deprecated_kwarg:
+                raise ValueError(
+                    f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated"
+                    " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if"
+                    " there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
+                    " [<deprecated_argument>]`"
+                )
+
+            if not has_kwarg_in_model_class and has_deprecated_kwarg:
+                raise ValueError(
+                    f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated"
+                    " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs`"
+                    f" argument to {scheduler_class}.__init__ if there are deprecated arguments or remove the"
+                    " deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`"
+                )
+
+
+@require_flax
+class FlaxDDPMSchedulerTest(FlaxSchedulerCommonTest):
+    scheduler_classes = (FlaxDDPMScheduler,)
+
+    def get_scheduler_config(self, **kwargs):
+        config = {
+            "num_train_timesteps": 1000,
+            "beta_start": 0.0001,
+            "beta_end": 0.02,
+            "beta_schedule": "linear",
+            "variance_type": "fixed_small",
+            "clip_sample": True,
+        }
+
+        config.update(**kwargs)
+        return config
+
+    def test_timesteps(self):
+        for timesteps in [1, 5, 100, 1000]:
+            self.check_over_configs(num_train_timesteps=timesteps)
+
+    def test_betas(self):
+        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
+            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
+
+    def test_schedules(self):
+        for schedule in ["linear", "squaredcos_cap_v2"]:
+            self.check_over_configs(beta_schedule=schedule)
+
+    def test_variance_type(self):
+        for variance in ["fixed_small", "fixed_large", "other"]:
+            self.check_over_configs(variance_type=variance)
+
+    def test_clip_sample(self):
+        for clip_sample in [True, False]:
+            self.check_over_configs(clip_sample=clip_sample)
+
+    def test_time_indices(self):
+        for t in [0, 500, 999]:
+            self.check_over_forward(time_step=t)
+
+    def test_variance(self):
+        scheduler_class = self.scheduler_classes[0]
+        scheduler_config = self.get_scheduler_config()
+        scheduler = scheduler_class(**scheduler_config)
+        state = scheduler.create_state()
+
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0) - 0.0)) < 1e-5
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487) - 0.00979)) < 1e-5
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999) - 0.02)) < 1e-5
+
+    def test_full_loop_no_noise(self):
+        scheduler_class = self.scheduler_classes[0]
+        scheduler_config = self.get_scheduler_config()
+        scheduler = scheduler_class(**scheduler_config)
+        state = scheduler.create_state()
+
+        num_trained_timesteps = len(scheduler)
+
+        model = self.dummy_model()
+        sample = self.dummy_sample_deter
+        key1, key2 = random.split(random.PRNGKey(0))
+
+        for t in reversed(range(num_trained_timesteps)):
+            # 1. predict noise residual
+            residual = model(sample, t)
+
+            # 2. 
predict previous mean of sample x_t-1 + output = scheduler.step(state, residual, t, sample, key1) + pred_prev_sample = output.prev_sample + state = output.state + key1, key2 = random.split(key2) + + # if t > 0: + # noise = self.dummy_sample_deter + # variance = scheduler.get_variance(t) ** (0.5) * noise + # + # sample = pred_prev_sample + variance + sample = pred_prev_sample + + result_sum = jnp.sum(jnp.abs(sample)) + result_mean = jnp.mean(jnp.abs(sample)) + + if jax_device == "tpu": + assert abs(result_sum - 255.0714) < 1e-2 + assert abs(result_mean - 0.332124) < 1e-3 + else: + assert abs(result_sum - 270.2) < 1e-1 + assert abs(result_mean - 0.3519494) < 1e-3 + + +@require_flax +class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest): + scheduler_classes = (FlaxDDIMScheduler,) + forward_default_kwargs = (("num_inference_steps", 50),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + key1, key2 = random.split(random.PRNGKey(0)) + + num_inference_steps = 10 + + model = self.dummy_model() + sample = self.dummy_sample_deter + + state = scheduler.set_timesteps(state, num_inference_steps) + + for t in state.timesteps: + residual = model(sample, t) + output = scheduler.step(state, residual, t, sample) + sample = output.prev_sample + state = output.state + key1, key2 = random.split(key2) + + return sample + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + sample, _ = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample + + assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def test_from_save_pretrained(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + sample, _ = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, 
"set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample + new_output = new_scheduler.step(new_state, residual, 1, sample, **kwargs).prev_sample + + assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + kwargs.update(forward_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + sample, _ = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample + + assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def test_scheduler_outputs_equivalence(self): + def set_nan_tensor_to_zero(t): + return t.at[t != t].set(0) + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif isinstance(tuple_object, Dict): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" + f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" + f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." 
+ ), + ) + + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + sample, _ = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs) + + recursive_check(outputs_tuple[0], outputs_dict.prev_sample) + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + sample, _ = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + state = scheduler.set_timesteps(state, num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output_0 = scheduler.step(state, residual, 0, sample, **kwargs).prev_sample + output_1 = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [100, 500, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_steps_offset(self): + for steps_offset in [0, 1]: + self.check_over_configs(steps_offset=steps_offset) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(steps_offset=1) + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + state = scheduler.set_timesteps(state, 5) + assert jnp.equal(state.timesteps, jnp.array([801, 601, 401, 201, 1])).all() + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "squaredcos_cap_v2"]: + self.check_over_configs(beta_schedule=schedule) + + def test_time_indices(self): + for t in [1, 10, 49]: + self.check_over_forward(time_step=t) + + def test_inference_steps(self): + for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): + self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) + + def test_variance(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + state = scheduler.create_state() + + assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5 + assert 
+    def test_variance(self):
+        scheduler_class = self.scheduler_classes[0]
+        scheduler_config = self.get_scheduler_config()
+        scheduler = scheduler_class(**scheduler_config)
+        state = scheduler.create_state()
+
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 420, 400) - 0.14771)) < 1e-5
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 980, 960) - 0.32460)) < 1e-5
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487, 486) - 0.00979)) < 1e-5
+        assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999, 998) - 0.02)) < 1e-5
+
+    def test_full_loop_no_noise(self):
+        sample = self.full_loop()
+
+        result_sum = jnp.sum(jnp.abs(sample))
+        result_mean = jnp.mean(jnp.abs(sample))
+
+        assert abs(result_sum - 172.0067) < 1e-2
+        assert abs(result_mean - 0.223967) < 1e-3
+
+    def test_full_loop_with_set_alpha_to_one(self):
+        # We specify a different beta_start so that the first alpha is 0.99
+        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
+        result_sum = jnp.sum(jnp.abs(sample))
+        result_mean = jnp.mean(jnp.abs(sample))
+
+        if jax_device == "tpu":
+            assert abs(result_sum - 149.8409) < 1e-2
+            assert abs(result_mean - 0.1951) < 1e-3
+        else:
+            assert abs(result_sum - 149.8295) < 1e-2
+            assert abs(result_mean - 0.1951) < 1e-3
+
+    def test_full_loop_with_no_set_alpha_to_one(self):
+        # We specify a different beta_start so that the first alpha is 0.99
+        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
+        result_sum = jnp.sum(jnp.abs(sample))
+        result_mean = jnp.mean(jnp.abs(sample))
+
+        if jax_device == "tpu":
+            pass
+            # FIXME: both result_sum and result_mean are nan on TPU
+            # assert jnp.isnan(result_sum)
+            # assert jnp.isnan(result_mean)
+        else:
+            assert abs(result_sum - 149.0784) < 1e-2
+            assert abs(result_mean - 0.1941) < 1e-3
+
+    def test_prediction_type(self):
+        for prediction_type in ["epsilon", "sample", "v_prediction"]:
+            self.check_over_configs(prediction_type=prediction_type)
+
+
+@require_flax
+class FlaxPNDMSchedulerTest(FlaxSchedulerCommonTest):
+    scheduler_classes = (FlaxPNDMScheduler,)
+    forward_default_kwargs = (("num_inference_steps", 50),)
+
+    def get_scheduler_config(self, **kwargs):
+        config = {
+            "num_train_timesteps": 1000,
+            "beta_start": 0.0001,
+            "beta_end": 0.02,
+            "beta_schedule": "linear",
+        }
+
+        config.update(**kwargs)
+        return config
+
+    def check_over_configs(self, time_step=0, **config):
+        kwargs = dict(self.forward_default_kwargs)
+        num_inference_steps = kwargs.pop("num_inference_steps", None)
+        sample, _ = self.dummy_sample
+        residual = 0.1 * sample
+        dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
+
+        for scheduler_class in self.scheduler_classes:
+            scheduler_config = self.get_scheduler_config(**config)
+            scheduler = scheduler_class(**scheduler_config)
+            state = scheduler.create_state()
+            state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
+            # copy over dummy past residuals
+            state = state.replace(ets=dummy_past_residuals[:])
+
+            with tempfile.TemporaryDirectory() as tmpdirname:
+                scheduler.save_config(tmpdirname)
+                new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
+                new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape)
+                # copy over dummy past residuals
+                new_state = new_state.replace(ets=dummy_past_residuals[:])
+
+            (prev_sample, state) = scheduler.step_prk(state, residual, time_step, sample, **kwargs)
+            (new_prev_sample, new_state) = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs)
+
+            assert jnp.sum(jnp.abs(prev_sample - new_prev_sample)) < 1e-5, "Scheduler outputs are not identical"
+
+            output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs)
+            new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
+
+            assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
+
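+    # The generic save/load test from FlaxSchedulerCommonTest is skipped below,
+    # most likely because FlaxPNDMScheduler.set_timesteps requires a `shape`
+    # argument the common test does not pass; `check_over_configs` above already
+    # exercises the same save/load round trip through step_prk and step_plms.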
+    @unittest.skip("Test not supported.")
+    def test_from_save_pretrained(self):
+        pass
+
+    def test_scheduler_outputs_equivalence(self):
+        def set_nan_tensor_to_zero(t):
+            return t.at[t != t].set(0)
+
+        def recursive_check(tuple_object, dict_object):
+            if isinstance(tuple_object, (List, Tuple)):
+                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
+                    recursive_check(tuple_iterable_value, dict_iterable_value)
+            elif isinstance(tuple_object, Dict):
+                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
+                    recursive_check(tuple_iterable_value, dict_iterable_value)
+            elif tuple_object is None:
+                return
+            else:
+                self.assertTrue(
+                    jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
+                    msg=(
+                        "Tuple and dict output are not equal. Difference:"
+                        f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
+                        f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object).any()}. Dict has"
+                        f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object).any()}."
+                    ),
+                )
+
+        kwargs = dict(self.forward_default_kwargs)
+        num_inference_steps = kwargs.pop("num_inference_steps", None)
+
+        for scheduler_class in self.scheduler_classes:
+            scheduler_config = self.get_scheduler_config()
+            scheduler = scheduler_class(**scheduler_config)
+            state = scheduler.create_state()
+
+            sample, _ = self.dummy_sample
+            residual = 0.1 * sample
+
+            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
+                state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
+            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
+                kwargs["num_inference_steps"] = num_inference_steps
+
+            outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs)
+
+            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
+                state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
+            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
+                kwargs["num_inference_steps"] = num_inference_steps
+
+            outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs)
+
+            recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
+
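+    # Note: Flax scheduler states are immutable structs, so `state.replace(...)`
+    # returns a new state instead of mutating in place and its result must be
+    # reassigned (e.g. `state = state.replace(ets=dummy_past_residuals[:])`);
+    # `check_over_forward` below relies on this when seeding the `ets` buffer.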
+    def check_over_forward(self, time_step=0, **forward_kwargs):
+        kwargs = dict(self.forward_default_kwargs)
+        kwargs.update(forward_kwargs)
+        num_inference_steps = kwargs.pop("num_inference_steps", None)
+        sample, _ = self.dummy_sample
+        residual = 0.1 * sample
+        dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
+
+        for scheduler_class in self.scheduler_classes:
+            scheduler_config = self.get_scheduler_config()
+            scheduler = scheduler_class(**scheduler_config)
+            state = scheduler.create_state()
+            state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
+
+            # copy over dummy past residuals (must be after setting timesteps)
+            state = state.replace(ets=dummy_past_residuals[:])
+
+            with tempfile.TemporaryDirectory() as tmpdirname:
+                scheduler.save_config(tmpdirname)
+                new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
+                new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape)
+                # copy over dummy past residuals (must be after setting timesteps)
+                new_state = new_state.replace(ets=dummy_past_residuals[:])
+
+            output, state = scheduler.step_prk(state, residual, time_step, sample, **kwargs)
+            new_output, new_state = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs)
+
+            assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
+
+            output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs)
+            new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
+
+            assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
+
+    def full_loop(self, **config):
+        scheduler_class = self.scheduler_classes[0]
+        scheduler_config = self.get_scheduler_config(**config)
+        scheduler = scheduler_class(**scheduler_config)
+        state = scheduler.create_state()
+
+        num_inference_steps = 10
+        model = self.dummy_model()
+        sample = self.dummy_sample_deter
+        state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
+
+        for i, t in enumerate(state.prk_timesteps):
+            residual = model(sample, t)
+            sample, state = scheduler.step_prk(state, residual, t, sample)
+
+        for i, t in enumerate(state.plms_timesteps):
+            residual = model(sample, t)
+            sample, state = scheduler.step_plms(state, residual, t, sample)
+
+        return sample
+
+    def test_step_shape(self):
+        kwargs = dict(self.forward_default_kwargs)
+
+        num_inference_steps = kwargs.pop("num_inference_steps", None)
+
+        for scheduler_class in self.scheduler_classes:
+            scheduler_config = self.get_scheduler_config()
+            scheduler = scheduler_class(**scheduler_config)
+            state = scheduler.create_state()
+
+            sample, _ = self.dummy_sample
+            residual = 0.1 * sample
+
+            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
+                state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
+            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
+                kwargs["num_inference_steps"] = num_inference_steps
+
+            # copy over dummy past residuals (must be done after set_timesteps)
+            dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
+            state = state.replace(ets=dummy_past_residuals[:])
+
+            output_0, state = scheduler.step_prk(state, residual, 0, sample, **kwargs)
+            output_1, state = scheduler.step_prk(state, residual, 1, sample, **kwargs)
+
+            self.assertEqual(output_0.shape, sample.shape)
+            self.assertEqual(output_0.shape, output_1.shape)
+
+            output_0, state = scheduler.step_plms(state, residual, 0, sample, **kwargs)
+            output_1, state = scheduler.step_plms(state, residual, 1, sample, **kwargs)
+
+            self.assertEqual(output_0.shape, sample.shape)
+            self.assertEqual(output_0.shape, output_1.shape)
+
+    def test_timesteps(self):
+        for timesteps in [100, 1000]:
+            self.check_over_configs(num_train_timesteps=timesteps)
+
+    def test_steps_offset(self):
+        for steps_offset in [0, 1]:
+            self.check_over_configs(steps_offset=steps_offset)
+
+        scheduler_class = self.scheduler_classes[0]
+        scheduler_config = self.get_scheduler_config(steps_offset=1)
+        scheduler = scheduler_class(**scheduler_config)
+        state = scheduler.create_state()
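+        # PNDM runs a Runge-Kutta (PRK) warmup before switching to PLMS; the PRK
+        # portion repeats timesteps in half-step pairs, which is why entries such
+        # as 851 and 801 appear twice in the expected schedule checked below.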
+        state = scheduler.set_timesteps(state, 10, shape=())
+        assert jnp.equal(
+            state.timesteps,
+            jnp.array([901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]),
+        ).all()
+
+    def test_betas(self):
+        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
+            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
+
+    def test_schedules(self):
+        for schedule in ["linear", "squaredcos_cap_v2"]:
+            self.check_over_configs(beta_schedule=schedule)
+
+    def test_time_indices(self):
+        for t in [1, 5, 10]:
+            self.check_over_forward(time_step=t)
+
+    def test_inference_steps(self):
+        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
+            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
+
+    def test_pow_of_3_inference_steps(self):
+        # an earlier version of set_timesteps() caused an error when indexing alphas with
+        # a number of inference steps that is a power of 3
+        num_inference_steps = 27
+
+        for scheduler_class in self.scheduler_classes:
+            sample, _ = self.dummy_sample
+            residual = 0.1 * sample
+
+            scheduler_config = self.get_scheduler_config()
+            scheduler = scheduler_class(**scheduler_config)
+            state = scheduler.create_state()
+
+            state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
+
+            # before the power-of-3 fix this would error on the first step, so two steps are enough
+            for i, t in enumerate(state.prk_timesteps[:2]):
+                sample, state = scheduler.step_prk(state, residual, t, sample)
+
+    def test_inference_plms_no_past_residuals(self):
+        with self.assertRaises(ValueError):
+            scheduler_class = self.scheduler_classes[0]
+            scheduler_config = self.get_scheduler_config()
+            scheduler = scheduler_class(**scheduler_config)
+            state = scheduler.create_state()
+
+            scheduler.step_plms(state, self.dummy_sample, 1, self.dummy_sample).prev_sample
+
+    def test_full_loop_no_noise(self):
+        sample = self.full_loop()
+        result_sum = jnp.sum(jnp.abs(sample))
+        result_mean = jnp.mean(jnp.abs(sample))
+
+        if jax_device == "tpu":
+            assert abs(result_sum - 198.1275) < 1e-2
+            assert abs(result_mean - 0.2580) < 1e-3
+        else:
+            assert abs(result_sum - 198.1318) < 1e-2
+            assert abs(result_mean - 0.2580) < 1e-3
+
+    def test_full_loop_with_set_alpha_to_one(self):
+        # We specify a different beta_start so that the first alpha is 0.99
+        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
+        result_sum = jnp.sum(jnp.abs(sample))
+        result_mean = jnp.mean(jnp.abs(sample))
+
+        if jax_device == "tpu":
+            assert abs(result_sum - 186.83226) < 1e-2
+            assert abs(result_mean - 0.24327) < 1e-3
+        else:
+            assert abs(result_sum - 186.9466) < 1e-2
+            assert abs(result_mean - 0.24342) < 1e-3
+
+    def test_full_loop_with_no_set_alpha_to_one(self):
+        # We specify a different beta_start so that the first alpha is 0.99
+        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
+        result_sum = jnp.sum(jnp.abs(sample))
+        result_mean = jnp.mean(jnp.abs(sample))
+
+        if jax_device == "tpu":
+            assert abs(result_sum - 186.83226) < 1e-2
+            assert abs(result_mean - 0.24327) < 1e-3
+        else:
+            assert abs(result_sum - 186.9482) < 1e-2
+            assert abs(result_mean - 0.2434) < 1e-3