
Commit a5cff8b

delmalih authored and tolgacangoz committed
Improve docstrings and type hints in scheduling_euler_discrete.py (huggingface#12654)
* refactor: enhance type hints and documentation in EulerDiscreteScheduler

  Updated type hints for function parameters and return types in the EulerDiscreteScheduler class to improve code clarity and maintainability. Enhanced docstrings for several methods to provide clearer descriptions of their functionality and expected arguments. This includes specifying `Literal` types for certain parameters and ensuring consistent return type annotations across the class.

* refactor: enhance type hints and documentation across multiple schedulers

  Updated type hints and improved docstrings in various scheduler classes, including CMStochasticIterativeScheduler, CosineDPMSolverMultistepScheduler, and others. This includes specifying parameter types, return types, and providing clearer descriptions of method functionalities. Notable changes include the addition of default values in the `begin_index` argument and enhanced explanations for noise-addition methods. These improvements aim to enhance code clarity and maintainability across the scheduling module.

* refactor: update docstrings to clarify noise schedule construction

  Revised docstrings across multiple scheduler classes to enhance clarity regarding the construction of noise schedules. Updated references to relevant papers, ensuring accurate citations for the methodologies used. This includes changes in DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, and others, improving documentation consistency and readability.
1 parent a480ecc · commit a5cff8b

29 files changed: +1426 −159 lines changed
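
As a rough illustration of the pattern the commit message describes (explicit parameter and return annotations, `Literal` types for constrained string options, and HF-style docstrings), the sketch below captures the intent; the class and parameter names here are hypothetical, not lines from this diff:

```python
from typing import Literal, Optional, Union

import torch


class ExampleScheduler:
    def set_timesteps(
        self,
        num_inference_steps: Optional[int] = None,
        device: Union[str, torch.device, None] = None,
        timestep_type: Literal["discrete", "continuous"] = "discrete",
    ) -> None:
        """
        Set the timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`, *optional*):
                The number of diffusion steps used when generating samples.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved.
            timestep_type (`"discrete"` or `"continuous"`, defaults to `"discrete"`):
                How timesteps are parameterized for the model.
        """
        ...
```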

src/diffusers/schedulers/scheduling_consistency_models.py
Lines changed: 41 additions & 3 deletions

@@ -121,7 +121,7 @@ def set_begin_index(self, begin_index: int = 0):
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index
@@ -287,7 +287,23 @@ def get_scalings_for_boundary_condition(self, sigma):
        return c_skip, c_out

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
-    def index_for_timestep(self, timestep, schedule_timesteps=None):
+    def index_for_timestep(
+        self, timestep: Union[float, torch.Tensor], schedule_timesteps: Optional[torch.Tensor] = None
+    ) -> int:
+        """
+        Find the index of a given timestep in the timestep schedule.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The timestep value to find in the schedule.
+            schedule_timesteps (`torch.Tensor`, *optional*):
+                The timestep schedule to search in. If `None`, uses `self.timesteps`.
+
+        Returns:
+            `int`:
+                The index of the timestep in the schedule. For the very first step, returns the second index if
+                multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for
+                image-to-image).
+        """
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
@@ -302,7 +318,14 @@ def index_for_timestep(self, timestep, schedule_timesteps=None):
        return indices[pos].item()

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
-    def _init_step_index(self, timestep):
+    def _init_step_index(self, timestep: Union[float, torch.Tensor]) -> None:
+        """
+        Initialize the step index for the scheduler based on the given timestep.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The current timestep to initialize the step index from.
+        """
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
@@ -410,6 +433,21 @@ def add_noise(
        noise: torch.Tensor,
        timesteps: torch.Tensor,
    ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
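
Taken together, the docstrings added above pin down two small behaviors: the duplicate-timestep lookup rule in `index_for_timestep` and sigma-scaled noising in `add_noise`. A minimal self-contained sketch of both, assuming a toy one-sigma-per-timestep schedule (illustrative, not the library code itself):

```python
import torch

# Toy schedule: one sigma per timestep, mirroring scheduler.timesteps / scheduler.sigmas.
timesteps = torch.tensor([80.0, 60.0, 40.0, 20.0])
sigmas = torch.tensor([8.0, 4.0, 2.0, 1.0])


def index_for_timestep(timestep: float, schedule_timesteps: torch.Tensor) -> int:
    indices = (schedule_timesteps == timestep).nonzero()
    # When a timestep appears more than once, take the *second* match so that
    # no sigma is skipped when denoising starts mid-schedule (e.g., image-to-image).
    pos = 1 if len(indices) > 1 else 0
    return indices[pos].item()


sample = torch.zeros(1, 3)
noise = torch.randn(1, 3)
idx = index_for_timestep(40.0, timesteps)
# add_noise for sigma-parameterized schedulers scales the noise by the sigma
# looked up at that index: noisy = sample + sigma * noise.
noisy = sample + sigmas[idx] * noise
```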

src/diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py
Lines changed: 29 additions & 1 deletion

@@ -137,7 +137,7 @@ def set_begin_index(self, begin_index: int = 0):
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index
@@ -266,6 +266,19 @@ def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> t

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
    def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
        # get log sigma
        log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -537,6 +550,21 @@ def add_noise(
        noise: torch.Tensor,
        timesteps: torch.Tensor,
    ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
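
The `_sigma_to_t` docstring above describes interpolation in log-sigma space. A rough NumPy sketch of that idea, assuming an ascending sigma schedule indexed by training timestep (a simplified rendition, not the exact helper):

```python
import numpy as np

# Ascending sigma schedule indexed by (training) timestep, in log space.
log_sigmas = np.log(np.linspace(0.1, 10.0, 1000))


def sigma_to_t(sigma: np.ndarray, log_sigmas: np.ndarray) -> np.ndarray:
    log_sigma = np.log(np.maximum(sigma, 1e-10))
    # Signed log-space distance from the query sigma to every schedule entry.
    dists = log_sigma - log_sigmas[:, np.newaxis]
    # Index of the last entry at or below the query (clipped so high_idx stays valid).
    low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
    high_idx = low_idx + 1
    low, high = log_sigmas[low_idx], log_sigmas[high_idx]
    # Linear interpolation weight between the two bracketing entries.
    w = np.clip((low - log_sigma) / (low - high), 0, 1)
    return (1 - w) * low_idx + w * high_idx


print(sigma_to_t(np.array([0.5, 5.0]), log_sigmas))  # fractional timestep values
```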

src/diffusers/schedulers/scheduling_ddim.py
Lines changed: 3 additions & 2 deletions

@@ -99,10 +99,11 @@ def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:

    Args:
        betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.

    Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas
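
For context, the zero-terminal-SNR rescaling this docstring refers to (from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891)) can be sketched roughly as follows; this paraphrases the algorithm rather than reproducing the file's code:

```python
import torch


def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:
    # Work with sqrt(alphas_cumprod), the signal scale at each timestep.
    alphas_bar_sqrt = torch.cumprod(1.0 - betas, dim=0).sqrt()
    first, last = alphas_bar_sqrt[0].clone(), alphas_bar_sqrt[-1].clone()

    # Shift so the final step carries zero signal (zero terminal SNR), then
    # rescale so the first step keeps its original value.
    alphas_bar_sqrt = (alphas_bar_sqrt - last) * first / (first - last)

    # Convert the rescaled cumulative product back to per-step betas.
    alphas_bar = alphas_bar_sqrt**2
    alphas = torch.cat([alphas_bar[:1], alphas_bar[1:] / alphas_bar[:-1]])
    return 1.0 - alphas


betas = torch.linspace(1e-4, 0.02, 1000)
print(rescale_zero_terminal_snr(betas)[-1])  # -> 1.0, i.e. the terminal SNR is exactly zero
```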

src/diffusers/schedulers/scheduling_ddim_inverse.py
Lines changed: 3 additions & 2 deletions

@@ -98,10 +98,11 @@ def rescale_zero_terminal_snr(betas):

    Args:
        betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.

    Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas

src/diffusers/schedulers/scheduling_ddim_parallel.py
Lines changed: 3 additions & 2 deletions

@@ -100,10 +100,11 @@ def rescale_zero_terminal_snr(betas):

    Args:
        betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.

    Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas

src/diffusers/schedulers/scheduling_ddpm.py
Lines changed: 3 additions & 2 deletions

@@ -97,10 +97,11 @@ def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:

    Args:
        betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.

    Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas

src/diffusers/schedulers/scheduling_ddpm_parallel.py
Lines changed: 3 additions & 2 deletions

@@ -99,10 +99,11 @@ def rescale_zero_terminal_snr(betas):

    Args:
        betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.

    Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas

src/diffusers/schedulers/scheduling_deis_multistep.py
Lines changed: 59 additions & 4 deletions

@@ -230,7 +230,7 @@ def set_begin_index(self, begin_index: int = 0):
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index
@@ -364,6 +364,19 @@ def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
    def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
        # get log sigma
        log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -399,7 +412,20 @@ def _sigma_to_alpha_sigma_t(self, sigma):

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
    def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
-        """Constructs the noise schedule of Karras et al. (2022)."""
+        """
+        Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
+        Models](https://huggingface.co/papers/2206.00364).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following the Karras noise schedule.
+        """

        # Hack to make sure that other schedulers which copy this function don't break
        # TODO: Add this logic to the other schedulers
@@ -425,7 +451,19 @@ def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> to

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
    def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
-        """Constructs an exponential noise schedule."""
+        """
+        Construct an exponential noise schedule.
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following an exponential schedule.
+        """

        # Hack to make sure that other schedulers which copy this function don't break
        # TODO: Add this logic to the other schedulers
@@ -449,7 +487,24 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
    def _convert_to_beta(
        self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
    ) -> torch.Tensor:
-        """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
+        """
+        Construct a beta noise schedule as proposed in [Beta Sampling is All You
+        Need](https://huggingface.co/papers/2407.12173).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+            alpha (`float`, *optional*, defaults to `0.6`):
+                The alpha parameter for the beta distribution.
+            beta (`float`, *optional*, defaults to `0.6`):
+                The beta parameter for the beta distribution.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following a beta distribution schedule.
+        """

        # Hack to make sure that other schedulers which copy this function don't break
        # TODO: Add this logic to the other schedulers
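
The three schedule constructors documented above each have a compact closed form. A standalone sketch of all three, assuming `sigma_min`/`sigma_max` endpoints taken from the input sigmas (as the methods' hacks do) and the library's `rho = 7.0` default — illustrative values, not the methods' exact bodies:

```python
import math

import numpy as np
import scipy.stats

sigma_min, sigma_max = 0.1, 10.0
num_inference_steps = 8

# Karras et al. (2022): interpolate sigma**(1/rho) linearly, then raise back to the rho-th power.
rho = 7.0
ramp = np.linspace(0, 1, num_inference_steps)
min_inv_rho, max_inv_rho = sigma_min ** (1 / rho), sigma_max ** (1 / rho)
karras_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

# Exponential: linear in log-sigma space.
exponential_sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))

# Beta sampling (Lee et al., 2024): place sigmas at quantiles of a Beta(alpha, beta) distribution.
alpha = beta = 0.6
quantiles = scipy.stats.beta.ppf(1 - np.linspace(0, 1, num_inference_steps), alpha, beta)
beta_sigmas = sigma_min + quantiles * (sigma_max - sigma_min)
```

All three run from `sigma_max` down to `sigma_min`, matching the descending order inference schedules use.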
