Skip to content

Commit 7fd77f2

Browse files
committed
Fix ONNX/Olive generation.
1 parent 216340d commit 7fd77f2

File tree

3 files changed

+10
-17
lines changed

3 files changed

+10
-17
lines changed

modules/onnx_impl/pipelines/onnx_stable_diffusion_pipeline.py

+4-4
Original file line number | Diff line number | Diff line change
@@ -62,6 +62,9 @@ def __call__(
6262
if generator is None:
6363
generator = torch.Generator("cpu")
6464

65+
# set timesteps
66+
self.scheduler.set_timesteps(num_inference_steps)
67+
6568
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
6669
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
6770
# corresponds to doing no classifier free guidance.
@@ -84,12 +87,9 @@ def __call__(
8487
width,
8588
prompt_embeds.dtype,
8689
generator,
87-
latents
90+
latents,
8891
)
8992

90-
# set timesteps
91-
self.scheduler.set_timesteps(num_inference_steps)
92-
9393
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
9494
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
9595
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502

modules/onnx_impl/pipelines/onnx_stable_diffusion_upscale_pipeline.py

+3-3
Original file line number | Diff line number | Diff line change
@@ -70,6 +70,9 @@ def __call__(
7070
if generator is None:
7171
generator = torch.Generator("cpu")
7272

73+
self.scheduler.set_timesteps(num_inference_steps)
74+
timesteps = self.scheduler.timesteps
75+
7376
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
7477
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
7578
# corresponds to doing no classifier free guidance.
@@ -97,9 +100,6 @@ def __call__(
97100
generator,
98101
)
99102

100-
self.scheduler.set_timesteps(num_inference_steps)
101-
timesteps = self.scheduler.timesteps
102-
103103
# 5. Add noise to image
104104
noise_level = np.array([noise_level]).astype(np.int64)
105105
noise = randn_tensor(

modules/onnx_impl/pipelines/utils.py

+3-10
Original file line number | Diff line number | Diff line change
@@ -26,21 +26,14 @@ def prepare_latents(
2626
width: int,
2727
dtype: np.dtype,
2828
generator: Union[torch.Generator, List[torch.Generator]],
29-
latents: Union[np.ndarray, None]=None,
30-
num_channels_latents=4,
31-
vae_scale_factor=8,
29+
latents: Union[np.ndarray, None] = None,
30+
num_channels_latents = 4,
31+
vae_scale_factor = 8,
3232
):
3333
shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
34-
if isinstance(generator, list) and len(generator) != batch_size:
35-
raise ValueError(
36-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
37-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
38-
)
3934

4035
if latents is None:
4136
latents = randn_tensor(shape, dtype, generator)
42-
elif latents.shape != shape:
43-
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
4437

4538
# scale the initial noise by the standard deviation required by the scheduler
4639
latents = latents * np.float64(init_noise_sigma)

0 commit comments

Comments (0)