
Commit 9d1f757

update
1 parent 5307ae2 commit 9d1f757

17 files changed: +25 -247 lines changed
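
The files shown below all apply the same change: the body of `upcast_vae` is reduced to a `deprecate(...)` call followed by a plain cast of the VAE to float32, and the old attention-processor-dependent downcasting logic is dropped. A minimal usage sketch of the migration, assuming one of the community pipelines touched here (the checkpoint id is illustrative and not part of this commit):

import torch
from diffusers import DiffusionPipeline

# Load a community pipeline affected by this commit; any SDXL checkpoint works the same way.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="lpw_stable_diffusion_xl",
    torch_dtype=torch.float16,
)

# Deprecated path: still upcasts the VAE, but now emits a deprecation warning
# and is slated for removal in 1.0.0.
pipe.upcast_vae()

# Recommended replacement going forward:
pipe.vae.to(torch.float32)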

examples/community/lpw_stable_diffusion_xl.py

Lines changed: 1 addition & 11 deletions
@@ -1328,18 +1328,8 @@ def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, d
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (AttnProcessor2_0, XFormersAttnProcessor),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
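
The `deprecate` helper used in the new body comes from `diffusers.utils`; it emits a warning (a FutureWarning in current diffusers releases) with the given message until the stated removal version, and the method still casts the VAE to float32, so existing callers keep working. Callers who want a quiet transition can filter the warning themselves; a minimal sketch, reusing the `pipe` object from the snippet above:

import warnings

with warnings.catch_warnings():
    # Assumes deprecate() surfaces as a FutureWarning, its default behaviour.
    warnings.simplefilter("ignore", FutureWarning)
    pipe.upcast_vae()  # still equivalent to pipe.vae.to(torch.float32)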

examples/community/mixture_tiling_sdxl.py

Lines changed: 2 additions & 15 deletions
@@ -41,6 +41,7 @@
 from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
 from diffusers.utils import (
     USE_PEFT_BACKEND,
+    deprecate,
     is_invisible_watermark_available,
     is_torch_xla_available,
     logging,
@@ -710,22 +711,8 @@ def _gaussian_weights(self, tile_width, tile_height, nbatches, device, dtype):
         return torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1))
 
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(

examples/community/mod_controlnet_tile_sr_sdxl.py

Lines changed: 2 additions & 15 deletions
@@ -49,6 +49,7 @@
 from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
 from diffusers.utils import (
     USE_PEFT_BACKEND,
+    deprecate,
     logging,
     replace_example_docstring,
     scale_lora_layers,
@@ -1220,23 +1221,9 @@ def prepare_tiles(
 
         return tile_weights, tile_row_overlaps, tile_col_overlaps
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def guidance_scale(self):

examples/community/pipeline_controlnet_xl_kolors.py

Lines changed: 1 addition & 14 deletions
@@ -760,21 +760,8 @@ def _get_add_time_ids(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def guidance_scale(self):

examples/community/pipeline_controlnet_xl_kolors_img2img.py

Lines changed: 1 addition & 14 deletions
@@ -930,21 +930,8 @@ def _get_add_time_ids(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def guidance_scale(self):

examples/community/pipeline_controlnet_xl_kolors_inpaint.py

Lines changed: 1 addition & 14 deletions
@@ -1006,21 +1006,8 @@ def _get_add_time_ids(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def denoising_end(self):

examples/community/pipeline_demofusion_sdxl.py

Lines changed: 2 additions & 12 deletions
@@ -21,6 +21,7 @@
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
+    deprecate,
     is_accelerate_available,
     is_accelerate_version,
     is_invisible_watermark_available,
@@ -612,20 +613,9 @@ def tiled_decode(self, latents, current_height, current_width):
 
         return image
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (AttnProcessor2_0, XFormersAttnProcessor),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)

examples/community/pipeline_faithdiff_stable_diffusion_xl.py

Lines changed: 1 addition & 17 deletions
@@ -1637,24 +1637,8 @@ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype
         return latents
 
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(

examples/community/pipeline_kolors_differential_img2img.py

Lines changed: 2 additions & 17 deletions
@@ -28,7 +28,7 @@
 from diffusers.pipelines.kolors.tokenizer import ChatGLMTokenizer
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
+from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring
 from diffusers.utils.torch_utils import randn_tensor
 
 
@@ -709,24 +709,9 @@ def _get_add_time_ids(
         add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
         return add_time_ids
 
-    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(

examples/community/pipeline_kolors_inpainting.py

Lines changed: 1 addition & 16 deletions
@@ -1008,23 +1008,8 @@ def _get_add_time_ids(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(
