From db969cc16dc2e487fc2e02bce5d3513fae872853 Mon Sep 17 00:00:00 2001
From: Sai-Suraj-27
Date: Fri, 19 Apr 2024 11:04:09 +0530
Subject: [PATCH] fix: Fixed `type annotations` for compatibility with python 3.8 (#7648)

* Fixed type annotations for compatibility with python 3.8

* Add required imports.

---
 .../community/pipeline_sdxl_style_aligned.py  |  2 +-
 ...diffusion_xl_controlnet_adapter_inpaint.py | 22 +++++++++++-----------
 .../pipeline_stable_video_diffusion.py        |  2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/examples/community/pipeline_sdxl_style_aligned.py b/examples/community/pipeline_sdxl_style_aligned.py
index e623913e08ef..88edeeb7ee4c 100644
--- a/examples/community/pipeline_sdxl_style_aligned.py
+++ b/examples/community/pipeline_sdxl_style_aligned.py
@@ -151,7 +151,7 @@ def concat_first(feat: torch.Tensor, dim: int = 2, scale: float = 1.0) -> torch.
     return torch.cat((feat, feat_style), dim=dim)
 
 
-def calc_mean_std(feat: torch.Tensor, eps: float = 1e-5) -> tuple[torch.Tensor, torch.Tensor]:
+def calc_mean_std(feat: torch.Tensor, eps: float = 1e-5) -> Tuple[torch.Tensor, torch.Tensor]:
     feat_std = (feat.var(dim=-2, keepdims=True) + eps).sqrt()
     feat_mean = feat.mean(dim=-2, keepdims=True)
     return feat_mean, feat_std
diff --git a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
index de7865d654b0..a85f1c3da6fb 100644
--- a/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
+++ b/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
@@ -17,7 +17,7 @@
 
 import inspect
 from collections.abc import Callable
-from typing import Any, List, Optional, Union
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import PIL
@@ -1211,8 +1211,8 @@ def prepare_control_image(
     @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
         self,
-        prompt: Optional[Union[str, list[str]]] = None,
-        prompt_2: Optional[Union[str, list[str]]] = None,
+        prompt: Optional[Union[str, List[str]]] = None,
+        prompt_2: Optional[Union[str, List[str]]] = None,
         image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
         mask_image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
         adapter_image: PipelineImageInput = None,
@@ -1224,11 +1224,11 @@ def __call__(
         denoising_start: Optional[float] = None,
         denoising_end: Optional[float] = None,
         guidance_scale: float = 5.0,
-        negative_prompt: Optional[Union[str, list[str]]] = None,
-        negative_prompt_2: Optional[Union[str, list[str]]] = None,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        negative_prompt_2: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
-        generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         latents: Optional[Union[torch.FloatTensor]] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
@@ -1238,12 +1238,12 @@ def __call__(
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         callback_steps: int = 1,
-        cross_attention_kwargs: Optional[dict[str, Any]] = None,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
-        original_size: Optional[tuple[int, int]] = None,
-        crops_coords_top_left: Optional[tuple[int, int]] = (0, 0),
-        target_size: Optional[tuple[int, int]] = None,
-        adapter_conditioning_scale: Optional[Union[float, list[float]]] = 1.0,
+        original_size: Optional[Tuple[int, int]] = None,
+        crops_coords_top_left: Optional[Tuple[int, int]] = (0, 0),
+        target_size: Optional[Tuple[int, int]] = None,
+        adapter_conditioning_scale: Optional[Union[float, List[float]]] = 1.0,
         cond_tau: float = 1.0,
         aesthetic_score: float = 6.0,
         negative_aesthetic_score: float = 2.5,
diff --git a/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py b/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py
index ae4e12642242..070183b92409 100644
--- a/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py
+++ b/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py
@@ -637,7 +637,7 @@ def _filter2d(input, kernel):
 
     height, width = tmp_kernel.shape[-2:]
 
-    padding_shape: list[int] = _compute_padding([height, width])
+    padding_shape: List[int] = _compute_padding([height, width])
     input = torch.nn.functional.pad(input, padding_shape, mode="reflect")
 
     # kernel and input tensor reshape to align element-wise or batch-wise params
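
Note on the underlying incompatibility: built-in generics such as `tuple[int, int]` only became subscriptable in Python 3.9 (PEP 585). On Python 3.8, a function's parameter and return annotations are evaluated when the `def` statement runs, so merely importing a module whose signatures use `tuple[...]` raises a TypeError, whereas the `typing.Tuple`/`List`/`Dict` aliases work on 3.8 and later. A minimal sketch (the `mean_std_sketch` name is illustrative, not taken from the diff):

    from typing import Tuple

    def mean_std_sketch(x) -> Tuple[float, float]:  # fine on Python 3.8+
        return (0.0, 0.0)

    # def mean_std_sketch(x) -> tuple[float, float]:  # on Python 3.8 this fails at
    #     ...                                         # definition time with:
    #                                                 # TypeError: 'type' object is not subscriptable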