
Commit cb65a0d

MNT Use Python 3.9 as RUFF target version (#2483)

Signed-off-by: cyy <[email protected]>
1 parent: 8feea90

37 files changed: +130 -127 lines
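This commit raises Ruff's target Python version to 3.9 (presumably by setting target-version = "py39" in the repo's Ruff configuration), so the linter's pyupgrade rules (UP006 for the annotations and UP035 for the imports are the likely drivers) rewrite typing.Dict, typing.List, and typing.Tuple to the builtin generics dict, list, and tuple that PEP 585 made subscriptable in 3.9, and move Sequence to collections.abc. Optional and Union stay imported from typing, since the X | Y union syntax (PEP 604) requires Python 3.10. A minimal before/after sketch — the lookup function is hypothetical, not taken from the diff:

# Before: typing aliases, required when targeting Python < 3.9
from typing import Dict, List, Optional

def lookup(table: Dict[str, List[int]], key: str) -> Optional[List[int]]:
    return table.get(key)

# After: PEP 585 builtin generics, valid from Python 3.9 on;
# Optional is kept because `X | None` (PEP 604) needs 3.10
from typing import Optional

def lookup(table: dict[str, list[int]], key: str) -> Optional[list[int]]:
    return table.get(key)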

examples/boft_controlnet/utils/light_controlnet.py (+9 -9)

@@ -14,7 +14,7 @@
 
 
 from dataclasses import dataclass
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Optional, Union
 
 import torch
 from diffusers.configuration_utils import ConfigMixin, register_to_config
@@ -34,7 +34,7 @@
 
 @dataclass
 class ControlNetOutput(BaseOutput):
-    down_block_res_samples: Tuple[torch.Tensor]
+    down_block_res_samples: tuple[torch.Tensor]
     mid_block_res_sample: torch.Tensor
 
 
@@ -52,7 +52,7 @@ def __init__(
         self,
         conditioning_embedding_channels: int,
         conditioning_channels: int = 3,
-        block_out_channels: Tuple[int] = (16, 32, 96, 256),
+        block_out_channels: tuple[int] = (16, 32, 96, 256),
     ):
         super().__init__()
 
@@ -92,7 +92,7 @@ def __init__(
         in_channels: int = 4,
         out_channels: int = 320,
         controlnet_conditioning_channel_order: str = "rgb",
-        conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
+        conditioning_embedding_out_channels: Optional[tuple[int]] = (16, 32, 96, 256),
     ):
         super().__init__()
 
@@ -104,7 +104,7 @@
 
     @property
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
+    def attn_processors(self) -> dict[str, AttentionProcessor]:
         r"""
         Returns:
             `dict` of attention processors: A dictionary containing all attention processors used in the model with
@@ -113,7 +113,7 @@ def attn_processors(self) -> Dict[str, AttentionProcessor]:
         # set recursively
         processors = {}
 
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: dict[str, AttentionProcessor]):
             if hasattr(module, "set_processor"):
                 processors[f"{name}.processor"] = module.processor
 
@@ -128,7 +128,7 @@ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors:
         return processors
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(self, processor: Union[AttentionProcessor, dict[str, AttentionProcessor]]):
         r"""
         Parameters:
             `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):
@@ -220,7 +220,7 @@ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
        # Recursively walk through all the children.
        # Any children which exposes the set_attention_slice method
        # gets the message
-        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
+        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: list[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())
 
@@ -238,7 +238,7 @@ def _set_gradient_checkpointing(self, module, value=False):
    def forward(
        self,
        controlnet_cond: torch.FloatTensor,
-    ) -> Union[ControlNetOutput, Tuple]:
+    ) -> Union[ControlNetOutput, tuple]:
        # check channel order
        channel_order = self.config.controlnet_conditioning_channel_order
 
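One subtlety the mechanical rewrite preserves: to a static type checker, a bare tuple[torch.Tensor] (exactly like the Tuple[torch.Tensor] it replaces) means a tuple of exactly one tensor, while a variable-length homogeneous tuple is written tuple[torch.Tensor, ...]. The commit only lowercases the alias and deliberately leaves the original arity untouched. A small runnable illustration (variable names hypothetical):

import torch

one: tuple[torch.Tensor] = (torch.zeros(2),)  # exactly one element, per the annotation
many: tuple[torch.Tensor, ...] = (torch.zeros(2), torch.ones(3))  # any length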
examples/boft_controlnet/utils/pipeline_controlnet.py (+11 -11)

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from dataclasses import dataclass
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Callable, Optional, Union
 
 import numpy as np
 import PIL.Image
@@ -42,8 +42,8 @@ class LightControlNetPipelineOutput(BaseOutput):
        (nsfw) content, or `None` if safety checking could not be performed.
    """
 
-    images: Union[List[PIL.Image.Image], np.ndarray]
-    nsfw_content_detected: Optional[List[bool]]
+    images: Union[list[PIL.Image.Image], np.ndarray]
+    nsfw_content_detected: Optional[list[bool]]
 
 
 class LightControlNetPipeline(StableDiffusionControlNetPipeline):
@@ -164,32 +164,32 @@ def check_inputs(
    @torch.no_grad()
    def __call__(
        self,
-        prompt: Union[str, List[str]] = None,
+        prompt: Union[str, list[str]] = None,
        image: Union[
            torch.FloatTensor,
            PIL.Image.Image,
            np.ndarray,
-            List[torch.FloatTensor],
-            List[PIL.Image.Image],
-            List[np.ndarray],
+            list[torch.FloatTensor],
+            list[PIL.Image.Image],
+            list[np.ndarray],
        ] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
-        negative_prompt: Optional[Union[str, List[str]]] = None,
+        negative_prompt: Optional[Union[str, list[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
-        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-        controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+        cross_attention_kwargs: Optional[dict[str, Any]] = None,
+        controlnet_conditioning_scale: Union[float, list[float]] = 1.0,
        guess_mode: bool = False,
    ):
        r"""
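As in the signature above, every List and Dict becomes lowercase while Optional and Union remain imported from typing: builtin generics are subscriptable at runtime on 3.9, but the X | Y union syntax is not evaluable until 3.10. A sketch of that boundary (the as_list function is hypothetical):

from typing import Optional, Union

# Fine on Python 3.9: PEP 585 builtin generics inside typing.Union/Optional
def as_list(prompt: Union[str, list[str]], limit: Optional[int] = None) -> list[str]:
    items = [prompt] if isinstance(prompt, str) else prompt
    return items[:limit]

# By contrast, evaluating `str | None` in an annotation raises TypeError on
# 3.9 unless the module opts into lazy annotations with
# `from __future__ import annotations`.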

examples/boft_controlnet/utils/unet_2d_condition.py (+5 -5)

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from dataclasses import dataclass
-from typing import Any, Dict, Optional, Tuple, Union
+from typing import Any, Optional, Union
 
 import torch
 from diffusers.models import UNet2DConditionModel
@@ -44,13 +44,13 @@ def forward(
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
-        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
-        down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
+        cross_attention_kwargs: Optional[dict[str, Any]] = None,
+        added_cond_kwargs: Optional[dict[str, torch.Tensor]] = None,
+        down_block_additional_residuals: Optional[tuple[torch.Tensor]] = None,
        mid_block_additional_residual: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        return_dict: bool = True,
-    ) -> Union[UNet2DConditionOutput, Tuple]:
+    ) -> Union[UNet2DConditionOutput, tuple]:
        r"""
        Args:
            sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
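The unparameterized forms convert the same way: bare Tuple and Dict become bare tuple and dict, as in the Union[UNet2DConditionOutput, tuple] return annotation above, with identical meaning to a type checker (a tuple of any shape and element types).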

examples/corda_finetuning/corda_finetuning.py (+7 -6)

@@ -14,8 +14,9 @@
 
 import copy
 import os
+from collections.abc import Sequence
 from dataclasses import dataclass, field
-from typing import Dict, List, Optional, Sequence
+from typing import Optional
 
 import torch
 import transformers
@@ -65,7 +66,7 @@ class TrainingArguments(transformers.TrainingArguments):
    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    data_path: str = field(default=None, metadata={"help": "Path to the training data."})
    dataset_split: str = field(default="train[:100000]", metadata={"help": "(`['train', 'test', 'eval']`):"})
-    dataset_field: List[str] = field(default=None, metadata={"help": "Fields of dataset input and output."})
+    dataset_field: list[str] = field(default=None, metadata={"help": "Fields of dataset input and output."})
    dataloader_num_proc: int = field(default=16, metadata={"help": "Number of processes to load dataset"})
    dataloader_batch_size: int = field(
        default=3000,
@@ -95,7 +96,7 @@ def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: st
 
 
 def smart_tokenizer_and_embedding_resize(
-    special_tokens_dict: Dict,
+    special_tokens_dict: dict,
    tokenizer: transformers.PreTrainedTokenizer,
    model: transformers.PreTrainedModel,
 ):
@@ -117,7 +118,7 @@ def smart_tokenizer_and_embedding_resize(
    output_embeddings[-num_new_tokens:] = output_embeddings_avg
 
 
-def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
+def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> dict:
    """Tokenize a list of strings."""
    tokenized_list = [
        tokenizer(
@@ -145,7 +146,7 @@ def preprocess(
    sources: Sequence[str],
    targets: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
-) -> Dict:
+) -> dict:
    """Preprocess the data by tokenizing."""
    examples = [s + t for s, t in zip(sources, targets)]
    examples_tokenized, sources_tokenized = (_tokenize_fn(strings, tokenizer) for strings in (examples, sources))
@@ -165,7 +166,7 @@ class DataCollatorForSupervisedDataset:
 
    tokenizer: transformers.PreTrainedTokenizer
 
-    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
+    def __call__(self, instances: Sequence[dict]) -> dict[str, torch.Tensor]:
        input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
        input_ids = [torch.tensor(x) for x in input_ids]
        input_ids = torch.nn.utils.rnn.pad_sequence(
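This file also shows the import-side counterpart of the change: PEP 585 deprecates typing.Sequence in favor of collections.abc.Sequence, which is itself subscriptable from Python 3.9 on. A minimal runnable sketch (the total function is hypothetical):

from collections.abc import Sequence

def total(xs: Sequence[int]) -> int:
    # Works with any sequence type: list, tuple, range, ...
    return sum(xs)

print(total([1, 2, 3]))  # 6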

examples/int8_training/peft_adalora_whisper_large_training.py (+2 -2)

@@ -8,7 +8,7 @@
 from datetime import datetime
 from pathlib import Path
 from random import randint
-from typing import Any, Dict, List, Union
+from typing import Any, Union
 
 # datasets imports
 import datasets
@@ -337,7 +337,7 @@ def load_model_hook(models, input_dir):
 class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any
 
-    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+    def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need different padding methods
        # first treat the audio inputs by simply returning torch tensors
        input_features = [{"input_features": feature["input_features"]} for feature in features]

examples/lora_dreambooth/convert_kohya_ss_sd_lora_to_peft.py (+5 -5)

@@ -2,7 +2,7 @@
 import os
 from collections import Counter
 from dataclasses import dataclass
-from typing import Dict, Optional
+from typing import Optional
 
 import safetensors
 import torch
@@ -30,13 +30,13 @@ class LoRAInfo:
    lora_A: Optional[torch.Tensor] = None
    lora_B: Optional[torch.Tensor] = None
 
-    def peft_state_dict(self) -> Dict[str, torch.Tensor]:
+    def peft_state_dict(self) -> dict[str, torch.Tensor]:
        if self.lora_A is None or self.lora_B is None:
            raise ValueError("At least one of lora_A or lora_B is None, they must both be provided")
        return {f"{peft_key}.lora_A.weight": self.lora_A, f"{peft_key}.lora_B.weight": self.lora_A}
 
 
-def construct_peft_loraconfig(info: Dict[str, LoRAInfo]) -> LoraConfig:
+def construct_peft_loraconfig(info: dict[str, LoRAInfo]) -> LoraConfig:
    """Constructs LoraConfig from data extracted from kohya checkpoint
 
    Args:
@@ -75,7 +75,7 @@ def construct_peft_loraconfig(info: Dict[str, LoRAInfo]) -> LoraConfig:
    return config
 
 
-def combine_peft_state_dict(info: Dict[str, LoRAInfo]) -> Dict[str, torch.Tensor]:
+def combine_peft_state_dict(info: dict[str, LoRAInfo]) -> dict[str, torch.Tensor]:
    result = {}
    for key_name, key_info in info.items():
        result[f"base_model.model.{key_name}.lora_A.weight"] = key_info.lora_A
@@ -115,7 +115,7 @@ def combine_peft_state_dict(info: Dict[str, LoRAInfo]) -> Dict[str, torch.Tensor
    )
 
    # Store conversion info (model_type -> peft_key -> LoRAInfo)
-    lora_info: Dict[str, Dict[str, LoRAInfo]] = {
+    lora_info: dict[str, dict[str, LoRAInfo]] = {
        "text_encoder": {},
        "unet": {},
    }
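Unrelated to the typing change, note a pre-existing oddity visible in the context lines above: peft_state_dict maps both the .lora_A.weight and .lora_B.weight keys to self.lora_A (and interpolates a peft_key name not defined in that scope); the second value was presumably meant to be self.lora_B. This commit leaves that line untouched.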

examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py (+1 -2)

@@ -1,6 +1,5 @@
 import argparse
 import os
-from typing import Dict
 
 import torch
 from diffusers import UNet2DConditionModel
@@ -19,7 +18,7 @@
 
 def get_module_kohya_state_dict(
    module: PeftModel, prefix: str, dtype: torch.dtype, adapter_name: str = LORA_ADAPTER_NAME
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
    kohya_ss_state_dict = {}
    for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items():
        kohya_key = peft_key.replace("base_model.model", prefix)

examples/olora_finetuning/olora_finetuning.py (+2 -2)

@@ -14,7 +14,7 @@
 
 
 import os
-from typing import List, Optional
+from typing import Optional
 
 import torch
 import transformers
@@ -43,7 +43,7 @@ def train(
    lora_r: int = 32,
    lora_alpha: int = 16,
    lora_dropout: float = 0.05,
-    lora_target_modules: List[str] = None,
+    lora_target_modules: list[str] = None,
    torch_dtype: str = "float16",
    init_lora_weights="olora",
    seed: Optional[int] = None,
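A side note on the signature above: lora_target_modules: list[str] = None keeps the pre-existing mismatch between annotation and default; a strict type checker would expect lora_target_modules: Optional[list[str]] = None. The commit only lowercases List and does not tighten the annotation.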
