
Commit bbb1128

MNT Update ruff to v0.9.2 (#2343)
We use ruff for linting. The version is pinned because otherwise formatting changes would creep into random PRs. Thus far, the pinned version was ~0.6.1, but that is already quite old by now, so we are moving to ~v0.9.2. The ruff changes themselves are all about:

1. Different line-breaking logic for asserts with messages.
2. More aggressive string normalization.

Making these changes is always a bit annoying since existing PRs might need to be updated, but there is never a really good time to do it.
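To illustrate, here is a minimal before/after sketch of both formatter changes (the `predictions`/`labels` names are made up for illustration; the real instances are in the diffs below):

# Hypothetical sketch of the two ruff v0.9 formatter changes; `predictions`
# and `labels` are illustrative names, not code from this repository.
predictions = ["a", "b"]
labels = ["a", "b"]

# Before (ruff ~0.6), a long assert was broken inside the condition:
#     assert len(predictions) == len(
#         labels
#     ), f"{len(predictions)} != {len(labels)}"
# After (ruff ~0.9), the condition stays on one line and the message is parenthesized:
assert len(predictions) == len(labels), (
    f"{len(predictions)} != {len(labels)}"
)

# String normalization now also reaches inside f-strings: outer quotes become
# double quotes and nested quotes become single quotes, e.g.
#     f'{line["duration"]:.4f}'  ->  f"{line['duration']:.4f}"
# and, as the diffs below show, interpolated expressions gain spaces around
# operators, e.g. f"{step+1}" -> f"{step + 1}".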
1 parent 6e30991 commit bbb1128

Showing 24 changed files with 43 additions and 50 deletions.

.pre-commit-config.yaml (+1 -1)

@@ -1,6 +1,6 @@
 repos:
 - repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.6.1
+  rev: v0.9.2
   hooks:
   - id: ruff
     args:

examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py (+3 -3)

@@ -297,9 +297,9 @@ def test_preprocess_function(examples):
 
     correct = 0
     total = 0
-    assert len(eval_preds) == len(
-        dataset["train"][label_column]
-    ), f"{len(eval_preds)} != {len(dataset['train'][label_column])}"
+    assert len(eval_preds) == len(dataset["train"][label_column]), (
+        f"{len(eval_preds)} != {len(dataset['train'][label_column])}"
+    )
     for pred, true in zip(eval_preds, dataset["train"][label_column]):
         if pred.strip() == true.strip():
             correct += 1

examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py (+3 -3)

@@ -247,9 +247,9 @@ def collate_fn(examples):
 
     correct = 0
    total = 0
-    assert len(eval_preds) == len(
-        dataset["train"][label_column]
-    ), f"{len(eval_preds)} != {len(dataset['train'][label_column])}"
+    assert len(eval_preds) == len(dataset["train"][label_column]), (
+        f"{len(eval_preds)} != {len(dataset['train'][label_column])}"
+    )
     for pred, true in zip(eval_preds, dataset["train"][label_column]):
         if pred.strip() == true.strip():
             correct += 1

examples/corda_finetuning/datautils.py (+2 -2)

@@ -68,7 +68,7 @@ def tokenization(example):
         return tokenizer(example["text"], truncation=True, max_length=max_length)
 
     if percent != 100:
-        split = f"train[:{int(850000*percent/100)}]"
+        split = f"train[:{int(850000 * percent / 100)}]"
     else:
         split = "train"
     dataset = load_dataset("togethercomputer/RedPajama-Data-1T-Sample", split=split)
@@ -105,7 +105,7 @@ def get_qat_dataset(name, tokenizer, data_percent):
 
 def get_calib_data(name, tokenizer, model_id, nsamples, seqlen=2048, seed=3):
     print(f" get_data_from: {name}, nsamples={nsamples}, seqlen={seqlen}, {seed}")
-    cache_file = f"cache/{name}_{model_id.replace('/','_')}_{nsamples}_{seqlen}_{seed}.pt"
+    cache_file = f"cache/{name}_{model_id.replace('/', '_')}_{nsamples}_{seqlen}_{seed}.pt"
     traindataset = []
     if not os.path.exists("cache"):
         os.makedirs("cache")

examples/feature_extraction/peft_lora_embedding_semantic_search.py (+2 -2)

@@ -440,13 +440,13 @@ def preprocess_function(examples):
             completed_steps += 1
 
             if (step + 1) % 100 == 0:
-                logger.info(f"Step: {step+1}, Loss: {total_loss/(step+1)}")
+                logger.info(f"Step: {step + 1}, Loss: {total_loss / (step + 1)}")
                 if args.with_tracking:
                     accelerator.log({"train/loss": total_loss / (step + 1)}, step=completed_steps)
 
             if isinstance(checkpointing_steps, int):
                 if completed_steps % checkpointing_steps == 0:
-                    output_dir = f"step_{completed_steps }"
+                    output_dir = f"step_{completed_steps}"
                     if args.output_dir is not None:
                         output_dir = os.path.join(args.output_dir, output_dir)
                     accelerator.save_state(output_dir)

examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py (+1 -1)

@@ -38,7 +38,7 @@
 
 
 free_in_GB = int(torch.cuda.mem_get_info()[0] / 1024**3)
-max_memory = f"{free_in_GB-2}GB"
+max_memory = f"{free_in_GB - 2}GB"
 
 n_gpus = torch.cuda.device_count()
 max_memory = {i: max_memory for i in range(n_gpus)}

examples/int8_training/peft_adalora_whisper_large_training.py (+1 -1)

@@ -494,7 +494,7 @@ def main():
     raw_datasets = raw_datasets.cast_column("audio", Audio(sampling_rate=16000))
 
     logger.info("Dataset loaded: %s", raw_datasets)
-    logger.info(f'{raw_datasets["train"][0]}')
+    logger.info(f"{raw_datasets['train'][0]}")
 
     vectorized_datasets = raw_datasets.map(
         prepare_dataset,

examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py (+1 -1)

@@ -30,7 +30,7 @@ def get_module_kohya_state_dict(
 
         # Set alpha parameter
         if "lora_down" in kohya_key:
-            alpha_key = f'{kohya_key.split(".")[0]}.alpha'
+            alpha_key = f"{kohya_key.split('.')[0]}.alpha"
             kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype)
 
     return kohya_ss_state_dict

scripts/log_reports.py (+1 -1)

@@ -37,7 +37,7 @@ def main(slack_channel_name=None):
             if line.get("nodeid", "") != "":
                 test = line["nodeid"]
                 if line.get("duration", None) is not None:
-                    duration = f'{line["duration"]:.4f}'
+                    duration = f"{line['duration']:.4f}"
                 if line.get("outcome", "") == "failed":
                     section_num_failed += 1
                     failed.append([test, duration, log.name.split("_")[0]])

setup.py (+1 -1)

@@ -21,7 +21,7 @@
 extras["quality"] = [
     "black",  # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
     "hf-doc-builder",
-    "ruff~=0.6.1",
+    "ruff~=0.9.2",
 ]
 extras["docs_specific"] = [
     "black",  # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
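As an aside on the pin itself: `~=` is a PEP 440 compatible-release specifier, so `ruff~=0.9.2` admits any 0.9.x release at or above 0.9.2 but excludes 0.10, which keeps formatter behavior stable between deliberate bumps like this one. A quick check (a sketch assuming the `packaging` library is available):

# Sketch: what the compatible-release pin "ruff~=0.9.2" admits under PEP 440.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.9.2")
print("0.9.2" in spec)   # True: the lower bound is inclusive
print("0.9.7" in spec)   # True: later 0.9.x patch releases are allowed
print("0.10.0" in spec)  # False: minor version bumps are excluded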

src/peft/auto.py (+1 -1)

@@ -97,7 +97,7 @@ def from_pretrained(
         expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type]
         if cls._target_peft_class.__name__ != expected_target_class.__name__:
             raise ValueError(
-                f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }"
+                f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__}"
                 " make sure that you are loading the correct model for your task type."
             )
         elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None:

src/peft/tuners/_buffer_dict.py (+1 -2)

@@ -136,8 +136,7 @@ def update(self, buffers):
             for j, p in enumerate(buffers):
                 if not isinstance(p, collections.abc.Iterable):
                     raise TypeError(
-                        "BufferDict update sequence element "
-                        "#" + str(j) + " should be Iterable; is" + type(p).__name__
+                        "BufferDict update sequence element #" + str(j) + " should be Iterable; is" + type(p).__name__
                     )
                 if not len(p) == 2:
                     raise ValueError(

src/peft/tuners/adaption_prompt/model.py (+1 -2)

@@ -71,8 +71,7 @@ def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:
                 parents.append(par)
         if len(parents) < config.adapter_layers:
             raise ValueError(
-                f"Config specifies more adapter layers '{config.adapter_layers}'"
-                f" than the model has '{len(parents)}'."
+                f"Config specifies more adapter layers '{config.adapter_layers}' than the model has '{len(parents)}'."
             )
         # Note that if the target modules are not in Sequential, ModuleList, or
         # some other PyTorch ordered container, the behavior is undefined as we

src/peft/tuners/boft/layer.py (+10 -10)

@@ -276,7 +276,7 @@ def update_layer(
         boft_n_butterfly_factor = boft_n_butterfly_factor - 1
         if boft_n_butterfly_factor < 0:
             raise ValueError(
-                f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor+1} to be a positive integer number."
+                f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number."
             )
 
         # Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0.
@@ -295,11 +295,11 @@ def update_layer(
         if boft_n_butterfly_factor != 0:
             if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
                 raise ValueError(
-                    f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_num ({boft_block_num})!"
+                    f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!"
                 )
             if boft_block_num % (2**boft_n_butterfly_factor) != 0:
                 raise ValueError(
-                    f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor+1})!"
+                    f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!"
                 )
 
         boft_block_size = int(self.in_features // boft_block_num)
@@ -313,11 +313,11 @@ def update_layer(
         if boft_n_butterfly_factor != 0:
             if self.in_features < (boft_block_size * (2**boft_n_butterfly_factor)):
                 raise ValueError(
-                    f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
+                    f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
                 )
             if self.in_features % (boft_block_size * (2**boft_n_butterfly_factor)) != 0:
                 raise ValueError(
-                    f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
+                    f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
                 )
 
         boft_block_num = int(self.in_features // boft_block_size)
@@ -694,7 +694,7 @@ def update_layer(
         boft_n_butterfly_factor = boft_n_butterfly_factor - 1
         if boft_n_butterfly_factor < 0:
             raise ValueError(
-                f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor+1} to be a positive integer number."
+                f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number."
             )
 
         # Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0.
@@ -718,11 +718,11 @@ def update_layer(
         if boft_n_butterfly_factor != 0:
             if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
                 raise ValueError(
-                    f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_num ({boft_block_num})!"
+                    f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!"
                 )
             if boft_block_num % (2**boft_n_butterfly_factor) != 0:
                 raise ValueError(
-                    f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor+1})!"
+                    f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!"
                 )
 
         boft_block_size = int(conv_filter_dim // boft_block_num)
@@ -736,11 +736,11 @@ def update_layer(
         if boft_n_butterfly_factor != 0:
             if conv_filter_dim < (boft_block_size * (2**boft_n_butterfly_factor)):
                 raise ValueError(
-                    f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
+                    f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
                 )
             if conv_filter_dim % (boft_block_size * (2**boft_n_butterfly_factor)) != 0:
                 raise ValueError(
-                    f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
+                    f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
                 )
 
         boft_block_num = int(conv_filter_dim // boft_block_size)

src/peft/tuners/bone/model.py (+1 -1)

@@ -197,7 +197,7 @@ def _create_new_module(bone_config, adapter_name, target, **kwargs):
             new_module = BoneLinear(target, adapter_name, **kwargs)
         else:
             raise ValueError(
-                f"Target module {target} is not supported. " "Currently, only `torch.nn.Linear` is supported."
+                f"Target module {target} is not supported. Currently, only `torch.nn.Linear` is supported."
            )
 
        return new_module
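The merge above works because Python implicitly concatenates adjacent string literals at compile time; ruff v0.9 now joins such fragments into one literal when the result fits on a line. A minimal sketch (illustrative strings only):

# Sketch: adjacent string literals are concatenated at compile time, so the
# split and merged forms below are the exact same string value.
split_form = "Target module X is not supported. " "Currently, only `torch.nn.Linear` is supported."
merged_form = "Target module X is not supported. Currently, only `torch.nn.Linear` is supported."
assert split_form == merged_form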

src/peft/tuners/fourierft/model.py (+1 -2)

@@ -190,8 +190,7 @@ def _create_new_module(fourierft_config, adapter_name, target, **kwargs):
             kwargs["is_target_conv_1d_layer"] = True
             if not kwargs["fan_in_fan_out"]:
                 warnings.warn(
-                    "fan_in_fan_out is set to False but the target module is `Conv1D`. "
-                    "Setting fan_in_fan_out to True."
+                    "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
                 )
                 kwargs["fan_in_fan_out"] = fourierft_config.fan_in_fan_out = True
         else:

src/peft/tuners/ia3/model.py (+1 -2)

@@ -133,8 +133,7 @@ def _create_new_module(ia3_config, adapter_name, target, **kwargs):
         elif isinstance(target_base_layer, Conv1D):
             if not kwargs["fan_in_fan_out"]:
                 warnings.warn(
-                    "fan_in_fan_out is set to False but the target module is `Conv1D`. "
-                    "Setting fan_in_fan_out to True."
+                    "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
                 )
                 kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True
             new_module = Linear(

src/peft/tuners/lora/layer.py (+1 -1)

@@ -1724,7 +1724,7 @@ def dispatch_default(
     elif isinstance(target_base_layer, Conv1D):
         if not kwargs["fan_in_fan_out"]:
             warnings.warn(
-                "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True."
+                "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
             )
             kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
         kwargs.update(lora_config.loftq_config)

src/peft/tuners/vblora/model.py (+1 -2)

@@ -213,8 +213,7 @@ def _create_new_module(vblora_config, vblora_vector_bank, adapter_name, target,
             kwargs["is_target_conv_1d_layer"] = True
             if not kwargs["fan_in_fan_out"]:
                 warnings.warn(
-                    "fan_in_fan_out is set to False but the target module is `Conv1D`. "
-                    "Setting fan_in_fan_out to True."
+                    "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
                 )
                 kwargs["fan_in_fan_out"] = vblora_config.fan_in_fan_out = True
         else:

src/peft/tuners/vera/model.py (+1 -2)

@@ -340,8 +340,7 @@ def _create_new_module(vera_config, vera_A, vera_B, adapter_name, target, **kwar
             kwargs["is_target_conv_1d_layer"] = True
             if not kwargs["fan_in_fan_out"]:
                 warnings.warn(
-                    "fan_in_fan_out is set to False but the target module is `Conv1D`. "
-                    "Setting fan_in_fan_out to True."
+                    "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
                 )
                 kwargs["fan_in_fan_out"] = vera_config.fan_in_fan_out = True
         else:

src/peft/utils/loftq_utils.py (+1 -2)

@@ -203,8 +203,7 @@ def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, r
     dtype = weight.dtype
 
     logging.info(
-        f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} "
-        f"| Num Iter: {num_iter} | Num Bits: {num_bits}"
+        f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} | Num Iter: {num_iter} | Num Bits: {num_bits}"
     )
     if not is_bnb_4bit_available() or num_bits in [2, 8]:
         quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)

tests/test_custom_models.py (+3 -3)

@@ -1959,9 +1959,9 @@ def test_multirank_2(self):
             if isinstance(module, BaseTunerLayer):
                 rank_expected = rank_pattern.get(key, r)
                 rank_current = module.lora_A[adapter].weight.shape[0]
-                assert (
-                    rank_current == rank_expected
-                ), f"Rank {rank_current} is not equal to expected {rank_expected}"
+                assert rank_current == rank_expected, (
+                    f"Rank {rank_current} is not equal to expected {rank_expected}"
+                )
 
 
 class TestRepr(unittest.TestCase):

tests/test_decoder_models.py (+3 -3)

@@ -495,9 +495,9 @@ def test_lora_layer_replication(self):
             and layers[2].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr()
             != layers[3].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr()
         ), "Expected all LoRA adapters to have distinct weights"
-        assert (
-            len([n for n, _ in model.named_parameters() if ".lora_A." in n]) == 8
-        ), "Expected 8 LoRA adapters since we are adding one each for up and down."
+        assert len([n for n, _ in model.named_parameters() if ".lora_A." in n]) == 8, (
+            "Expected 8 LoRA adapters since we are adding one each for up and down."
+        )
         self._test_prepare_for_training(model_id, LoraConfig, config_kwargs)
         self._test_generate(model_id, LoraConfig, config_kwargs)
 
tests/test_incremental_pca.py (+1 -1)

@@ -93,7 +93,7 @@ def test_incremental_pca_validation():
     n_components = 3
     with pytest.raises(
         ValueError,
-        match=(f"n_components={n_components} must be" " less or equal to the batch number of" f" samples {n_samples}"),
+        match=(f"n_components={n_components} must be less or equal to the batch number of samples {n_samples}"),
     ):
         IncrementalPCA(n_components=n_components).partial_fit(X)
 
