
Commit 1fc67a2

More PYUP fixes (#38883)
More pyup fixes

Signed-off-by: cyy <[email protected]>
1 parent 12d4c5b commit 1fc67a2

File tree

150 files changed: +273 additions, -355 deletions
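
The diffs below repeat a handful of pyupgrade-style rewrites: `str.format` calls become f-strings, `@lru_cache()` becomes the bare `@lru_cache` decorator, redundant parentheses and redundant constructor calls such as `float(-100.0)` are dropped, and string-quoted type annotations are unquoted. A minimal, self-contained sketch of these patterns (the names and values below are illustrative, not taken from the diff):

    from functools import lru_cache

    file = "pytorch_model_00001-of-00002.bin"  # stand-in value for illustration

    # str.format -> f-string: identical output
    assert "Processing file: {}".format(file) == f"Processing file: {file}"

    # Python 3.8+: lru_cache works as a bare decorator (default maxsize=128)
    @lru_cache
    def square(n: int) -> int:
        return n * n

    # redundant parentheses and redundant float()/str() calls change nothing
    a, b = 3.0, 1.0
    assert ((a - b)) == a - b
    assert float(-100.0) == -100.0 and str(",") == ","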


src/transformers/generation/candidate_generator.py

Lines changed: 2 additions & 2 deletions
@@ -710,8 +710,8 @@ def __init__(
         assistant_model: Optional["PreTrainedModel"] = None,
         assistant_prune_lm_head: bool = False,
     ):
-        self._target_tokenizer: "PreTrainedTokenizerBase" = target_tokenizer
-        self._assistant_tokenizer: "PreTrainedTokenizerBase" = assistant_tokenizer
+        self._target_tokenizer: PreTrainedTokenizerBase = target_tokenizer
+        self._assistant_tokenizer: PreTrainedTokenizerBase = assistant_tokenizer
         self._assistant_model_device: str = (
             assistant_model_device if assistant_model is None else assistant_model.device
         )
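
Dropping the quotes from the PreTrainedTokenizerBase annotations is only safe if the name is resolvable when the annotation is evaluated, either via a runtime import or because the module defers annotation evaluation. A hedged sketch of the deferred-evaluation route (the class and the TYPE_CHECKING import are illustrative, not taken from this file):

    from __future__ import annotations  # PEP 563: annotations are not evaluated at runtime

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # visible to type checkers only; never imported at runtime
        from transformers import PreTrainedTokenizerBase

    class ToyCandidateHelper:
        # unquoted annotation is fine: it is never evaluated at runtime,
        # so the TYPE_CHECKING-only import is enough
        def __init__(self, target_tokenizer: PreTrainedTokenizerBase) -> None:
            self._target_tokenizer = target_tokenizer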

src/transformers/generation/streamers.py

Lines changed: 3 additions & 3 deletions
@@ -72,7 +72,7 @@ class TextStreamer(BaseStreamer):
     ```
     """

-    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
+    def __init__(self, tokenizer: AutoTokenizer, skip_prompt: bool = False, **decode_kwargs):
         self.tokenizer = tokenizer
         self.skip_prompt = skip_prompt
         self.decode_kwargs = decode_kwargs
@@ -206,7 +206,7 @@ class TextIteratorStreamer(TextStreamer):
     """

     def __init__(
-        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
+        self, tokenizer: AutoTokenizer, skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
     ):
         super().__init__(tokenizer, skip_prompt, **decode_kwargs)
         self.text_queue = Queue()
@@ -284,7 +284,7 @@ class AsyncTextIteratorStreamer(TextStreamer):
     """

     def __init__(
-        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
+        self, tokenizer: AutoTokenizer, skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
     ):
         super().__init__(tokenizer, skip_prompt, **decode_kwargs)
         self.text_queue = asyncio.Queue()

src/transformers/generation/utils.py

Lines changed: 1 addition & 1 deletion
@@ -4723,7 +4723,7 @@ def _constrained_beam_search(
             )

             if return_dict_in_generate and output_scores:
-                beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
+                beam_indices = tuple(beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))

             # increase cur_len
             cur_len = cur_len + 1
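
The only change is removing the inner parentheses: a generator expression that is the sole argument of a call needs no extra wrapping, so `tuple(x for ...)` and `tuple((x for ...))` produce identical results. A toy check with made-up `beam_indices`/`beam_idx` values:

    beam_indices = [(0,), (1,), (2,)]
    beam_idx = [2, 0, 1]

    with_parens = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
    without_parens = tuple(beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))

    assert with_parens == without_parens == ((2, 2), (0, 0), (1, 1))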

src/transformers/integrations/integration_utils.py

Lines changed: 3 additions & 5 deletions
@@ -1626,8 +1626,8 @@ def _log_model_checkpoint(self, source_directory: str, checkpoint: str):
                 target_path = consistent_checkpoint_path
             except OSError as e:
                 logger.warning(
-                    "NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{}'. "
-                    "Could fail trying to upload.".format(e)
+                    f"NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{e}'. "
+                    "Could fail trying to upload."
                 )

         self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path)
@@ -1976,9 +1976,7 @@ def on_save(self, args, state, control, **kwargs):
                     )
                 except Exception as e:
                     logger.warning(
-                        "Could not remove checkpoint `{}` after going over the `save_total_limit`. Error is: {}".format(
-                            self._checkpoints_saved[0].name, e
-                        )
+                        f"Could not remove checkpoint `{self._checkpoints_saved[0].name}` after going over the `save_total_limit`. Error is: {e}"
                     )
                     break
                 self._checkpoints_saved = self._checkpoints_saved[1:]
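
Only the formatting mechanism changes here: adjacent string literals are concatenated first, so the old `.format(e)` applied to the whole message, and the f-string builds the same text while keeping the placeholder next to its value. A quick equivalence check with a stand-in exception:

    e = OSError("disk full")  # stand-in for the caught exception
    old = (
        "NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{}'. "
        "Could fail trying to upload.".format(e)
    )
    new = (
        f"NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{e}'. "
        "Could fail trying to upload."
    )
    assert old == new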

src/transformers/modeling_tf_utils.py

Lines changed: 2 additions & 2 deletions
@@ -1409,10 +1409,10 @@ def _save_checkpoint(self, checkpoint_dir, epoch):

     def prepare_tf_dataset(
         self,
-        dataset: "datasets.Dataset",  # noqa:F821
+        dataset: datasets.Dataset,  # noqa:F821
         batch_size: int = 8,
         shuffle: bool = True,
-        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
+        tokenizer: Optional[PreTrainedTokenizerBase] = None,
         collate_fn: Optional[Callable] = None,
         collate_fn_args: Optional[dict[str, Any]] = None,
         drop_remainder: Optional[bool] = None,

src/transformers/modeling_utils.py

Lines changed: 2 additions & 4 deletions
@@ -4424,10 +4424,8 @@ def from_pretrained(
             raise ValueError("DeepSpeed Zero-3 is not compatible with passing a `device_map`.")
         if not is_accelerate_available():
             raise ValueError(
-                (
-                    "Using a `device_map`, `tp_plan`, `torch.device` context manager or setting `torch.set_default_device(device)` "
-                    "requires `accelerate`. You can install it with `pip install accelerate`"
-                )
+                "Using a `device_map`, `tp_plan`, `torch.device` context manager or setting `torch.set_default_device(device)` "
+                "requires `accelerate`. You can install it with `pip install accelerate`"
             )

         # handling bnb config from kwargs, remove after `load_in_{4/8}bit` deprecation.
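
The dropped parentheses wrapped two adjacent string literals, which Python concatenates with or without them, so the `ValueError` receives the identical message. A short demonstration with an abridged version of the message:

    wrapped = (
        (
            "Using a `device_map` "
            "requires `accelerate`."
        )
    )
    unwrapped = (
        "Using a `device_map` "
        "requires `accelerate`."
    )
    assert wrapped == unwrapped == "Using a `device_map` requires `accelerate`."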

src/transformers/models/albert/tokenization_albert.py

Lines changed: 1 addition & 1 deletion
@@ -203,7 +203,7 @@ def _tokenize(self, text: str) -> list[str]:
         pieces = self.sp_model.encode(text, out_type=str)
         new_pieces = []
         for piece in pieces:
-            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
+            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                 # Logic to handle special cases see https://github.com/google-research/bert/blob/master/README.md#tokenization
                 # `9,9` -> ['▁9', ',', '9'] instead of [`_9,`, '9']
                 cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
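
`str(",")` is a redundant constructor call around a literal that is already a string, so the comparison is unchanged and the digit/comma handling behaves exactly as before. For example:

    piece = "▁9,"  # illustrative SentencePiece-style token
    assert str(",") == ","
    old = len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit()
    new = len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit()
    assert old is True and new is True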

src/transformers/models/bamba/modeling_bamba.py

Lines changed: 1 addition & 1 deletion
@@ -830,7 +830,7 @@ def torch_forward(

         # 2. Compute the state for each intra-chunk
         # (right term of low-rank factorization of off-diagonal blocks; B terms)
-        decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
+        decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
         B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
         states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)

src/transformers/models/bamba/modular_bamba.py

Lines changed: 1 addition & 1 deletion
@@ -632,7 +632,7 @@ def torch_forward(

         # 2. Compute the state for each intra-chunk
         # (right term of low-rank factorization of off-diagonal blocks; B terms)
-        decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
+        decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
         B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
         states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)

src/transformers/models/bart/tokenization_bart.py

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 # See all BART models at https://huggingface.co/models?filter=bart


-@lru_cache()
+@lru_cache
 def bytes_to_unicode():
     """
     Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
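
Since Python 3.8, `functools.lru_cache` can be applied directly as a decorator; the bare form wraps the function with the same default `maxsize=128` that `@lru_cache()` used, so the cached `bytes_to_unicode` table behaves the same. A small sketch with a hypothetical cached function:

    from functools import lru_cache

    @lru_cache  # equivalent to @lru_cache() with the default maxsize=128
    def byte_table_size() -> int:
        # hypothetical stand-in for the real bytes_to_unicode() builder
        return 256

    assert byte_table_size() == 256
    assert byte_table_size.cache_info().maxsize == 128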

src/transformers/models/beit/modeling_beit.py

Lines changed: 3 additions & 3 deletions
@@ -110,7 +110,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         return drop_path(hidden_states, self.drop_prob, self.training)

     def extra_repr(self) -> str:
-        return "p={}".format(self.drop_prob)
+        return f"p={self.drop_prob}"


 # Based on timm implementation, which can be found here:
@@ -513,8 +513,8 @@ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None, drop

         init_values = config.layer_scale_init_value
         if init_values > 0:
-            self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
-            self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
+            self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
+            self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
         else:
             self.lambda_1, self.lambda_2 = None, None
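
In `torch.ones((config.hidden_size))` the parentheses enclose a single integer, not a tuple, so they were purely cosmetic and the same 1-D tensor is created either way (only a trailing comma would make a tuple, and even `(n,)` yields the same shape here). A quick check, assuming `torch` is installed:

    import torch

    n = 8  # stand-in for config.hidden_size
    assert torch.ones((n)).shape == torch.ones(n).shape == torch.Size([n])
    assert torch.ones((n,)).shape == torch.Size([n])  # an actual 1-tuple gives the same shape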

src/transformers/models/bert_japanese/tokenization_bert_japanese.py

Lines changed: 1 addition & 1 deletion
@@ -934,7 +934,7 @@ def tokenize(self, text):
         pieces = self.sp_model.encode(text, out_type=str)
         new_pieces = []
         for piece in pieces:
-            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
+            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                 cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                 if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                     if len(cur_pieces[0]) == 1:

src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py

Lines changed: 3 additions & 3 deletions
@@ -115,7 +115,7 @@ def add_from_file(self, f):
             except FileNotFoundError as fnfe:
                 raise fnfe
             except UnicodeError:
-                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
+                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
             return

         lines = f.readlines()
@@ -133,11 +133,11 @@ def add_from_file(self, f):
                 word = line
                 if word in self and not overwrite:
                     raise RuntimeError(
-                        "Duplicate word found when loading Dictionary: '{}'. "
+                        f"Duplicate word found when loading Dictionary: '{word}'. "
                         "Duplicate words can overwrite earlier ones by adding the "
                         "#fairseq:overwrite flag at the end of the corresponding row "
                         "in the dictionary file. If using the Camembert model, please "
-                        "download an updated copy of the model file.".format(word)
+                        "download an updated copy of the model file."
                     )
                 self.add_symbol(word, n=count, overwrite=overwrite)
             except ValueError:

src/transformers/models/bit/modeling_bit.py

Lines changed: 1 addition & 1 deletion
@@ -310,7 +310,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         return drop_path(hidden_states, self.drop_prob, self.training)

     def extra_repr(self) -> str:
-        return "p={}".format(self.drop_prob)
+        return f"p={self.drop_prob}"


 def make_div(value, divisor=8):

src/transformers/models/blenderbot/tokenization_blenderbot.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@
 }


-@lru_cache()
+@lru_cache
 # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
 def bytes_to_unicode():
     """

src/transformers/models/blip/modeling_blip_text.py

Lines changed: 2 additions & 4 deletions
@@ -641,9 +641,7 @@ def get_extended_attention_mask(
             extended_attention_mask = attention_mask[:, None, None, :]
         else:
             raise ValueError(
-                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
-                    input_shape, attention_mask.shape
-                )
+                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
             )

         # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
@@ -723,7 +721,7 @@ def forward(
         past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

         if attention_mask is None:
-            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length))).to(device)
+            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length)).to(device)

         # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
         # ourselves in which case we just need to make it broadcastable to all heads.

src/transformers/models/blip/modeling_tf_blip_text.py

Lines changed: 2 additions & 4 deletions
@@ -800,9 +800,7 @@ def get_extended_attention_mask(
             extended_attention_mask = attention_mask[:, None, None, :]
         else:
             raise ValueError(
-                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
-                    input_shape, attention_mask.shape
-                )
+                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
             )

         # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
@@ -881,7 +879,7 @@ def call(
         past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

         if attention_mask is None:
-            attention_mask = tf.ones(((batch_size, seq_length + past_key_values_length)))
+            attention_mask = tf.ones((batch_size, seq_length + past_key_values_length))

         # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
         # ourselves in which case we just need to make it broadcastable to all heads.

src/transformers/models/blip_2/modeling_blip_2.py

Lines changed: 1 addition & 3 deletions
@@ -1144,9 +1144,7 @@ def get_extended_attention_mask(
             extended_attention_mask = attention_mask[:, None, None, :]
         else:
             raise ValueError(
-                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
-                    input_shape, attention_mask.shape
-                )
+                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
             )

         # Since attention_mask is 1.0 for positions we want to attend and 0.0 for

src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py

Lines changed: 4 additions & 4 deletions
@@ -98,7 +98,7 @@ def convert_bloom_checkpoint_to_pytorch(
     config = BloomConfig()

     for j, file in enumerate(file_names):
-        print("Processing file: {}".format(file))
+        print(f"Processing file: {file}")
         tensors = None

         for i in range(pretraining_tp):
@@ -132,16 +132,16 @@ def convert_bloom_checkpoint_to_pytorch(
                 tensors,
                 os.path.join(
                     pytorch_dump_folder_path,
-                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
+                    f"pytorch_model_{str(j + 1).zfill(5)}-of-{str(len(file_names)).zfill(5)}.bin",
                 ),
             )

             for key in tensors.keys():
                 value = tensors[key]
                 total_size += value.numel() * get_dtype_size(value.dtype)
                 if key not in index_dict["weight_map"]:
-                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
-                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
+                    index_dict["weight_map"][key] = (
+                        f"pytorch_model_{str(j + 1).zfill(5)}-of-{str(len(file_names)).zfill(5)}.bin"
                     )

     config = BloomConfig()

src/transformers/models/clap/modeling_clap.py

Lines changed: 1 addition & 1 deletion
@@ -610,7 +610,7 @@ def get_attn_mask(self, height, width, dtype, device):
             mask_windows = window_partition(img_mask, self.window_size)
             mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
             attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
-            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+            attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
         else:
             attn_mask = None
         return attn_mask
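
`float(-100.0)` and `float(0.0)` wrap values that are already float literals, so removing the calls leaves the attention-mask construction untouched. A minimal `masked_fill` check with a toy mask, assuming `torch` is installed:

    import torch

    m = torch.tensor([[0.0, 3.0], [-2.0, 0.0]])
    old = m.masked_fill(m != 0, float(-100.0)).masked_fill(m == 0, float(0.0))
    new = m.masked_fill(m != 0, -100.0).masked_fill(m == 0, 0.0)
    assert torch.equal(old, new)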

src/transformers/models/clip/tokenization_clip.py

Lines changed: 4 additions & 4 deletions
@@ -34,7 +34,7 @@
 }


-@lru_cache()
+@lru_cache
 def bytes_to_unicode():
     """
     Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
@@ -488,7 +488,7 @@ def convert_tokens_to_string(self, tokens):

     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
         if not os.path.isdir(save_directory):
-            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
             return
         vocab_file = os.path.join(
             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
@@ -506,8 +506,8 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] =
         for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
             if index != token_index:
                 logger.warning(
-                    "Saving vocabulary to {}: BPE merge indices are not consecutive."
-                    " Please check that the tokenizer is not corrupted!".format(merge_file)
+                    f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+                    " Please check that the tokenizer is not corrupted!"
                 )
             index = token_index
             writer.write(" ".join(bpe_tokens) + "\n")

src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py

Lines changed: 1 addition & 1 deletion
@@ -181,7 +181,7 @@ def convert_clipseg_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_
     missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

     if missing_keys != ["clip.text_model.embeddings.position_ids", "clip.vision_model.embeddings.position_ids"]:
-        raise ValueError("Missing keys that are not expected: {}".format(missing_keys))
+        raise ValueError(f"Missing keys that are not expected: {missing_keys}")
     if unexpected_keys != ["decoder.reduce.weight", "decoder.reduce.bias"]:
         raise ValueError(f"Unexpected keys: {unexpected_keys}")

src/transformers/models/clvp/tokenization_clvp.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
 }


-@lru_cache()
+@lru_cache
 # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
 def bytes_to_unicode():
     """

src/transformers/models/codegen/tokenization_codegen.py

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@
 }


-@lru_cache()
+@lru_cache
 def bytes_to_unicode():
     """
     Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control

src/transformers/models/convnext/modeling_convnext.py

Lines changed: 2 additions & 2 deletions
@@ -70,7 +70,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         return drop_path(hidden_states, self.drop_prob, self.training)

     def extra_repr(self) -> str:
-        return "p={}".format(self.drop_prob)
+        return f"p={self.drop_prob}"


 class ConvNextLayerNorm(nn.Module):
@@ -149,7 +149,7 @@ def __init__(self, config, dim, drop_path=0):
         self.act = ACT2FN[config.hidden_act]
         self.pwconv2 = nn.Linear(4 * dim, dim)
         self.layer_scale_parameter = (
-            nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+            nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)
             if config.layer_scale_init_value > 0
             else None
         )

src/transformers/models/convnextv2/modeling_convnextv2.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         return drop_path(hidden_states, self.drop_prob, self.training)

     def extra_repr(self) -> str:
-        return "p={}".format(self.drop_prob)
+        return f"p={self.drop_prob}"


 class ConvNextV2GRN(nn.Module):

src/transformers/models/cpm/tokenization_cpm.py

Lines changed: 1 addition & 1 deletion
@@ -207,7 +207,7 @@ def _tokenize(self, text: str) -> list[str]:
         pieces = self.sp_model.encode(text, out_type=str)
         new_pieces = []
         for piece in pieces:
-            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
+            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                 cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                 if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                     if len(cur_pieces[0]) == 1:

src/transformers/models/cvt/modeling_cvt.py

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         return drop_path(hidden_states, self.drop_prob, self.training)

     def extra_repr(self) -> str:
-        return "p={}".format(self.drop_prob)
+        return f"p={self.drop_prob}"


 class CvtEmbeddings(nn.Module):

src/transformers/models/d_fine/modeling_d_fine.py

Lines changed: 1 addition & 1 deletion
@@ -187,7 +187,7 @@ def forward(
             sampling_locations = reference_points[:, :, None, :, :2] + offset
         else:
             raise ValueError(
-                "Last dim of reference_points must be 2 or 4, but get {} instead.".format(reference_points.shape[-1])
+                f"Last dim of reference_points must be 2 or 4, but get {reference_points.shape[-1]} instead."
             )

         output = self.ms_deformable_attn_core(
