
Commit 66e48d7

Black

1 parent 29dc3ea

18 files changed: +27 -28 lines changed


pytorch_toolbelt/inference/ensembling.py

Lines changed: 3 additions & 3 deletions

@@ -92,13 +92,13 @@ def __init__(self, models: List[nn.Module], reduction: str = "mean", outputs: Op
     def forward(self, *input, **kwargs): # skipcq: PYL-W0221
         outputs = [model(*input, **kwargs) for model in self.models]
         output_is_dict = isinstance(outputs[0], dict)
-        output_is_list = isinstance(outputs[0], (list, tuple))
+        output_is_list = isinstance(outputs[0], (list, tuple))  # noqa

         if self.return_some_outputs:
             keys = self.outputs
-        elif isinstance(outputs[0], dict):
+        elif output_is_dict:
             keys = outputs[0].keys()
-        elif isinstance(outputs[0], (list, tuple)):
+        elif output_is_list:
             keys = list(range(len(outputs[0])))
         elif torch.is_tensor(outputs[0]):
             keys = None
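
The forward above collects per-model outputs and reduces them key by key (dict keys, list indices, or the raw tensor when keys is None). A minimal usage sketch, assuming the class in this file is the Ensembler wrapper and using toy stand-in models (names and shapes are illustrative, not from the commit):

import torch
from torch import nn
from pytorch_toolbelt.inference.ensembling import Ensembler

# Toy stand-ins for real trained checkpoints (illustrative only)
model_a = nn.Conv2d(3, 1, kernel_size=3, padding=1)
model_b = nn.Conv2d(3, 1, kernel_size=3, padding=1)

# reduction="mean" averages the per-model predictions; passing outputs=[...] would
# keep only the selected keys when the models return dicts.
ensemble = Ensembler([model_a, model_b], reduction="mean").eval()

x = torch.randn(2, 3, 64, 64)
with torch.no_grad():
    y = ensemble(x)  # tensor outputs -> keys is None, reduced directly
print(y.shape)  # torch.Size([2, 1, 64, 64])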

pytorch_toolbelt/inference/tiles.py

Lines changed: 1 addition & 0 deletions

@@ -1,6 +1,7 @@
 """Implementation of tile-based inference allowing to predict huge images that does not fit into GPU memory entirely
 in a sliding-window fashion and merging prediction mask back to full-resolution.
 """
+
 import dataclasses
 import math
 from typing import List, Iterable, Tuple, Union, Sequence
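
The docstring describes the idea behind this module; the sketch below shows the same sliding-window pattern in plain PyTorch. It is illustrative only and does not use this module's own tiler/merger classes.

import torch
from torch import nn


def tiled_predict(model: nn.Module, image: torch.Tensor, tile: int = 256, step: int = 192) -> torch.Tensor:
    """Sliding-window inference with overlap averaging over a CHW image.

    Assumes a single-channel prediction of the same spatial size as each tile.
    """
    c, h, w = image.shape
    assert h >= tile and w >= tile, "sketch assumes the image is at least one tile large"
    acc = torch.zeros(1, h, w)   # accumulated predictions
    norm = torch.zeros(1, h, w)  # how many tiles covered each pixel
    ys = list(range(0, h - tile + 1, step))
    xs = list(range(0, w - tile + 1, step))
    if ys[-1] != h - tile:  # make sure the last tiles touch the image border
        ys.append(h - tile)
    if xs[-1] != w - tile:
        xs.append(w - tile)
    with torch.no_grad():
        for y in ys:
            for x in xs:
                patch = image[:, y : y + tile, x : x + tile].unsqueeze(0)
                pred = model(patch).squeeze(0)
                acc[:, y : y + tile, x : x + tile] += pred
                norm[:, y : y + tile, x : x + tile] += 1
    return acc / norm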

pytorch_toolbelt/inference/tta.py

Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@
 Despite this is called test-time augmentation, these method can be used at training time as well since all
 transformation written in PyTorch and respect gradients flow.
 """
+
 from collections import defaultdict
 from functools import partial
 from typing import Tuple, List, Optional, Union, Callable, Dict, Mapping
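
As the docstring says, the transforms are ordinary differentiable PyTorch ops, so the same trick works during training. A minimal horizontal-flip TTA sketch (generic, not one of this module's named helpers):

import torch
from torch import nn


def fliplr_tta(model: nn.Module, x: torch.Tensor) -> torch.Tensor:
    """Average predictions over the identity and a horizontal flip.

    Assumes the output is spatially aligned with the input (e.g. a segmentation mask),
    so the flip must be undone on the prediction before averaging.
    """
    y = model(x)
    y_flipped = torch.flip(model(torch.flip(x, dims=[-1])), dims=[-1])
    return (y + y_flipped) / 2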

pytorch_toolbelt/losses/functional.py

Lines changed: 4 additions & 2 deletions

@@ -29,7 +29,7 @@ def focal_loss_with_logits(
     ignore_index=None,
     activation: str = "sigmoid",
     softmax_dim: Optional[int] = None,
-    class_weights: Optional[torch.Tensor] = None
+    class_weights: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     """Compute binary focal loss between target and output logits.

@@ -70,7 +70,9 @@ def focal_loss_with_logits(
     if reduced_threshold is None:
         focal_term = (1.0 - pt).pow(gamma)
     else:
-        focal_term = ((1.0 - pt) / (1 - reduced_threshold)).pow(gamma) #the focal term continuity breaks when reduced_threshold not equal to 0.5. At pt equal to reduced_threshold, the value of piecewise function of focal term should be 1 from both sides .
+        focal_term = ((1.0 - pt) / (1 - reduced_threshold)).pow(
+            gamma
+        )  # the focal term continuity breaks when reduced_threshold not equal to 0.5. At pt equal to reduced_threshold, the value of piecewise function of focal term should be 1 from both sides .
         focal_term = torch.masked_fill(focal_term, pt < reduced_threshold, 1)

     loss = focal_term * ce_loss
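
The inline comment argues that with this denominator the piecewise focal term stays continuous: at pt == reduced_threshold both branches evaluate to 1, regardless of whether reduced_threshold is 0.5. A standalone numerical check of that claim (a sketch, not the library function):

import torch


def reduced_focal_term(pt: torch.Tensor, gamma: float, reduced_threshold: float) -> torch.Tensor:
    """Piecewise focal term from the diff: 1 below the threshold, scaled (1 - pt)^gamma above it."""
    term = ((1.0 - pt) / (1 - reduced_threshold)).pow(gamma)
    return torch.masked_fill(term, pt < reduced_threshold, 1.0)


rt = 0.25  # deliberately != 0.5
pt = torch.tensor([rt - 1e-4, rt, rt + 1e-4])
print(reduced_focal_term(pt, gamma=2.0, reduced_threshold=rt))
# all three values are ~1.0, so the term stays continuous at the threshold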

pytorch_toolbelt/modules/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -18,4 +18,4 @@
 from .initialization import *
 from .normalization import *

-from .heads import *
+from .heads import *

pytorch_toolbelt/modules/activations.py

Lines changed: 1 addition & 1 deletion

@@ -255,7 +255,7 @@ def get_activation_block(activation_name: str):
         ACT_SWISH: Swish,
         ACT_SWISH_NAIVE: SwishNaive,
         ACT_SIGMOID: nn.Sigmoid,
-        ACT_SOFTMAX: nn.Softmax
+        ACT_SOFTMAX: nn.Softmax,
     }

     return ACTIVATIONS[activation_name.lower()]
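
get_activation_block resolves a lowercase name to an activation class (not an instance). A short usage sketch, assuming the ACT_* constants are simply the lowercase names such as "sigmoid":

from torch import nn
from pytorch_toolbelt.modules.activations import get_activation_block

block_cls = get_activation_block("sigmoid")  # returns the class from the ACTIVATIONS registry
activation = block_cls()                     # instantiate it like any other nn.Module
assert isinstance(activation, nn.Sigmoid)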

pytorch_toolbelt/modules/backbone/senet.py

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@
 ResNet code gently borrowed from
 https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
 """
+
 from __future__ import print_function, division, absolute_import

 import math

pytorch_toolbelt/modules/encoders/__init__.py

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@

 Encodes listed here provides easy way to swap backbone of classification/segmentation/detection model.
 """
+
 from .common import *
 from .densenet import *
 from .hrnet import *

pytorch_toolbelt/modules/encoders/common.py

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@

 Encodes listed here provides easy way to swap backbone of classification/segmentation/detection model.
 """
+
 import math
 import warnings
 from typing import List, Union, Tuple, Iterable, Any

pytorch_toolbelt/modules/encoders/seresnet.py

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@

 Encodes listed here provides easy way to swap backbone of classification/segmentation/detection model.
 """
+
 from typing import List

 import torch

Lines changed: 3 additions & 2 deletions

@@ -1,11 +1,12 @@
 from .common import GenericTimmEncoder

+
 class MaxVitEncoder(GenericTimmEncoder):
-    def __init__(self, model_name:str, pretrained=True, **kwargs):
+    def __init__(self, model_name: str, pretrained=True, **kwargs):
         super().__init__(model_name, pretrained=pretrained, **kwargs)

     def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
         from pytorch_toolbelt.modules import make_n_channel_input

         self.encoder.stem.conv1 = make_n_channel_input(self.encoder.stem.conv1, input_channels)
-        return self
+        return self
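
change_input_channels swaps the stem convolution via make_n_channel_input and returns self, so it can be chained after construction. A hedged usage sketch (the import path and the timm model name are assumptions, not taken from the commit):

import torch
from pytorch_toolbelt.modules.encoders.timm import MaxVitEncoder  # assumed re-export path

# Build the encoder, then adapt it to single-channel (e.g. grayscale) input.
encoder = MaxVitEncoder("maxvit_tiny_tf_224", pretrained=False).change_input_channels(1)

x = torch.randn(1, 1, 224, 224)
with torch.no_grad():
    feature_maps = encoder(x)  # typically a list of per-stage feature maps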

pytorch_toolbelt/modules/encoders/timm/resnet.py

Lines changed: 1 addition & 0 deletions

@@ -181,6 +181,7 @@ def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
         self.encoder.conv1[0] = make_n_channel_input(self.encoder.conv1[0], input_channels, mode=mode, **kwargs)
         return self

+
 class TimmResnet50D(GenericTimmEncoder):
     def __init__(
         self, pretrained=True, layers=None, activation=ACT_RELU, first_conv_stride_one: bool = False, **kwargs

pytorch_toolbelt/modules/heads/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -3,4 +3,4 @@
 from .deep_supervision import *
 from .hypercolumn import *
 from .segformer_head import *
-from .classification_heads import *
+from .classification_heads import *

pytorch_toolbelt/modules/interfaces.py

Lines changed: 3 additions & 6 deletions

@@ -61,8 +61,7 @@ class HasInputFeaturesSpecification(Protocol):
     """

     @torch.jit.unused
-    def get_input_spec(self) -> FeatureMapsSpecification:
-        ...
+    def get_input_spec(self) -> FeatureMapsSpecification: ...


 class HasOutputFeaturesSpecification(Protocol):

@@ -71,8 +70,7 @@ class HasOutputFeaturesSpecification(Protocol):
     """

     @torch.jit.unused
-    def get_output_spec(self) -> FeatureMapsSpecification:
-        ...
+    def get_output_spec(self) -> FeatureMapsSpecification: ...


 class AbstractEncoder(nn.Module, HasOutputFeaturesSpecification):

@@ -108,8 +106,7 @@ def __init__(self, input_spec: FeatureMapsSpecification):
     @abstractmethod
     def forward(
         self, feature_maps: List[Tensor], output_size: Union[Tuple[int, int], torch.Size, None] = None
-    ) -> Union[Tensor, Tuple[Tensor, ...], List[Tensor], Mapping[str, Tensor]]:
-        ...
+    ) -> Union[Tensor, Tuple[Tensor, ...], List[Tensor], Mapping[str, Tensor]]: ...

     @torch.jit.unused
     def apply_to_final_layer(self, func: Callable[[nn.Module], None]):
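
The single-line `...` bodies above are the usual typing.Protocol idiom: any class with a matching method satisfies the protocol structurally, no inheritance needed. A generic sketch of how a concrete module meets HasOutputFeaturesSpecification (the stored spec object is assumed to come from elsewhere):

import torch
from torch import nn
from pytorch_toolbelt.modules.interfaces import FeatureMapsSpecification, HasOutputFeaturesSpecification


class MyEncoder(nn.Module):
    """Satisfies HasOutputFeaturesSpecification simply by defining get_output_spec."""

    def __init__(self, output_spec: FeatureMapsSpecification):
        super().__init__()
        self.output_spec = output_spec

    @torch.jit.unused
    def get_output_spec(self) -> FeatureMapsSpecification:
        return self.output_spec


def describe(module: HasOutputFeaturesSpecification) -> FeatureMapsSpecification:
    # Accepts any object exposing get_output_spec(), MyEncoder included.
    return module.get_output_spec()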

pytorch_toolbelt/modules/pooling.py

Lines changed: 1 addition & 10 deletions

@@ -200,16 +200,7 @@ def forward(self, x: Tensor) -> Tensor:

     def __repr__(self):
         p = torch.softplus(self.p) + 1
-        return (
-            self.__class__.__name__
-            + "("
-            + "p="
-            + "{:.4f}".format(p.item())
-            + ", "
-            + "eps="
-            + str(self.eps)
-            + ")"
-        )
+        return self.__class__.__name__ + "(" + "p=" + "{:.4f}".format(p.item()) + ", " + "eps=" + str(self.eps) + ")"


 class GlobalMaxAvgPooling2d(nn.Module):

pytorch_toolbelt/utils/random_utils.py

Lines changed: 1 addition & 0 deletions

@@ -1,6 +1,7 @@
 """Utility functions to make your experiments reproducible

 """
+
 import random
 import warnings

pytorch_toolbelt/utils/torch_utils.py

Lines changed: 1 addition & 1 deletion

@@ -198,7 +198,7 @@ def container_to_tensor(value: Union[np.ndarray, List, Tuple, Mapping, Any]):
         cls = type(value)
         return cls((k, container_to_tensor(v)) for k, v in value.items())

-    raise ValueError(f"Unsupported container type")
+    raise ValueError(f"Unsupported container type {type(value)}")


 def image_to_tensor(image: np.ndarray, dummy_channels_dim=True) -> torch.Tensor:
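
container_to_tensor rebuilds a mapping of the same type with each value converted recursively, and the changed line makes the failure message name the offending type. A small usage sketch (the example batch is illustrative):

import numpy as np
from pytorch_toolbelt.utils.torch_utils import container_to_tensor

batch = {
    "image": np.zeros((3, 32, 32), dtype=np.float32),
    "mask": np.ones((32, 32), dtype=np.uint8),
}
tensors = container_to_tensor(batch)  # same mapping type, values converted to torch.Tensor
print(type(tensors), tensors["image"].shape)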

setup.py

Lines changed: 1 addition & 1 deletion

@@ -169,7 +169,7 @@ def get_test_requirements():
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Topic :: Software Development :: Libraries",
         "Topic :: Software Development :: Libraries :: Python Modules",
-        "Topic :: Software Development :: Libraries :: Application Frameworks"
+        "Topic :: Software Development :: Libraries :: Application Frameworks",
         # "Private :: Do Not Upload"
     ],
 )
