unet_blocks.py
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F


class DoubleConv(nn.Module):
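    """Two (Conv2d => BatchNorm2d => ReLU) blocks applied in sequence."""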
def __init__(self,
in_channels: int,
out_channels: int,
mid_channels: Optional[int] = None
) -> None:
super().__init__()
        if mid_channels is None:
            mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels,
mid_channels,
kernel_size=3,
padding=1, bias=False
),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels,
out_channels,
kernel_size=3,
padding=1,
bias=False
),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.double_conv(x)


class DownConv(nn.Module):
    """Downscale with MaxPool2d followed by DoubleConv."""
def __init__(self, in_channels: int, out_channels: int) -> None:
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.maxpool_conv(x)


class UpConv(nn.Module):
    """Upscale, then concatenate the skip connection and apply DoubleConv."""
def __init__(self,
in_channels: int,
out_channels: int,
bilinear: bool = True
) -> None:
super().__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:
        x1 = self.up(x1)
        # Pad x1 so its spatial size matches the skip connection x2
        # (the two can differ when input sizes are not divisible by 2).
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        # Concatenate along the channel dimension, then fuse with DoubleConv
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)


class OutConv(nn.Module):
    """1x1 convolution mapping to the final number of output channels."""
    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.conv(x)


class AttentionGate(nn.Module):
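    """Attention gate: weights the skip-connection features x by per-pixel
    coefficients computed from the gating signal g (the structure follows the
    additive attention gate of Attention U-Net, cf. Oktay et al., 2018)."""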
    def __init__(self, F_g: int, F_l: int, F_int: int) -> None:
        super().__init__()
        # 1x1 convolutions projecting the gating signal (W_g) and
        # the skip features (W_x) down to F_int channels
self.W_g = nn.Sequential(
nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(F_int)
)
self.W_x = nn.Sequential(
nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(F_int)
)
# Define the final 1x1 convolution that gives the attention coefficients
self.psi = nn.Sequential(
nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(1),
nn.Sigmoid()
)

    def forward(self, g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        # Pad g so its spatial dimensions match x before the additive attention
        diffY = x.size()[2] - g.size()[2]
        diffX = x.size()[3] - g.size()[3]
        g = F.pad(g, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
        g1 = self.W_g(g)
        x1 = self.W_x(x)
        # Additive attention: coefficients in (0, 1) via sigmoid
        psi = self.psi(F.relu(g1 + x1))
        # Weight the skip features by the attention map
        return x * psi
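

# --- Usage sketch (illustrative addition; not part of the original file) ---
# A minimal shape check for the blocks above. The tensor sizes, channel
# counts, and the __main__ guard are assumptions for demonstration only.
if __name__ == "__main__":
    skip = torch.randn(1, 64, 64, 64)                  # encoder features (N, C, H, W)
    bottleneck = DownConv(64, 64)(skip)                # -> (1, 64, 32, 32)

    # Decoder step: upsample the bottleneck, pad, concatenate the skip, fuse.
    up = UpConv(in_channels=128, out_channels=64, bilinear=True)
    decoded = up(bottleneck, skip)                     # -> (1, 64, 64, 64)

    # Attention gate: bring the gating signal to the skip's resolution first,
    # since the gate pads rather than upsamples internally.
    g = F.interpolate(bottleneck, scale_factor=2,
                      mode='bilinear', align_corners=True)
    gated = AttentionGate(F_g=64, F_l=64, F_int=32)(g, skip)  # -> (1, 64, 64, 64)

    logits = OutConv(64, 2)(decoded)                   # -> (1, 2, 64, 64)
    print(decoded.shape, gated.shape, logits.shape)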