This repository was archived by the owner on Jan 13, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 282
/
Copy pathsrcnn.py
71 lines (59 loc) · 2.37 KB
/
srcnn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Super Resolution CNN (SRCNN)
# Paper: https://arxiv.org/pdf/1501.00092.pdf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, ReLU, Conv2DTranspose, Activation
from tensorflow.keras.optimizers import Adam
def stem(inputs):
    """Stem group: patch extraction with a large, coarse 9x9 filter.

    Expands the 3-channel input into 64 feature maps, following the
    first stage of the SRCNN architecture.
    """
    t = Conv2D(64, (9, 9), padding='same')(inputs)
    t = BatchNormalization()(t)
    return ReLU()(t)
def encoder(x):
    """Non-linear mapping plus reconstruction stage of the SRCNN.

    A 1x1 bottleneck compresses the 64 stem features into 32 maps,
    then a 5x5 convolution reconstructs a 3-channel image. The sigmoid
    keeps output pixels in [0, 1].
    """
    # 1x1 bottleneck convolution
    t = Conv2D(32, (1, 1), padding='same')(x)
    t = BatchNormalization()(t)
    t = ReLU()(t)
    # Reconstruction back to an RGB image
    t = Conv2D(3, (5, 5), padding='same')(t)
    t = BatchNormalization()(t)
    return Activation('sigmoid')(t)
# Assemble the SRCNN: 32x32x3 (degraded) input -> restored 32x32x3 image.
inputs = Input((32, 32, 3))
x = stem(inputs)
outputs = encoder(x)
model = Model(inputs, outputs)
# `lr` is deprecated (removed in TF 2.11+) on tf.keras optimizers; the
# supported keyword is `learning_rate`.
# NOTE(review): 'accuracy' is not a meaningful metric for per-pixel
# regression — consider MAE or a PSNR metric instead; left unchanged to
# preserve behavior.
model.compile(loss='mean_squared_error',
              optimizer=Adam(learning_rate=0.001),
              metrics=['accuracy'])
from tensorflow.keras.datasets import cifar10
import numpy as np
import cv2
def _degrade(images):
    """Return low-resolution versions of *images* as a numpy array.

    Each 32x32 image is bicubically downsampled to 16x16 and then
    upsampled back to 32x32, discarding high-frequency detail — the
    pre-upsampling input scheme used by SRCNN.
    """
    degraded = []
    for image in images:
        small = cv2.resize(image, (16, 16), interpolation=cv2.INTER_CUBIC)
        degraded.append(cv2.resize(small, (32, 32), interpolation=cv2.INTER_CUBIC))
    return np.asarray(degraded)


def train():
    """Train the module-level SRCNN `model` on CIFAR-10.

    Inputs are bicubically degraded images; targets are the originals.
    Class labels (y_train/y_test) are ignored — this is self-supervised
    reconstruction.
    """
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Build the degraded counterparts once per split (previously this
    # loop was duplicated inline for train and test).
    x_train_lr = _degrade(x_train)
    x_test_lr = _degrade(x_test)
    # Scale pixels to [0, 1] to match the model's sigmoid output.
    x_train = (x_train / 255.0).astype(np.float32)
    x_train_lr = (x_train_lr / 255.0).astype(np.float32)
    # NOTE(review): the test tensors below are prepared but never
    # evaluated — consider adding model.evaluate(x_test_lr, x_test).
    x_test = (x_test / 255.0).astype(np.float32)
    x_test_lr = (x_test_lr / 255.0).astype(np.float32)
    model.fit(x_train_lr, x_train, epochs=25, batch_size=32, verbose=1, validation_split=0.1)