
Commit 3246650

Author: Bruno Alves (committed)
commit missing files
1 parent b97875e commit 3246650

File tree

7 files changed: +344 -0 lines changed


cifar10_dcgan.py

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
import os
import tensorflow as tf
from src.models.gans import DCGAN
import argparse
from src import argparser


def main(argv=None):
    nepochs = 1000
    checkpoint_dir = '/fred/oz012/Bruno/checkpoints/' + str(FLAGS.checkpoint)
    with tf.Session() as sess:
        dcgan = DCGAN(sess=sess,
                      in_height=32,
                      in_width=32,
                      nchannels=3,
                      batch_size=128,
                      noise_dim=100,
                      mode='wgan-gp',
                      opt_pars=(0.0001, 0., 0.9),
                      d_iters=5,
                      data_name='cifar10',
                      checkpoint_dir=checkpoint_dir)

        if FLAGS.mode == 'train':
            dcgan.train(nepochs)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    FLAGS, _ = argparser.add_args(parser)
    tf.logging.set_verbosity(tf.logging.DEBUG)
    tf.app.run()
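A note on the shared pattern: this script (and the other entry points in the commit) reads FLAGS.checkpoint and FLAGS.mode, which src.argparser.add_args is expected to define. That module is not part of this commit; the following is a minimal hypothetical sketch of what it plausibly provides, with flag names inferred purely from the FLAGS accesses above.

# src/argparser.py -- hypothetical sketch; the real module is not in this commit
def add_args(parser):
    # flag names inferred from the FLAGS.checkpoint / FLAGS.mode call sites
    parser.add_argument('--checkpoint', type=int, default=0,
                        help='index of the checkpoint directory to use')
    parser.add_argument('--mode', default='train',
                        help="run mode; the scripts only act on 'train'")
    # matches the FLAGS, _ = argparser.add_args(parser) usage
    return parser.parse_known_args()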

mnist_resnet.py

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
import os
import tensorflow as tf
from src.models.gans import ResNetGAN
import argparse
from src import argparser


def main(argv=None):
    nepochs = 1000
    checkpoint_dir = '/fred/oz012/Bruno/checkpoints/' + str(FLAGS.checkpoint) + '/'
    with tf.Session() as sess:
        resnet = ResNetGAN(sess=sess,
                           in_height=32,
                           in_width=32,
                           nchannels=3,
                           batch_size=128,
                           noise_dim=100,
                           checkpoint_dir=checkpoint_dir,
                           data_name='cifar10')

        if FLAGS.mode == 'train':
            resnet.train(nepochs)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    FLAGS, _ = argparser.add_args(parser)
    tf.logging.set_verbosity(tf.logging.DEBUG)
    tf.app.run()
Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
import numpy as np
import glob

path = '/fred/oz012/Bruno/data/spectra/gal_form_burst/'
g = glob.glob(path + '/*/*.fits')

# seed numpy's generator: np.random, not the stdlib random module, draws the indices
np.random.seed()
# randint's upper bound is exclusive, so len(g) keeps the last file selectable
r = [np.random.randint(0, len(g)) for _ in range(100)]

with open('random_spectra_gal.txt', 'w') as f:
    for i in r:
        f.write(g[i] + '\n')
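Because np.random.randint draws with replacement, the same spectrum can appear more than once in the output. If 100 distinct files were intended, np.random.choice with replace=False would give that; a sketch of the alternative (not what the commit does, and it assumes len(g) >= 100):

# sample 100 distinct file paths instead of 100 independent draws
import numpy as np
chosen = np.random.choice(g, size=100, replace=False)
with open('random_spectra_gal.txt', 'w') as f:
    f.writelines(p + '\n' for p in chosen)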

spectra_gan_dcgan.py

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
import os
import tensorflow as tf
from src.models.gans import DCGAN
import argparse
from src import argparser


def main(argv=None):
    nepochs = 5
    checkpoint_dir = '/fred/oz012/Bruno/checkpoints/' + str(FLAGS.checkpoint)
    with tf.Session() as sess:
        dcgan = DCGAN(sess=sess,
                      in_width=3500,
                      in_height=1,
                      nchannels=1,
                      batch_size=512,
                      noise_dim=100,
                      checkpoint_dir=checkpoint_dir,
                      data_name='spectra',
                      files_path='/fred/oz012/Bruno/data/spectra/boss/loz/tmp/',
                      dataset_size=350000)

        if FLAGS.mode == 'train':
            dcgan.train(nepochs)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    FLAGS, _ = argparser.add_args(parser)
    tf.logging.set_verbosity(tf.logging.DEBUG)
    tf.app.run()

src/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
#

src/models/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
#

src/models/architecture.py

Lines changed: 241 additions & 0 deletions
@@ -0,0 +1,241 @@
import numpy as np
import tensorflow as tf
from tensorflow import layers


def minibatch_discrimination(inputs, num_kernels=5, kernel_dim=3):
    with tf.variable_scope('minibatch_discrimination'):
        T = tf.get_variable('T', shape=[inputs.get_shape()[1], num_kernels*kernel_dim],
                            initializer=tf.random_normal_initializer(stddev=0.02))
        M = tf.reshape(tf.matmul(inputs, T), (-1, num_kernels, kernel_dim))
        diffs = tf.expand_dims(M, 3) - tf.expand_dims(tf.transpose(M, [1,2,0]), 0)
        abs_diffs = tf.reduce_sum(tf.abs(diffs), 2)
        minibatch_features = tf.reduce_sum(tf.exp(-abs_diffs), 2)
        return tf.concat([inputs, minibatch_features], 1)
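This matches the minibatch-discrimination feature of Salimans et al. (2016): each sample is projected through the learned tensor T, per-kernel L1 distances to every other sample in the batch are computed, and the summed negative-exponential distances are appended as extra features, letting the discriminator detect a collapsing generator. A shape trace for a batch of B inputs with F features each:

# inputs:    (B, F)
# M:         (B, num_kernels, kernel_dim)
# diffs:     (B, num_kernels, kernel_dim, B)  broadcast against the transposed batch
# abs_diffs: (B, num_kernels, B)              L1 distance over kernel_dim
# features:  (B, num_kernels)                 sum over the batch of exp(-distance)
# output:    (B, F + num_kernels)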

def tikhonov_regularizer(D_real_logits, D_real_arg, D_fake_logits, D_fake_arg, batch_size):
    D1 = tf.nn.sigmoid(D_real_logits)
    D2 = tf.nn.sigmoid(D_fake_logits)
    grad_D1_logits = tf.gradients(D_real_logits, D_real_arg)[0]
    grad_D2_logits = tf.gradients(D_fake_logits, D_fake_arg)[0]
    grad_D1_logits_norm = tf.norm(tf.reshape(grad_D1_logits, [batch_size, -1]), axis=1, keepdims=True)
    grad_D2_logits_norm = tf.norm(tf.reshape(grad_D2_logits, [batch_size, -1]), axis=1, keepdims=True)

    reg_D1 = tf.multiply(tf.square(1.0 - D1), tf.square(grad_D1_logits_norm))
    reg_D2 = tf.multiply(tf.square(D2), tf.square(grad_D2_logits_norm))
    disc_regularizer = tf.reduce_mean(reg_D1 + reg_D2)
    return disc_regularizer
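In symbols, this appears to be the Tikhonov-style discriminator gradient penalty of Roth et al. (2017); writing $\psi$ for the discriminator logits and $D = \sigma(\psi)$:

$$\Omega_{\mathrm{disc}} = \mathbb{E}\!\left[(1 - D(x))^2\,\lVert\nabla_x \psi(x)\rVert^2\right] + \mathbb{E}\!\left[D(\tilde{x})^2\,\lVert\nabla_{\tilde{x}} \psi(\tilde{x})\rVert^2\right],$$

with $x$ the real and $\tilde{x}$ the generated samples; the code estimates both expectations by the batch mean.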

#try tf.orthogonal_initializer
def dcgan_discriminator_spectra(x, prob=0.):
    filters = [128, 256, 512, 1024]
    alpha = 0.2

    net = tf.reshape(x, shape=[-1, x.shape[1], 1, 1])
    net = tf.layers.conv2d(net, filters=filters[0], kernel_size=[10,1], strides=[5,1],
                           activation=None, padding='same', name='layer1')
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d(net, filters=filters[1], kernel_size=[10,1], strides=[5,1],
                           activation=None, padding='same', name='layer2')
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d(net, filters=filters[2], kernel_size=[10,1], strides=[5,1],
                           activation=None, padding='same', name='layer3')
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d(net, filters=filters[3], kernel_size=[10,1], strides=[4,1],
                           activation=None, padding='same', name='layer4')
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.reshape(net, shape=[-1, filters[3]*7*1])
    #net = tf.layers.dense(net, 512, activation=None, name='layer5')
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = minibatch_discrimination(net, num_kernels=30, kernel_dim=20)

    net = tf.layers.dense(net, 1, activation=None, name='layer6')

    return net, tf.nn.sigmoid(net, name='discriminator_logit')

def dcgan_generator_spectra(noise, data_size, prob=0.):
    filters = [1024, 512, 256, 128]

    net = tf.reshape(noise, shape=[-1, noise.shape[1]])
    net = tf.layers.dense(net, filters[0]*7, activation=None)
    #net = tf.layers.dropout(net, rate=0.5)
    net = tf.reshape(net, shape=[-1, 7, 1, filters[0]])

    net = tf.layers.conv2d_transpose(net, filters=filters[1], kernel_size=[10,1], strides=[4,1],
                                     activation=None, padding='same', name='layer1')
    net = tf.nn.relu(net)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d_transpose(net, filters=filters[2], kernel_size=[10,1], strides=[5,1],
                                     activation=None, padding='same', name='layer2')
    net = tf.nn.relu(net)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d_transpose(net, filters=filters[3], kernel_size=[10,1], strides=[5,1],
                                     activation=None, padding='same', name='layer3')
    net = tf.nn.relu(net)
    net = tf.layers.batch_normalization(net)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d_transpose(net, filters=1, kernel_size=[10,1], strides=[5,1],
                                     activation=None, padding='same', name='layer4')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.tanh(net)
    net = tf.layers.dropout(net, rate=prob)
    net = tf.reshape(net, shape=[-1, data_size], name='generator_output')
    return net
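The transposed-convolution strides (4, 5, 5, 5) grow the initial length-7 map to 3500 samples, the in_width used by spectra_gan_dcgan.py, and the discriminator mirrors this with strides (5, 5, 5, 4), which is why it flattens to filters[3]*7. A quick check of that bookkeeping (with 'same' padding each layer scales the length by its stride):

# output length of the spectra generator under 'same' padding
length = 7
for stride in (4, 5, 5, 5):
    length *= stride
assert length == 3500   # matches in_width=3500 in spectra_gan_dcgan.py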

def dcgan_discriminator_mnist(x, y=None, prob=0.):
    filters = [64, 128, 256]
    alpha = 0.2
    net = tf.reshape(x, shape=[-1, 28, 28, 1])

    #reshape (adding zeros) to the more convenient 32x32 shape
    net = tf.pad(net, paddings=[[0,0],[2,2],[2,2],[0,0]], mode='CONSTANT', constant_values=0.)

    #condition concatenation for cGAN
    if y is not None:
        y = tf.reshape(y, shape=[-1, 1]) #(batch_size, 1)
        y = tf.tile(y, multiples=[1, 32*32])
        y = tf.reshape(y, shape=[-1, 32, 32, 1])
        net = tf.concat([net, y], axis=3)

    net = tf.layers.conv2d(net, filters=filters[0], kernel_size=[5,5], strides=[2,2],
                           activation=None, padding='same', name='layer1')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d(net, filters=filters[1], kernel_size=[5,5], strides=[2,2],
                           activation=None, padding='same', name='layer2')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d(net, filters=filters[2], kernel_size=[5,5], strides=[2,2],
                           activation=None, padding='same', name='layer3')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.reshape(net, shape=[-1, filters[2]*4*4])
    net = minibatch_discrimination(net, num_kernels=20, kernel_dim=10)

    #return the second to last layer to allow the implementation of feature discrimination
    features = net

    net = tf.layers.dense(net, 1, activation=None, name='layer_dense')
    return net, tf.nn.sigmoid(net, name='logit'), features
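The conditional branch broadcasts a scalar label across the spatial grid and appends it as an extra channel. A small numpy sketch of the same shape flow (illustrative only; batch size assumed):

import numpy as np

B = 4                                           # illustrative batch size
y = np.arange(B, dtype=np.float32)              # one scalar label per sample
y = y.reshape(-1, 1)                            # (B, 1)
y = np.tile(y, (1, 32 * 32))                    # (B, 1024)
y = y.reshape(-1, 32, 32, 1)                    # (B, 32, 32, 1): constant label plane
x = np.zeros((B, 32, 32, 1), dtype=np.float32)  # stand-in for the padded image batch
net = np.concatenate([x, y], axis=3)            # (B, 32, 32, 2): image + label channel
assert net.shape == (B, 32, 32, 2)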

def dcgan_generator_mnist(noise, y=None, prob=0.):
    filters = [256, 128, 64]

    net = tf.reshape(noise, shape=[-1, noise.shape[1]])

    if y is not None:
        y = tf.reshape(y, shape=[-1, 1])
        net = tf.concat([net, y], axis=1)

    net = tf.layers.batch_normalization(net)
    net = tf.nn.relu(net)
    net = tf.layers.dense(net, filters[0]*4*4, activation=None)
    net = tf.layers.dropout(net, rate=prob)
    net = tf.reshape(net, shape=[-1, 4, 4, filters[0]])

    net = tf.layers.conv2d_transpose(net, filters=filters[1], kernel_size=[5,5], strides=[2,2],
                                     activation=None, padding='same', name='layer1')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.relu(net)
    net = tf.layers.dropout(net, rate=0.2)

    net = tf.layers.conv2d_transpose(net, filters=filters[2], kernel_size=[5,5], strides=[2,2],
                                     activation=None, padding='same', name='layer2')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.relu(net)
    net = tf.layers.dropout(net, rate=0.2)

    net = tf.layers.conv2d_transpose(net, filters=1, kernel_size=[5,5], strides=[2,2],
                                     activation=None, padding='same', name='layer3')

    #crop the outer parts of the images to retrieve the original 28x28 size
    net = tf.slice(net, begin=[0,2,2,0], size=[-1,28,28,-1])
    net = tf.nn.tanh(net)
    net = tf.layers.dropout(net, rate=0.2)

    return tf.reshape(net, shape=[-1, 28, 28], name='output')
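The pad/crop pair keeps the two MNIST networks symmetric: the discriminator pads 28x28 inputs to 32x32 and halves the resolution three times, while the generator walks back up from 4x4 before cropping to 28x28. The arithmetic, under 'same' padding:

# discriminator: pad, then three stride-2 convolutions
size = 28 + 2 + 2         # tf.pad adds 2 pixels per side -> 32
for _ in range(3):
    size //= 2
assert size == 4          # hence the flatten to filters[2]*4*4
# generator: three stride-2 transposed convolutions, 4 -> 8 -> 16 -> 32, then crop to 28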

def dcgan_discriminator_cifar10(x, prob=0.):
    filters = [64, 128, 256]
    alpha = 0.2
    assert x.shape[1] == 32
    assert x.shape[2] == 32
    net = tf.reshape(x, shape=[-1, 32, 32, 3])

    net = tf.layers.conv2d(net, filters=filters[0], kernel_size=[5,5], strides=[2,2],
                           activation=None, padding='same', name='layer1')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d(net, filters=filters[1], kernel_size=[5,5], strides=[2,2],
                           activation=None, padding='same', name='layer2')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.layers.conv2d(net, filters=filters[2], kernel_size=[5,5], strides=[2,2],
                           activation=None, padding='same', name='layer3')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.leaky_relu(net, alpha=alpha)
    net = tf.layers.dropout(net, rate=prob)

    net = tf.reshape(net, shape=[-1, filters[2]*4*4])
    net = minibatch_discrimination(net, num_kernels=20, kernel_dim=10)

    net = tf.layers.dense(net, 1, activation=None, name='layer4')
    return net, tf.nn.sigmoid(net, name='discriminator_logit')

def dcgan_generator_cifar10(noise, prob=0.):
    filters = [256, 128, 64]

    net = tf.reshape(noise, shape=[-1, noise.shape[1]])
    net = tf.layers.batch_normalization(net)
    net = tf.nn.relu(net)
    net = tf.layers.dense(net, filters[0]*4*4, activation=None)
    net = tf.layers.dropout(net, rate=prob)
    net = tf.reshape(net, shape=[-1, 4, 4, filters[0]])

    net = tf.layers.conv2d_transpose(net, filters=filters[1], kernel_size=[5,5], strides=[2,2],
                                     activation=None, padding='same', name='layer1')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.relu(net)
    net = tf.layers.dropout(net, rate=0.2)

    net = tf.layers.conv2d_transpose(net, filters=filters[2], kernel_size=[5,5], strides=[2,2],
                                     activation=None, padding='same', name='layer2')
    net = tf.layers.batch_normalization(net)
    net = tf.nn.relu(net)
    net = tf.layers.dropout(net, rate=0.2)

    net = tf.layers.conv2d_transpose(net, filters=3, kernel_size=[5,5], strides=[2,2],
                                     activation=None, padding='same', name='layer3')

    net = tf.nn.tanh(net)
    net = tf.layers.dropout(net, rate=0.2)
    return tf.reshape(net, shape=[-1, 32, 32, 3], name='generator_output')
