@@ -136,7 +136,8 @@ def training_step(self, batch, batch_nb, optimizer_i):
 
             # how well can it label as fake?
             fake = torch.zeros(imgs.size(0), 1)
-            fake_loss = self.adversarial_loss(self.discriminator(self.generated_imgs.detach()), fake)
+            fake_loss = self.adversarial_loss(
+                self.discriminator(self.generated_imgs.detach()), fake)
 
             # discriminator loss is the average of these
             d_loss = (real_loss + fake_loss) / 2
@@ -146,8 +147,6 @@ def training_step(self, batch, batch_nb, optimizer_i):
                 'progress_bar': tqdm_dict,
                 'log': tqdm_dict
             })
-
-
             return output
 
     def configure_optimizers(self):
@@ -200,9 +199,12 @@ def main(hparams):
     parser = ArgumentParser()
     parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
     parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
-    parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
-    parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of second order momentum of gradient")
-    parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
+    parser.add_argument("--b1", type=float, default=0.5,
+                        help="adam: decay of first order momentum of gradient")
+    parser.add_argument("--b2", type=float, default=0.999,
+                        help="adam: decay of second order momentum of gradient")
+    parser.add_argument("--latent_dim", type=int, default=100,
+                        help="dimensionality of the latent space")
 
 
     hparams = parser.parse_args()