
Commit 67f6e7b

Borda authored and williamFalcon committed
Fix testing for mac OS (#399)
* fix test for MacOS
* formatting
* fix pkg names
1 parent 58d52c2 commit 67f6e7b

File tree

4 files changed, +13 -11 lines changed


.travis.yml

+1 -1
@@ -35,7 +35,7 @@ matrix:
       addons:
         homebrew:
           # update: true
-          packages: python3
+          packages: python3.6
       before_install:
         - pip3 install virtualenv
         - virtualenv -p python3 ~/venv
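The macOS job installs Python via Homebrew and then builds a virtualenv from it, so pinning the Homebrew package to python3.6 fixes which interpreter the tests run under. As a rough illustration only (not part of this commit), a guard like the following could be run inside the activated virtualenv to confirm which interpreter was picked up; the minimum version here is an assumption:

    # Illustrative sanity check, not from this commit: confirm the interpreter
    # inside the Travis virtualenv before the test suite starts.
    import sys

    EXPECTED = (3, 6)  # assumed minimum for this CI setup
    assert sys.version_info[:2] >= EXPECTED, "unexpected interpreter: " + sys.version
    print("running under Python %d.%d" % sys.version_info[:2])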

MANIFEST.in

+3 -3
@@ -16,9 +16,9 @@ exclude *.svg
 recursive-include pytorch_lightning *.py

 # include examples
-recursive-include examples *.py
-recursive-include examples *.md
-recursive-include examples *.sh
+recursive-include pl_examples *.py
+recursive-include pl_examples *.md
+recursive-include pl_examples *.sh

 # exclude tests from package
 recursive-exclude tests *
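Switching the includes from examples to pl_examples keeps the renamed example scripts inside the source distribution. A minimal sketch of one way to double-check that, assuming an sdist has already been built with python setup.py sdist; the archive name below is hypothetical:

    # Sketch only: list the packaged pl_examples files from a built sdist.
    import tarfile

    sdist_path = "dist/pytorch-lightning-0.5.3.tar.gz"  # hypothetical archive name

    with tarfile.open(sdist_path) as archive:
        packaged = [name for name in archive.getnames() if "/pl_examples/" in name]

    # If MANIFEST.in is honoured, the example .py/.md/.sh files appear here.
    print("\n".join(packaged) or "no pl_examples files packaged")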

pl_examples/domain_templates/gan.py

+8 -6
@@ -136,7 +136,8 @@ def training_step(self, batch, batch_nb, optimizer_i):

             # how well can it label as fake?
             fake = torch.zeros(imgs.size(0), 1)
-            fake_loss = self.adversarial_loss(self.discriminator(self.generated_imgs.detach()), fake)
+            fake_loss = self.adversarial_loss(
+                self.discriminator(self.generated_imgs.detach()), fake)

             # discriminator loss is the average of these
             d_loss = (real_loss + fake_loss) / 2
@@ -146,8 +147,6 @@ def training_step(self, batch, batch_nb, optimizer_i):
                 'progress_bar': tqdm_dict,
                 'log': tqdm_dict
             })
-
-
             return output

     def configure_optimizers(self):
@@ -200,9 +199,12 @@ def main(hparams):
     parser = ArgumentParser()
     parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
     parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
-    parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
-    parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
-    parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
+    parser.add_argument("--b1", type=float, default=0.5,
+                        help="adam: decay of first order momentum of gradient")
+    parser.add_argument("--b2", type=float, default=0.999,
+                        help="adam: decay of first order momentum of gradient")
+    parser.add_argument("--latent_dim", type=int, default=100,
+                        help="dimensionality of the latent space")

     hparams = parser.parse_args()
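The gan.py hunks only rewrap long statements and drop extra blank lines for the formatting pass; the discriminator objective itself is unchanged. For context, a minimal self-contained sketch of the real/fake loss pattern the first hunk touches, with stand-in modules and tensors rather than the example's actual attributes:

    # Stand-alone sketch of the discriminator loss pattern; names and shapes are
    # stand-ins, not the attributes used in pl_examples/domain_templates/gan.py.
    import torch
    import torch.nn.functional as F

    batch_size, latent_dim = 64, 100
    discriminator = torch.nn.Sequential(torch.nn.Linear(latent_dim, 1), torch.nn.Sigmoid())
    real_imgs = torch.randn(batch_size, latent_dim)
    generated_imgs = torch.randn(batch_size, latent_dim, requires_grad=True)

    valid = torch.ones(batch_size, 1)   # label for real samples
    fake = torch.zeros(batch_size, 1)   # label for generated samples

    # how well can it label real vs. fake? detach() keeps generator grads out
    real_loss = F.binary_cross_entropy(discriminator(real_imgs), valid)
    fake_loss = F.binary_cross_entropy(
        discriminator(generated_imgs.detach()), fake)

    # discriminator loss is the average of these
    d_loss = (real_loss + fake_loss) / 2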
tox.ini

+1 -1
@@ -35,7 +35,7 @@ commands =
     check-manifest --ignore tox.ini
     python setup.py check -m -s
     flake8 .
-    coverage run --source pytorch_lightning -m py.test pytorch_lightning tests examples -v --doctest-modules
+    coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --doctest-modules

[flake8]
exclude = .tox,*.egg,build,temp,examples/*
