mnist_gan.py (forked from tinygrad/tinygrad)
from pathlib import Path
import numpy as np
from tqdm import trange
import torch
from torchvision.utils import make_grid, save_image
from tinygrad.tensor import Tensor
from tinygrad.helpers import getenv
from tinygrad.nn import optim
from datasets import fetch_mnist
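
# A minimal fully-connected GAN trained on MNIST: the generator maps 128-dim
# noise vectors to 28x28 images, and the discriminator scores images as real
# or fake. Sample grids are written to ./outputs during training.

# Generator: a 4-layer MLP expanding 128-dim noise to 784 pixels; tanh keeps
# outputs in [-1, 1], matching the normalization applied to real images below.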
class LinearGen:
  def __init__(self):
    self.l1 = Tensor.scaled_uniform(128, 256)
    self.l2 = Tensor.scaled_uniform(256, 512)
    self.l3 = Tensor.scaled_uniform(512, 1024)
    self.l4 = Tensor.scaled_uniform(1024, 784)

  def forward(self, x):
    x = x.dot(self.l1).leakyrelu(0.2)
    x = x.dot(self.l2).leakyrelu(0.2)
    x = x.dot(self.l3).leakyrelu(0.2)
    x = x.dot(self.l4).tanh()
    return x
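
# Discriminator: a 4-layer MLP with dropout, emitting log-probabilities over
# two classes (index 0 = fake, index 1 = real) via log_softmax.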
class LinearDisc:
  def __init__(self):
    self.l1 = Tensor.scaled_uniform(784, 1024)
    self.l2 = Tensor.scaled_uniform(1024, 512)
    self.l3 = Tensor.scaled_uniform(512, 256)
    self.l4 = Tensor.scaled_uniform(256, 2)

  def forward(self, x):
    x = x.dot(self.l1).leakyrelu(0.2).dropout(0.3)
    x = x.dot(self.l2).leakyrelu(0.2).dropout(0.3)
    x = x.dot(self.l3).leakyrelu(0.2).dropout(0.3)
    x = x.dot(self.l4).log_softmax()
    return x
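
# Sample a random batch of real images and rescale pixels from [0, 255] to
# [-1, 1] so real and generated (tanh) images share the same range.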
def make_batch(images):
  sample = np.random.randint(0, len(images), size=batch_size)
  image_b = images[sample].reshape(-1, 28*28).astype(np.float32) / 255.0
  image_b = (image_b - 0.5) / 0.5
  return Tensor(image_b)
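
# Labels are one-hot rows scaled by -2.0: the discriminator outputs
# log-probabilities, and the loss used below is (output * labels).mean() over a
# (bs, 2) matrix, so the factor of 2 cancels the mean over both columns and the
# result is exactly the average negative log-likelihood of the target class.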
def make_labels(bs, val):
  y = np.zeros((bs, 2), np.float32)
  y[range(bs), [val] * bs] = -2.0  # Can we do label smoothing? e.g. -2.0 changed to -1.98789.
  return Tensor(y)
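
# One answer to the label-smoothing question above (a hedged sketch, not part
# of the original: the helper name and the default `smooth` value are
# illustrative). Spreading a little label weight onto the wrong class turns the
# loss into the standard smoothed cross-entropy.
def make_smooth_labels(bs, val, smooth=0.1):
  y = np.full((bs, 2), -2.0 * smooth, np.float32)   # small weight on the wrong class
  y[range(bs), [val] * bs] = -2.0 * (1.0 - smooth)  # most weight on the target class
  return Tensor(y)

# Standard GAN discriminator update: real images labeled 1, generated images
# labeled 0; both losses backpropagate before a single optimizer step.
# `discriminator` and `batch_size` come from module scope (set under __main__).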
def train_discriminator(optimizer, data_real, data_fake):
  real_labels = make_labels(batch_size, 1)
  fake_labels = make_labels(batch_size, 0)
  optimizer.zero_grad()
  output_real = discriminator.forward(data_real)
  output_fake = discriminator.forward(data_fake)
  loss_real = (output_real * real_labels).mean()
  loss_fake = (output_fake * fake_labels).mean()
  loss_real.backward()
  loss_fake.backward()
  optimizer.step()
  return (loss_real + loss_fake).cpu().numpy()
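
# Non-saturating generator update: generated images are labeled as real, so
# minimizing the loss pushes the generator to fool the discriminator.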
def train_generator(optimizer, data_fake):
  real_labels = make_labels(batch_size, 1)
  optimizer.zero_grad()
  output = discriminator.forward(data_fake)
  loss = (output * real_labels).mean()
  loss.backward()
  optimizer.step()
  return loss.cpu().numpy()
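
# Training entry point: each batch runs k discriminator updates followed by one
# generator update. A fixed noise batch (ds_noise) is reused for sampling so
# the image grids saved every sample_interval epochs are directly comparable.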
if __name__ == "__main__":
  # data for training and validation
  images_real = np.vstack(fetch_mnist()[::2])
  ds_noise = Tensor.randn(64, 128, requires_grad=False)
  # parameters
  epochs, batch_size, k = 300, 512, 1
  sample_interval = epochs // 10
  n_steps = len(images_real) // batch_size
  # models
  generator = LinearGen()
  discriminator = LinearDisc()
  # path to store results
  output_dir = Path(".").resolve() / "outputs"
  output_dir.mkdir(exist_ok=True)
  # optimizers
  optim_g = optim.Adam(optim.get_parameters(generator), lr=0.0002, b1=0.5)  # 0.0002 for equilibrium!
  optim_d = optim.Adam(optim.get_parameters(discriminator), lr=0.0002, b1=0.5)
  # training loop
  for epoch in (t := trange(epochs)):
    loss_g, loss_d = 0.0, 0.0
    for _ in range(n_steps):
      data_real = make_batch(images_real)
      for step in range(k):  # Try with k = 5 or 7.
        noise = Tensor.randn(batch_size, 128)
        data_fake = generator.forward(noise).detach()
        loss_d += train_discriminator(optim_d, data_real, data_fake)
      noise = Tensor.randn(batch_size, 128)
      data_fake = generator.forward(noise)
      loss_g += train_generator(optim_g, data_fake)
    if (epoch + 1) % sample_interval == 0:
      fake_images = generator.forward(ds_noise).detach().cpu().numpy()
      fake_images = (fake_images.reshape(-1, 1, 28, 28) + 1) / 2  # rescale to the [0, 1] range.
      save_image(make_grid(torch.tensor(fake_images)), output_dir / f"image_{epoch+1}.jpg")
    t.set_description(f"Generator loss: {loss_g/n_steps}, Discriminator loss: {loss_d/n_steps}")
  print("Training Completed!")
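
  # After training, one more sample grid could be saved the same way the loop
  # above does (a sketch; the filename is illustrative):
  #   fake = generator.forward(Tensor.randn(64, 128)).detach().cpu().numpy()
  #   fake = (fake.reshape(-1, 1, 28, 28) + 1) / 2
  #   save_image(make_grid(torch.tensor(fake)), output_dir / "final.jpg")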