Commit bc6b7d5: added all chapters (parent: 463228a)

306 files changed: 90,594 additions, 0 deletions


Chapter01/setupDeepLearning.sh

Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@
#!/bin/bash

# Save this script to /home/yourUserName, chmod +x setupDeepLearning.sh, run using ./setupDeepLearning.sh

mkdir tensorflow
cd tensorflow

################################################################################
# Install utils.
################################################################################
echo -e "\e[36m***Installing utilities*** \e[0m"
sudo apt-get update
sudo apt-get install unzip git-all pkg-config zip g++ zlib1g-dev

################################################################################
# Install Java deps.
################################################################################
echo -e "\e[36m***Installing Java 8. Press ENTER when prompted*** \e[0m"
echo -e "\e[36m***And accept the license*** \e[0m"
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer

################################################################################
# Install Bazel dep.
################################################################################
echo -e "\e[36m***Installing Bazel*** \e[0m"
wget https://github.com/bazelbuild/bazel/releases/download/0.11.1/bazel-0.11.1-without-jdk-installer-linux-x86_64.sh -O bazel-installer-linux-x86_64.sh
chmod +x bazel-installer-linux-x86_64.sh
sudo ./bazel-installer-linux-x86_64.sh
rm bazel-installer-linux-x86_64.sh
sudo chown $USER:$USER ~/.cache/bazel/

################################################################################
# Fetch Swig and Python build deps.
################################################################################
echo -e "\e[36m***Installing Python build deps*** \e[0m"
sudo apt-get install swig
sudo apt-get install build-essential python-dev python-pip checkinstall
sudo apt-get install libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev

################################################################################
# Fetch and install Python 2.7.14 from source.
################################################################################
echo -e "\e[36m***Installing Python*** \e[0m"
wget https://www.python.org/ftp/python/2.7.14/Python-2.7.14.tgz
tar -xvf Python-2.7.14.tgz
cd Python-2.7.14
./configure
make
sudo make install
cd ../
rm Python-2.7.14.tgz

################################################################################
# Install the TensorFlow CPU build from PyPI.
################################################################################
echo -e "\e[36m***Installing TensorFlow (CPU) via pip*** \e[0m"
sudo pip install tensorflow

################################################################################
# Install Keras from PyPI.
################################################################################
echo -e "\e[36m***Installing Keras with the TensorFlow backend*** \e[0m"
sudo pip install keras

################################################################################
# Install other Python dependencies.
################################################################################
echo -e "\e[36m***Installing Python deps*** \e[0m"
sudo apt-get install python-numpy
sudo pip install numpy --upgrade
sudo pip --no-cache-dir install Pillow pandas scipy sklearn
sudo pip install web.py gunicorn

echo -e "\e[36mReady to run TensorFlow! \e[0m"

################################################################################
# Check the installations.
################################################################################
echo -e "\e[36m***Listing installed modules*** \e[0m"

pip list
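
The script ends with pip list, which only confirms that the packages are registered with pip. A quick import check catches broken installs earlier; the snippet below is a minimal sketch (the file name verify_setup.py is hypothetical, not part of this commit) and assumes the TensorFlow 1.x CPU build installed above:

# verify_setup.py (hypothetical helper, not part of the commit):
# confirm that the core packages import and report their versions.
import numpy as np
import tensorflow as tf
import keras

print("NumPy:", np.__version__)
print("TensorFlow:", tf.__version__)
print("Keras:", keras.__version__)

# One-op graph run as a smoke test of the TF 1.x session API.
with tf.Session() as sess:
    print(sess.run(tf.constant("TensorFlow is working")))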

Chapter02/.DS_Store

6 KB
Binary file not shown.

Chapter02/hy_param.py

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
#!/usr/bin/env python2

# Hyperparameters and all other kinds of params

# Parameters
learning_rate = 0.01
num_steps = 100
batch_size = 128
display_step = 1


# Network parameters
n_hidden_1 = 300   # 1st layer number of neurons
n_hidden_2 = 300   # 2nd layer number of neurons
num_input = 784    # MNIST data input (img shape: 28*28)
num_classes = 10   # MNIST total classes (0-9 digits)

# Training parameters
checkpoint_every = 100
checkpoint_dir = './runs/'

Chapter02/inference.py

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-


from __future__ import print_function


import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

import hy_param

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Point to the latest model checkpoint
checkpoint_file = tf.train.latest_checkpoint(os.path.join(hy_param.checkpoint_dir, 'checkpoints'))
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))

# Load test data (one image wrapped in a batch of size 1)
test_data = np.array([mnist.test.images[6]])

# Load the input placeholder from the saved graph
input_x = tf.get_default_graph().get_operation_by_name("input_x").outputs[0]

# Load the prediction operation
prediction = tf.get_default_graph().get_operation_by_name("prediction").outputs[0]


with tf.Session() as sess:
    # Restore the model weights from the checkpoint
    saver.restore(sess, checkpoint_file)

    # Execute the model to make a prediction
    data = sess.run(prediction, feed_dict={input_x: test_data})

    print("Predicted digit:", data.argmax())


# Display the fed image
print("Input image:")
plt.gray()
plt.imshow(test_data.reshape([28, 28]))
plt.show()  # needed when running as a script, otherwise nothing is rendered
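
Because read_data_sets was called with one_hot=True, the ground-truth label for the same test image is available as a one-hot vector, so the prediction can be checked directly. A short follow-up sketch (not part of the commit; it reuses mnist and data from inference.py and assumes train.py has already produced a checkpoint):

# Hypothetical follow-up to inference.py: compare the prediction with the
# ground-truth one-hot label of the same test image.
true_label = mnist.test.labels[6].argmax()
print("True digit:", true_label)
print("Correct!" if data.argmax() == true_label else "Misclassified.")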

Chapter02/model.py

Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

import tensorflow as tf
import hy_param


## Defining placeholders which will be used as inputs for the model
X = tf.placeholder("float", [None, hy_param.num_input], name="input_x")
Y = tf.placeholder("float", [None, hy_param.num_classes], name="input_y")


# Defining variables for weights & biases
weights = {
    'h1': tf.Variable(tf.random_normal([hy_param.num_input, hy_param.n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([hy_param.n_hidden_1, hy_param.n_hidden_2])),
    'out': tf.Variable(tf.random_normal([hy_param.n_hidden_2, hy_param.num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([hy_param.n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([hy_param.n_hidden_2])),
    'out': tf.Variable(tf.random_normal([hy_param.num_classes]))
}


# Hidden fully connected layer 1 with 300 neurons
layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])
# Hidden fully connected layer 2 with 300 neurons
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
# Output fully connected layer with a neuron for each class
logits = tf.matmul(layer_2, weights['out']) + biases['out']

# Performing the softmax operation
prediction = tf.nn.softmax(logits, name='prediction')

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=hy_param.learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate the model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
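
The graph above is a plain two-hidden-layer MLP (784 -> 300 -> 300 -> 10). Counting weights and biases layer by layer puts it at roughly 329k trainable parameters; a quick back-of-the-envelope sketch using the sizes from hy_param.py:

# Rough parameter count for the MLP defined in model.py (sketch only).
n_in, n_h1, n_h2, n_out = 784, 300, 300, 10
params = (n_in * n_h1 + n_h1) + (n_h1 * n_h2 + n_h2) + (n_h2 * n_out + n_out)
print(params)  # 328810 weights and biases in total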

Chapter02/train.py

Lines changed: 69 additions & 0 deletions
@@ -0,0 +1,69 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-


from __future__ import print_function

# Import MNIST data
import os
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import tensorflow as tf
import model
import hy_param


## tf Graph input
X = model.X
Y = model.Y


checkpoint_dir = os.path.abspath(os.path.join(hy_param.checkpoint_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)

# Initialize the variables
init = tf.global_variables_initializer()
all_loss = []

# Start training
with tf.Session() as sess:
    writer_1 = tf.summary.FileWriter("./runs/summary/", sess.graph)

    # Log the training accuracy as a scalar summary
    sum_var = tf.summary.scalar("accuracy", model.accuracy)
    write_op = tf.summary.merge_all()

    # Run the initializer
    sess.run(init)

    for step in range(1, hy_param.num_steps + 1):
        # Extract the next training batch
        batch_x, batch_y = mnist.train.next_batch(hy_param.batch_size)
        # Run optimization op (backprop)
        sess.run(model.train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % hy_param.display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc, summary = sess.run([model.loss_op, model.accuracy, write_op],
                                          feed_dict={X: batch_x, Y: batch_y})
            all_loss.append(loss)
            writer_1.add_summary(summary, step)
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
        if step % hy_param.checkpoint_every == 0:
            path = saver.save(sess, checkpoint_prefix, global_step=step)
            # print("Saved model checkpoint to {}\n".format(path))

    print("Optimization Finished!")

    # Calculate accuracy for MNIST test images
    print("Testing Accuracy:",
          sess.run(model.accuracy, feed_dict={X: mnist.test.images,
                                              Y: mnist.test.labels}))
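
train.py accumulates the per-step minibatch loss in all_loss but never reads it back, and the scalar summaries go to ./runs/summary/ (viewable with tensorboard --logdir ./runs/summary/). A small optional sketch, not part of the commit, that plots the collected losses once training finishes:

# Hypothetical addition after the training loop in train.py:
# plot the minibatch losses collected in all_loss.
import matplotlib.pyplot as plt

plt.plot(all_loss)
plt.xlabel("Logged step")
plt.ylabel("Minibatch loss")
plt.title("Training loss")
plt.show()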

Chapter03/.DS_Store

6 KB
Binary file not shown.
