# 06_03_tensorflow_example.py
""" Neural Network with Eager API.
A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
implementation with TensorFlow's Eager API. This example is using the MNIST database
of handwritten digits (http://yann.lecun.com/exdb/mnist/).
This example is using TensorFlow layers, see 'neural_network_raw' example for
a raw implementation with variables.
Links:
[MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.eager as tfe
# Set Eager API
tfe.enable_eager_execution()
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
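# Note: with one_hot=False the labels come back as integer class ids (0-9)
# rather than one-hot vectors, which is the format the sparse softmax
# cross-entropy loss below expects.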
# Parameters
learning_rate = 0.001
num_steps = 1000
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
# Using TF Dataset to split data into batches
dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels)).batch(batch_size)
dataset_iter = tfe.Iterator(dataset)
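# Each iterator step yields a (images, labels) tuple with shapes
# (batch_size, 784) and (batch_size,); the last batch of an epoch may be
# smaller if the dataset size is not a multiple of batch_size.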
# Define the neural network. To use the eager API and tf.layers API together,
# we must subclass tfe.Network, as follows:
class NeuralNet(tfe.Network):
    def __init__(self):
        # Define each layer
        super(NeuralNet, self).__init__()
        # Hidden fully connected layer with 256 neurons
        self.layer1 = self.track_layer(
            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))
        # Hidden fully connected layer with 256 neurons
        self.layer2 = self.track_layer(
            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))
        # Output fully connected layer with a neuron for each class
        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))

    def call(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        return self.out_layer(x)
neural_net = NeuralNet()
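# A quick shape sanity check (an addition to the original script): under
# eager execution the network can be called directly on a dummy batch, and
# the untrained logits should have shape (batch_size, num_classes).
_dummy_logits = neural_net(tf.zeros([batch_size, num_input]))
assert _dummy_logits.shape == (batch_size, num_classes)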
# Cross-Entropy loss function
def loss_fn(inference_fn, inputs, labels):
    # Using sparse_softmax cross entropy
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=inference_fn(inputs), labels=labels))
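# Note: sparse_softmax_cross_entropy_with_logits applies the softmax
# internally, so the network's raw logits are passed in directly.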
# Calculate accuracy
def accuracy_fn(inference_fn, inputs, labels):
    prediction = tf.nn.softmax(inference_fn(inputs))
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
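# Note: tf.argmax returns int64 indices, so the labels passed to accuracy_fn
# must also be int64; both the training loop and the test evaluation below
# cast their labels accordingly.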
# Adam optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Compute gradients
grad = tfe.implicit_gradients(loss_fn)
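# tfe.implicit_gradients wraps loss_fn into a function that returns
# (gradient, variable) pairs for every trainable variable the loss touches,
# which is the format apply_gradients expects. A roughly equivalent explicit
# form, sketched as a comment (assuming tf.GradientTape is available in this
# TensorFlow version):
#
#   with tf.GradientTape() as tape:
#       loss = loss_fn(neural_net, x_batch, y_batch)
#   grads = tape.gradient(loss, neural_net.variables)
#   optimizer.apply_gradients(zip(grads, neural_net.variables))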
# Training
average_loss = 0.
average_acc = 0.
for step in range(num_steps):
    # Iterate through the dataset
    try:
        d = dataset_iter.next()
    except StopIteration:
        # Dataset exhausted: restart the iterator from the beginning
        dataset_iter = tfe.Iterator(dataset)
        d = dataset_iter.next()

    # Images
    x_batch = d[0]
    # Labels (cast to int64 to match tf.argmax's output dtype)
    y_batch = tf.cast(d[1], dtype=tf.int64)

    # Compute the batch loss
    batch_loss = loss_fn(neural_net, x_batch, y_batch)
    average_loss += batch_loss
    # Compute the batch accuracy
    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)
    average_acc += batch_accuracy

    if step == 0:
        # Display the initial cost, before optimizing
        print("Initial loss= {:.9f}".format(average_loss))

    # Update the variables following the gradient info
    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))

    # Display info
    if (step + 1) % display_step == 0 or step == 0:
        if step > 0:
            average_loss /= display_step
            average_acc /= display_step
        print("Step:", '%04d' % (step + 1), " loss=",
              "{:.9f}".format(average_loss), " accuracy=",
              "{:.4f}".format(average_acc))
        average_loss = 0.
        average_acc = 0.
# Evaluate the model on the test image set
testX = mnist.test.images
# Cast the labels to int64 so accuracy_fn can compare them with tf.argmax
testY = tf.cast(mnist.test.labels, tf.int64)
test_acc = accuracy_fn(neural_net, testX, testY)
print("Test set accuracy: {:.4f}".format(test_acc))