
Commit c34bc10

decoupled presentation

1 parent 43d69df commit c34bc10

File tree

5 files changed: +157 −76 lines

.gitignore

Lines changed: 2 additions & 2 deletions

@@ -34,6 +34,6 @@ Docs/api/*.yml
 __pycache__
 *.checkpoint
 *.tfevents.*
-input/*_cntk_text.txt
 model/
-logs/
+logs/
+data/

Digitz/MainWindow.xaml.cs

Lines changed: 1 addition & 1 deletion

@@ -85,7 +85,7 @@ private void recognizeButton_Click(object sender, RoutedEventArgs e)
             var session = new TFSession(graph);
             var runner = session.GetRunner();
             runner.AddInput(graph["InputData"][0], tensor);
-            runner.Fetch(graph["Model/linear"][0]);
+            runner.Fetch(graph["Model/model"][0]);
             var output = runner.Run();
             TFTensor result = output[0];
             float[] p = ((float[][])result.GetValue(true))[0];
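The same fetch can be sanity-checked from Python against the frozen graph. A minimal sketch, assuming a digits.pb produced by save_model() in the LearnDigitz.py changes below and the 'input' placeholder name from that file; the dated model path and the all-zeros image are placeholders, not part of the commit:

```python
import numpy as np
import tensorflow as tf

# Load the frozen graph (path is a stand-in for a dated export_path).
graph_def = tf.GraphDef()
with tf.gfile.GFile("model/model_XX/digits.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")

with tf.Session(graph=graph) as sess:
    x = graph.get_tensor_by_name("input:0")           # placeholder from LearnDigitz.py
    pred = graph.get_tensor_by_name("Model/model:0")  # same node the runner fetches
    image = np.zeros((1, 784), dtype=np.float32)      # stand-in for a drawn digit
    print(sess.run(pred, feed_dict={x: image}))
```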

LearnDigitz/LearnDigitz.py

Lines changed: 124 additions & 72 deletions

@@ -7,69 +7,143 @@
 from datetime import *
 
 ###################################################################
-# Variables                                                       #
-# When launching project or scripts from Visual Studio,           #
-# input_dir and output_dir are passed as arguments automatically. #
-# Users could set them from the project setting page.             #
+# Output Paths                                                    #
 ###################################################################
+unique = datetime.now().strftime('%m.%d_%h.%M')
+data_path = 'data'
+logs_path = os.path.join('logs', 'log_' + unique)
+export_path = os.path.join('model', 'model_' + unique)
 
-FLAGS = tf.app.flags.FLAGS
-tf.app.flags.DEFINE_string("input_dir", ".", "Input directory where training dataset and meta data are saved")
-tf.app.flags.DEFINE_string("output_dir", ".", "Output directory where output such as logs are saved.")
-tf.app.flags.DEFINE_string("log_dir", ".", "Model directory where final model files are saved.")
-
-def main(_):
-    # clearing graph
-    tf.reset_default_graph()
-
-    # Import MINST data
-    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+###################################################################
+# Parameters                                                      #
+###################################################################
+learning_rate = 0.01
+training_epochs = 10
+batch_size = 100
+display_epoch = 1
 
-    # Parameters
-    learning_rate = 0.01
-    training_epochs = 10
-    batch_size = 100
-    display_epoch = 1
-    unique = datetime.now().strftime('%m-%d_%H_%M')
-    logs_path = os.path.join('logs', unique)
-    export_path = os.path.join('model', unique)
+###################################################################
+# Models                                                          #
+###################################################################
+def linear_model(x):
+    # set model weights
+    W = tf.Variable(tf.zeros([784, 10]), name='weights')
+    b = tf.Variable(tf.zeros([10]), name='bias')
 
-    # tf Graph Input
-    # mnist data image of shape 28*28=784
-    x = tf.placeholder(tf.float32, [None, 784], name='InputData')
-    # 0-9 digits recognition => 10 classes
-    y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
+    # scope for tensorboard
+    with tf.name_scope('Model'):
+        pred = tf.add(tf.matmul(x, W), b, name="model") # linear combination
+    return pred
 
-    # Set model weights
+def softmax_model(x):
+    # set model weights
     W = tf.Variable(tf.zeros([784, 10]), name='weights')
     b = tf.Variable(tf.zeros([10]), name='bias')
 
-    # Construct model and encapsulating all ops into scopes, making
-    # Tensorboard's Graph visualization more convenient
+    # scope for tensorboard
     with tf.name_scope('Model'):
-        # Model
-        #pred = tf.nn.softmax(tf.matmul(x, W) + b, name="model") # Softmax
-        pred = tf.add(tf.matmul(x, W), b, name="linear") # linear combination
+        pred = tf.nn.softmax(tf.matmul(x, W) + b, name="model") # Softmax
+    return pred
 
+###################################################################
+# Cost / Loss Functions                                           #
+###################################################################
+def cross_entropy_loss(fn, y):
     with tf.name_scope('Loss'):
         # Minimize error using cross entropy
-        # cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
-
+        cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(fn), reduction_indices=1))
+    return cost
+
+def builtin_cross_entropy_loss(fn, y):
+    with tf.name_scope('Loss'):
         # Minimize error with *better* cross entropy
-        # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
-
+        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=fn))
+    return cost
+
+def squared_error_loss(fn, y):
+    with tf.name_scope('Loss'):
+        # Minimize error with *better* cross entropy
+        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=fn))
+    return cost
+
+def builtin_l2_loss(fn, y):
+    with tf.name_scope('Loss'):
         # Minimize error using squared error
-        # cost = tf.nn.l2_loss(y - pred)
-
-        # Minimize with computed square error
-        cost = tf.reduce_mean(tf.pow(y - pred, 2))
-    with tf.name_scope('SGD'):
-        # Gradient Descent
-        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+        cost = tf.nn.l2_loss(y - fn)
+    return cost
+
+###################################################################
+# Accuracy                                                        #
+###################################################################
+def get_accuracy(fn, y):
     with tf.name_scope('Accuracy'):
-        # Accuracy
-        acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+        acc = tf.equal(tf.argmax(fn, 1), tf.argmax(y, 1))
         acc = tf.reduce_mean(tf.cast(acc, tf.float32))
+    return acc
+
+###################################################################
+# Optimizer                                                       #
+###################################################################
+def sgd_optimizer(cost, lr):
+    with tf.name_scope('SGD'):
+        # Gradient Descent
+        optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost)
+    return optimizer
+
+###################################################################
+# Save Model                                                      #
+###################################################################
+def save_model(sess):
+    # saving model
+    checkpoint = os.path.join(export_path, "model.ckpt")
+    saver = tf.train.Saver()
+    # checkpoint - variables
+    saver.save(sess, checkpoint)
+    # graph
+    tf.train.write_graph(sess.graph_def, export_path, "model.pb", as_text=False)
+    # freeze
+    g = os.path.join(export_path, "model.pb")
+    frozen = os.path.join(export_path, "digits.pb")
+
+    freeze.freeze_graph(
+        input_graph = g,
+        input_saver = "",
+        input_binary = True,
+        input_checkpoint = checkpoint,
+        output_node_names = "Model/model",
+        restore_op_name = "",
+        filename_tensor_name = "",
+        output_graph = frozen,
+        clear_devices = True,
+        initializer_nodes = ""
+    )
+    print("Model saved!")
+
+def main(_):
+    # resetting graph
+    tf.reset_default_graph()
+
+    # import MINST data
+    mnist = input_data.read_data_sets(data_path, one_hot=True)
+
+    # mnist data image of shape 28*28=784
+    x = tf.placeholder(tf.float32, [None, 784], name='input')
+
+    # 0-9 digits recognition => 10 classes
+    y = tf.placeholder(tf.float32, [None, 10], name='label')
+
+    # model
+    pred = softmax_model(x)
+
+    # model accuracy
+    acc = get_accuracy(pred, y)
+
+    # cost / loss
+    cost = cross_entropy_loss(pred, y)
+
+    # trainer
+    optimizer = sgd_optimizer(cost, learning_rate)
+
 
     # Initializing the variables
     init = tf.global_variables_initializer()
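With the pieces decoupled like this, main() can swap models and losses independently. A hypothetical re-wiring, with names imported from the new LearnDigitz.py (assuming the script guards its entry point behind main() so the import has no side effects): linear_model emits raw logits, so it pairs with builtin_cross_entropy_loss, which applies softmax inside the loss, whereas cross_entropy_loss expects the probabilities softmax_model produces.

```python
import tensorflow as tf

# Hypothetical re-wiring of main(), reusing the decoupled pieces above.
from LearnDigitz import (linear_model, builtin_cross_entropy_loss,
                         get_accuracy, sgd_optimizer, learning_rate)

tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 784], name='input')
y = tf.placeholder(tf.float32, [None, 10], name='label')

pred = linear_model(x)                       # raw logits, no softmax
cost = builtin_cross_entropy_loss(pred, y)   # softmax applied inside the loss
acc = get_accuracy(pred, y)
optimizer = sgd_optimizer(cost, learning_rate)
```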
@@ -112,32 +186,10 @@ def main(_):
         # Test model
         # Calculate accuracy
         print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
+
+        save_model(sess)
 
-        # saving model
-        checkpoint = os.path.join(export_path, "model.ckpt")
-        saver = tf.train.Saver()
-        # checkpoint - variables
-        saver.save(sess, checkpoint)
-        # graph
-        tf.train.write_graph(sess.graph_def, export_path, "model.pb", as_text=False)
-        # freeze
-        # python "Python\Lib\site-packages\tensorflow\python\tools\freeze_graph.py" --input_graph=.\Profile.pb --input_checkpoint=.\Profile.ckpt --output_node_names=Output/Predictions,Output/Loss --output_graph=frozen.pb
-        g = os.path.join(export_path, "model.pb")
-        frozen = os.path.join(export_path, "frozen.pb")
-
-        freeze.freeze_graph(
-            input_graph = g,
-            input_saver = "",
-            input_binary = True,
-            input_checkpoint = checkpoint,
-            output_node_names = "Model/linear",
-            restore_op_name = "",
-            filename_tensor_name = "",
-            output_graph = frozen,
-            clear_devices = True,
-            initializer_nodes = ""
-        )
-        print("Model saved!")
+
     exit(0)
 
 
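The name_scope('Model') / name_scope('Loss') / name_scope('Accuracy') / name_scope('SGD') wrappers are what let TensorBoard collapse the graph into one block per concern. A minimal sketch of publishing the graph to logs_path; the FileWriter placement is an assumption, since the training loop itself is unchanged and not shown in this diff:

```python
import os
import tensorflow as tf

logs_path = os.path.join('logs', 'log_example')  # stand-in for the dated log dir

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Writing sess.graph makes the scoped blocks browsable under the
    # Graphs tab after running: tensorboard --logdir logs
    writer = tf.summary.FileWriter(logs_path, graph=sess.graph)
    writer.close()
```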

LearnDigitz/LearnDigitz.pyproj

Lines changed: 1 addition & 0 deletions

@@ -14,6 +14,7 @@
     <CommandLineArguments>--input_dir . --output_dir .</CommandLineArguments>
     <Name>LearnDigitz</Name>
     <RootNamespace>LearnDigitz</RootNamespace>
+    <IsWindowsApplication>False</IsWindowsApplication>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
     <DebugSymbols>true</DebugSymbols>

README.md

Lines changed: 29 additions & 1 deletion

@@ -1 +1,29 @@
-# Digitz
+# Introduction to Deep Learning
+* Machine Learning is Different
+  * h(x)
+  * questions machine learning can answer
+  * process
+* Building these things
+  * features to vectors
+  * mathematical representation
+  * model
+  * cost/reward function
+  * optimizing the things
+  * gradients
+* Recognizing digits
+  * Graph
+    * IN: Constants, Placeholders, Variables
+    * OUT: Session params
+    * Run
+  * linear model
+    * model + cost function
+    * tensorflow example
+  * multi-layer linear model (MLP)
+    * model + cost function
+    * tensorflow example
+  * better architecture
+    * convolutional neural network
+    * tensorflow example
+
+# Samples
+Loosely based on [TensorFlow Examples](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/tutorials/mnist)
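The outline's "model + cost function + gradients" thread compresses into a few lines of the same TF1-style code the samples use. A sketch with toy single-feature data; the data, shapes, and learning rate are illustrative, not from the repo:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.zeros([1, 1]))
b = tf.Variable(tf.zeros([1]))
h = tf.matmul(x, W) + b                          # h(x): the model
cost = tf.reduce_mean(tf.pow(y - h, 2))          # squared-error cost
step = tf.train.GradientDescentOptimizer(0.1).minimize(cost)  # gradients

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(step, {x: [[0.], [1.], [2.]], y: [[1.], [3.], [5.]]})
    print(sess.run([W, b]))  # approaches W ≈ 2, b ≈ 1 for y = 2x + 1
```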
