 from datetime import *
 
 ###################################################################
-# Variables                                                       #
-# When launching project or scripts from Visual Studio,           #
-# input_dir and output_dir are passed as arguments automatically. #
-# Users could set them from the project setting page.             #
+# Output Paths                                                    #
 ###################################################################
+unique = datetime.now().strftime('%m.%d_%H.%M')
+data_path = 'data'
+logs_path = os.path.join('logs', 'log_' + unique)
+export_path = os.path.join('model', 'model_' + unique)
 
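
For reference, a minimal sketch (not part of the commit) of what these timestamped paths expand to on a POSIX system, assuming os has been imported earlier in the script:

    from datetime import datetime
    import os

    unique = datetime.now().strftime('%m.%d_%H.%M')   # e.g. '05.14_09.30'
    print(os.path.join('logs', 'log_' + unique))      # logs/log_05.14_09.30
    print(os.path.join('model', 'model_' + unique))   # model/model_05.14_09.30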
-FLAGS = tf.app.flags.FLAGS
-tf.app.flags.DEFINE_string("input_dir", ".", "Input directory where training dataset and meta data are saved")
-tf.app.flags.DEFINE_string("output_dir", ".", "Output directory where output such as logs are saved.")
-tf.app.flags.DEFINE_string("log_dir", ".", "Model directory where final model files are saved.")
-
-def main(_):
-    # clearing graph
-    tf.reset_default_graph()
-
-    # Import MINST data
-    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+###################################################################
+# Parameters                                                      #
+###################################################################
+learning_rate = 0.01
+training_epochs = 10
+batch_size = 100
+display_epoch = 1
 
-    # Parameters
-    learning_rate = 0.01
-    training_epochs = 10
-    batch_size = 100
-    display_epoch = 1
-    unique = datetime.now().strftime('%m-%d_%H_%M')
-    logs_path = os.path.join('logs', unique)
-    export_path = os.path.join('model', unique)
+###################################################################
+# Models                                                          #
+###################################################################
+def linear_model(x):
+    # set model weights
+    W = tf.Variable(tf.zeros([784, 10]), name='weights')
+    b = tf.Variable(tf.zeros([10]), name='bias')
 
-    # tf Graph Input
-    # mnist data image of shape 28*28=784
-    x = tf.placeholder(tf.float32, [None, 784], name='InputData')
-    # 0-9 digits recognition => 10 classes
-    y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
+    # scope for tensorboard
+    with tf.name_scope('Model'):
+        pred = tf.add(tf.matmul(x, W), b, name="model") # linear combination
+    return pred
 
-    # Set model weights
+def softmax_model(x):
+    # set model weights
     W = tf.Variable(tf.zeros([784, 10]), name='weights')
     b = tf.Variable(tf.zeros([10]), name='bias')
 
-    # Construct model and encapsulating all ops into scopes, making
-    # Tensorboard's Graph visualization more convenient
+    # scope for tensorboard
     with tf.name_scope('Model'):
-        # Model
-        #pred = tf.nn.softmax(tf.matmul(x, W) + b, name="model") # Softmax
-        pred = tf.add(tf.matmul(x, W), b, name="linear") # linear combination
+        pred = tf.nn.softmax(tf.matmul(x, W) + b, name="model") # Softmax
+    return pred
 
+###################################################################
+# Cost / Loss Functions                                           #
+###################################################################
+def cross_entropy_loss(fn, y):
     with tf.name_scope('Loss'):
         # Minimize error using cross entropy
-        # cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
-
+        cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(fn), reduction_indices=1))
+    return cost
+
+def builtin_cross_entropy_loss(fn, y):
+    with tf.name_scope('Loss'):
         # Minimize error with *better* cross entropy
-        # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
-
+        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=fn))
+    return cost
+
+def squared_error_loss(fn, y):
+    with tf.name_scope('Loss'):
+        # Minimize error using computed squared error
+        cost = tf.reduce_mean(tf.pow(y - fn, 2))
+    return cost
+
+def builtin_l2_loss(fn, y):
+    with tf.name_scope('Loss'):
         # Minimize error using squared error
-        # cost = tf.nn.l2_loss(y - pred)
-
-        # Minimize with computed square error
-        cost = tf.reduce_mean(tf.pow(y - pred, 2))
-    with tf.name_scope('SGD'):
-        # Gradient Descent
-        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+        cost = tf.nn.l2_loss(y - fn)
+    return cost
+
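
Worth noting when wiring these together: softmax_model already applies softmax, so it pairs with the hand-rolled cross_entropy_loss above, while linear_model emits raw logits and belongs with builtin_cross_entropy_loss, since tf.nn.softmax_cross_entropy_with_logits applies softmax internally (feeding it already-softmaxed outputs would apply softmax twice). A minimal sketch of the logits pairing:

    pred = linear_model(x)                       # raw logits, no softmax
    cost = builtin_cross_entropy_loss(pred, y)   # softmax applied inside the op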
+###################################################################
+# Accuracy                                                        #
+###################################################################
+def get_accuracy(fn, y):
     with tf.name_scope('Accuracy'):
-        # Accuracy
-        acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+        acc = tf.equal(tf.argmax(fn, 1), tf.argmax(y, 1))
         acc = tf.reduce_mean(tf.cast(acc, tf.float32))
+    return acc
+
+###################################################################
+# Optimizer                                                       #
+###################################################################
+def sgd_optimizer(cost, lr):
+    with tf.name_scope('SGD'):
+        # Gradient Descent
+        optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost)
+    return optimizer
+
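
The factory pattern above makes it cheap to swap optimizers. A hypothetical drop-in alternative (an assumption, not part of this commit) using TF 1.x's built-in Adam, which often converges faster than plain SGD on MNIST:

    def adam_optimizer(cost, lr):
        with tf.name_scope('Adam'):
            # Adam keeps per-parameter adaptive learning rates
            return tf.train.AdamOptimizer(lr).minimize(cost)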
+###################################################################
+# Save Model                                                      #
+###################################################################
+def save_model(sess):
+    # saving model
+    checkpoint = os.path.join(export_path, "model.ckpt")
+    saver = tf.train.Saver()
+    # checkpoint - variables
+    saver.save(sess, checkpoint)
+    # graph
+    tf.train.write_graph(sess.graph_def, export_path, "model.pb", as_text=False)
+    # freeze
+    g = os.path.join(export_path, "model.pb")
+    frozen = os.path.join(export_path, "digits.pb")
+
+    freeze.freeze_graph(
+        input_graph=g,
+        input_saver="",
+        input_binary=True,
+        input_checkpoint=checkpoint,
+        output_node_names="Model/model",
+        restore_op_name="",
+        filename_tensor_name="",
+        output_graph=frozen,
+        clear_devices=True,
+        initializer_nodes=""
+    )
+    print("Model saved!")
+
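
For completeness, a minimal sketch (an assumption, not part of this commit) of loading the frozen digits.pb back for inference; "input:0" and "Model/model:0" are the tensor names established above, and images stands in for a hypothetical [N, 784] float32 batch:

    with tf.gfile.GFile(os.path.join(export_path, "digits.pb"), "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    with tf.Graph().as_default() as g:
        # import the frozen graph and look up tensors by name
        tf.import_graph_def(graph_def, name="")
        x_in = g.get_tensor_by_name("input:0")
        scores = g.get_tensor_by_name("Model/model:0")
        with tf.Session(graph=g) as sess:
            predictions = sess.run(scores, feed_dict={x_in: images})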
+def main(_):
+    # resetting graph
+    tf.reset_default_graph()
+
+    # import MNIST data
+    mnist = input_data.read_data_sets(data_path, one_hot=True)
+
+    # mnist data: images of shape 28*28=784
+    x = tf.placeholder(tf.float32, [None, 784], name='input')
+
+    # 0-9 digits recognition => 10 classes
+    y = tf.placeholder(tf.float32, [None, 10], name='label')
+
+    # model
+    pred = softmax_model(x)
+
+    # model accuracy
+    acc = get_accuracy(pred, y)
+
+    # cost / loss
+    cost = cross_entropy_loss(pred, y)
+
+    # trainer
+    optimizer = sgd_optimizer(cost, learning_rate)
+
 
     # Initializing the variables
     init = tf.global_variables_initializer()
@@ -112,32 +186,10 @@ def main(_):
         # Test model
         # Calculate accuracy
         print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
+
+        save_model(sess)
 
-        # saving model
-        checkpoint = os.path.join(export_path, "model.ckpt")
-        saver = tf.train.Saver()
-        # checkpoint - variables
-        saver.save(sess, checkpoint)
-        # graph
-        tf.train.write_graph(sess.graph_def, export_path, "model.pb", as_text=False)
-        # freeze
-        # python "Python\Lib\site-packages\tensorflow\python\tools\freeze_graph.py" --input_graph=.\Profile.pb --input_checkpoint=.\Profile.ckpt --output_node_names=Output/Predictions,Output/Loss --output_graph=frozen.pb
-        g = os.path.join(export_path, "model.pb")
-        frozen = os.path.join(export_path, "frozen.pb")
-
-        freeze.freeze_graph(
-            input_graph = g,
-            input_saver = "",
-            input_binary = True,
-            input_checkpoint = checkpoint,
-            output_node_names = "Model/linear",
-            restore_op_name = "",
-            filename_tensor_name = "",
-            output_graph = frozen,
-            clear_devices = True,
-            initializer_nodes = ""
-        )
-        print("Model saved!")
+
     exit(0)
 
 