# -*- coding: utf-8 -*-
"""
Created on Sat Feb 18 16:21:13 2017
@author: Xiangyong Cao
This code is adapted from https://github.com/KGPML/Hyperspectral
"""
from __future__ import print_function

import os
import time

import numpy as np
import scipy.io
import tensorflow as tf

import HSI_Data_Preparation
from HSI_Data_Preparation import num_train, Band, All_data, TrainIndex, TestIndex, Height, Width
from utils import patch_size, Post_Processing
from cnn_model import conv_net
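
# Pipeline overview: prepare the HSI patch data, train a patch-based CNN with
# mini-batch Adam, run the trained network over every pixel to build a
# class-probability map, smooth the map with MRF-based post-processing
# (Post_Processing from utils), and save the results as .mat files.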

def compute_prob_map():
    """Run the trained CNN over all patches of the scene in chunks and return
    the class-probability map (shape: num_pixels x n_classes).

    Uses the module-level tensors (x, softmax_output), the session (sess), and
    the sizes (n_input, n_classes); it must be called inside the tf.Session
    block below.
    """
    num_all = len(All_data['patch'])
    times = 20  # split the scene into 20 chunks to bound memory use
    num_each_time = int(num_all / times)
    res_num = num_all - times * num_each_time
    Num_Each_File = num_each_time * np.ones((1, times), dtype=int)
    Num_Each_File = Num_Each_File[0]
    Num_Each_File[times - 1] += res_num  # the last chunk absorbs the remainder
    start = 0
    prob_map = np.zeros((1, n_classes))
    for i in range(times):
        feed_x = np.reshape(np.asarray(All_data['patch'][start:start + Num_Each_File[i]]), (-1, n_input))
        temp = sess.run(softmax_output, feed_dict={x: feed_x})
        prob_map = np.concatenate((prob_map, temp), axis=0)
        start += Num_Each_File[i]
    prob_map = np.delete(prob_map, (0), axis=0)  # drop the zero seed row
    return prob_map


start_time = time.time()

# Import HSI data
Training_data, Test_data = HSI_Data_Preparation.Prepare_data()
# Training_data, Test_data, TrainIndex, TestIndex = HSI_Data_Preparation.load_index_data()
n_input = Band * patch_size * patch_size

# Reorder the patches from channels-first (N, Band, patch, patch) to
# channels-last, then flatten each patch into a feature vector of length n_input.
Training_data['train_patch'] = np.transpose(Training_data['train_patch'], (0, 2, 3, 1))
Test_data['test_patch'] = np.transpose(Test_data['test_patch'], (0, 2, 3, 1))
Training_data['train_patch'] = np.reshape(Training_data['train_patch'], (-1, n_input))
Test_data['test_patch'] = np.reshape(Test_data['test_patch'], (-1, n_input))

# Parameters
learning_rate = 0.001
training_iters = 10000
batch_size = 100
display_step = 500
n_classes = 16
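
# Note: n_classes = 16 matches a 16-class benchmark such as Indian Pines (an
# assumption; adjust it for other scenes), and training_iters counts mini-batch
# steps rather than epochs. display_step is defined above, but the loop below
# logs every 100 steps instead.
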
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Construct model
pred = conv_net(x)
softmax_output = tf.nn.softmax(pred)
# Define loss and optimizer
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Define accuracy
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
predict_test_label = tf.argmax(pred, 1)  # note: defined but never used below
# Initializing the variables
init = tf.global_variables_initializer()
x_test, y_test = Test_data['test_patch'], Test_data['test_labels']
y_test_scalar = np.argmax(y_test, 1) + 1  # 1-based class labels for Post_Processing
x_train, y_train = Training_data['train_patch'], Training_data['train_labels']
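
# Caution: the periodic evaluations inside the loop feed the entire training
# and test sets through the network in a single sess.run call, which may be
# memory-hungry on large scenes; chunked evaluation (as in compute_prob_map)
# would be a safer alternative.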
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for iteration in range(training_iters):
        # Draw a random mini-batch of training patches and labels
        idx = np.random.choice(num_train, size=batch_size, replace=False)
        batch_x = Training_data['train_patch'][idx, :]
        batch_y = Training_data['train_labels'][idx, :]
        # Run one optimization step (backprop) and fetch the batch loss/accuracy
        _, batch_cost, train_acc = sess.run([optimizer, cost, accuracy],
                                            feed_dict={x: batch_x, y: batch_y})
        # Display logs periodically
        if iteration % 100 == 0:
            print("Iteration", '%04d,' % iteration,
                  "Batch cost=%.4f," % batch_cost,
                  "Training Accuracy=%.4f" % train_acc)
        if iteration % 1000 == 0:
            print('Training Data Eval: Training Accuracy = %.4f' %
                  sess.run(accuracy, feed_dict={x: x_train, y: y_train}))
            print('Test Data Eval: Test Accuracy = %.4f' %
                  sess.run(accuracy, feed_dict={x: x_test, y: y_test}))
    print("Optimization Finished!")

    # Test model
    print("The Final Test Accuracy is:", sess.run(accuracy, feed_dict={x: x_test, y: y_test}))

    # Obtain the probabilistic map for every pixel in the scene
    All_data['patch'] = np.transpose(All_data['patch'], (0, 2, 3, 1))
    prob_map = compute_prob_map()

    # MRF post-processing
    Seg_Label, seg_Label, seg_accuracy = Post_Processing(prob_map, Height, Width, n_classes, y_test_scalar, TestIndex)
    print('The shape of prob_map is (%d,%d)' % (prob_map.shape[0], prob_map.shape[1]))
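
    # Post_Processing also returns an accuracy for the smoothed labels; printing
    # it allows a direct comparison with the raw CNN accuracy above (assuming
    # seg_accuracy is a scalar fraction, which its use here suggests).
    print('Post-MRF segmentation accuracy = %.4f' % seg_accuracy)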

    # Save the probabilistic map and the train/test indices as .mat files
    DATA_PATH = os.getcwd()
    scipy.io.savemat(os.path.join(DATA_PATH, 'prob_map.mat'), {'prob_map': prob_map})
    scipy.io.savemat(os.path.join(DATA_PATH, 'TrainIndex.mat'), {'TrainIndex': TrainIndex})
    scipy.io.savemat(os.path.join(DATA_PATH, 'TestIndex.mat'), {'TestIndex': TestIndex})

end_time = time.time()
print('The elapsed time is %.2f seconds' % (end_time - start_time))
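
# Outputs: prob_map.mat, TrainIndex.mat, and TestIndex.mat are written to the
# working directory; the MRF/demo code in this repository presumably consumes
# them downstream.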