Python - Keras Multi Layer Perceptron
forked from RubensZimbres/Repo-2016
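# Multi-layer perceptron for a two-class problem, with a hand-rolled adaptive
# learning-rate schedule. Written against the Keras 1.x API (init=..., nb_epoch=...,
# predict_classes), so the argument names below follow that older convention.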
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler, EarlyStopping, ModelCheckpoint
from keras import backend as K
epochs = 50
learning_rate = 0.01
decay_rate = 2e-6
momentum = 0.9
reg = 0.001      # defined but unused below
look_back = 23   # defined but unused below
sd = []          # per-epoch learning rates, filled in by LossHistory
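# Adaptive learning-rate scheme implemented by the two definitions below:
# while the loss proxy 2*sqrt(loss) stays at or above 1.1, the learning rate is
# held at 0.01; once it drops below 1.1, step_decay() switches to a 1/t decay,
# lr(t) = 0.01 / (1 + 0.1*t), where t is the number of loss values seen so far.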
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = [1, 1]   # seeded so step_decay has a history to read from the first epoch
    def on_epoch_end(self, epoch, logs={}):
        self.losses.append(logs.get('loss'))
        sd.append(step_decay(len(self.losses)))
        print('learning rate:', step_decay(len(self.losses)))
        # 2*sqrt(loss) is the same loss proxy that step_decay thresholds on
        print('derivative of loss:', 2 * np.sqrt(self.losses[-1]))
def my_init(shape, name=None):
    # Keras 1.x custom initializer: uniform random weights in [0, 1)
    value = np.random.random(shape)
    return K.variable(value, name=name)
def step_decay(losses):
    # The argument (an epoch index) is ignored; the schedule reads the
    # global `history` callback instead.
    if float(2 * np.sqrt(np.array(history.losses[-1]))) < 1.1:
        lrate = 0.01 * 1 / (1 + 0.1 * len(history.losses))
        momentum = 0.2     # local only; does not affect the SGD optimizer
        decay_rate = 0.0   # local only; does not affect the SGD optimizer
        return lrate
    else:
        lrate = 0.01
        return lrate
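# For reference, the 1/t branch of the schedule yields:
#   t = 10  ->  0.01 / (1 + 0.1*10) = 0.0050
#   t = 50  ->  0.01 / (1 + 0.1*50) ~ 0.0017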
model = Sequential()
model.add(Dense(6, input_dim=3, init=my_init))   # 3 input features -> 6 hidden units
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(2, init='uniform'))              # 2 output classes
model.add(Activation('softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.97, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd, metrics=['accuracy'])
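# Optional sanity check: print the layer stack and parameter counts to confirm
# the 3 -> 6 -> 2 architecture compiled above.
model.summary()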
aa = pd.read_csv('questao1NN.csv', sep=',', header=0)
aa.head()    # quick inspection of the raw data
aa[[1]]      # the label column
df = aa[0:1800]      # training rows
df2 = aa[1801:1999]  # held-out rows (note: row 1800 is skipped)
y = np.array(df[[1]])
y_train = [item for sublist in y for item in sublist]   # flatten to 1-D
y_train = pd.get_dummies(y_train)   # one-hot encode for categorical_crossentropy
y_train = np.array(y_train)
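# Sanity check (assuming the label column holds exactly two classes, matching
# the 2-unit softmax above): one one-hot row per training sample.
print(y_train.shape)   # expected: (1800, 2)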
x = np.array(df)
x1 = x.T   # transpose so x1[i] is column i
x1.shape
for i in range(1, 14):
    x1[i] = np.array([float(v) for v in x1[i]])   # cast feature columns to float
x2 = [x1[6], x1[7], x1[9]]   # the three predictor columns
x3 = np.array(x2).T
X_train = x3
X_train.shape
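# The selected feature matrix must agree with the model's input_dim=3.
print(X_train.shape)   # expected: (1800, 3)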
y2 = np.array(df2[[1]])
y_train2 = [item for sublist in y2 for item in sublist]
y_train20 = pd.get_dummies(y_train2)
y_train2 = np.array(y_train20)
y_test = pd.DataFrame(y_train2).values
y_test.shape
x0 = np.array(df2)
x10 = x0.T
x20 = [x10[6], x10[7], x10[9]]   # must match the training feature columns
x30 = np.array(x20).T.astype('float32')   # cast to float, as done for the training features
X_test = x30
filepath = "w-questao-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True)
# Early stopping
early_stopping = EarlyStopping(monitor='loss', verbose=1, patience=5)
history = LossHistory()
lrate = LearningRateScheduler(step_decay)
# Pass checkpoint and early_stopping too, so weight files are actually written
# and training can stop once the loss plateaus.
model.fit(X_train, y_train, nb_epoch=epochs, batch_size=100,
          callbacks=[history, lrate, checkpoint, early_stopping])
filename = "weights-improvement-09-0.0010.hdf5"   # must name a checkpoint file written during training
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')   # recompile after loading weights
model.evaluate(X_train, y_train, batch_size=16)
model.predict(X_train, batch_size=32, verbose=0)
pred2 = model.predict_classes(X_train, batch_size=32, verbose=0)   # hard class labels for plotting
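# A true held-out score would use the test split built above rather than the
# training data; a minimal sketch, assuming X_test carries the same three
# features and y_test the same one-hot layout:
model.evaluate(X_test, y_test, batch_size=16)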
# THRESHOLD
y2 = np.array([int(v) for v in y.ravel()])   # true labels, flattened to 1-D ints
y2.shape
y3 = y2.reshape(60, 30)   # 1800 labels arranged as a 60x30 image for display
plt.figure(figsize=(9,6))
plt.imshow(y3)
plt.title("REAL")
plt.show()
y22 = np.array([int(v) for v in pred2])   # predicted labels as ints
y22.shape
y32 = y22.reshape(60, 30)
plt.figure(figsize=(9,6))
plt.imshow(y32)
plt.title("PREDICTION")
plt.show()
b = [float(v) for v in history.losses]   # note: includes the two seed values [1, 1]
plt.plot(b,color='r',linewidth=3)
plt.title("LOSS HISTORY")
plt.show()
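# The per-epoch learning rates recorded in `sd` can be plotted the same way:
plt.plot(sd, color='b', linewidth=3)
plt.title("LEARNING RATE HISTORY")
plt.show()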