@@ -109,27 +109,27 @@ We try to find the line that more closely approximates the relationship.
109109**** code: pizzas per reservations
110110
111111#+begin_src python
112- x, y = np.loadtxt("pizza.txt", skiprows=1, unpack=true)
112+ X, Y = np.loadtxt("pizza.txt", skiprows=1, unpack=True)
113113
114114 # x = input var, restaurant reservations
115115 # w = weight
116- def predict(x , w): # our model
117- return x * w
116+ def predict(X , w): # our model
117+ return X * w
118118
119119 # y = ground truth, pizzas bought
120- def loss(x, y , w): # = Mean Squared Error
121- return np.average((predict(x ,w) - y ) ** 2)
120+ def loss(X, Y , w): # = Mean Squared Error
121+ return np.average((predict(X ,w) - Y ) ** 2)
122122
123123 # Returns a new w(eight)
124124 # lr = learning rate, step
125- def train(x, y , iterations, lr):
125+ def train(X, Y , iterations, lr):
126126 w = 0 # arbitrary init value
127127 for i in range(iterations):
128- current_loss = loss(x, y , w)
128+ current_loss = loss(X, Y , w)
129129 print("iteration %4d => loss: %.6f" % (i, current_loss))
130- if loss(x, y , w + lr) < current_loss:
130+ if loss(X, Y , w + lr) < current_loss:
131131 w += lr
132- elif loss(x, y , w - lr) < current_loss:
132+ elif loss(X, Y , w - lr) < current_loss:
133133 w -= lr
134134 else:
135135 return w
@@ -139,23 +139,23 @@ We try to find the line that more closely approximates the relationship.
139139**** code: adding a bias
140140
141141#+begin_src python
142- def predict(x , w, b): # our model
142+ def predict(X , w, b): # our model
143143     return X * w + b
144144
145- def loss(x, y , w, b): # Mean Squared Error
146- return np.average((predict(x ,w,b) - y ) ** 2)
145+ def loss(X, Y , w, b): # Mean Squared Error
146+ return np.average((predict(X ,w,b) - Y ) ** 2)
147147
148- def train(x, y , iterations, lr):
148+ def train(X, Y , iterations, lr):
149149 w = b = 0
150150 for i in range(iterations):
151- current_loss = loss(x, y , w, b)
152- if loss(x,y ,w+lr,b) < current_loss:
151+ current_loss = loss(X, Y , w, b)
152+ if loss(X,Y ,w+lr,b) < current_loss:
153153 w += lr
154- elif loss(x,y ,w-lr,b) < current_loss:
154+ elif loss(X,Y ,w-lr,b) < current_loss:
155155 w -= lr
156- elif loss(x,y ,w,b+lr) < current_loss:
156+ elif loss(X,Y ,w,b+lr) < current_loss:
157157 b += lr
158- elif loss(x,y ,w,b-lr) < current_loss:
158+ elif loss(X,Y ,w,b-lr) < current_loss:
159159 b -= lr
160160 else:
161161 return w, b
@@ -175,7 +175,10 @@ We try to find the line that more closely approximates the relationship.
175175 plt.yticks(fontsize=15)
176176 plt.xlabel("reservations", fontsize=30)
177177 plt.ylabel("pizza", fontsize=30)
178- x, y = np.loadtxt("pizza.txt", skiprows=1, unpack=true)
178+ X, Y = np.loadtxt("pizza.txt", skiprows=1, unpack=True)
179+ w = train(X,Y,iterations=1000,lr=0.01)
180+ print("w=%.3f" % w)
181+ print("Prediction: x=%d => y=%.2f" % (20, predict(20,w)))
179182 plt.plot(X,Y,"bo")
180183 plt.show()
181184#+end_src
@@ -186,7 +189,7 @@ We try to find the line that more closely approximates the relationship.
186189
187190- Problems with our current =train()=
188191   1) doesn't scale well (cpu/time) when adding new model _parameters_ (weights)
189- 2) is NOT precise, since _hyperparameters_ are defined in *lr* terms
192   2) is NOT precise, since the model _parameters_ (w, b) can only move in steps of *lr* (the learning rate)
190193
191194- observation:
192195 - a plot of loss(), when b=0, looks like a U curve
0 commit comments