diff --git a/Optimization/Loss.py b/Optimization/Loss.py
new file mode 100644
index 0000000..e771d17
--- /dev/null
+++ b/Optimization/Loss.py
@@ -0,0 +1,39 @@
+import numpy as np
+
+class CrossEntropyLoss:
+    def __init__(self):
+        self.input = None
+        self.epsilon = np.finfo(float).eps
+
+    def forward(self, input_tensor: np.ndarray, label_tensor: np.ndarray) -> float:
+        '''
+        Forward pass of the cross-entropy loss.
+        Literature: H(P, Q) = -sum(P * log(Q)), where P is label_tensor and Q is input_tensor.
+        Modification:
+            epsilon is added inside the log to avoid log(0) = -inf
+        Inputs:
+            input_tensor: np.ndarray
+            label_tensor: np.ndarray
+        Expected Output:
+            loss: float
+        '''
+        self.input = input_tensor
+        input_tensor = input_tensor[label_tensor == 1]  # keep only the true-class probabilities (one-hot labels)
+        loss = -np.sum(np.log(input_tensor + self.epsilon))
+        return loss
+
+
+    def backward(self, label_tensor: np.ndarray) -> np.ndarray:
+        '''
+        Backward pass of the cross-entropy loss.
+        Literature: dH/dQ = -P/Q, where P is label_tensor and Q is input_tensor.
+        Modification:
+            epsilon is added to the denominator to avoid division by zero
+        Inputs:
+            label_tensor: np.ndarray
+        Expected Output:
+            gradient: np.ndarray
+        '''
+        gradient = -label_tensor / (self.input + self.epsilon)
+        return gradient
+
\ No newline at end of file
diff --git a/Optimization/Optimizers.py b/Optimization/Optimizers.py
new file mode 100644
index 0000000..9901ed4
--- /dev/null
+++ b/Optimization/Optimizers.py
@@ -0,0 +1,19 @@
+import numpy as np
+
+class Sgd:
+    '''
+    Stochastic Gradient Descent (SGD) optimizer.
+
+    Equation:
+        w_new = w_old - learning_rate * dL/dw (gradient of the loss w.r.t. the weight tensor)
+
+    Position in the NN pipeline:
+        applied after the backward pass to update the weights of the model
+    '''
+    def __init__(self, learning_rate: float) -> None:
+        self.learning_rate = learning_rate
+
+    def calculate_update(self, weight_tensor: np.ndarray, gradient_tensor: np.ndarray) -> np.ndarray:
+        return weight_tensor - self.learning_rate * gradient_tensor
+
+
diff --git a/Optimization/__init__.py b/Optimization/__init__.py
new file mode 100644
index 0000000..adf7b78
--- /dev/null
+++ b/Optimization/__init__.py
@@ -0,0 +1 @@
+__all__ = ["Optimizers", "Loss"]
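
For reference, a minimal usage sketch of the two classes together; the batch values, learning rate, and weight shapes below are made up for illustration, and the sketch assumes input_tensor holds softmax probabilities paired with a one-hot label_tensor:

    import numpy as np
    from Optimization.Loss import CrossEntropyLoss
    from Optimization.Optimizers import Sgd

    # Hypothetical softmax outputs for a batch of 2 samples with 3 classes each.
    predictions = np.array([[0.7, 0.2, 0.1],
                            [0.1, 0.8, 0.1]])
    # One-hot labels: sample 0 is class 0, sample 1 is class 1.
    labels = np.array([[1, 0, 0],
                       [0, 1, 0]])

    loss_layer = CrossEntropyLoss()
    loss = loss_layer.forward(predictions, labels)  # -(log 0.7 + log 0.8) ~= 0.580
    error = loss_layer.backward(labels)             # -P/Q, zero wherever the label is zero

    # Sgd applies w_new = w_old - learning_rate * gradient; the tensors here are placeholders.
    sgd = Sgd(learning_rate=0.1)
    weights = np.full((3, 3), 0.5)
    weights = sgd.calculate_update(weights, np.ones((3, 3)))  # every entry becomes 0.4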
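
And a quick finite-difference check that backward matches forward (the probe position and step size h are arbitrary choices for this sketch):

    import numpy as np
    from Optimization.Loss import CrossEntropyLoss

    loss_layer = CrossEntropyLoss()
    q = np.array([[0.3, 0.6, 0.1]])  # hypothetical probabilities
    p = np.array([[0, 1, 0]])        # one-hot label: class 1

    loss_layer.forward(q, p)                  # sets the stored input
    analytic = loss_layer.backward(p)[0, 1]   # -P/Q at the true class: -1/0.6

    h = 1e-6
    q_plus, q_minus = q.copy(), q.copy()
    q_plus[0, 1] += h
    q_minus[0, 1] -= h
    numeric = (loss_layer.forward(q_plus, p) - loss_layer.forward(q_minus, p)) / (2 * h)

    assert np.isclose(analytic, numeric, rtol=1e-4)  # both ~= -1.667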