From e833643c245fc8b4442ee5ad2bed131438e172bd Mon Sep 17 00:00:00 2001
From: Antoine Debouchage
Date: Sat, 6 Nov 2021 16:10:46 +0100
Subject: [PATCH] Introducing Syngular files to git

---
 .gitignore                   |   2 +
 dataset/dataset.py           | 105 ++++++++++++++++
 experimental/layers.py       | 180 +++++++++++++++++++++++
 layers/TensorCNN.py          |   0
 layers/TensorDense.py        |  88 ++++++++++++
 layers/TensorGRU.py          |   0
 layers/TensorLSTM.py         |   0
 layers/TensorLayer.py        |  98 +++++++++++++
 layers/TensorRNN.py          |   0
 layers/TensorRing.py         |   0
 models/TensorNet.py          |  24 ++++
 optimizers/TensorGradient.py |  76 ++++++++++
 process/option.py            |  90 +++++++++++++
 process/stochastic.py        |  93 ++++++++++++
 readme.md                    |   0
 tensor/tensor.py             |  55 ++++++++
 utils/benchmark.py           | 110 +++++++++++++++
 17 files changed, 921 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 dataset/dataset.py
 create mode 100644 experimental/layers.py
 create mode 100644 layers/TensorCNN.py
 create mode 100644 layers/TensorDense.py
 create mode 100644 layers/TensorGRU.py
 create mode 100644 layers/TensorLSTM.py
 create mode 100644 layers/TensorLayer.py
 create mode 100644 layers/TensorRNN.py
 create mode 100644 layers/TensorRing.py
 create mode 100644 models/TensorNet.py
 create mode 100644 optimizers/TensorGradient.py
 create mode 100644 process/option.py
 create mode 100644 process/stochastic.py
 create mode 100644 readme.md
 create mode 100644 tensor/tensor.py
 create mode 100644 utils/benchmark.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cb63130
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+__pycache__/
+.git/
\ No newline at end of file
diff --git a/dataset/dataset.py b/dataset/dataset.py
new file mode 100644
index 0000000..aebb8e8
--- /dev/null
+++ b/dataset/dataset.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import numpy as np
+import nptyping as ntp
+import pandas as pd
+
+from typing import NamedTuple, Union, Callable
+from progressbar.progressbar import ProgressBar
+
+from sklearn.model_selection import ParameterGrid
+from scipy.stats import gamma, beta, uniform
+
+from syngular.utils.benchmark import Timer
+
+class Parameter(NamedTuple):
+    name: str
+    call: Union[Callable[..., float], list[float], ntp.NDArray[float]]
+
+class Dataset:
+
+    def __init__(self, increment, params: list[Parameter]) -> None:
+        self.increment = increment
+        self.percentiles = pd.Series(np.linspace(0, 0.99, self.increment))
+
+        self.params = params
+        self.grid = {}
+        self.pregrid = {}
+
+        self.bar = ProgressBar()
+        self.dataframe = pd.DataFrame()
+
+    @Timer.wrapper
+    def generate(self):
+        for p in self.params:
+            # Concrete runtime checks instead of isinstance() against typing
+            # generics, which is fragile across Python versions.
+            if isinstance(p.call, (list, np.ndarray)):
+                # if len(p.call) == self.increment:
+                self.pregrid[p.name] = p.call
+                # else:
+                #     raise IndexError("Parameter list of values must be the same size as the increment")
+            elif callable(p.call):
+                self.pregrid[p.name] = self.percentiles.apply(p.call)
+            else:
+                raise TypeError("Parameter call must be a float function or a list of floats")
+
+        self.grid = ParameterGrid(self.pregrid)
+
+        for params in self.bar(self.grid):
+            self.dataframe = self.dataframe.append(pd.Series(params), ignore_index=True)
+
+        return self
+
+    def add_column(self, name, col):
+        self.dataframe[name] = col
+
+    def __add__(self, dataset: Dataset):
+        return Dataset(self.increment + dataset.increment, self.params + dataset.params)
+
+    @staticmethod
+    def empty():
+        return Dataset(0, [])
+
+    @staticmethod
+    def concatenate(*datasets: Dataset):
+        dt_list = list(datasets)
+        dt_concat = Dataset.empty()
+
+        while len(dt_list) > 0:
+            dt = dt_list.pop()
+            dt_concat += dt
+        return dt_concat
+
+    def __str__(self):
+        return self.dataframe.__repr__()
+
+
+
+# dt = Dataset(2, [
+#     Parameter(name = "S", call = lambda x : gamma.ppf(x, a=100, scale=1)),
+#     Parameter(name = "K", call = lambda x : uniform.ppf(x, 50, 200)),
+#     Parameter(name = "R", call = lambda x : uniform.ppf(x, 0.01, 0.18)),
+#     Parameter(name = "D", call = lambda x : uniform.ppf(x, 0.01, 0.18)),
+#     Parameter(name = "sigma", call = lambda x : (beta.ppf(x, a=2, b=5) + 0.001))
+# ])
+
+# dt2 = Dataset(2, [
+#     Parameter(name = "S", call = lambda x : gamma.ppf(x, a=100, scale=1)),
+#     Parameter(name = "K", call = lambda x : uniform.ppf(x, 50, 200)),
+#     Parameter(name = "R", call = lambda x : uniform.ppf(x, 0.01, 0.18)),
+#     Parameter(name = "D", call = lambda x : uniform.ppf(x, 0.01, 0.18)),
+#     Parameter(name = "sigma", call = lambda x : (beta.ppf(x, a=2, b=5) + 0.001))
+# ])

+# dt3 = Dataset.concatenate(dt, dt2)
+# dt3.generate()
+
+# print(dt3)
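+
+# A hedged, runnable variant of the commented example above (it assumes the
+# syngular package is importable so that the Timer import resolves):
+if __name__ == "__main__":
+    dt = Dataset(2, [
+        Parameter(name="S", call=lambda x: gamma.ppf(x, a=100, scale=1)),
+        Parameter(name="sigma", call=lambda x: beta.ppf(x, a=2, b=5) + 0.001),
+    ])
+    print(dt.generate())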
diff --git a/experimental/layers.py b/experimental/layers.py
new file mode 100644
index 0000000..7d08d0d
--- /dev/null
+++ b/experimental/layers.py
@@ -0,0 +1,180 @@
+from __future__ import print_function, annotations
+from functools import reduce
+
+import numpy as np
+
+
+def unfold_shape(shape):
+    # Total number of elements described by `shape`: a product, not a sum
+    # (the original `x + y` reduction under-counted every multi-axis shape).
+    return reduce(lambda x, y: x * y, shape)
+
+class TensorTrainLayer():
+
+    def __init__(self) -> None:
+        pass
+
+    def build(self):
+        pass
+
+    def call(self):
+        pass
+
+    def forward(self, input):
+        return input
+
+    def backward(self, input, grad_output):
+        num_units = input.shape[1]
+
+        d_layer_d_input = np.eye(num_units)
+
+        return np.dot(grad_output, d_layer_d_input)
+
+    def train(self):
+        pass
+
+class ReLU(TensorTrainLayer):
+
+    def forward(self, input):
+        return np.maximum(0, input)
+
+    def backward(self, input, grad_output):
+        relu_grad = input > 0
+        return grad_output * relu_grad
+
+class Dense(TensorTrainLayer):
+
+    def __init__(self, input_shape, output_shape, bond_dim=2, core_number=None, learning_rate=0.01) -> None:
+
+        if len(input_shape) != len(output_shape):
+            raise ValueError("input shape and output shape should have the same length")
+
+        if core_number is not None and core_number != len(input_shape):
+            raise ValueError("number of cores does not match the size of input_shape")
+
+        self.input_shape = input_shape
+        self.unfold_input_shape = unfold_shape(self.input_shape)
+        self.output_shape = output_shape
+        self.unfold_output_shape = unfold_shape(self.output_shape)
+
+        self.cores_number = core_number if core_number is not None else len(input_shape)
+        self.bond_dim = bond_dim
+
+        self.learning_rate = learning_rate
+
+        self.cores = []
+        self.bias = []
+
+    def __get_core_shape(self, index):
+        if index == 0 or index == self.cores_number-1:
+            return (self.input_shape[index], self.output_shape[index], self.bond_dim,)
+        else:
+            return (self.input_shape[index], self.output_shape[index], self.bond_dim, self.bond_dim,)
+
+    def __add_core(self, name, type):
+        index = len(self.cores)
+
+        shape = self.__get_core_shape(index)
+        size = unfold_shape(shape)
+
+        # He-style initialization, scaled by the number of core entries.
+        if type == 'middle' and 0 < index < self.cores_number-1:
+            return np.random.normal(
+                loc=0.0,
+                scale=np.sqrt(2/size),
+                size=shape
+            )
+        elif type == 'extreme' and (index == 0 or index == self.cores_number-1):
+            return np.random.normal(
+                loc=0.0,
+                scale=np.sqrt(2/size),
+                size=shape
+            )
+        else:
+            raise ValueError('the type of core to add does not match the current cores structure')
+
+
+    def build(self):
+        self.cores.append(self.__add_core(name='core_1', type='extreme'))
+
+        for i in range(1, self.cores_number-1):
+            self.cores.append(self.__add_core(name="core_"+str(i), type='middle'))
+
+        self.cores.append(self.__add_core(name='core_'+str(self.cores_number), type='extreme'))
+
+        self.bias = np.zeros(shape=self.output_shape)
+
+
+    def call(self):
+        pass
+
+    def forward(self, input):
+        input = np.array(input)
+        unfold_input = unfold_shape(input.shape)
+
+        if self.unfold_input_shape != unfold_input:
+            exception = f"input of shape {input.shape} cannot be reshaped into {self.input_shape} [{unfold_input} != {self.unfold_input_shape}]"
+            raise ValueError(exception)
+
+        input_tensor = np.reshape(input, newshape=self.input_shape)
+
+        # Interleaved einsum arguments: the input tensor carries indices
+        # 0..n-1, the outputs n..2n-1 and the bonds 2n..3n-2.
+        einsum_structure = []
+        input_index = np.arange(self.cores_number)
+
+        einsum_structure.append(input_tensor)
+        einsum_structure.append(input_index)
+
+        for idx in range(self.cores_number):
+            ipt_index = idx
+            opt_index = self.cores_number+idx
+            einsum_structure.append(self.cores[idx])
+            if idx == 0:
+                bnd_index = 2*self.cores_number
+                einsum_structure.append([ipt_index, opt_index, bnd_index])
+            elif idx == self.cores_number-1:
+                bnd_index = 3*self.cores_number-2
+                einsum_structure.append([ipt_index, opt_index, bnd_index])
+            else:
+                bnd_index_1 = 2*self.cores_number+idx-1
+                bnd_index_2 = 2*self.cores_number+idx
+                einsum_structure.append([ipt_index, opt_index, bnd_index_1, bnd_index_2])
+
+        output_index = np.arange(self.cores_number)+self.cores_number
+
+        einsum_structure.append(output_index)
+
+        contraction = np.einsum(*einsum_structure)
+
+        # Return the biased contraction instead of printing and discarding it.
+        return contraction + self.bias
+
+
+    def backward(self):
+        pass
+
+    def train(self):
+        pass
+
+
+if __name__ == "__main__":
+
+    layer = Dense((2,2), (3,3), bond_dim=2)
+    layer.build()
+
+    print("Cores")
+    print(layer.cores)
+    print("Bias")
+    print(layer.bias)
+
+    print(layer.forward([[1,4],[2,5]]))
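+    # A hedged addition (not in the original commit): the ReLU layer chains
+    # onto Dense through the shared TensorTrainLayer forward interface.
+    print(ReLU().forward(layer.forward([[1,4],[2,5]])))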
diff --git a/layers/TensorCNN.py b/layers/TensorCNN.py
new file mode 100644
index 0000000..e69de29
diff --git a/layers/TensorDense.py b/layers/TensorDense.py
new file mode 100644
index 0000000..64e637f
--- /dev/null
+++ b/layers/TensorDense.py
@@ -0,0 +1,88 @@
+from functools import reduce
+
+import tensorflow as tf
+from tensorflow.keras.layers import Layer
+
+import tensornetwork as tn
+import numpy as np
+
+class TensorDense(Layer):
+
+    def __init__(self, units, cores_number, bond_dim=2, shape=None) -> None:
+        super(TensorDense, self).__init__()
+
+        self.units = units
+        self.cores_number = cores_number
+        self.bond_dim = bond_dim
+
+        if shape is None:
+            # Round instead of truncating: a bare int() would turn a root
+            # such as 3.9999... into 3.
+            roots = int(round(np.power(self.units, 1 / self.cores_number)))
+            self.shape = [roots] * self.cores_number
+        else:
+            self.shape = shape
+
+        self.cores = []
+
+    def build(self, input_shape):
+
+        self.bias = tf.Variable(tf.zeros(shape=self.shape), name="bias", trainable=True)
+
+        self.cores.append(self.add_weight(
+            shape = (input_shape[1], self.shape[0], self.bond_dim,),
+            name = "core_1",
+            initializer = 'random_normal',
+            trainable = True
+        ))
+
+        for i in range(1, self.cores_number-1):
+            self.cores.append(self.add_weight(
+                shape = (input_shape[1], self.shape[i], self.bond_dim, self.bond_dim,),
+                name = "core_"+str(i),
+                initializer = 'random_normal',
+                trainable = True
+            ))
+
+        self.cores.append(self.add_weight(
+            shape = (input_shape[1], self.shape[-1], self.bond_dim,),
+            name = "core_"+str(self.cores_number),
+            initializer = 'random_normal',
+            trainable = True
+        ))
+
+    def call(self, inputs):
+
+        def process(input, cores, bias):
+            # Intended input preparation (padding the flat vector up to the
+            # full core shape) is sketched below; for now the vector is
+            # duplicated and reshaped to a hardcoded (2, 2).
+            # unfold = tf.reshape(input, [-1])
+            # reduction = reduce(lambda x, y: x*y, self.shape_input)
+            # padding = tf.convert_to_tensor(np.zeros((reduction-unfold.shape[0]), dtype="float32"))
+            # input = tf.reshape(tf.concat(values=[input, padding], axis=0), self.shape_input)
+            input = [input, input]
+            input = tf.reshape(input, (2,2))
+
+            mx = self.cores_number
+
+            cores = [tn.Node(core, backend="tensorflow").tensor for core in cores]
+            x = tn.Node(input, backend="tensorflow")
+
+            # ncon labels: positive integers are contracted away, negative
+            # ones stay open; "bond*" strings tie consecutive cores together.
+            links = [[i, -i, "bond"+str(i-1), "bond"+str(i)] for i in range(2, mx)]
+
+            result = tn.ncon(
+                tensors = [x.tensor] + cores,
+                network_structure = [list(range(1,mx+1)), [1, -1, "bond"+str(1)], *links, [mx, -mx, "bond"+str(mx-1)]],
+                backend="tensorflow"
+            )
+
+            return result + bias
+
+        result = tf.vectorized_map(lambda vec: process(vec, self.cores, self.bias), inputs)
+
+        return tf.nn.relu(tf.reshape(result, (-1, self.units)))
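+
+# A hedged usage sketch (assumption, not part of the original commit):
+# units=4 with cores_number=2 keeps the hardcoded (2, 2) reshape in
+# `process` consistent with the core count.
+if __name__ == "__main__":
+    model = tf.keras.Sequential([
+        tf.keras.Input(shape=(2,)),
+        TensorDense(units=4, cores_number=2, bond_dim=2),
+    ])
+    print(model(tf.ones((1, 2))))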
diff --git a/layers/TensorGRU.py b/layers/TensorGRU.py
new file mode 100644
index 0000000..e69de29
diff --git a/layers/TensorLSTM.py b/layers/TensorLSTM.py
new file mode 100644
index 0000000..e69de29
diff --git a/layers/TensorLayer.py b/layers/TensorLayer.py
new file mode 100644
index 0000000..9ec63d4
--- /dev/null
+++ b/layers/TensorLayer.py
@@ -0,0 +1,98 @@
+from functools import reduce
+
+import tensorflow as tf
+from tensorflow.keras.layers import Layer
+
+import tensornetwork as tn
+import numpy as np
+
+
+from ..utils.benchmark import Timer
+
+class Core:
+
+    def __init__(self, shape, bond) -> None:
+        self.shape = shape
+        self.bond = bond
+
+    @staticmethod
+    def create(shape, name=None, random=True):
+        # A default of np.random.random_sample() would be drawn once at
+        # import time; draw a fresh name per call instead.
+        if name is None:
+            name = str(np.random.random_sample())
+        if random:
+            normal_var = tf.random.normal(
+                shape = shape,
+                stddev = 1.0/shape[0]
+            )
+            return tf.Variable(normal_var, trainable = True, name=name)
+
+class TensorLayer(Layer):
+
+    @Timer.wrapper
+    def __init__(self, shape, bond_dim=2, core_number=None) -> None:
+        super().__init__()
+
+        self.shape = shape
+        self.cores_number = core_number if core_number is not None else len(shape)
+        self.bond_dim = bond_dim
+
+        self.cores = []
+
+
+    def build(self, input_shape):
+
+        self.bias = tf.Variable(tf.zeros(shape=self.shape), name="bias", trainable=True)
+
+        self.cores.append(self.add_weight(
+            shape = self.shape[0:2]+(self.bond_dim,),
+            name = "core_1",
+            initializer = 'random_normal',
+            trainable = True
+        ))
+
+        for i in range(1, self.cores_number-1):
+            self.cores.append(self.add_weight(
+                shape = self.shape[i-1:i+1]+(self.bond_dim,self.bond_dim,),
+                name = "core_"+str(i),
+                initializer = 'random_normal',
+                trainable = True
+            ))
+
+        self.cores.append(self.add_weight(
+            shape = self.shape[-2:]+(self.bond_dim,),
+            name = "core_"+str(self.cores_number),
+            initializer = 'random_normal',
+            trainable = True
+        ))
+
+    def call(self, inputs):
+
+        def process(input, cores, bias):
+            input = tf.reshape(input, self.shape)
+            mx = self.cores_number
+
+            cores = [tn.Node(core, backend="tensorflow").tensor for core in cores]
+            x = tn.Node(input, backend="tensorflow")
+
+            # Same ncon convention as TensorDense, with the open (negative)
+            # index listed first on each core.
+            links = [[-i, i, "bond"+str(i-1), "bond"+str(i)] for i in range(2, mx)]
+
+            result = tn.ncon(
+                tensors = [x.tensor] + cores,
+                network_structure = [list(range(1,mx+1)), [-1, 1, "bond"+str(1)], *links, [-mx, mx, "bond"+str(mx-1)]],
+                backend="tensorflow"
+            )
+
+            return result + bias
+
+        result = tf.vectorized_map(lambda vec: process(vec, self.cores, self.bias), inputs)
+        reduction = reduce(lambda x, y: x*y, self.shape)
+
+        return tf.nn.relu(tf.reshape(result, (-1, reduction)))
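+
+# A hedged usage sketch (assumption, not in the original commit). The
+# relative Timer import means this module runs inside the package, e.g.
+# python -m syngular.layers.TensorLayer, rather than as a bare script:
+# layer = TensorLayer(shape=(2, 2), bond_dim=2)
+# print(layer(tf.ones((1, 4))))    # ReLU-activated output of shape (1, 4)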
diff --git a/layers/TensorRNN.py b/layers/TensorRNN.py
new file mode 100644
index 0000000..e69de29
diff --git a/layers/TensorRing.py b/layers/TensorRing.py
new file mode 100644
index 0000000..e69de29
diff --git a/models/TensorNet.py b/models/TensorNet.py
new file mode 100644
index 0000000..063f033
--- /dev/null
+++ b/models/TensorNet.py
@@ -0,0 +1,24 @@
+import tensorflow as tf
+from tensorflow.keras.models import Model
+
+
+class TensorNetwork(Model):
+
+    def compile(self, optimizer, loss_fn):
+        super(TensorNetwork, self).compile()
+
+        self.optimizer = optimizer
+        self.loss_fn = loss_fn
+
+    def train_step(self, data):
+
+        input_tensor, output_tensor = data
+
+        with tf.GradientTape() as tape:
+            predictions = self(input_tensor, training=True)
+            d_loss = self.loss_fn(output_tensor, predictions)
+
+        # Back-propagate through the cores and biases and apply the update.
+        gradients = tape.gradient(d_loss, self.trainable_variables)
+        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
+
+        return {"loss": d_loss}
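+
+# A hedged usage sketch (assumption, not in the original commit): subclass
+# TensorNetwork, give it a call() built from the tensor layers, then train
+# with the custom train_step.
+# from syngular.layers.TensorDense import TensorDense
+#
+# class ExampleNet(TensorNetwork):
+#     def __init__(self):
+#         super().__init__()
+#         self.dense = TensorDense(units=4, cores_number=2)
+#     def call(self, inputs, training=False):
+#         return self.dense(inputs)
+#
+# net = ExampleNet()
+# net.compile(tf.keras.optimizers.Adam(), tf.keras.losses.MeanSquaredError())
+# net.fit(tf.ones((8, 2)), tf.ones((8, 4)), epochs=1)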
diff --git a/optimizers/TensorGradient.py b/optimizers/TensorGradient.py
new file mode 100644
index 0000000..bd1242b
--- /dev/null
+++ b/optimizers/TensorGradient.py
@@ -0,0 +1,76 @@
+import tensorflow as tf
+from tensorflow.keras.optimizers import Optimizer
+
+
+class TensorGradientDescent(Optimizer):
+
+    def __init__(self, learning_rate=0.01, name="TensorGradientDescent", **kwargs):
+        # The name doubles as a variable scope, so it must not contain spaces.
+        super().__init__(name, **kwargs)
+        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
+        self._is_first = True
+
+    def _create_slots(self, var_list):
+        # Slots for the previous variables ("pv") and previous gradients ("pg").
+        for var in var_list:
+            self.add_slot(var, "pv")
+        for var in var_list:
+            self.add_slot(var, "pg")
+
+    @tf.function
+    def _resource_apply_dense(self, grad, var):
+        var_dtype = var.dtype.base_dtype
+        lr_t = self._decayed_lr(var_dtype)
+
+        new_var_m = var - grad * lr_t
+
+        # Previous values of the variable and of its gradient.
+        pv_var = self.get_slot(var, "pv")
+        pg_var = self.get_slot(var, "pg")
+
+        if self._is_first:
+            # On the first step, fall back to plain gradient descent.
+            self._is_first = False
+            new_var = new_var_m
+        else:
+            # Boolean tensor: True where the gradient kept its sign between
+            # two steps, False where it flipped.
+            cond = grad * pg_var >= 0
+
+            # Average of the previous and the current weights, used only
+            # where the sign flipped. (pv + var)/2 can overflow; it can also
+            # be computed as a + (b - a)/2.
+            avg_weights = (pv_var + var) / 2.0
+
+            # tf.where picks new_var_m where cond is True and avg_weights
+            # elsewhere, avoiding an explicit loop.
+            new_var = tf.where(cond, new_var_m, avg_weights)
+
+        # Save the current values in the slots, then apply the update.
+        pv_var.assign(var)
+        pg_var.assign(grad)
+        return var.assign(new_var)
+
+    def _resource_apply_sparse(self, grad, var):
+        raise NotImplementedError
+
+    def get_config(self):
+        base_config = super().get_config()
+        return {
+            **base_config,
+            "learning_rate": self._serialize_hyperparameter("learning_rate"),
+        }
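+
+# A hedged usage sketch (assumption, not in the original commit): the
+# optimizer drops into a standard Keras workflow like any built-in one.
+# model.compile(optimizer=TensorGradientDescent(learning_rate=0.01), loss="mse")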
diff --git a/process/option.py b/process/option.py
new file mode 100644
index 0000000..a78a66d
--- /dev/null
+++ b/process/option.py
@@ -0,0 +1,90 @@
+import numpy as np
+
+from scipy.stats import norm
+
+from typing import NamedTuple
+
+class Law(NamedTuple):
+    name: str
+    min: float
+    max: float
+
+# For the normal law, the two bounds are read as mean and standard deviation.
+normal = Law(name = 'normal', min = 0, max = 1)
+
+class Model:
+    def __init__(self, iterations) -> None:
+        self.iterations = iterations
+
+    def monte_carlo(self, law: Law):
+        if law.name == 'normal':
+            return np.random.normal(law.min, law.max, [1, self.iterations])
+
+class Pricing:
+    def __init__(self) -> None:
+        pass
+
+class BlackScholes(Pricing):
+    # Black-76 form: prices are written on the forward F and discounted
+    # with deflater = exp(-r t).
+    def __init__(self, F, K, t, r, sigma) -> None:
+        self.K = K
+        self.t = t
+        self.r = r
+        self.sigma = sigma
+
+        self.deflater = np.exp(-self.r*self.t)
+        self.F = F
+
+        self.N = norm.cdf
+
+    def d1(self) -> float:
+        return (np.log(self.F / self.K) + (np.power(self.sigma, 2) * self.t / 2.0)) / (self.sigma * np.sqrt(self.t))
+
+    def d2(self) -> float:
+        return (np.log(self.F / self.K) - (np.power(self.sigma, 2) * self.t / 2.0)) / (self.sigma * np.sqrt(self.t))
+
+    def put(self) -> float:
+        return self.deflater * (-self.F * self.N(-self.d1()) + self.K * self.N(-self.d2()))
+
+    def call(self) -> float:
+        return self.deflater * (self.F * self.N(self.d1()) - self.K * self.N(self.d2()))
+
+
+class BlackScholesMerton(BlackScholes):
+    def __init__(self, S, K, t, r, q, sigma) -> None:
+        # The spot is carried to the forward: F = S * exp((r - q) * t).
+        super().__init__(S * np.exp((r-q)*t), K, t, r, sigma)
+        self.S = S
+        self.q = q
+
+class Option(Model):
+
+    # Model.__init__ requires an iteration count; the default is arbitrary.
+    def __init__(self, pricing: Pricing, iterations: int = 10000) -> None:
+        super().__init__(iterations)
+
+        self.pricing = pricing
+
+    def call(self):
+        return self.pricing.call()
+
+    def put(self):
+        return self.pricing.put()
+
+
+if __name__ == "__main__":
+
+    S = 100
+    K = 95
+    q = .05
+    t = 0.5
+    r = 0.1
+    sigma = 0.2
+    p_published_value = 2.4648
+    p_calc = BlackScholesMerton(S, K, t, r, q, sigma).put()
+    print(p_calc)
+    print(abs(p_published_value - p_calc) < 0.0001)
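+
+    # A hedged addition (not in the original commit): put-call parity,
+    # C - P = deflater * (F - K), as a second sanity check.
+    bsm = BlackScholesMerton(S, K, t, r, q, sigma)
+    print(abs((bsm.call() - bsm.put()) - bsm.deflater * (bsm.F - bsm.K)) < 1e-10)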
diff --git a/process/stochastic.py b/process/stochastic.py
new file mode 100644
index 0000000..cecd23c
--- /dev/null
+++ b/process/stochastic.py
@@ -0,0 +1,93 @@
+import matplotlib.pyplot as plt
+
+import numpy as np
+import math
+
+class StochasticProcess:
+    name: str
+
+    def simulate(self):
+        pass
+
+class RandomWalk1D(StochasticProcess):
+    def __init__(self, iteration: int, origin: float = 0, step_set: list[float] = [-1, 0, 1]) -> None:
+        self.iteration = iteration
+        self.step_set = step_set
+        self.origin = [origin]
+
+        self.simulate()
+
+    def simulate(self):
+        self.steps = np.random.choice(a = self.step_set, size = (self.iteration, 1))
+        self.path = np.concatenate([[self.origin], self.steps]).cumsum(0)
+
+class BrownianMotion(StochasticProcess):
+    def __init__(self, origin: float, dt: float, time: float) -> None:
+        self.origin = origin
+        self.current = self.origin
+        self.time = time
+        self.initial_time = time
+        self.dt = dt
+        self.iteration = self.time / self.dt
+
+        self.path = [self.origin]
+
+        self.simulate()
+
+    def simulate(self):
+        while self.time - self.dt > 0:
+            dW = np.random.normal(0, math.sqrt(self.dt))
+            value = self.current + dW
+            self.current = value
+
+            self.path.append(value)
+
+            self.time -= self.dt
+
+    def pdf(self, x: float, t: float = None) -> float:
+        # Density of W at x. With no time given, evaluate along the whole
+        # simulated timeline; the original condition was inverted.
+        if t is None:
+            timeline = np.linspace(self.dt, self.initial_time, int(self.iteration)-1)
+            return 1 / np.sqrt(2 * np.pi * timeline) * np.exp(-x**2 / (2*timeline))
+        else:
+            return 1 / np.sqrt(2 * np.pi * t) * np.exp(-x**2 / (2*t))
+
+WienerProcess = BrownianMotion
+
+class GeometricBrownianMotion(StochasticProcess):
+
+    def __init__(self, initial_price: float, drift: float, volatility: float, dt: float, time: float) -> None:
+        self.current_price = initial_price
+        self.initial_price = initial_price
+
+        self.name = "Geometric Brownian Motion"
+
+        self.drift = drift
+        self.volatility = volatility
+        self.dt = dt
+        self.time = time
+
+        self.prices = []
+
+        self.simulate()
+
+    # Simulate the following equation:
+    #   dYt/Yt = mu.dt + sigma.dWt with dWt ~ N(0, sqrt(dt))
+    # where mu is the drift and sigma the volatility of the motion.
+    def simulate(self):
+        while self.time - self.dt > 0:
+            dWt = np.random.normal(0, math.sqrt(self.dt))
+            dYt = self.drift * self.dt + self.volatility * dWt
+
+            # dYt is the relative increment, so scale it by the price.
+            self.current_price += self.current_price * dYt
+            self.prices.append(self.current_price)
+
+            self.time -= self.dt
+
+# Demo plots, guarded so that importing the module has no side effects.
+if __name__ == "__main__":
+    plt.plot(BrownianMotion(origin=0, dt=1/365, time=1).pdf(0.5))
+    for i in range(4):
+        # plt.plot(GeometricBrownianMotion(100, 0.08, 0.1, 1/365, 1).prices)
+        # plt.plot(RandomWalk1D(origin=0, iteration=10000).path)
+        # plt.plot(BrownianMotion(origin=0, dt=1/365, time=1).path)
+        pass
+    plt.show()
diff --git a/readme.md b/readme.md
new file mode 100644
index 0000000..e69de29
diff --git a/tensor/tensor.py b/tensor/tensor.py
new file mode 100644
index 0000000..36e8a74
--- /dev/null
+++ b/tensor/tensor.py
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+from typing import List, Union
+
+from ..utils.benchmark import Timer
+
+import tensorflow as tf
+import numpy as np
+
+class Tensor:
+
+    def __init__(self, tensor: Union[tf.Tensor, list]) -> None:
+        # Accept plain Python lists as well as tf.Tensor values.
+        self.tensor = tf.convert_to_tensor(tensor)
+
+        self.shape = self.tensor.get_shape().as_list()
+
+    def __add__(self, tensor: tf.Tensor) -> Tensor:
+        return Tensor(self.tensor + tensor)
+
+    @Timer.wrapper
+    def __getitem__(self, index):
+        # Walk down one axis per index so both t[i] and t[i, j] work.
+        value = self.tensor
+        if type(index) == int:
+            index = [index]
+        while len(index) > 0:
+            value = value[index[0]]
+            index = index[1:]
+        return value
+
+    def decompose(self, cores, bond_dim):
+        pass
+
+    @staticmethod
+    def empty(shape):
+        return Tensor(tf.Variable(np.empty(shape), dtype=np.float32))
+
+    @staticmethod
+    def contract(a: Tensor, b: Tensor, index: List[List[int]]):
+        """Simple implementation for two tensors for now: only the shape of
+        the contraction is computed, with 1-based axis pairs in `index`."""
+        # case index = [[x, y]]
+        shape = a.shape[:index[0][0]-1] + a.shape[index[0][0]:] + b.shape[:index[0][1]-1] + b.shape[index[0][1]:]
+
+        c = Tensor.empty(shape)
+        return c
diff --git a/utils/benchmark.py b/utils/benchmark.py
new file mode 100644
index 0000000..b1f468b
--- /dev/null
+++ b/utils/benchmark.py
@@ -0,0 +1,110 @@
+import time
+import threading
+
+import tensorflow as tf
+
+
+""" Google implementation from the TensorFlow 'NMT with Attention' tutorial """
+class ShapeChecker():
+    def __init__(self):
+        # Keep a cache of every axis name seen.
+        self.shapes = {}
+
+    def __call__(self, tensor, names, broadcast=False):
+        if not tf.executing_eagerly():
+            return
+
+        if isinstance(names, str):
+            names = (names,)
+
+        shape = tf.shape(tensor)
+        rank = tf.rank(tensor)
+
+        if rank != len(names):
+            raise ValueError(f'Rank mismatch:\n'
+                             f'    found {rank}: {shape.numpy()}\n'
+                             f'    expected {len(names)}: {names}\n')
+
+        for i, name in enumerate(names):
+            if isinstance(name, int):
+                old_dim = name
+            else:
+                old_dim = self.shapes.get(name, None)
+            new_dim = shape[i]
+
+            if (broadcast and new_dim == 1):
+                continue
+
+            if old_dim is None:
+                # If the axis name is new, add its length to the cache.
+                self.shapes[name] = new_dim
+                continue
+
+            if new_dim != old_dim:
+                raise ValueError(f"Shape mismatch for dimension: '{name}'\n"
+                                 f"    found: {new_dim}\n"
+                                 f"    expected: {old_dim}\n")
+
+class Timer:
+
+    @classmethod
+    def wrapper(cls, func):
+        # Decorator printing the wall-clock cost of every call to `func`.
+        def wrap(*args, **kwargs):
+            name = func.__qualname__
+
+            start = time.time()
+            result = func(*args, **kwargs)
+            end = time.time()
+
+            cost = end - start
+
+            print(f'`{name}` time : {cost}')
+
+            return result
+        return wrap
+
+
+class Thread:
+
+    def __init__(self) -> None:
+        self.event = threading.Event()
+        self.thread = threading.Thread()
+        self.stop = False
+
+    def interval(self, action, elapse, kill=10000):
+        # Run `action` every `elapse` seconds until `kill` seconds have passed.
+        self.thread = threading.Thread(target=self._interval, args=(action, elapse, lambda: self.stop,))
+        self.start()
+
+        self.kill(kill)
+
+        return self
+
+    @Timer.wrapper
+    def _interval(self, action, elapse, stop):
+
+        next = time.time() + elapse
+
+        while not self.event.wait(next - time.time()):
+            next += elapse
+            action()
+            if stop():
+                break
+
+    def start(self):
+        self.thread.start()
+
+    def kill(self, elapse):
+        while not self.event.wait(elapse):
+            self.event.set()
+            self.stop = True
+
+
+if __name__ == "__main__":
+
+    def action():
+        print("Hey")
+
+    # thread = Thread()
+    # inter = thread.interval(action, 0.6, kill=2)
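+
+    # A hedged usage sketch (assumption, not in the original commit):
+    # ShapeChecker caches each axis name the first time it appears and
+    # raises on any later mismatch.
+    check = ShapeChecker()
+    check(tf.zeros((2, 3)), ('batch', 'features'))
+    check(tf.zeros((2, 5)), ('batch', 'hidden'))
+    # check(tf.zeros((4, 3)), ('batch', 'features'))  # would raise: batch is 2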