Commit e833643 (0 parents)
Showing 17 changed files with 921 additions and 0 deletions.
@@ -0,0 +1,2 @@
__pycache__/
.git/
@@ -0,0 +1,105 @@
from __future__ import annotations

import numpy as np
import pandas as pd

import math
import time

from typing import NamedTuple, Union, Callable

# progressbar2 exposes ProgressBar at the package root
from progressbar import ProgressBar

from sklearn.model_selection import ParameterGrid
from scipy.stats import gamma, beta, uniform

from syngular.utils.benchmark import Timer


class Parameter(NamedTuple):
    name: str
    # Either a percentile -> value function (e.g. a scipy ppf) or a
    # precomputed list/array of values.
    call: Union[Callable[..., float], list[float], np.ndarray]


class Dataset:

    def __init__(self, increment, params: list[Parameter]) -> None:
        self.increment = increment
        self.percentiles = pd.Series(np.linspace(0, 0.99, self.increment))

        self.params = params
        self.grid = {}
        self.pregrid = {}

        self.bar = ProgressBar()
        self.dataframe = pd.DataFrame()

    @Timer.wrapper
    def generate(self):
        for p in self.params:
            print(p.name)
            if isinstance(p.call, (list, np.ndarray)):
                # if len(p.call) == self.increment:
                self.pregrid[p.name] = p.call
                # else:
                #     raise IndexError("Parameter list of values must be the same size of the increment")
            elif callable(p.call):
                # ParameterGrid expects list/array values, so convert the Series.
                self.pregrid[p.name] = self.percentiles.apply(p.call).to_numpy()
            else:
                raise TypeError("Parameter call must be a float function or a list of floats")

        self.grid = ParameterGrid(self.pregrid)

        # DataFrame.append was removed in pandas 2.0: collect the rows, concat once.
        rows = [pd.Series(params) for params in self.bar(self.grid)]
        if rows:
            self.dataframe = pd.concat([self.dataframe, pd.DataFrame(rows)], ignore_index=True)

        # print(self.dataframe.head())

        return self

    def add_column(self, name, col):
        self.dataframe[name] = col

    def __add__(self, dataset: Dataset):
        return Dataset(self.increment + dataset.increment, self.params + dataset.params)

    @staticmethod
    def empty():
        return Dataset(0, [])

    @staticmethod
    def concatenate(*datasets: Dataset):
        dt_list = list(datasets)
        dt_concat = Dataset.empty()

        while len(dt_list) > 0:
            dt = dt_list.pop()
            dt_concat += dt
        return dt_concat

    def __str__(self):
        return self.dataframe.__repr__()


# dt = Dataset(2, [
#     Parameter(name="S", call=lambda x: gamma.ppf(x, a=100, scale=1)),
#     Parameter(name="K", call=lambda x: uniform.ppf(x, 50, 200)),
#     Parameter(name="R", call=lambda x: uniform.ppf(x, 0.01, 0.18)),
#     Parameter(name="D", call=lambda x: uniform.ppf(x, 0.01, 0.18)),
#     Parameter(name="sigma", call=lambda x: beta.ppf(x, a=2, b=5) + 0.001)
# ])

# dt2 = Dataset(2, [
#     Parameter(name="S", call=lambda x: gamma.ppf(x, a=100, scale=1)),
#     Parameter(name="K", call=lambda x: uniform.ppf(x, 50, 200)),
#     Parameter(name="R", call=lambda x: uniform.ppf(x, 0.01, 0.18)),
#     Parameter(name="D", call=lambda x: uniform.ppf(x, 0.01, 0.18)),
#     Parameter(name="sigma", call=lambda x: beta.ppf(x, a=2, b=5) + 0.001)
# ])

# dt3 = Dataset.concatenate(dt, dt2)
# dt3.generate()

# print(dt3)
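
A minimal runnable sketch of the class above, adapted from the commented-out example (it assumes syngular.utils.benchmark.Timer and the progressbar2 package are installed; the parameter names and distributions are taken from the example):

if __name__ == "__main__":
    # 2 percentile steps per parameter -> ParameterGrid enumerates 2 x 2 = 4 rows.
    dt = Dataset(2, [
        Parameter(name="S", call=lambda x: gamma.ppf(x, a=100, scale=1)),
        Parameter(name="K", call=lambda x: uniform.ppf(x, 50, 200)),
    ])
    dt.generate()
    print(dt)  # prints the generated dataframe via __str__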
@@ -0,0 +1,180 @@
from __future__ import print_function, annotations
from functools import reduce

import numpy as np


def unfold_shape(shape):
    # Total number of elements for a given shape. This must be the product,
    # not the sum: both the reshape-compatibility check in forward() and the
    # initialisation scale depend on the element count.
    return reduce(lambda x, y: x * y, shape)


class TensorTrainLayer:

    def __init__(self) -> None:
        pass

    def build(self):
        pass

    def call(self):
        pass

    def forward(self, input):
        return input

    def backward(self, input, grad_output):
        num_units = input.shape[1]

        # Identity layer: d(output)/d(input) is the identity matrix.
        d_layer_d_input = np.eye(num_units)

        return np.dot(grad_output, d_layer_d_input)

    def train(self):
        pass


class ReLU(TensorTrainLayer):

    def forward(self, input):
        relu_forward = np.maximum(0, input)
        return relu_forward

    def backward(self, input, grad_output):
        relu_grad = input > 0
        return grad_output * relu_grad


class Dense(TensorTrainLayer):

    def __init__(self, input_shape, output_shape, bond_dim=2, core_number=None, learning_rate=0.01) -> None:

        if len(input_shape) != len(output_shape):
            raise ValueError("input shape and output shape should have the same length")

        if core_number is not None and core_number != len(input_shape):
            raise ValueError("number of cores does not match the size of input_shape")

        self.input_shape = input_shape
        self.unfold_input_shape = unfold_shape(self.input_shape)
        self.output_shape = output_shape
        self.unfold_output_shape = unfold_shape(self.output_shape)

        self.cores_number = core_number if core_number is not None else len(input_shape)
        self.bond_dim = bond_dim

        self.learning_rate = learning_rate

        self.cores = []
        self.bias = []

    def __get_core_shape(self, index):
        # Boundary cores carry one bond index, interior cores carry two.
        if index == 0 or index == self.cores_number - 1:
            return (self.input_shape[index], self.output_shape[index], self.bond_dim,)
        else:
            return (self.input_shape[index], self.output_shape[index], self.bond_dim, self.bond_dim,)

    def __add_core(self, name, core_type):
        index = len(self.cores)

        shape = self.__get_core_shape(index)
        size = unfold_shape(shape)

        print(shape)

        # He-style normal initialisation, scaled by the core's element count.
        if core_type == 'middle' and 0 < index < self.cores_number - 1:
            return np.random.normal(loc=0.0, scale=np.sqrt(2 / size), size=shape)
        elif core_type == 'extreme' and (index == 0 or index == self.cores_number - 1):
            return np.random.normal(loc=0.0, scale=np.sqrt(2 / size), size=shape)
        else:
            raise ValueError('the type of core to add does not match the current cores structure')

    def build(self):
        self.cores.append(self.__add_core(name='core_1', core_type='extreme'))

        for i in range(1, self.cores_number - 1):
            self.cores.append(self.__add_core(name="core_" + str(i), core_type='middle'))

        self.cores.append(self.__add_core(name='core_' + str(self.cores_number), core_type='extreme'))

        self.bias = np.zeros(shape=self.output_shape)

    def call(self):
        pass

    def forward(self, input):
        input = np.array(input)
        unfold_input = unfold_shape(input.shape)

        if self.unfold_input_shape != unfold_input:
            exception = f"input of shape {input.shape} cannot be reshaped into {self.input_shape} [{unfold_input} != {self.unfold_input_shape}]"
            raise ValueError(exception)

        input_tensor = np.reshape(input, newshape=self.input_shape)

        print(input_tensor)

        # Build the interleaved argument list for np.einsum:
        # legs 0..n-1 label the input dimensions, n..2n-1 the output
        # dimensions, and 2n..3n-2 the bonds shared by neighbouring cores.
        einsum_structure = []
        input_index = list(range(self.cores_number))

        einsum_structure.append(input_tensor)
        einsum_structure.append(input_index)

        for idx in range(self.cores_number):
            ipt_index = idx
            opt_index = self.cores_number + idx
            einsum_structure.append(self.cores[idx])
            if idx == 0:
                bnd_index = 2 * self.cores_number
                einsum_structure.append([ipt_index, opt_index, bnd_index])
            elif idx == self.cores_number - 1:
                bnd_index = 3 * self.cores_number - 2
                einsum_structure.append([ipt_index, opt_index, bnd_index])
            else:
                bnd_index_1 = 2 * self.cores_number + idx - 1
                bnd_index_2 = 2 * self.cores_number + idx
                einsum_structure.append([ipt_index, opt_index, bnd_index_1, bnd_index_2])

        output_index = [self.cores_number + idx for idx in range(self.cores_number)]

        einsum_structure.append(output_index)

        print("Structure")
        print(einsum_structure)
        print(len(einsum_structure))

        contraction = np.einsum(*einsum_structure)

        print("Contraction")
        print(contraction)

        result = contraction + self.bias
        print(result)

        return result

    def backward(self):
        pass

    def train(self):
        pass


if __name__ == "__main__":

    layer = Dense((2, 2), (3, 3), bond_dim=2)
    layer.build()

    print("Cores")
    print(layer.cores)
    print("Bias")
    print(layer.bias)

    layer.forward([[1, 4], [2, 5]])
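
To make the index bookkeeping in Dense.forward concrete, here is a standalone sketch of the two-core case exercised in __main__: input legs 0 and 1, output legs 2 and 3, and a single shared bond leg 4 (for n = 2, both 2n and 3n - 2 equal 4, so the first and last cores contract over the same bond):

import numpy as np

x = np.arange(4).reshape(2, 2)          # input tensor, legs (0, 1)
c1 = np.random.normal(size=(2, 3, 2))   # first core, legs (0, 2, 4)
c2 = np.random.normal(size=(2, 3, 2))   # last core, legs (1, 3, 4)

# The same interleaved einsum call that forward() assembles programmatically.
out = np.einsum(x, [0, 1], c1, [0, 2, 4], c2, [1, 3, 4], [2, 3])
print(out.shape)  # (3, 3) -- the output_shape of Dense((2, 2), (3, 3))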
Empty file.
@@ -0,0 +1,88 @@
from functools import reduce

import tensorflow as tf
from tensorflow.keras.layers import Layer

import tensornetwork as tn
import numpy as np


class TensorDense(Layer):

    def __init__(self, units, cores_number, bond_dim=2, shape=None) -> None:
        super(TensorDense, self).__init__()

        self.units = units
        self.cores_number = cores_number
        self.bond_dim = bond_dim

        if shape is None:
            # Split `units` into cores_number equal factors, e.g. 9 units
            # over 2 cores -> [3, 3]. round() avoids int() truncating
            # values like 1.999... for non-exact floating-point roots.
            roots = int(round(np.power(self.units, 1 / self.cores_number)))
            self.shape = [roots] * self.cores_number
        else:
            self.shape = shape

        self.cores = []

    def build(self, input_shape):

        self.bias = tf.Variable(tf.zeros(shape=self.shape), name="bias", trainable=True)

        # self.shape_input = []

        # First core: (input dim, output dim, bond).
        self.cores.append(self.add_weight(
            shape=(input_shape[1], self.shape[0], self.bond_dim,),
            name="core_1",
            initializer='random_normal',
            trainable=True
        ))
        # self.shape_input.append(input_shape[1])

        # Interior cores carry two bond indices.
        for i in range(1, self.cores_number - 1):
            self.cores.append(self.add_weight(
                shape=(input_shape[1], self.shape[i], self.bond_dim, self.bond_dim,),
                name="core_" + str(i),
                initializer='random_normal',
                trainable=True
            ))
            # self.shape_input.append(input_shape[1])

        # Last core: (input dim, output dim, bond).
        self.cores.append(self.add_weight(
            shape=(input_shape[1], self.shape[-1], self.bond_dim,),
            name="core_" + str(self.cores_number),
            initializer='random_normal',
            trainable=True
        ))
        # self.shape_input.append(input_shape[1])
        # self.shape_input = tuple(self.shape_input)

    def call(self, inputs):

        def process(input, cores, bias):
            # unfold = tf.reshape(input, [-1])
            # reduction = reduce(lambda x, y: x*y, self.shape_input)
            # padding = tf.convert_to_tensor(np.zeros((reduction-unfold.shape[0]), dtype="float32"))
            # input = tf.reshape(tf.concat(values=[input, padding], axis=0), self.shape_input)

            # NOTE: hardcoded for a two-core layer -- the sample vector is
            # duplicated and reshaped into a (2, 2) input tensor.
            input = [input, input]
            input = tf.reshape(input, (2, 2))

            mx = self.cores_number

            cores = [tn.Node(core, backend="tensorflow").tensor for core in cores]
            x = tn.Node(input, backend="tensorflow")

            # Interior cores: [input leg, output leg, left bond, right bond].
            links = [[i, -i, "bond" + str(i - 1), "bond" + str(i)] for i in range(2, mx)]

            # print([list(range(1, mx+1)), [1, -1, "bond"+str(1)], *links, [mx, -mx, "bond"+str(mx-1)]])

            result = tn.ncon(
                tensors=[x.tensor] + cores,
                network_structure=[list(range(1, mx + 1)), [1, -1, "bond" + str(1)], *links, [mx, -mx, "bond" + str(mx - 1)]],
                backend="tensorflow"
            )

            return result + bias

        # Apply the contraction independently to each sample in the batch.
        result = tf.vectorized_map(lambda vec: process(vec, self.cores, self.bias), inputs)

        return tf.nn.relu(tf.reshape(result, (-1, self.units)))
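
A minimal usage sketch for the layer above. Note the constraints inherited from the code: the hardcoded (2, 2) reshape in process means it only handles cores_number=2 with 2-dimensional input vectors, and it assumes a tensornetwork version whose ncon accepts the string bond labels used above:

layer = TensorDense(units=9, cores_number=2, bond_dim=2)  # shape defaults to [3, 3]

batch = tf.random.normal((4, 2))  # four 2-dimensional samples
out = layer(batch)                # first call triggers build()
print(out.shape)                  # (4, 9) after the final reshape + relu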
Empty file.
Empty file.