First quantum glimpse & optimization of einsum with opt_einsum package
antoine311200 committed Dec 20, 2021
1 parent 729b196 commit 0fb4d52
Showing 10 changed files with 190 additions and 41 deletions.
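The main optimization in this commit is replacing numpy.einsum calls with opt_einsum's contract, which accepts the same interleaved operand/index-list calling convention while choosing a cheaper contraction order. A minimal sketch of the drop-in swap (the shapes are illustrative only, not taken from the repository):

import numpy as np
from opt_einsum import contract

# Two tensor cores sharing the index labelled 2 (size 2 on both operands).
a = np.random.rand(3, 2, 4)
b = np.random.rand(5, 2, 6, 7)

# numpy.einsum in the interleaved operand/index-list form used throughout this commit.
out_np = np.einsum(a, [1, 2, 3], b, [4, 2, 6, 5], [1, 4, 6, 3, 5])

# opt_einsum.contract takes the same arguments but optimizes the contraction path,
# which matters once many cores are chained together.
out_oe = contract(a, [1, 2, 3], b, [4, 2, 6, 5], [1, 4, 6, 3, 5])

assert np.allclose(out_np, out_oe)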
2 changes: 2 additions & 0 deletions quantum/__init__.py
@@ -0,0 +1,2 @@
from syngular.quantum.circuit import Circuit
from syngular.quantum.qbit import Qbit
30 changes: 30 additions & 0 deletions quantum/circuit.py
@@ -0,0 +1,30 @@


from syngular.tensor.matrix_product_state import MatrixProductState


class Circuit:

def __init__(self, size, bond=2, initializer="ground", structure=[]):
self.initializer = initializer
self.size = size

self.structure = structure

self.current_step = 0
self.current_state = None
self.states = []

def run(self):
pass

def reset(self):
if self.initializer == 'ground':
self.current_state = MatrixProductState.zeros(
input_shape=(2,)*self.size,
bond_shape=(2,)*(self.size-1)
)
self.states.append(self.current_state)

def step(self):
pass
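A minimal usage sketch of the Circuit class as committed (run() and step() are still stubs, so only reset() does real work; the comments describe expected behaviour, not output from an actual run):

from syngular.quantum import Circuit
import syngular.quantum.gate as gate

# Two-qubit circuit; structure lists (gate, wire) pairs to be applied later.
circ = Circuit(initializer="ground", size=2, structure=[(gate.X, 0)])
circ.reset()                 # builds an all-zero 2-site MatrixProductState
print(circ.current_state)    # the freshly initialized state, also stored in circ.states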
27 changes: 27 additions & 0 deletions quantum/gate.py
@@ -0,0 +1,27 @@
import numpy as np


I = np.array([
[1., 0.],
[0., 1.]
])

X = np.array([
[0., 1.],
[1., 0.]
])

Y = np.array([
[0., -1.j],
[1.j, 0.]
])

Z = np.array([
[1., 0.],
[0., -1.]
])

H = 1/np.sqrt(2) * np.array([
[1., 1.],
[1., -1.]
])
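These are the standard 2x2 single-qubit matrices; a quick numpy sanity check of their algebra (illustrative only):

import numpy as np
from syngular.quantum.gate import I, X, Y, Z, H

zero = np.array([1., 0.])              # |0> as a dense vector
print(H @ zero)                        # ~[0.707, 0.707], i.e. |+>
print(np.allclose(H @ H, I))           # H is its own inverse
print(np.allclose(X @ X, I))           # so is each Pauli
print(np.allclose(X @ Y, 1j * Z))      # XY = iZ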
20 changes: 20 additions & 0 deletions quantum/qbit.py
@@ -0,0 +1,20 @@


from syngular.tensor.matrix_product_state import MatrixProductState
from syngular.quantum import gate

class Qbit:

def __init__(self, size):
self.size = size
self.dim = 2**self.size

self.state = MatrixProductState.zeros((2,)*size, (2,)*(size-1)).decompose()
self.state.real_parameters_number = self.dim

for idx in range(self.state.sites_number-1):
self.state.sites[idx][0] = gate.I
self.state.sites[-1][0][0] = 1.

def to_tensor(self):
return self.state.to_tensor().reshape(self.dim)
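The identity cores plus the final [0, 0, 0] entry make the MPS contract exactly to the computational ground state |0...0> without ever materializing the full 2**size vector. A small sanity check mirroring test_quantum.py at a smaller size, assuming to_tensor() reconstructs the dense array as it does in the tests:

import numpy as np
from syngular.quantum import Qbit

q = Qbit(3)                       # 3 qubits, dim = 8
vec = q.to_tensor()               # dense length-8 vector
print(vec)                        # expected: [1. 0. 0. 0. 0. 0. 0. 0.]
print(np.allclose(vec, np.eye(8)[0]))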
13 changes: 8 additions & 5 deletions tensor/matrix_product_operator.py
@@ -5,6 +5,8 @@
import itertools
from typing import Tuple, List, Union

from opt_einsum import contract

import numpy as np
from scipy import linalg

@@ -160,7 +162,7 @@ def __matmul__(self, mp: Union[MatrixProductOperator, MatrixProductState]):

if isinstance(mp, MatrixProductState):
for idx in range(self.sites_number):
site = np.einsum(mp.sites[idx], [1,2,3], self.sites[idx], [4,2,6,5], [1,4,6,3,5])
site = contract(mp.sites[idx], [1,2,3], self.sites[idx], [4,2,6,5], [1,4,6,3,5])

site = site.reshape((
self.shape[idx][0]*mp.shape[idx][0],
@@ -171,7 +173,7 @@ def __matmul__(self, mp: Union[MatrixProductOperator, MatrixProductState]):
return MatrixProductState.from_sites(sites)
elif isinstance(mp, MatrixProductOperator):
for idx in range(self.sites_number):
site = np.einsum(self.sites[idx], [1,2,3,4], mp.sites[idx], [5,3,6,7], [1,5,2,6,4,7])
site = contract(self.sites[idx], [1,2,3,4], mp.sites[idx], [5,3,6,7], [1,5,2,6,4,7])

site = site.reshape((
self.shape[idx][0]*mp.shape[idx][0],
@@ -288,6 +290,7 @@ def to_tensor(self):

for inp in itertools.product(*range_inp):
for out in itertools.product(*range_out):
print(inp, out)
tensor[(*inp, *out)] = self[inp, out]

return tensor
@@ -461,7 +464,7 @@ def apply(self, operator, indices):
struct += output_shape
# print(struct)

T = np.squeeze(np.einsum(*struct), axis=(1, len(output_shape)-2))
T = np.squeeze(contract(*struct), axis=(1, len(output_shape)-2))

for idx in range(m-1):
jdx = indices[idx]
@@ -505,7 +508,7 @@ def apply(self, operator, indices):
# ]
# struct += output_shape
# print(struct)
T = np.einsum(*struct)
T = contract(*struct)
print(T.shape)

else:
@@ -522,7 +525,7 @@ def retrieve(self, input_indices, output_indices):
einsum_structure.append(self.sites[idx][:, input_indices[idx], output_indices[idx], :])
einsum_structure.append([idx, idx+1])

return np.einsum(*einsum_structure)
return contract(*einsum_structure)


def left_orthonormalization(self, bond_shape=()):
56 changes: 32 additions & 24 deletions tensor/matrix_product_state.py
@@ -5,7 +5,7 @@
import numpy as np
from scipy import linalg


from opt_einsum import contract

class MatrixProductState:

@@ -115,7 +115,7 @@ def __or__(self, mp):
self.sites[idx], [idx,n+idx+1,idx+1],
mp.sites[idx], [2*n+idx+1, n+idx+1, 2*n+idx+2]
]
r = np.einsum(*struct)
r = contract(*struct)
return r.reshape(1)[0]
else:
raise Exception("right-hand site must be a MatrixProductState")
@@ -186,7 +186,6 @@ def from_sites(sites: List[np.ndarray], orthogonality=None, real_parameters_numb
mp.sites = sites
mp.sites_number = len(sites)

mp.decomposed = True
mp.decomposed = True
mp.orthonormalized = orthogonality

@@ -210,6 +209,19 @@
def empty():
return MatrixProductState()

@staticmethod
def zeros(input_shape, bond_shape):
n = len(input_shape)

shape = [(1, input_shape[0],bond_shape[0])]
shape += [(bond_shape[i-1], input_shape[i], bond_shape[i]) for i in range(1, n-1)]
shape += [(bond_shape[n-2], input_shape[n-1], 1)]

print(shape)
sites = []
for idx in range(n):
sites.append(np.zeros(shape=shape[idx]))
return MatrixProductState.from_sites(sites)

"""norm method of MatrixProductState
Compute the norm of the MatrixProductState
@@ -244,32 +256,29 @@ def copy(self):
def decompose(self, mode="left"):
if not self.decomposed:
if mode == "left":
current_matrix = self.tensor
current_shape = self.shape[0][1]
current_rank = 1

n = self.sites_number-1
T = self.tensor


for k in range(self.sites_number-1):

unfolding_matrix = np.reshape(current_matrix, newshape=(current_shape*current_rank, -1))
rank = self.shape[k][2]
L = self.left_matricization(T, index=k)

Q, R = np.linalg.qr(unfolding_matrix, mode="complete")

Q, R = np.linalg.qr(L, mode="complete")
print(Q.shape, R.shape)
rank = self.shape[k][2]
Q = Q[:,:rank]
Q = np.reshape(Q, newshape=(current_rank, current_shape, -1))
R = R[:rank, :]

if self.verbose: print(f"Core {k} with {current_rank} , {current_shape}")
self.sites[k] = self.tensoricization(Q, k)
T = R

self.sites[k] = Q
self.parameters_number += np.prod(self.sites[k].shape)

current_shape = self.shape[k+1][1]
current_rank = rank
current_matrix = R
self.sites[-1] = self.tensoricization(T, n)
self.parameters_number += np.prod(self.sites[-1].shape)

current_matrix = current_matrix[:, :, np.newaxis]
self.sites[-1] = current_matrix

del self.tensor
gc.collect()

@@ -352,7 +361,7 @@ def compress(self, dim: int, mode="left", strict=False):
else:
that = self.copy()

print(that)
# print(that)

if mode == 'left':
# if self.orthonormalized != 'left': self.left_orthonormalization()
@@ -367,8 +376,8 @@

W = S @ R

print(Q.shape, L.shape, S.T.shape, R.shape)
print(W.shape)
# print(Q.shape, L.shape, S.T.shape, R.shape)
# print(W.shape)

l1 = list(that.shape[k])
l2 = list(that.shape[k+1])
@@ -392,8 +401,7 @@ def retrieve(self, indices):
for idx in range(self.sites_number):
einsum_structure.append(self.sites[idx][:, indices[idx], :])
einsum_structure.append([idx, Ellipsis, idx+1])

return np.einsum(*einsum_structure)
return contract(*einsum_structure)


def left_orthonormalization(self, bond_shape=()):
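The new MatrixProductState.zeros factory above builds one zero-filled core per site from input_shape and bond_shape; a worked example of the shape list it constructs (the values follow the formula in the code, they are not output from a run):

from syngular.tensor.matrix_product_state import MatrixProductState

# For input_shape = (2, 2, 2) and bond_shape = (2, 2) the core shapes are:
#   (1, 2, 2)   first site:  (1, input_shape[0], bond_shape[0])
#   (2, 2, 2)   middle site: (bond_shape[0], input_shape[1], bond_shape[1])
#   (2, 2, 1)   last site:   (bond_shape[1], input_shape[2], 1)
Z = MatrixProductState.zeros((2, 2, 2), (2, 2))
print([site.shape for site in Z.sites])    # [(1, 2, 2), (2, 2, 2), (2, 2, 1)]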
21 changes: 11 additions & 10 deletions test/test_mpo.py
@@ -246,7 +246,7 @@ def test_random():

print("---------- MatrixProductOperator.random(input_shape, output_shape, bond_shape) ----------")
start = time.time()
MatrixProductOperator.random((4,4,4), (2,2,2), (3,3,))
MatrixProductOperator.random((4,4,4), (2,2,2), (8,8,))
end = time.time()
print(f"> Execution time : {end-start:.8f}sec")
print("-------------------------------------------------------")
@@ -284,6 +284,7 @@ def test_to_tensor():
tensor = np.arange((2*2*2*2*2*2*2*2)*(2*2*2*2*2*2*2*2)).reshape(2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2)
mpo = MatrixProductOperator(tensor, bond_shape=(2,2,2,2,2,2,2,)).decompose()
print(mpo)
print(mpo.real_parameters_number, mpo.parameters_number)
print("---------- MatrixProductOperator.to_tensor() ----------")
start = time.time()
mpo.to_tensor()
@@ -293,14 +294,14 @@



# test_decompose()
# test_from_sites()
# test_random()
# test_to_tensor()
# test_add()
test_decompose()
test_from_sites()
test_random()
# test_to_tensor() -- very expensive ~1min
test_add()
test_mul()
test_matmul()
# test_dot()
# test_compress()
# test_orthogonality()
# test_apply()
test_dot()
test_compress()
test_orthogonality()
test_apply()
34 changes: 33 additions & 1 deletion test/test_mps.py
@@ -7,6 +7,35 @@
np.set_printoptions(suppress=True)


def test_augment():
x = np.arange(8).reshape((2,2,2))
X = MatrixProductState(x, bond_shape=(2,2)).decompose()

Y = MatrixProductState.zeros((2,2,2),(2,2,))

print("---------- MatrixProductOperator.__add__(a, b) ----------")
start = time.time()
Z = X + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y + Y
end = time.time()
print(f"> Execution time : {end-start:.8f}sec")
print("-------------------------------------------------------")

print(Z)
print(Z.to_tensor())
print(X.to_tensor())
print((Z >> 2).to_tensor())


def test_zeros():
print("---------- MatrixProductOperator.__add__(a, b) ----------")
start = time.time()
Z = MatrixProductState.zeros((2,2,2),(2,2,))
end = time.time()
print(f"> Execution time : {end-start:.8f}sec")
print("-------------------------------------------------------")
print(Z.to_tensor())
print(Z.shape)

def test_add():
x = np.arange(8).reshape((2,2,2))
y = np.arange(8).reshape((2,2,2))
@@ -58,4 +87,7 @@ def test_random():

# test_add()
# test_compress()
test_random()
# test_random()
# test_zeros()

test_augment()
26 changes: 26 additions & 0 deletions test/test_quantum.py
@@ -0,0 +1,26 @@
from syngular.quantum import Circuit, Qbit
import syngular.quantum.gate as gate

circ = Circuit(initializer="ground", size=2, structure=[
(gate.X, 0)
])

size = 30

ground = Qbit(size)

# print(ground.to_tensor())
print(ground.state[(1,)*size])
print(ground.state[(0,)*size])
# print(ground.state.real_parameters_number, ground.state.parameters_number)
# import numpy as np
# from syngular.tensor import MatrixProductState
# ground = np.zeros(shape=(2**size))
# ground[0] = 1
# print(ground)
# qbit2 = MatrixProductState(ground.reshape((2,)*size), (3,)*(size-1)).decompose()
# print(qbit2)
# for site in qbit2.sites:
# print('---')
# print(site)
# print(qbit2.to_tensor().reshape(2**size))
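For scale: the commented-out dense construction above would allocate 2**30 ≈ 1.07e9 float64 amplitudes, roughly 8.6 GB, whereas the bond-2 MPS ground state stores only about 30 × 2 × 2 × 2 ≈ 240 numbers, which is presumably why only the tensor-network path is exercised at size = 30.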
2 changes: 1 addition & 1 deletion trash/deep.py
@@ -1,7 +1,7 @@
import itertools
from PIL.Image import new
from numpy.core.fromnumeric import size
from syngular.tensor.tensor_train import MatrixProductOperator, MatrixProductState
from syngular.trash.tensor_train import MatrixProductOperator, MatrixProductState

import numpy as np

