Sketch out conj method #46

Open · wants to merge 15 commits into base: dev
18 changes: 18 additions & 0 deletions Dockerfile.dev
@@ -0,0 +1,18 @@
FROM nvidia/cuda:11.0.3-runtime-ubuntu20.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update -y && \
apt install -y python3 python3-pip git htop vim

# Make sure to recursively clone the git repo (with submodules) before building
WORKDIR /app
RUN pip install quimb pyrofiler cartesian-explorer opt_einsum
RUN pip install --no-binary pynauty pynauty
# Run the commands below after the container starts - the volume isn't mounted at build time
# RUN cd qtree && pip install .
# RUN pip install .
RUN pip install pdbpp
RUN pip install tensornetwork

ENTRYPOINT ["bash"]
4 changes: 4 additions & 0 deletions dev.sh
@@ -0,0 +1,4 @@
#!/bin/bash

docker build -f Dockerfile.dev -t dev .
# mount the repo at /app; quote $(pwd) so paths with spaces work
docker run -v "$(pwd)":/app -it dev
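
Once the container is running with the repo mounted at /app, the editable installs that the Dockerfile comments defer can be done manually. A minimal sketch, assuming the qtree checkout sits at the repo root as a submodule:

cd /app/qtree && pip install .
cd /app && pip install .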
74 changes: 74 additions & 0 deletions qtensor/Simulate.py
@@ -155,3 +155,77 @@ def simulate(self, qc, **params):
sim = cirq.Simulator(**params)
return sim.simulate(qc)

if __name__ == "__main__":
import networkx as nx
import numpy as np
import tensornetwork as tn

G = nx.random_regular_graph(3, 10)
gamma, beta = [np.pi/3], [np.pi/2]

from qtensor import QtreeQAOAComposer, QAOAQtreeSimulator
composer = QtreeQAOAComposer(graph=G, gamma=gamma, beta=beta)
composer.ansatz_state()

sim = QAOAQtreeSimulator(composer)

    # run simulate_batch to prepare the buckets and init the tn
sim.simulate_batch(composer.circuit)
buckets = sim.tn.buckets

# now let's use these buckets to square the TN
def conj(buckets):
# turn each bucket into a node
nodes = []
for bucket in buckets:
node = tn.Node(np.array(bucket))
nodes.append(node)

# now for each node, append its conjugate
conj_nodes = []
for node in nodes:
            conj_tensor = np.conj(node.tensor)  # avoid shadowing the enclosing conj()
            conj_node = tn.Node(conj_tensor)
conj_nodes.append(conj_node)


indices = {}

for node in nodes:
for conj_node in conj_nodes:
                # check if there is a shared index between a node and a conj_node
                # NB: in tensornetwork each Node owns its dangling Edge objects,
                # so distinct nodes never share them; the real matching should
                # eventually come from qtree's bucket variables
                node_indices = node.get_all_dangling()
                conj_indices = conj_node.get_all_dangling()

                shared_indices = set(node_indices).intersection(set(conj_indices))
if shared_indices:
if node not in indices:
indices[node] = shared_indices
else:
indices[node].update(shared_indices)

if conj_node not in indices:
indices[conj_node] = shared_indices
else:
indices[conj_node].update(shared_indices)

for node, shared_indices in indices.items():
for pair_node in indices.keys():
if node == pair_node:
continue
                # if there are shared indices, connect a pair of dangling edges
                # (placeholder wiring: tn.connect joins Edge objects, not Nodes)
                if shared_indices.intersection(set(pair_node.get_all_dangling())):
                    edge = tn.connect(node.get_all_dangling()[0],
                                      pair_node.get_all_dangling()[0])

# TODO: TNAdapter should support tensornetwork.Node
Comment (Collaborator Author): @danlkv this is my TODO for next week

# So that we can contract this resulting tensor network directly
return []

tn_with_conj = conj(buckets)

# TODO: contract or sample using tn_with_conj based on method in other branch



log.debug('hello world')
import pdb; pdb.set_trace()
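
The TODO above leaves the squaring-and-contraction step open. A minimal sketch of what it could look like against the tensornetwork public API (tn.Node, tn.connect, tn.contractors.greedy); pairing dangling edges by position is an assumption here, since the real matching should come from qtree's bucket variables:

import numpy as np
import tensornetwork as tn

t = np.random.randn(2, 2) + 1j * np.random.randn(2, 2)
node = tn.Node(t)
conj_node = tn.Node(np.conj(t))

# connect each dangling edge of the node to the same-position edge of its conjugate
for edge, conj_edge in zip(node.get_all_dangling(), conj_node.get_all_dangling()):
    tn.connect(edge, conj_edge)

# the network is now closed, so greedy contraction yields a scalar:
# sum_ij t_ij * conj(t)_ij, i.e. the squared Frobenius norm of t
result = tn.contractors.greedy([node, conj_node])
assert np.isclose(result.tensor, np.sum(np.abs(t) ** 2))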
51 changes: 51 additions & 0 deletions scratchpad/tn_api/test_tn_api.py
@@ -0,0 +1,51 @@
import random
import numpy as np
from tn import TensorNetwork
from functools import reduce

def test_add_numpy_array():
a = TensorNetwork()
t = np.random.randn(2, 2)
a.add(t)
b = TensorNetwork()
b.add(a)
assert b == a


def test_composition():
"""
tensor network adding is associative
"""
tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
stack = TensorNetwork()
# (((0A)B)C)D
for tn in tns:
stack.add(tn)
# A(B(CD))
    for i in range(len(tns) - 1):  # len(tns)-1 pairs; avoid wrapping around to tns[-1]
        l = tns[len(tns)-2-i]
        r = tns[len(tns)-1-i]
        l.add(r)

assert stack == tns[0]

def test_edges_consistent_ports():
tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
tn = TensorNetwork()
# (((0A)B)C)D
for t in tns:
tn.add(t)

port_data = {}
for e in tn._edges:
for p in e:
port_data[p.tensor_ref] = port_data.get(p.tensor_ref, [])
port_data[p.tensor_ref].append(p.ix)
for i, t in enumerate(tn._tensors):
assert len(t.shape) == len(port_data[i])



if __name__ == "__main__":
test_edges_consistent_ports()
test_composition()
202 changes: 202 additions & 0 deletions scratchpad/tn_api/tn.py
@@ -0,0 +1,202 @@
import numpy as np
import math
from dataclasses import dataclass
from typing import TypeVar, Generic, Iterable

class Array(np.ndarray):
shape: tuple

D = TypeVar('D') # tensor data type (numpy, torch, etc.)

class ContractionInfo:
pass

class TensorNetworkIFC(Generic[D]):
def __init__(self, *args, **kwargs):
...

def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
return ContractionInfo()

# slice not inplace
def slice(self, slice_dict: dict) -> 'TensorNetwork':
...

# contract to produce a new tensor
def contract(self, contraction_info: ContractionInfo) -> D:
...

def copy(self):
...

def add(self, other: "TensorNetworkIFC[D] | D"):
...


@classmethod
def new_random_cpu(cls, dims: Iterable[int])-> 'TensorNetworkIFC[D]':
...

    def __eq__(self, other):
...


N = TypeVar('N', bound=np.ndarray)

@dataclass
class Port:
tensor_ref: int
ix: int

class TensorNetwork(TensorNetworkIFC[np.ndarray]):
tensors: Iterable[np.ndarray]
shape: tuple
edges: tuple

def __init__(self, *args, **kwargs):
self._tensors = []
self._edges = tuple()
self.shape = tuple()
self._indices = {}

# slice not inplace
def slice(self, slice_dict: dict) -> 'TensorNetwork':
Comment (Collaborator Author): @danlkv thanks for the previous comment, I believe I've addressed that in this update

tn = self.copy()

# hydrate the index map of edge index to tensor and dimension (via port object)
for idx, edge in enumerate(tn._edges):
for port in edge:
if idx in tn._indices:
tn._indices[idx].append(port)
else:
tn._indices[idx] = [port]

for idx, slice_val in slice_dict.items():
# don't care about indices that are not in TN
if idx not in tn._indices:
continue

            # _edges is a tuple, so rebuild it without the sliced edge
            # (caveat: positions shift if several indices are sliced at once)
            edges = list(tn._edges)
            edge = edges.pop(idx)
            tn._edges = tuple(edges)
# get all tensors indexed by this edge
tensors_to_slice = set(port.tensor_ref for port in edge)
# store slice index and value for each tensor
local_slices_dict = {}
            for current_tensor_ref in tensors_to_slice:
                # per-tensor slice spec; renamed to avoid shadowing the slice_dict argument
                tensor_slice_dict = {}
                # get all ports for the current tensor
                current_tensor_ref_ports = [port for port in edge if port.tensor_ref == current_tensor_ref]
                for current_port in current_tensor_ref_ports:
                    tensor_slice_dict[current_port.ix] = slice_val
                # store the slice params for this tensor in the local dict
                local_slices_dict[current_tensor_ref] = tensor_slice_dict

            # now use the local slice dict to slice each tensor
            for current_tensor_ref, tensor_slice_dict in local_slices_dict.items():
                slice_bounds = []
                current_tensor = tn._tensors[current_tensor_ref]
                for dim_ix in range(current_tensor.ndim):
                    # slice the sliced dimensions, keep the rest whole
                    slice_bounds.append(tensor_slice_dict.get(dim_ix, slice(None)))
                tn._tensors[current_tensor_ref] = tn._tensors[current_tensor_ref][tuple(slice_bounds)]

return tn

    def copy(self):
        new = TensorNetwork()
        # copy the container so slicing the copy does not mutate the original
        new._tensors = list(self._tensors)
        new._edges = self._edges
        new.shape = self.shape
        new._indices = dict(self._indices)
        return new

def add(self, other: "TensorNetwork | np.ndarray"):
if not isinstance(other, TensorNetwork):
self._tensors.append(other)
self.shape = self.shape + other.shape
else:
m = len(self._tensors)
n = len(self.shape)
            # -- other's edge ports must refer to the shifted tensor locations
enew = []
for e in other._edges:
e_ = []
for p in e:
if p.tensor_ref == -1:
e_.append(Port(tensor_ref=-1, ix=p.ix+n))
else:
e_.append(Port(tensor_ref=p.tensor_ref+m, ix=p.ix))
enew.append(tuple(e_))

self._edges += tuple(enew)
self._tensors += other._tensors
self.shape += other.shape

# contract to produce a new tensor
def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
raise NotImplementedError()
Comment (Collaborator Author): contraction_info here is the indices I'd want in the output - einsum.

Comment (Collaborator Author): test case: contracted A equals the contraction of sliceA[0] plus that of sliceA[1]. This holds for any index, so one can be selected at random (see the test sketch after this file).
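
A minimal sketch of that einsum-based contract, assuming every tensor port belongs to some edge, every edge touches at least one tensor port, each output (tensor_ref == -1) port sits in its own edge, and the network is small enough for single-letter subscripts; contract_einsum is a hypothetical free function, not part of this PR:

import string
import numpy as np

def contract_einsum(net: 'TensorNetwork') -> np.ndarray:
    # one subscript symbol per edge
    per_tensor = [[''] * t.ndim for t in net._tensors]
    out_symbols = {}
    for eix, edge in enumerate(net._edges):
        symbol = string.ascii_letters[eix]
        for p in edge:
            if p.tensor_ref == -1:
                # "self" port: this edge's symbol appears in the output
                out_symbols[p.ix] = symbol
            else:
                per_tensor[p.tensor_ref][p.ix] = symbol
    inputs = ','.join(''.join(syms) for syms in per_tensor)
    output = ''.join(out_symbols[i] for i in sorted(out_symbols))
    return np.einsum(f"{inputs}->{output}", *net._tensors)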


def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
raise NotImplementedError()


@classmethod
def new_random_cpu(cls, count, size, dim: int):
out = cls()
for i in range(count):
t: np.ndarray = np.random.random((dim, )*size)
out.add(t)
# arbitrary number of output indices
out_dims = np.random.randint(low=0, high=len(out.shape))
tensor_dims = len(out.shape)
out.shape = (dim, )*out_dims
# -- random connectivity
        # A hypergraph can be generated as a partition of all ports into
        # E parts, where E is the number of edges. Isolated vertices are
        # equivalent to vertices whose single edge contains only them.
# arbitrary max number of edges, must be less than total indices
edges_cnt = np.random.randint(low=1, high=tensor_dims+out_dims)
# a partition can be implemented using a random function
partition_fn = lambda : np.random.randint(low=0, high=edges_cnt)
partition_dict = {}
for t_ref, t in enumerate(out._tensors):
for i in range(t.ndim):
eix = partition_fn()
new_port = Port(tensor_ref=t_ref, ix=i)
partition_dict[eix] = partition_dict.get(eix, [])
partition_dict[eix].append(new_port)

# add "self" tensor indices to partition
for i in range(len(out.shape)):
eix = partition_fn()
new_port = Port(tensor_ref=-1, ix=i)
partition_dict[eix] = partition_dict.get(eix, [])
partition_dict[eix].append(new_port)

edges = []
for i in range(edges_cnt):
p = partition_dict.get(i)
if p is not None:
edges.append(tuple(p))
out._edges = tuple(edges)
return out

def __eq__(self, other):
if self.shape != other.shape:
return False
if self._edges != other._edges:
return False
return all((a==b).all() for a, b in zip(self._tensors, other._tensors))

def __repr__(self):
return f"TensorNetwork({self.shape})<{self._tensors}, {self._edges}>"



if __name__ == "__main__":
tn = TensorNetwork.new_random_cpu(2, 3, 4)
slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
sliced_tn = tn.slice(slice_dict)
import pdb; pdb.set_trace()
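
A sketch of the test described in the review comment above: contracting the full network equals summing the contractions of its slices over any one edge index. contract_all stands in for the still-unimplemented TensorNetwork.contract:

import numpy as np

def check_slice_identity(net, edge_ix, dim, contract_all):
    # contract(A) == sum over v of contract(A sliced at edge_ix = v)
    full = contract_all(net)
    sliced = sum(
        contract_all(net.slice({edge_ix: slice(v, v + 1)}))
        for v in range(dim)
    )
    assert np.allclose(full, sliced)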