Sketch out conj method #46

Open · wants to merge 15 commits into dev
18 changes: 18 additions & 0 deletions Dockerfile.dev
@@ -0,0 +1,18 @@
FROM nvidia/cuda:11.0.3-runtime-ubuntu20.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update -y && \
apt install -y python3 python3-pip git htop vim

# Make sure you first recursively clone down the git repo before building
WORKDIR /app
RUN pip install quimb pyrofiler cartesian-explorer opt_einsum
RUN pip install --no-binary pynauty pynauty
# Run the below commands after the container opens - because volume hasn't mounted yet
# RUN cd qtree && pip install .
# RUN pip install .
RUN pip install pdbpp
RUN pip install tensornetwork

ENTRYPOINT ["bash"]
4 changes: 4 additions & 0 deletions dev.sh
@@ -0,0 +1,4 @@
#!/bin/bash

docker build -f Dockerfile.dev -t dev .
docker run -v "$(pwd)":/app -it dev
74 changes: 74 additions & 0 deletions qtensor/Simulate.py
@@ -155,3 +155,77 @@ def simulate(self, qc, **params):
sim = cirq.Simulator(**params)
return sim.simulate(qc)

if __name__=="__main__":
import networkx as nx
import numpy as np
import tensornetwork as tn

G = nx.random_regular_graph(3, 10)
gamma, beta = [np.pi/3], [np.pi/2]

from qtensor import QtreeQAOAComposer, QAOAQtreeSimulator
composer = QtreeQAOAComposer(graph=G, gamma=gamma, beta=beta)
composer.ansatz_state()

sim = QAOAQtreeSimulator(composer)

# now let's run the prepare buckets method to init the tn
sim.simulate_batch(composer.circuit)
buckets = sim.tn.buckets

# now let's use these buckets to square the TN
def conj(buckets):
# turn each bucket into a node
nodes = []
for bucket in buckets:
node = tn.Node(np.array(bucket))
nodes.append(node)

# now for each node, append its conjugate
conj_nodes = []
for node in nodes:
conj_tensor = np.conj(node.tensor)
conj_node = tn.Node(conj_tensor)
conj_nodes.append(conj_node)


indices = {}

for node in nodes:
for conj_node in conj_nodes:
# check if there is a shared index between a node and a conj_node
node_indices = node.get_all_dangling()
conj_indices = conj_node.get_all_dangling()

shared_indices = set(node_indices).intersection(set(conj_indices))
if shared_indices:
if node not in indices:
indices[node] = shared_indices
else:
indices[node].update(shared_indices)

if conj_node not in indices:
indices[conj_node] = shared_indices
else:
indices[conj_node].update(shared_indices)

for node, shared_indices in indices.items():
for pair_node in indices.keys():
if node == pair_node:
continue
# if there are shared indices, connect an edge
if shared_indices.intersection(set(pair_node.get_all_dangling())):
edge = tn.connect(node, pair_node)

# TODO: TNAdapter should support tensornetwork.Node
Collaborator (Author) commented:
@danlkv this is my TODO for next week
# So that we can contract this resulting tensor network directly
return []

tn_with_conj = conj(buckets)

# TODO: contract or sample using tn_with_conj based on method in other branch



log.debug('hello world')
import pdb; pdb.set_trace()
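If conj is eventually changed to return its connected list of tensornetwork Nodes rather than an empty list, one option for the contraction TODO above is tensornetwork's built-in contractors. A minimal, hypothetical sketch (the node-list return value and a fully connected, scalar network are assumptions, not what this diff does yet):

import tensornetwork as tn

# Hypothetical usage, assuming conj() returns its list of connected Nodes.
# greedy() contracts the whole network; if any dangling edges remained,
# they would have to be passed explicitly via output_edge_order.
nodes = conj(buckets)
result = tn.contractors.greedy(nodes)
print(result.tensor)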
51 changes: 51 additions & 0 deletions scratchpad/tn_api/test_tn_api.py
@@ -0,0 +1,51 @@
import random
import numpy as np
from tn import TensorNetwork
from functools import reduce

def test_add_numpy_array():
a = TensorNetwork()
t = np.random.randn(2, 2)
a.add(t)
b = TensorNetwork()
b.add(a)
assert b == a


def test_composition():
"""
tensor network adding is associative
"""
tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
stack = TensorNetwork()
# (((0A)B)C)D
for tn in tns:
stack.add(tn)
# A(B(CD))
for i in range(len(tns) - 1):
l = tns[len(tns)-2-i]
r = tns[len(tns)-1-i]
l.add(r)

assert stack == tns[0]

def test_edges_consistent_ports():
tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
tn = TensorNetwork()
# (((0A)B)C)D
for t in tns:
tn.add(t)

port_data = {}
for e in tn._edges:
for p in e:
port_data[p.tensor_ref] = port_data.get(p.tensor_ref, [])
port_data[p.tensor_ref].append(p.ix)
for i, t in enumerate(tn._tensors):
assert len(t.shape) == len(port_data[i])



if __name__ == "__main__":
test_add_numpy_array()
test_edges_consistent_ports()
test_composition()
173 changes: 173 additions & 0 deletions scratchpad/tn_api/tn.py
@@ -0,0 +1,173 @@
import numpy as np
import math
from dataclasses import dataclass
from typing import TypeVar, Generic, Iterable

class Array(np.ndarray):
shape: tuple

D = TypeVar('D') # tensor data type (numpy, torch, etc.)

class ContractionInfo:
pass

class TensorNetworkIFC(Generic[D]):
def __init__(self, *args, **kwargs):
...

def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
return ContractionInfo()

# slice not inplace
def slice(self, slice_dict: dict) -> 'TensorNetwork':
...

# contract to produce a new tensor
def contract(self, contraction_info: ContractionInfo) -> D:
...

# copy the network
def copy(self):
...

def add(self, other: "TensorNetworkIFC[D] | D"):
...


@classmethod
def new_random_cpu(cls, dims: Iterable[int])-> 'TensorNetworkIFC[D]':
...

def __eq__(a, b):
...


N = TypeVar('N', bound=np.ndarray)

@dataclass
class Port:
tensor_ref: int
ix: int

class TensorNetwork(TensorNetworkIFC[np.ndarray]):
tensors: Iterable[np.ndarray]
shape: tuple
edges: tuple

def __init__(self, *args, **kwargs):
self._tensors = []
self._edges = tuple()
self.shape = tuple()

# slice not inplace
def slice(self, slice_dict: dict) -> 'TensorNetwork':
Collaborator (Author) commented:
@danlkv thanks for the previous comment, I believe I've addressed it in this update.

tn = self.copy()
sliced_tns = []
Collaborator (Author) commented:
@danlkv I think this implementation is better, wdyt?

Owner commented:
Yes, this is much better. However, we need a way to refer to tensor indices globally. For example, what would happen if we used this code to slice a TN with 3 tensors of shape (2, 2) and the slice dict {1: 0}? The answer is that all tensors would be sliced along the 2nd dimension, but we want behavior that is global to the whole TN.

Owner commented:
In addition, you have to consider how the edges change as well.

Owner commented:
@dallonasnes Here's a sketch of what a solution could look like.
First, let me clarify something about the edges structure. Edges in the edge array represent delta-tensors, which make all indices connected to them the same. For example, the expression T_{abc}\delta_{abd} B_{edg} C_{mp} is equivalent to T_{xxc} B_{exg} C_{mp}. Note that there is a one-to-one correspondence between each delta and an index (in the example, index x).

Let's assume that every index in our TN is represented by a delta tensor and its corresponding tuple in the edges attribute (this may seem excessive, but we can deal with the overhead later). A slice dict can then refer to an element of the edges attribute by the index of the edge alone.

So the slice dict is {index into the edges attribute: index value}.

The algorithm would be, for each (index, value) pair of the slice dict:

  1. Pop the edge from the edges attribute using the index.
  2. Get all tensors that are indexed by this edge (in the example above, T and B). Use Port.tensor_ref for that.
  3. Using the information in the edge's ports, construct the local (per-tensor) slicing dict for each tensor.
  4. Slice and update each tensor.

Note that after slicing, the edge disappears.
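A minimal sketch of the edge-global slicing described in the comment above, written against the Port/TensorNetwork definitions in this file (hypothetical, not part of this diff; it assumes integer values in slice_dict that fix an index, and it omits remapping the port axes of the surviving edges onto the reduced tensors):

def slice_edges(network: 'TensorNetwork', slice_dict: dict) -> 'TensorNetwork':
    # slice_dict maps an index into network._edges to a fixed integer value
    out = network.copy()
    tensors = list(out._tensors)
    local = {}                      # tensor_ref -> {axis: value}
    kept_edges = []
    for edge_ix, edge in enumerate(out._edges):
        if edge_ix not in slice_dict:
            kept_edges.append(edge)
            continue
        value = slice_dict[edge_ix]
        for port in edge:
            if port.tensor_ref == -1:
                continue            # an output index of the network itself
            local.setdefault(port.tensor_ref, {})[port.ix] = value
    # slice each affected tensor once, using its local slicing dict
    for t_ref, axes in local.items():
        t = tensors[t_ref]
        slicer = tuple(axes.get(i, slice(None)) for i in range(t.ndim))
        tensors[t_ref] = t[slicer]
    out._tensors = tensors
    out._edges = tuple(kept_edges)  # the sliced edges disappear
    return out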

for tensor in tn._tensors:
slice_bounds = []
for idx in range(tensor.ndim):
try:
slice_bounds.append(slice_dict[idx])
except KeyError:
slice_bounds.append(slice(None))

sliced_tns.append(tensor[tuple(slice_bounds)])

tn._tensors = sliced_tns
return tn

def copy(self):
new = TensorNetwork()
# copy the tensor list so that adding to the copy does not mutate the original
new._tensors = list(self._tensors)
new._edges = self._edges
new.shape = self.shape
return new

def add(self, other: "TensorNetwork | np.ndarray"):
if not isinstance(other, TensorNetwork):
self._tensors.append(other)
self.shape = self.shape + other.shape
else:
m = len(self._tensors)
n = len(self.shape)
# -- other's edge ports will refer to shifted tensor locations
enew = []
for e in other._edges:
e_ = []
for p in e:
if p.tensor_ref == -1:
e_.append(Port(tensor_ref=-1, ix=p.ix+n))
else:
e_.append(Port(tensor_ref=p.tensor_ref+m, ix=p.ix))
enew.append(tuple(e_))

self._edges += tuple(enew)
self._tensors += other._tensors
self.shape += other.shape

# contract to produce a new tensor
def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
raise NotImplementedError()
Collaborator (Author) commented:
contraction_info here is the indices I'd want in the output, einsum-style.

Collaborator (Author) commented:
Test case: the contraction of A should equal the sum of the contractions of its slices A[i=0] and A[i=1]. This holds for any index, so one can be selected at random.
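One way contract could eventually be realized is by translating the edges structure into an einsum specification: each edge becomes one subscript symbol, ports with tensor_ref == -1 determine the output subscripts, and any axis not covered by an edge gets its own summed-over symbol. A rough sketch under those assumptions (hypothetical, not part of this diff):

import string
import numpy as np

def contract_einsum(network: 'TensorNetwork') -> np.ndarray:
    symbols = iter(string.ascii_letters)            # one einsum symbol per index
    edge_sym = [next(symbols) for _ in network._edges]
    tensor_subs = [[None] * t.ndim for t in network._tensors]
    out_subs = [None] * len(network.shape)
    for e_ix, edge in enumerate(network._edges):
        for port in edge:
            if port.tensor_ref == -1:
                out_subs[port.ix] = edge_sym[e_ix]
            else:
                tensor_subs[port.tensor_ref][port.ix] = edge_sym[e_ix]
    # axes not touched by any edge get their own symbol and are summed over
    for subs in tensor_subs:
        for i, s in enumerate(subs):
            if s is None:
                subs[i] = next(symbols)
    spec = ",".join("".join(subs) for subs in tensor_subs)
    spec += "->" + "".join(s for s in out_subs if s is not None)
    return np.einsum(spec, *network._tensors)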


def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
raise NotImplementedError()


@classmethod
def new_random_cpu(cls, count, size, dim: int):
out = cls()
for i in range(count):
t: np.ndarray = np.random.random((dim, )*size)
out.add(t)
# arbitrary number of output indices
out_dims = np.random.randint(low=0, high=len(out.shape))
tensor_dims = len(out.shape)
out.shape = (dim, )*out_dims
# -- random connectivity
# A hypergraph can be generated as a partition into
# E parts where E is number of edges. The isolated vertices are equivalent
# to vertices with 1 edge that contains only them.
# arbitrary max number of edges, must be less than total indices
edges_cnt = np.random.randint(low=1, high=tensor_dims+out_dims)
# a partition can be implemented using a random function
partition_fn = lambda : np.random.randint(low=0, high=edges_cnt)
partition_dict = {}
for t_ref, t in enumerate(out._tensors):
for i in range(t.ndim):
eix = partition_fn()
new_port = Port(tensor_ref=t_ref, ix=i)
partition_dict[eix] = partition_dict.get(eix, [])
partition_dict[eix].append(new_port)

# add "self" tensor indices to partition
for i in range(len(out.shape)):
eix = partition_fn()
new_port = Port(tensor_ref=-1, ix=i)
partition_dict[eix] = partition_dict.get(eix, [])
partition_dict[eix].append(new_port)

edges = []
for i in range(edges_cnt):
p = partition_dict.get(i)
if p is not None:
edges.append(tuple(p))
out._edges = tuple(edges)
return out

def __eq__(self, other):
if self.shape != other.shape:
return False
if self._edges != other._edges:
return False
return all((a==b).all() for a, b in zip(self._tensors, other._tensors))

def __repr__(self):
return f"TensorNetwork({self.shape})<{self._tensors}, {self._edges}>"



if __name__ == "__main__":
tn = TensorNetwork.new_random_cpu(2, 3, 4)
slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
sliced_tn = tn.slice(slice_dict)
import pdb; pdb.set_trace()