Sketch out conj method #46
base: dev
New file: Dockerfile.dev

FROM nvidia/cuda:11.0.3-runtime-ubuntu20.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update -y && \
    apt install -y python3 python3-pip git htop vim

# Make sure you first recursively clone the git repo before building
WORKDIR /app
RUN pip install quimb pyrofiler cartesian-explorer opt_einsum
RUN pip install --no-binary pynauty pynauty
# Run the commands below after the container opens, because the volume hasn't mounted yet
# RUN cd qtree && pip install .
# RUN pip install .
RUN pip install pdbpp
RUN pip install tensornetwork

ENTRYPOINT ["bash"]
New file: docker build-and-run script

#!/bin/bash

docker build -f Dockerfile.dev -t dev .
docker run -v "$(pwd)":/app -it dev
New file: tests for TensorNetwork

import numpy as np
from tn import TensorNetwork


def test_add_numpy_array():
    a = TensorNetwork()
    t = np.random.randn(2, 2)
    a.add(t)
    b = TensorNetwork()
    b.add(a)
    assert b == a


def test_composition():
    """
    Tensor network addition is associative.
    """
    tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
    stack = TensorNetwork()
    # (((0A)B)C)D
    for tn in tns:
        stack.add(tn)
    # A(B(CD)): fold from the right; len(tns) - 1 folds for len(tns) networks
    for i in range(len(tns) - 1):
        l = tns[len(tns) - 2 - i]
        r = tns[len(tns) - 1 - i]
        l.add(r)

    assert stack == tns[0]


def test_edges_consistent_ports():
    tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
    tn = TensorNetwork()
    # (((0A)B)C)D
    for t in tns:
        tn.add(t)

    port_data = {}
    for e in tn._edges:
        for p in e:
            port_data.setdefault(p.tensor_ref, []).append(p.ix)
    for i, t in enumerate(tn._tensors):
        assert len(t.shape) == len(port_data[i])


if __name__ == "__main__":
    test_edges_consistent_ports()
    test_composition()
New file: tn.py

import numpy as np
from dataclasses import dataclass
from typing import TypeVar, Generic, Iterable


class Array(np.ndarray):
    shape: tuple


D = TypeVar('D')  # tensor data type (numpy, torch, etc.)


class ContractionInfo:
    pass


class TensorNetworkIFC(Generic[D]):
    def __init__(self, *args, **kwargs):
        ...

    def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
        return ContractionInfo()

    # slice, not in place
    def slice(self, slice_dict: dict) -> 'TensorNetworkIFC[D]':
        ...

    # contract to produce a new tensor
    def contract(self, contraction_info: ContractionInfo) -> D:
        ...

    def copy(self):
        ...

    def add(self, other: "TensorNetworkIFC[D] | D"):
        ...

    @classmethod
    def new_random_cpu(cls, dims: Iterable[int]) -> 'TensorNetworkIFC[D]':
        ...

    def __eq__(a, b):
        ...


N = TypeVar('N', bound=np.ndarray)


@dataclass
class Port:
    tensor_ref: int
    ix: int


class TensorNetwork(TensorNetworkIFC[np.ndarray]):
    tensors: Iterable[np.ndarray]
    shape: tuple
    edges: tuple

    def __init__(self, *args, **kwargs):
        self._tensors = []
        self._edges = tuple()
        self.shape = tuple()
    # slice, not in place
    def slice(self, slice_dict: dict) -> 'TensorNetwork':

Review comment:
- @danlkv thanks for the previous comment, I believe I've addressed that in this update.

        tn = self.copy()
        sliced_tns = []

Review thread (a sketch of the proposal follows the method body below):
- @danlkv I think this implementation is better, wdyt?
- Yes, this is much better. However, we need a way to refer to tensor indices globally. For example, what would happen if, using this code, we sliced a TN with 3 tensors of shape (2, 2) and the slice dict {1: 0}? The answer is that all tensors would be sliced along the 2nd dimension, but we want behavior that is global to the whole TN.
- In addition, you have to consider how the edges change as well.
- @dallonasnes Here's a sketch of what could be a solution. Let's assume that all indices in our TN are represented by a delta tensor and its corresponding tuple in the edges attribute (this may seem excessive, but we can overcome the overhead later). Now, a slice dict may refer to an element of the edges attribute by using just the index of the edge, so the slice dict is {index in edges dict: index value}. The algorithm would be: […] Note that after slicing, the edge disappears.
        for tensor in tn._tensors:
            slice_bounds = []
            for idx in range(tensor.ndim):
                # dimensions not named in slice_dict keep the full slice
                slice_bounds.append(slice_dict.get(idx, slice(None)))

            sliced_tns.append(tensor[tuple(slice_bounds)])

        tn._tensors = sliced_tns
        return tn
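To make the reviewer's proposal above concrete, here is a minimal, hypothetical sketch of slicing by global edge index. `Port` mirrors the dataclass in this file, but `slice_edge` and its signature are illustrative assumptions, not part of this PR. It keeps each sliced dimension at size 1 so the remaining ports' `ix` fields stay valid without renumbering:

import numpy as np
from dataclasses import dataclass

@dataclass(frozen=True)
class Port:
    tensor_ref: int  # index into the tensor list; -1 marks an open output index
    ix: int          # which dimension of that tensor the port refers to

def slice_edge(tensors, edges, edge_ix, value):
    # Hypothetical sketch: fix one *global* edge to a single value by slicing
    # every tensor port the edge touches, then drop the edge entirely.
    tensors = list(tensors)
    for p in edges[edge_ix]:
        if p.tensor_ref == -1:
            continue  # open output indices would need extra bookkeeping
        t = tensors[p.tensor_ref]
        index = [slice(None)] * t.ndim
        # keep the dimension at size 1 so other ports' `ix` stay valid
        index[p.ix] = slice(value, value + 1)
        tensors[p.tensor_ref] = t[tuple(index)]
    # after slicing, the edge disappears
    remaining = tuple(e for i, e in enumerate(edges) if i != edge_ix)
    return tensors, remaining

# Reviewer's example: three (2, 2) tensors sharing one edge; {edge 0: value 0}
# slices each tensor along its own port's dimension, not blindly along dim 1.
ts = [np.random.rand(2, 2) for _ in range(3)]
es = ((Port(0, 0), Port(1, 1), Port(2, 0)),)
sliced, es_left = slice_edge(ts, es, edge_ix=0, value=0)
assert [t.shape for t in sliced] == [(1, 2), (2, 1), (1, 2)]
assert es_left == ()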
    def copy(self):
        new = TensorNetwork()
        # copy the tensor list so mutating the copy does not affect the original
        new._tensors = list(self._tensors)
        new._edges = self._edges
        new.shape = self.shape
        return new

    def add(self, other: "TensorNetwork | np.ndarray"):
        if not isinstance(other, TensorNetwork):
            self._tensors.append(other)
            self.shape = self.shape + other.shape
        else:
            m = len(self._tensors)
            n = len(self.shape)
            # -- other's edge ports must refer to the shifted tensor locations
            enew = []
            for e in other._edges:
                e_ = []
                for p in e:
                    if p.tensor_ref == -1:
                        e_.append(Port(tensor_ref=-1, ix=p.ix + n))
                    else:
                        e_.append(Port(tensor_ref=p.tensor_ref + m, ix=p.ix))
                enew.append(tuple(e_))

            self._edges += tuple(enew)
            self._tensors += other._tensors
            self.shape += other.shape

    # contract to produce a new tensor
    def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
        raise NotImplementedError()

Review comments:
- contraction_info here is the indices I'd want in the output.
- A test case: contracted A = contraction of sliceA[0] + contraction of sliceA[1]. This is true for any index, so it can be selected at random.
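The consistency check described in the comments above can be demonstrated with plain numpy before `contract` exists: contracting the full network must equal the sum, over the values of a sliced index, of the sliced contractions. A minimal sketch under that assumption; the two-tensor network and the einsum spec here are illustrative, not this PR's API:

import numpy as np

# Contract a tiny two-tensor network over shared index k, then verify that
# slicing k and summing the sliced contractions reproduces the full result.
a, b = np.random.rand(4, 3), np.random.rand(3, 5)
full = np.einsum('ik,kj->ij', a, b)
by_slices = sum(
    np.einsum('ik,kj->ij', a[:, k:k + 1], b[k:k + 1, :])
    for k in range(a.shape[1])
)
assert np.allclose(full, by_slices)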
    def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
        raise NotImplementedError()

    @classmethod
    def new_random_cpu(cls, count, size, dim: int):
        out = cls()
        for i in range(count):
            t: np.ndarray = np.random.random((dim,) * size)
            out.add(t)
        # arbitrary number of output indices
        out_dims = np.random.randint(low=0, high=len(out.shape))
        tensor_dims = len(out.shape)
        out.shape = (dim,) * out_dims
        # -- random connectivity
        # A hypergraph can be generated as a partition of all ports into
        # E parts, where E is the number of edges. Isolated vertices are
        # equivalent to vertices whose single edge contains only them.
        # Arbitrary max number of edges; must be less than the total number of indices.
        edges_cnt = np.random.randint(low=1, high=tensor_dims + out_dims)
        # a partition can be implemented using a random function
        partition_fn = lambda: np.random.randint(low=0, high=edges_cnt)
        partition_dict = {}
        for t_ref, t in enumerate(out._tensors):
            for i in range(t.ndim):
                eix = partition_fn()
                new_port = Port(tensor_ref=t_ref, ix=i)
                partition_dict.setdefault(eix, []).append(new_port)

        # add "self" (output) indices to the partition
        for i in range(len(out.shape)):
            eix = partition_fn()
            new_port = Port(tensor_ref=-1, ix=i)
            partition_dict.setdefault(eix, []).append(new_port)

        edges = []
        for i in range(edges_cnt):
            p = partition_dict.get(i)
            if p is not None:
                edges.append(tuple(p))
        out._edges = tuple(edges)
        return out

    def __eq__(self, other):
        if self.shape != other.shape:
            return False
        if self._edges != other._edges:
            return False
        return all((a == b).all() for a, b in zip(self._tensors, other._tensors))

    def __repr__(self):
        return f"TensorNetwork({self.shape})<{self._tensors}, {self._edges}>"


if __name__ == "__main__":
    tn = TensorNetwork.new_random_cpu(2, 3, 4)
    slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
    sliced_tn = tn.slice(slice_dict)
    import pdb; pdb.set_trace()
Review comment:
- @danlkv this is my TODO for next week.