Tensor generator helpers (#93)
* TENONES: Add initial tenones support

* TENZEROS: Add initial tenzeros support

* TENDIAG: Add initial tendiag support

* SPTENDIAG: Add initial sptendiag support
ntjohnson1 authored Apr 20, 2023
1 parent ab3b410 commit 6d26158
Showing 6 changed files with 198 additions and 4 deletions.
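
The commit adds four tensor generator helpers and exports them from the top-level pyttb namespace (see the pyttb/__init__.py hunk below). A minimal usage sketch, assuming pyttb is installed with this change applied:

import numpy as np
import pyttb as ttb

ones_tensor = ttb.tenones((2, 3))    # dense tensor of all ones
zeros_tensor = ttb.tenzeros((2, 3))  # dense tensor of all zeros

values = np.ones(2)
dense_diag = ttb.tendiag(values)     # dense tensor with values on the superdiagonal
sparse_diag = ttb.sptendiag(values)  # sparse counterpart with the same superdiagonal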
4 changes: 2 additions & 2 deletions pyttb/__init__.py
@@ -15,13 +15,13 @@
from pyttb.ktensor import ktensor
from pyttb.pyttb_utils import *
from pyttb.sptenmat import sptenmat
- from pyttb.sptensor import sptensor
+ from pyttb.sptensor import sptendiag, sptensor
from pyttb.sptensor3 import sptensor3
from pyttb.sumtensor import sumtensor
from pyttb.symktensor import symktensor
from pyttb.symtensor import symtensor
from pyttb.tenmat import tenmat
- from pyttb.tensor import tensor
+ from pyttb.tensor import tendiag, tenones, tensor, tenzeros
from pyttb.ttensor import ttensor
from pyttb.tucker_als import tucker_als

2 changes: 1 addition & 1 deletion pyttb/pyttb_utils.py
@@ -700,7 +700,7 @@ def tt_subscheck(subs, nargout=True):
        len(subs.shape) == 2
        and (np.isfinite(subs)).all()
        and issubclass(subs.dtype.type, np.integer)
-       and (subs > 0).all()
+       and (subs >= 0).all()
    ):
        ok = True
    else:
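
The tt_subscheck change above relaxes the lower bound from strictly positive to non-negative, so the zero-based subscripts produced by the new diagonal generators pass validation. A standalone sketch of just that condition (not the library's full tt_subscheck, which takes a nargout flag and performs additional handling):

import numpy as np


def subs_nonnegative_ok(subs: np.ndarray) -> bool:
    # Mirrors the updated test: 2-D, finite, integer, and non-negative
    return (
        len(subs.shape) == 2
        and np.isfinite(subs).all()
        and issubclass(subs.dtype.type, np.integer)
        and (subs >= 0).all()  # previously (subs > 0).all(), which rejected index 0
    )


# Zero-based subscripts for a 3-element superdiagonal in two dimensions
subs = np.tile(np.arange(3)[:, None], (2,))
assert subs_nonnegative_ok(subs)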
38 changes: 37 additions & 1 deletion pyttb/sptensor.py
@@ -297,7 +297,7 @@ def from_aggregator(

        # Check for subscripts out of range
        for j, dim in enumerate(shape):
-           if subs.size > 0 and np.max(subs[:, j]) > dim:
+           if subs.size > 0 and np.max(subs[:, j]) >= dim:
                assert False, "Subscript exceeds sptensor shape"

        if subs.size == 0:
@@ -2582,3 +2582,39 @@ def ttm(
        # TODO evaluate performance loss by casting into sptensor then tensor.
        # I assume minimal since we are already using sparse matrix representation
        return ttb.tensor.from_tensor_type(Ynt)


def sptendiag(
    elements: np.ndarray, shape: Optional[Tuple[int, ...]] = None
) -> sptensor:
    """
    Creates a sparse tensor with elements along the superdiagonal.
    If the provided shape is too small, the tensor is enlarged to accommodate it.

    Parameters
    ----------
    elements: Elements to set along the diagonal
    shape: Shape of resulting tensor

    Returns
    -------
    Constructed tensor

    Example
    -------
    >>> shape = (2,)
    >>> values = np.ones(shape)
    >>> X = ttb.sptendiag(values)
    >>> Y = ttb.sptendiag(values, (2, 2))
    >>> X.isequal(Y)
    True
    """
    # Flatten provided elements
    elements = np.ravel(elements)
    N = len(elements)
    if shape is None:
        constructed_shape = (N,) * N
    else:
        constructed_shape = tuple(max(N, dim) for dim in shape)
    # Each row of subs is one superdiagonal subscript (i, i, ..., i)
    subs = np.tile(
        np.arange(0, N).transpose(), (len(constructed_shape), 1)
    ).transpose()
    return sptensor.from_aggregator(subs, elements.reshape((N, 1)), constructed_shape)
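
A short usage note on the enlargement behavior documented above (a sketch, not part of the committed tests): a requested shape smaller than the number of diagonal elements is grown so that every dimension is at least N.

import numpy as np
import pyttb as ttb

values = np.arange(1, 4)           # three diagonal entries: 1, 2, 3
X = ttb.sptendiag(values, (2, 2))  # requested 2x2, but N = 3
print(X.shape)                     # (3, 3): each dimension enlarged to N
print(X[2, 2])                     # 3, the last superdiagonal entry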
74 changes: 74 additions & 0 deletions pyttb/tensor.py
@@ -1835,6 +1835,80 @@ def __repr__(self):
__str__ = __repr__


def tenones(shape: Tuple[int, ...]) -> tensor:
    """
    Creates a tensor of all ones.

    Parameters
    ----------
    shape: Shape of resulting tensor

    Returns
    -------
    Constructed tensor

    Example
    -------
    >>> X = ttb.tenones((2, 2))
    """
    return tensor.from_function(np.ones, shape)


def tenzeros(shape: Tuple[int, ...]) -> tensor:
    """
    Creates a tensor of all zeros.

    Parameters
    ----------
    shape: Shape of resulting tensor

    Returns
    -------
    Constructed tensor

    Example
    -------
    >>> X = ttb.tenzeros((2, 2))
    """
    return tensor.from_function(np.zeros, shape)


def tendiag(elements: np.ndarray, shape: Optional[Tuple[int, ...]] = None) -> tensor:
    """
    Creates a tensor with elements along the superdiagonal.
    If the provided shape is too small, the tensor is enlarged to accommodate it.

    Parameters
    ----------
    elements: Elements to set along the diagonal
    shape: Shape of resulting tensor

    Returns
    -------
    Constructed tensor

    Example
    -------
    >>> shape = (2,)
    >>> values = np.ones(shape)
    >>> X = ttb.tendiag(values)
    >>> Y = ttb.tendiag(values, (2, 2))
    >>> X.isequal(Y)
    True
    """
    # Flatten provided elements
    elements = np.ravel(elements)
    N = len(elements)
    if shape is None:
        constructed_shape = (N,) * N
    else:
        constructed_shape = tuple(max(N, dim) for dim in shape)
    X = tenzeros(constructed_shape)
    # Each row of subs is one superdiagonal subscript (i, i, ..., i)
    subs = np.tile(
        np.arange(0, N).transpose(), (len(constructed_shape), 1)
    ).transpose()
    X[subs] = elements
    return X


if __name__ == "__main__":
import doctest # pragma: no cover

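
As a quick cross-check on tendiag (a sketch, not from the commit; it assumes the tensor exposes its values as a numpy array via the .data attribute and its shape as a tuple): with an explicit 2-D shape the result reproduces np.diag, and an undersized shape is enlarged just as in sptendiag.

import numpy as np
import pyttb as ttb

values = np.array([1.0, 2.0, 3.0])
X = ttb.tendiag(values, (3, 3))   # explicit 2-D shape
assert np.array_equal(X.data, np.diag(values))

Y = ttb.tendiag(values, (2, 2))   # undersized shape is grown to (3, 3)
assert Y.shape == (3, 3)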
36 changes: 36 additions & 0 deletions tests/test_sptensor.py
@@ -1758,3 +1758,39 @@ def test_sptensor_from_sparse_matrix():
Xnt = tt_to_sparse_matrix(sptensorCopy, mode, False)
Ynt = tt_from_sparse_matrix(Xnt, sptensorCopy.shape, mode, 1)
assert sptensorCopy.isequal(Ynt)


def test_sptendiag():
N = 4
elements = np.arange(0, N)
exact_shape = [N] * N

# Inferred shape
X = ttb.sptendiag(elements)
for i in range(N):
diag_index = (i,) * N
assert (
X[diag_index] == i
), f"Idx: {diag_index} expected: {i} got: {X[diag_index]}"

# Exact shape
X = ttb.sptendiag(elements, tuple(exact_shape))
for i in range(N):
diag_index = (i,) * N
assert X[diag_index] == i

# Larger shape
larger_shape = exact_shape.copy()
larger_shape[0] += 1
X = ttb.sptendiag(elements, tuple(larger_shape))
for i in range(N):
diag_index = (i,) * N
assert X[diag_index] == i

# Smaller Shape
smaller_shape = exact_shape.copy()
smaller_shape[0] -= 1
X = ttb.sptendiag(elements, tuple(smaller_shape))
for i in range(N):
diag_index = (i,) * N
assert X[diag_index] == i
48 changes: 48 additions & 0 deletions tests/test_tensor.py
@@ -1632,3 +1632,51 @@ def test_tensor_nvecs(sample_tensor_2way):
"Greater than or equal to tensor.shape[n] - 1 eigenvectors requires cast to dense to solve"
in str(record[0].message)
)


def test_tenones():
    arbitrary_shape = (3, 3, 3)
    ones_tensor = ttb.tenones(arbitrary_shape)
    data_tensor = ttb.tensor.from_data(np.ones(arbitrary_shape))
    assert ones_tensor.isequal(data_tensor), "Tenones should match all ones tensor"


def test_tenzeros():
    arbitrary_shape = (3, 3, 3)
    zeros_tensor = ttb.tenzeros(arbitrary_shape)
    data_tensor = ttb.tensor.from_data(np.zeros(arbitrary_shape))
    assert zeros_tensor.isequal(data_tensor), "Tenzeros should match all zeros tensor"


def test_tendiag():
N = 4
elements = np.arange(0, N)
exact_shape = [N] * N

# Inferred shape
X = ttb.tendiag(elements)
for i in range(N):
diag_index = (i,) * N
assert X[diag_index] == i

# Exact shape
X = ttb.tendiag(elements, tuple(exact_shape))
for i in range(N):
diag_index = (i,) * N
assert X[diag_index] == i

# Larger shape
larger_shape = exact_shape.copy()
larger_shape[0] += 1
X = ttb.tendiag(elements, tuple(larger_shape))
for i in range(N):
diag_index = (i,) * N
assert X[diag_index] == i

# Smaller Shape
smaller_shape = exact_shape.copy()
smaller_shape[0] -= 1
X = ttb.tendiag(elements, tuple(smaller_shape))
for i in range(N):
diag_index = (i,) * N
assert X[diag_index] == i
