Skip to content

Commit

Permalink
tfield sparse_insert, caching inverse mapping
Browse files Browse the repository at this point in the history
  • Loading branch information
chrischoy committed Jan 10, 2021
1 parent 606a66b commit a0f7e62
Show file tree
Hide file tree
Showing 14 changed files with 506 additions and 160 deletions.
14 changes: 11 additions & 3 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,13 +1,21 @@
# Change Log

## [0.5.0]

### Added
## [0.5.1]

- v0.5 documentation updates
- Nonlinear functionals and modules
- Warning when using cuda without ME cuda support
- diagnostics test
- TensorField slice
- Cache the unique map and inverse map pair in the coordinate manager
- CoordinateManager
- `field_to_sparse_insert_and_map`
- `exists_field_to_sparse`
- `get_field_to_sparse_map`
- CoordinateFieldMap
- `quantize_coordinates`

## [0.5.0] - 2020-12-24

## [0.5.0a] - 2020-08-05

Expand Down
50 changes: 49 additions & 1 deletion MinkowskiEngine/MinkowskiCoordinateManager.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,9 @@ def insert_and_map(
coordinate_map_type == `CoordinateMapType.GPU`) that defines the
coordinates.
:attr:`tensor_stride` (`list`): a list of `D` elements that defines the
tensor stride for the new order-`D + 1` sparse tensor.
Example::
>>> manager = CoordinateManager(D=1)
Expand All @@ -178,14 +181,18 @@ def insert_and_map(
def insert_field(
self,
coordinates: torch.Tensor,
tensor_stride: Union[int, Sequence, np.ndarray],
tensor_stride: Sequence,
string_id: str = "",
) -> Tuple[CoordinateMapKey, Tuple[torch.IntTensor, torch.IntTensor]]:
r"""create a new coordinate map and returns
:attr:`coordinates`: `torch.FloatTensor` (`CUDA` if coordinate_map_type
== `CoordinateMapType.GPU`) that defines the coordinates.
:attr:`tensor_stride` (`list`): a list of `D` elements that defines the
tensor stride for the new order-`D + 1` sparse tensor.
Example::
>>> manager = CoordinateManager(D=1)
Expand All @@ -198,6 +205,44 @@ def insert_field(
"""
return self._manager.insert_field(coordinates, tensor_stride, string_id)

def field_to_sparse_insert_and_map(
    self,
    field_map_key: CoordinateMapKey,
    sparse_tensor_stride: Union[int, Sequence, np.ndarray],
    sparse_tensor_string_id: str = "",
) -> Tuple[CoordinateMapKey, Tuple[torch.IntTensor, torch.IntTensor]]:

    r"""Create a sparse tensor coordinate map with the tensor stride.

    :attr:`field_map_key` (`CoordinateMapKey`): field map that a new sparse
    tensor will be created from.

    :attr:`sparse_tensor_stride` (`list`): a list of `D` elements that defines
    the tensor stride for the new order-`D + 1` sparse tensor.

    :attr:`sparse_tensor_string_id` (`str`): string id of the new sparse
    tensor coordinate map key.

    Returns the new sparse tensor's coordinate map key and the
    (unique_map, inverse_map) pair between the field and the sparse tensor.

    Example::

        >>> manager = CoordinateManager(D=1)
        >>> coordinates = torch.FloatTensor([[0, 0.1], [0, 2.3], [0, 1.2], [0, 2.4]])
        >>> field_key, _ = manager.insert_field(coordinates, [1])
        >>> sparse_key, (unique_map, inverse_map) = manager.field_to_sparse_insert_and_map(field_key, [1])
    """
    # Delegate to the C++ backend manager, which also caches the
    # (unique_map, inverse_map) pair for later retrieval.
    return self._manager.field_to_sparse_insert_and_map(
        field_map_key, sparse_tensor_stride, sparse_tensor_string_id
    )

def exists_field_to_sparse(
    self, field_map_key: CoordinateMapKey, sparse_map_key: CoordinateMapKey
):
    r"""Query the backend manager for whether a field-to-sparse mapping has
    been created for the given pair of coordinate map keys."""
    manager = self._manager
    return manager.exists_field_to_sparse(field_map_key, sparse_map_key)

def get_field_to_sparse_map(
    self, field_map_key: CoordinateMapKey, sparse_map_key: CoordinateMapKey
):
    r"""Fetch the cached field-to-sparse map for the given pair of
    coordinate map keys from the backend manager."""
    manager = self._manager
    return manager.get_field_to_sparse_map(field_map_key, sparse_map_key)

def stride(
self,
coordinate_map_key: CoordinateMapKey,
Expand Down Expand Up @@ -284,6 +329,9 @@ def get_unique_coordinate_map_key(
"""
Returns a unique coordinate_map_key for a given tensor stride.
:attr:`tensor_stride` (`list`): a list of `D` elements that defines the
tensor stride for the new order-`D + 1` sparse tensor.
"""
return self._manager.get_random_string_id(tensor_stride, "")

Expand Down
8 changes: 5 additions & 3 deletions MinkowskiEngine/MinkowskiSparseTensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -564,7 +564,7 @@ def slice(self, X):

if isinstance(X, TensorField):
return TensorField(
self.F[X.inverse_mapping],
self.F[X.inverse_mapping(self.coordinate_map_key)],
coordinate_field_map_key=X.coordinate_field_map_key,
coordinate_manager=X.coordinate_manager,
quantization_mode=X.quantization_mode,
Expand Down Expand Up @@ -616,7 +616,9 @@ def cat_slice(self, X):

from MinkowskiTensorField import TensorField

features = torch.cat((self.F[X.inverse_mapping], X.F), dim=1)
features = torch.cat(
(self.F[X.inverse_mapping(self.coordinate_map_key)], X.F), dim=1
)
if isinstance(X, TensorField):
return TensorField(
features,
Expand All @@ -630,7 +632,7 @@ def cat_slice(self, X):
), "Slice can only be applied on the same coordinates (coordinate_map_key)"
return TensorField(
features,
coordinates=self.C[X.inverse_mapping],
coordinates=self.C[X.inverse_mapping(self.coordinate_map_key)],
coordinate_manager=self.coordinate_manager,
quantization_mode=self.quantization_mode,
)
Expand Down
88 changes: 73 additions & 15 deletions MinkowskiEngine/MinkowskiTensorField.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,11 @@
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import torch
import numpy as np
from collections import Sequence
from typing import Union, List, Tuple

import torch
from MinkowskiCommon import convert_to_int_list, StrideType
from MinkowskiEngineBackend._C import (
GPUMemoryAllocatorType,
Expand All @@ -41,6 +44,7 @@
set_global_coordinate_manager,
)
from MinkowskiSparseTensor import SparseTensor
from sparse_matrix_functions import MinkowskiSPMMFunction


class TensorField(Tensor):
Expand Down Expand Up @@ -212,6 +216,7 @@ def __init__(
self._C = coordinates
self.coordinate_field_map_key = coordinate_field_map_key
self._batch_rows = None
self._inverse_mapping = {}

@property
def C(self):
Expand Down Expand Up @@ -243,29 +248,82 @@ def _batchwise_row_indices(self):
def _get_coordinate_field(self):
return self._manager.get_coordinate_field(self.coordinate_field_map_key)

def sparse(self, quantization_mode=None):
def sparse(
self, tensor_stride: Union[int, Sequence, np.array] = 1, quantization_mode=None
):
r"""Converts the current sparse tensor field to a sparse tensor."""
if quantization_mode is None:
quantization_mode = self.quantization_mode

tensor_stride = convert_to_int_list(tensor_stride, self.D)

sparse_tensor_key, (
unique_index,
inverse_mapping,
) = self._manager.field_to_sparse_insert_and_map(
self.coordinate_field_map_key,
tensor_stride,
)

self._inverse_mapping[sparse_tensor_key] = inverse_mapping

if self.quantization_mode in [
SparseTensorQuantizationMode.UNWEIGHTED_SUM,
SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
]:
spmm = MinkowskiSPMMFunction()
N = len(self._F)
cols = torch.arange(
N,
dtype=inverse_mapping.dtype,
device=inverse_mapping.device,
)
vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
size = torch.Size([len(unique_index), len(inverse_mapping)])
features = spmm.apply(inverse_mapping, cols, vals, size, self._F)
if (
self.quantization_mode
== SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE
):
nums = spmm.apply(
inverse_mapping,
cols,
vals,
size,
vals.reshape(N, 1),
)
features /= nums
elif self.quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
features = self._F[unique_index]
else:
# No quantization
raise ValueError("Invalid quantization mode")

sparse_tensor = SparseTensor(
self._F,
coordinates=self.coordinates,
quantization_mode=quantization_mode,
coordinate_manager=self.coordinate_manager,
features,
coordinate_map_key=sparse_tensor_key,
coordinate_manager=self._manager,
)

# Save the inverse mapping
self._inverse_mapping = sparse_tensor.inverse_mapping
return sparse_tensor

@property
def inverse_mapping(self):
if not hasattr(self, "_inverse_mapping"):
raise ValueError(
"Did you run SparseTensor.slice? The slice must take a tensor field that returned TensorField.space."
)
return self._inverse_mapping
def inverse_mapping(self, sparse_tensor_map_key: CoordinateMapKey):
    r"""Return the field-to-sparse inverse map for the given sparse tensor key.

    Maps are memoized per sparse tensor key in ``self._inverse_mapping``; on
    a cache miss the pair is fetched from the coordinate manager, which
    requires that a field-to-sparse insertion (e.g. ``TensorField.sparse``)
    was run for this key first.

    :attr:`sparse_tensor_map_key` (`CoordinateMapKey`): key of the sparse
    tensor whose inverse map is requested.

    Raises ``ValueError`` when no field-to-sparse mapping exists for the key.
    """
    if sparse_tensor_map_key not in self._inverse_mapping:
        # Guard clause: fail fast when the backend has no mapping cached.
        if not self._manager.exists_field_to_sparse(
            self.coordinate_field_map_key, sparse_tensor_map_key
        ):
            # "does not exist" — fixed grammar in the original message
            raise ValueError(
                f"The field to sparse tensor mapping does not exist for the key: "
                f"{sparse_tensor_map_key}. Please run "
                f"TensorField.sparse({sparse_tensor_map_key.get_tensor_stride()})"
            )
        # Extract and memoize the inverse map; the unique map is discarded.
        (
            _,
            self._inverse_mapping[sparse_tensor_map_key],
        ) = self._manager.get_field_to_sparse_map(
            self.coordinate_field_map_key, sparse_tensor_map_key
        )
    return self._inverse_mapping[sparse_tensor_map_key]

def __repr__(self):
return (
Expand Down
4 changes: 2 additions & 2 deletions examples/resnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ def _make_layer(self, block, planes, blocks, stride=1, dilation=1, bn_momentum=0

return nn.Sequential(*layers)

def forward(self, x):
def forward(self, x: ME.SparseTensor):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
Expand Down Expand Up @@ -185,7 +185,7 @@ def network_initialization(self, in_channels, out_channels, D):

ResNetBase.network_initialization(self, field_ch2, out_channels, D)

def forward(self, x):
def forward(self, x: ME.TensorField):
otensor = self.field_network(x)
otensor2 = self.field_network2(otensor.cat_slice(x))
return ResNetBase.forward(self, otensor2)
Expand Down
7 changes: 7 additions & 0 deletions pybind/extern.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -755,6 +755,13 @@ void instantiate_manager(py::module &m, const std::string &dtypestr) {
&manager_type::to_string, py::const_))
.def("insert_and_map", &manager_type::insert_and_map)
.def("insert_field", &manager_type::insert_field)
.def("field_to_sparse_insert_and_map",
&manager_type::field_to_sparse_insert_and_map)
.def("exists_field_to_sparse",
py::overload_cast<minkowski::CoordinateMapKey const *,
minkowski::CoordinateMapKey const *>(
&manager_type::exists_field_to_sparse, py::const_))
.def("get_field_to_sparse_map", &manager_type::get_field_to_sparse_map)
.def("stride", &manager_type::py_stride)
.def("origin", &manager_type::py_origin)
.def("get_coordinates", &manager_type::get_coordinates)
Expand Down
66 changes: 59 additions & 7 deletions src/coordinate_map_cpu.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -188,14 +188,16 @@ std::vector<at::Tensor> interpolation_map_weight_kernel(

} // namespace detail

template <typename coordinate_type,
template <typename coordinate_field_type, typename coordinate_int_type,
template <typename T> class TemplatedAllocator = std::allocator>
class CoordinateFieldMapCPU
: public CoordinateMap<coordinate_type, TemplatedAllocator> {
: public CoordinateMap<coordinate_field_type, TemplatedAllocator> {
// Coordinate wrapper
public:
using base_type = CoordinateMap<coordinate_type, TemplatedAllocator>;
using self_type = CoordinateFieldMapCPU<coordinate_type, TemplatedAllocator>;
using base_type = CoordinateMap<coordinate_field_type, TemplatedAllocator>;
using self_type =
CoordinateFieldMapCPU<coordinate_field_type, coordinate_int_type,
TemplatedAllocator>;
using size_type = typename base_type::size_type;
using index_type = typename base_type::index_type;
using stride_type = typename base_type::stride_type;
Expand All @@ -218,20 +220,70 @@ class CoordinateFieldMapCPU
*
* @return none
*/
void insert(coordinate_type const *coordinate_begin,
coordinate_type const *coordinate_end) {
// Bulk-insert field coordinates from the half-open range
// [coordinate_begin, coordinate_end).  The range length must be a
// multiple of m_coordinate_size (one row per point).
void insert(coordinate_field_type const *coordinate_begin,
            coordinate_field_type const *coordinate_end) {
  size_type N = (coordinate_end - coordinate_begin) / m_coordinate_size;
  // Reserve storage for N rows before the raw copy.
  base_type::allocate(N);
  // copy data directly to the ptr
  std::copy_n(coordinate_begin, N * m_coordinate_size,
              base_type::coordinate_data());
}

void copy_coordinates(coordinate_type *dst_coordinate) const {
using base_type::const_coordinate_data;
using base_type::coordinate_data;

// Copy all stored field coordinates (size() rows of m_coordinate_size
// values each) into the caller-provided destination buffer, which must be
// at least size() * m_coordinate_size elements long.
void copy_coordinates(coordinate_field_type *dst_coordinate) const {
  std::copy_n(base_type::const_coordinate_data(), size() * m_coordinate_size,
              dst_coordinate);
}

// Quantize the stored continuous field coordinates into integer sparse
// tensor coordinates, written row-major into p_dst_coordinates
// (m_coordinate_size columns per point).  Column 0 (the batch index) is
// rounded to the nearest integer; spatial columns are floored — to the
// nearest multiple of the per-axis tensor stride when the stride is not
// all ones, plain floor otherwise.
void quantize_coordinates(coordinate_int_type *p_dst_coordinates,
                          stride_type const &tensor_stride) const {
  coordinate_field_type const *const p_tfield = const_coordinate_data();
  // Product of all strides; used only to select the unit-stride fast path.
  int64_t const stride_prod = std::accumulate(
      tensor_stride.begin(), tensor_stride.end(), 1, std::multiplies<>());
  ASSERT(stride_prod > 0, "Invalid stride");

  // Split the rows into one contiguous chunk per OpenMP thread.
  const size_t N = omp_get_max_threads();
  const size_t stride = (size() + N - 1) / N;
  LOG_DEBUG("kernel map with", N, "chunks and", stride, "stride.");

  if (stride_prod == 1) {
    // Fast path: all strides are 1, so quantization is a plain floor.
#pragma omp parallel for
    for (uint32_t n = 0; n < N; n++) {
      // Each thread handles rows [stride*n, min((n+1)*stride, size())).
      for (auto i = stride * n;
           i < std::min((n + 1) * stride, uint64_t(size())); ++i) {

        // batch index
        coordinate_int_type *p_curr_dst =
            &p_dst_coordinates[i * m_coordinate_size];
        // NOTE(review): std::lroundf takes float; if coordinate_field_type
        // can be double this narrows — confirm intended.
        p_curr_dst[0] = std::lroundf(p_tfield[i * m_coordinate_size]);
        for (uint32_t j = 1; j < m_coordinate_size; ++j) {
          p_curr_dst[j] = std::floor(p_tfield[m_coordinate_size * i + j]);
        }
      }
    }
  } else {
#pragma omp parallel for
    for (uint32_t n = 0; n < N; n++) {
      for (auto i = stride * n;
           i < std::min((n + 1) * stride, uint64_t(size())); ++i) {

        // batch index
        coordinate_int_type *p_curr_dst =
            &p_dst_coordinates[i * m_coordinate_size];
        p_curr_dst[0] = std::lroundf(p_tfield[i * m_coordinate_size]);
        for (uint32_t j = 1; j < m_coordinate_size; ++j) {
          // tensor_stride has D entries, offset by one from the
          // coordinate columns (column 0 is the batch index).
          auto const curr_tensor_stride = tensor_stride[j - 1];
          // Snap down to the stride grid: stride * floor(x / stride).
          p_curr_dst[j] = curr_tensor_stride *
                          std::floor(p_tfield[m_coordinate_size * i + j] /
                                     curr_tensor_stride);
        }
      }
    }
  }
}

inline size_type size() const noexcept { return m_size; }
std::string to_string() const {
Formatter o;
Expand Down
Loading

0 comments on commit a0f7e62

Please sign in to comment.