empty sparse tensor (#102)
chrischoy committed Apr 2, 2020
1 parent b809252 commit 573b16c
Showing 5 changed files with 72 additions and 33 deletions.
CHANGELOG.md (2 additions & 1 deletion)
@@ -9,7 +9,8 @@
 - Update `get_union_map` doc (Issue #108)
 - Abstract getattr minkowski backend functions
 - Add `coordinates_and_features_at(batch_index)` function in the SparseTensor class.
-- Add `MinkowskiChannelwiseConvolution`
+- Add `MinkowskiChannelwiseConvolution` (Issue #92)
+- Update `MinkowskiPruning` to generate an empty sparse tensor as output (Issue #102)


 ## [0.4.2] - 2020-03-13
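The user-visible effect of the `MinkowskiPruning` change is sketched below. This is a minimal illustration that mirrors the `test_empty` case added to `tests/pruning.py` further down; the coordinates, feature values, and channel count are made up, and the 0.4.x Python API (`SparseTensor(feats, coords=...)`, `MinkowskiPruning()`) is assumed:

```python
import torch
import MinkowskiEngine as ME

# Four hypothetical 2D points in batch 0, with two feature channels each.
coords = torch.IntTensor([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1]])
feats = torch.rand(4, 2)
x = ME.SparseTensor(feats, coords=coords)

# An all-False mask prunes every point.
keep = torch.zeros(len(x), dtype=torch.bool)
y = ME.MinkowskiPruning()(x, keep)

# Before this commit the forward pass asserted tot_n > 0 and failed here;
# now it returns a valid, empty sparse tensor with zero rows and the
# original channel count.
print(y.F.shape)  # torch.Size([0, 2])
```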
src/pruning.cpp (39 additions & 29 deletions)
@@ -38,21 +38,23 @@ void PruningForwardCPU(at::Tensor in_feat, // CPU feat
                        py::object py_out_coords_key,
                        py::object py_coords_manager) {
   CoordsManager *p_coords_manager = py_coords_manager.cast<CoordsManager *>();
+  const auto &in_out = p_coords_manager->getPruningInOutMaps(
+      use_feat, py_in_coords_key, py_out_coords_key);

   // Get the total number of coords
   at::Tensor sum = use_feat.sum();
   const int64_t tot_n = sum.item<int64_t>();
-  ASSERT(tot_n > 0, "Invalid total number of features", to_string(tot_n));

-  out_feat.resize_({tot_n, in_feat.size(1)});
-  out_feat.zero_();
-
-  const auto &in_out = p_coords_manager->getPruningInOutMaps(
-      use_feat, py_in_coords_key, py_out_coords_key);
-
-  PruningForwardKernelCPU<Dtype, int>(in_feat.data<Dtype>(),
-                                      out_feat.data<Dtype>(), in_feat.size(1),
-                                      get<0>(in_out), get<1>(in_out));
+  if (tot_n == 0) {
+    WARNING(true, "MinkowskiPruning: Generating an empty SparseTensor");
+    out_feat.resize_({0, in_feat.size(1)});
+  } else {
+    out_feat.resize_({tot_n, in_feat.size(1)});
+    out_feat.zero_();
+
+    PruningForwardKernelCPU<Dtype, int>(in_feat.data<Dtype>(),
+                                        out_feat.data<Dtype>(), in_feat.size(1),
+                                        get<0>(in_out), get<1>(in_out));
+  }
 }

 template <typename Dtype>
@@ -76,9 +78,12 @@ void PruningBackwardCPU(at::Tensor grad_in_feat, // CPU feat
   grad_in_feat.resize_({in_nrows, nchannel});
   grad_in_feat.zero_();

-  PruningBackwardKernelCPU<Dtype, int>(
-      grad_in_feat.data<Dtype>(), grad_out_feat.data<Dtype>(), nchannel,
-      p_coords_manager->in_maps[map_key], p_coords_manager->out_maps[map_key]);
+  if (grad_out_feat.size(0) > 0)
+    PruningBackwardKernelCPU<Dtype, int>(
+        grad_in_feat.data<Dtype>(), grad_out_feat.data<Dtype>(), nchannel,
+        p_coords_manager->in_maps[map_key], p_coords_manager->out_maps[map_key]);
+  else
+    WARNING(true, "MinkowskiPruning: Backprop from a size-0 sparse tensor.");
 }

 #ifndef CPU_ONLY
@@ -90,21 +95,23 @@ void PruningForwardGPU(at::Tensor in_feat, // GPU feat
                        py::object py_out_coords_key,
                        py::object py_coords_manager) {
   CoordsManager *p_coords_manager = py_coords_manager.cast<CoordsManager *>();
+  const auto &in_out = p_coords_manager->getPruningInOutMapsGPU(
+      use_feat, py_in_coords_key, py_out_coords_key);

   // Get the total number of coords
   at::Tensor sum = use_feat.sum();
   const int64_t tot_n = sum.item<int64_t>();
-  ASSERT(tot_n > 0, "Invalid total number of features", to_string(tot_n));

-  out_feat.resize_({tot_n, in_feat.size(1)});
-  out_feat.zero_();
-
-  const auto &in_out = p_coords_manager->getPruningInOutMapsGPU(
-      use_feat, py_in_coords_key, py_out_coords_key);
-
-  PruningForwardKernelGPU<Dtype, int>(
-      in_feat.data<Dtype>(), out_feat.data<Dtype>(), in_feat.size(1),
-      get<0>(in_out), get<1>(in_out), at::cuda::getCurrentCUDAStream());
+  if (tot_n == 0) {
+    WARNING(true, "MinkowskiPruning: Generating an empty SparseTensor");
+    out_feat.resize_({0, in_feat.size(1)});
+  } else {
+    out_feat.resize_({tot_n, in_feat.size(1)});
+    out_feat.zero_();
+
+    PruningForwardKernelGPU<Dtype, int>(
+        in_feat.data<Dtype>(), out_feat.data<Dtype>(), in_feat.size(1),
+        get<0>(in_out), get<1>(in_out), at::cuda::getCurrentCUDAStream());
+  }
 }

 template <typename Dtype>
@@ -127,10 +134,13 @@ void PruningBackwardGPU(at::Tensor grad_in_feat, // GPU feat
   grad_in_feat.resize_({in_nrows, nchannel});
   grad_in_feat.zero_();

-  PruningBackwardKernelGPU<Dtype, int>(
-      grad_in_feat.data<Dtype>(), grad_out_feat.data<Dtype>(), nchannel,
-      p_coords_manager->d_in_maps[map_key],
-      p_coords_manager->d_out_maps[map_key], at::cuda::getCurrentCUDAStream());
+  if (grad_out_feat.size(0) > 0)
+    PruningBackwardKernelGPU<Dtype, int>(
+        grad_in_feat.data<Dtype>(), grad_out_feat.data<Dtype>(), nchannel,
+        p_coords_manager->d_in_maps[map_key],
+        p_coords_manager->d_out_maps[map_key], at::cuda::getCurrentCUDAStream());
+  else
+    WARNING(true, "MinkowskiPruning: Backprop from a size-0 sparse tensor.");
 }
 #endif

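At the PyTorch level, the guard added to both the CPU and GPU paths above amounts to the following simplified sketch (not the actual binding code; the sizes are arbitrary):

```python
import torch

nchannel = 16                                 # arbitrary channel count
use_feat = torch.zeros(10, dtype=torch.bool)  # mask that keeps no input row
tot_n = int(use_feat.sum().item())            # number of rows to keep: 0

if tot_n == 0:
    # A (0, C) tensor is a valid PyTorch tensor, so the forward pass can
    # return an empty feature matrix instead of asserting tot_n > 0.
    out_feat = torch.zeros(0, nchannel)
else:
    out_feat = torch.zeros(tot_n, nchannel)

# The backward guard is analogous: a size-0 output gradient contributes
# nothing, so the input gradient simply stays all zeros and no pruning
# kernel needs to be launched.
grad_out_feat = torch.zeros(0, nchannel)
grad_in_feat = torch.zeros(10, nchannel)
print(out_feat.shape, grad_in_feat.abs().sum())  # torch.Size([0, 16]) tensor(0.)
```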
src/utils.hpp (2 additions & 2 deletions)
@@ -102,10 +102,10 @@ class Formatter {

 #define WARNING(condition, ...)                                    \
   {                                                                \
-    if (!(condition)) {                                            \
+    if (condition) {                                               \
       Formatter formatter;                                         \
       formatter << __FILE__ << ":" << __LINE__ << ",";             \
-      formatter << " assertion (" #condition << ") faild. ";       \
+      formatter << " (" #condition << ") ";                        \
       formatter.append(__VA_ARGS__);                               \
       std::cerr << formatter.str() << std::endl;                   \
     }                                                              \
tests/pruning.py (21 additions & 0 deletions)
@@ -33,6 +33,27 @@

 class TestPruning(unittest.TestCase):

+    def test_empty(self):
+        in_channels = 2
+        coords, feats, labels = data_loader(in_channels, batch_size=1)
+        feats = feats.double()
+        feats.requires_grad_()
+        input = SparseTensor(feats, coords=coords)
+        use_feat = torch.BoolTensor(len(input))
+        use_feat.zero_()
+        pruning = MinkowskiPruning()
+        output = pruning(input, use_feat)
+        print(input)
+        print(use_feat)
+        print(output)
+
+        # Check backward
+        fn = MinkowskiPruningFunction()
+        self.assertTrue(
+            gradcheck(fn, (input.F, use_feat, input.coords_key,
+                           output.coords_key, input.coords_man)))
+
     def test_pruning(self):
         in_channels, D = 2, 2
         coords, feats, labels = data_loader(in_channels, batch_size=1)
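The new test exercises the backward path through `torch.autograd.gradcheck`, which compares analytical gradients against finite differences; this is why the features are cast to double precision and marked with `requires_grad_()` before the `SparseTensor` is built, since `gradcheck` is only reliable with float64 inputs that require gradients.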
tests/sparse_tensor.py (8 additions & 1 deletion)
@@ -37,6 +37,13 @@ def test(self):
         input = SparseTensor(feats, coords=coords)
         print(input)

+    def test_empty(self):
+        print(f"{self.__class__.__name__}: test_empty SparseTensor")
+        feats = torch.FloatTensor(0, 16)
+        coords = torch.IntTensor(0, 4)
+        input = SparseTensor(feats, coords=coords)
+        print(input)
+
     def test_force_creation(self):
         print(f"{self.__class__.__name__}: test_force_creation")
         coords, feats, labels = data_loader(nchannel=2)
@@ -76,7 +83,7 @@ def test_extraction(self):

         CC0, FC0 = X.coordinates_and_features_at(0)
         self.assertTrue((C0 == CC0).all())
-        self.assertEqual((F0 == FC0).all())
+        self.assertTrue((F0 == FC0).all())

         coords, feats = X.decomposed_coordinates_and_features
         for c, f in zip(coords, feats):
