Skip to content

Commit

Permalink
Generative Convolution Transpose cpu/gpu
Browse files Browse the repository at this point in the history
  • Loading branch information
chrischoy committed Dec 15, 2020
1 parent 140d597 commit 2114cfa
Show file tree
Hide file tree
Showing 13 changed files with 472 additions and 92 deletions.
87 changes: 85 additions & 2 deletions MinkowskiEngine/MinkowskiConvolution.py
Original file line number Diff line number Diff line change
Expand Up @@ -414,7 +414,6 @@ def __init__(
dilation=1,
bias=False,
kernel_generator=None,
generate_new_coordinates=False,
dimension=None,
):
r"""a generalized sparse transposed convolution layer.
Expand Down Expand Up @@ -466,7 +465,6 @@ def __init__(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
generate_new_coordinates=generate_new_coordinates,
dimension=dimension,
)

Expand All @@ -483,3 +481,88 @@ def __init__(
dimension=dimension,
)
self.reset_parameters(True)


class MinkowskiGenerativeConvolutionTranspose(MinkowskiConvolutionBase):
    r"""A generalized sparse transposed convolution or deconvolution layer that
    generates new coordinates.

    Unlike a plain transposed convolution, this layer always forces generation
    of the output coordinate map: the output coordinates are the outer product
    of the kernel shape and the input coordinates.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=-1,
        stride=1,
        dilation=1,
        bias=False,
        kernel_generator=None,
        dimension=None,
    ):
        r"""a generalized sparse transposed convolution layer that creates new coordinates.

        Args:
            :attr:`in_channels` (int): the number of input channels in the
            input tensor.

            :attr:`out_channels` (int): the number of output channels in the
            output tensor.

            :attr:`kernel_size` (int, optional): the size of the kernel in the
            output tensor. If not provided, :attr:`region_offset` should be
            :attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
            matrix with size :math:`N\times D` such that it lists all :math:`N`
            offsets in D-dimension.

            :attr:`stride` (int, or list, optional): stride size that defines
            upsampling rate. If non-identity is used, the output coordinates
            will be :attr:`tensor_stride` / :attr:`stride` apart. When a list is
            given, the length must be D; each element will be used for stride
            size for the specific axis.

            :attr:`dilation` (int, or list, optional): dilation size for the
            convolution kernel. When a list is given, the length must be D and
            each element is an axis specific dilation. All elements must be > 0.

            :attr:`bias` (bool, optional): if True, the convolution layer
            has a bias.

            :attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
            optional): defines custom kernel shape. NOTE: the provided
            generator is modified in place -- its
            :attr:`generate_new_coordinates` flag is forced to True (see
            below).

            :attr:`dimension` (int): the spatial dimension of the space where
            all the inputs and the network are defined. For example, images are
            in a 2D space, meshes and 3D shapes are in a 3D space.

        .. note:
            TODO: support `kernel_size` > `stride`.
        """
        # Coordinate generation is unconditional for this layer: either build a
        # KernelGenerator with the flag already set, or force the flag on the
        # caller-supplied generator (in-place mutation of the argument).
        if kernel_generator is None:
            kernel_generator = KernelGenerator(
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                generate_new_coordinates=True,
                dimension=dimension,
            )
        else:
            kernel_generator.generate_new_coordinates = True

        MinkowskiConvolutionBase.__init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride,
            dilation,
            bias,
            kernel_generator,
            is_transpose=True,
            dimension=dimension,
        )
        # NOTE(review): the True argument presumably mirrors is_transpose so
        # weights are initialized for the transposed layout -- confirm against
        # MinkowskiConvolutionBase.reset_parameters.
        self.reset_parameters(True)
1 change: 1 addition & 0 deletions MinkowskiEngine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@
MinkowskiConvolution,
MinkowskiConvolutionTransposeFunction,
MinkowskiConvolutionTranspose,
MinkowskiGenerativeConvolutionTranspose,
)

#
Expand Down
11 changes: 6 additions & 5 deletions src/convolution_transpose_cpu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,6 @@ at::Tensor ConvolutionTransposeForwardCPU(
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
cpu_manager_type<coordinate_type> *p_map_manager) {
ASSERT(!generate_new_coordinates, ERROR_NOT_IMPLEMENTED);

ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(kernel.is_contiguous(), "kernel must be contiguous");

Expand All @@ -74,7 +72,7 @@ at::Tensor ConvolutionTransposeForwardCPU(
ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
in_feat.size(0), "!=", p_map_manager->size(in_key));

if (!p_out_map_key->is_key_set()) {
if (!p_out_map_key->is_key_set() || generate_new_coordinates) {
auto map_it = p_map_manager->find(p_in_map_key->get_key());
ASSERT(map_it != p_map_manager->map_end(), ERROR_MAP_NOT_FOUND);
auto const &in_map = (*map_it).second;
Expand All @@ -87,10 +85,13 @@ at::Tensor ConvolutionTransposeForwardCPU(
out_tensor_stride.data(), //
kernel_size.data(), //
kernel_dilation.data(), //
0, offset.data_ptr<coordinate_type>(), offset.size(0));
0, // volume. Will be initialized automatically
offset.data_ptr<coordinate_type>(), offset.size(0),
true // is_transpose
);

coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride_region(
in_key, kernel_region, true /* is_transpose */));
in_key, kernel_region, generate_new_coordinates));
p_out_map_key->set_key(out_key);
}

Expand Down
11 changes: 6 additions & 5 deletions src/convolution_transpose_gpu.cu
Original file line number Diff line number Diff line change
Expand Up @@ -58,8 +58,6 @@ at::Tensor ConvolutionTransposeForwardGPU(
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {

ASSERT(!generate_new_coordinates, ERROR_NOT_IMPLEMENTED);

ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(kernel.is_contiguous(), "kernel must be contiguous");

Expand All @@ -86,7 +84,7 @@ at::Tensor ConvolutionTransposeForwardGPU(
ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
in_feat.size(0), "!=", p_map_manager->size(in_key));

if (!p_out_map_key->is_key_set()) {
if (!p_out_map_key->is_key_set() || generate_new_coordinates) {
auto map_it = p_map_manager->find(p_in_map_key->get_key());
ASSERT(map_it != p_map_manager->map_end(), ERROR_MAP_NOT_FOUND);
auto const &in_map = (*map_it).second;
Expand All @@ -99,10 +97,13 @@ at::Tensor ConvolutionTransposeForwardGPU(
out_tensor_stride.data(), //
kernel_size.data(), //
kernel_dilation.data(), //
0, offset.data_ptr<coordinate_type>(), offset.size(0));
0, // volume
offset.data_ptr<coordinate_type>(), offset.size(0),
true // is_transpose
);

coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride_region(
in_key, kernel_region, true /* is_transpose */));
in_key, kernel_region, generate_new_coordinates));
LOG_DEBUG("ConvolutionTranspose out key:", out_key);
p_out_map_key->set_key(out_key);
}
Expand Down
6 changes: 5 additions & 1 deletion src/coordinate_map.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -82,8 +82,12 @@ stride_tensor_stride(const default_types::stride_type &tensor_stride,
ASSERT(tensor_stride.size() == stride.size(), "stride size mismatch.");
default_types::stride_type strided_tensor_stride{tensor_stride};
if (is_transpose) {
for (default_types::size_type i = 0; i < tensor_stride.size(); ++i)
for (default_types::size_type i = 0; i < tensor_stride.size(); ++i) {
ASSERT(strided_tensor_stride[i] % stride[i] == 0,
"Invalid up stride on tensor stride:", tensor_stride,
"kernel stride:", stride);
strided_tensor_stride[i] /= stride[i];
}
} else {
for (default_types::size_type i = 0; i < tensor_stride.size(); ++i)
strided_tensor_stride[i] *= stride[i];
Expand Down
20 changes: 4 additions & 16 deletions src/coordinate_map_cpu.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -196,18 +196,13 @@ class CoordinateMapCPU : public CoordinateMap<coordinate_type, TemplatedAllocato
/*
* @brief strided coordinate map for region.
*/
self_type
stride_region(cpu_kernel_region<coordinate_type> const &kernel) const {
self_type stride_region(cpu_kernel_region<coordinate_type> const &kernel,
stride_type const &out_tensor_stride) const {
ASSERT(kernel.coordinate_size() == m_coordinate_size, "Invalid kernel");
// Over estimate the reserve size to be size();
stride_type out_tensor_stride(
kernel.tensor_stride(), kernel.tensor_stride() + m_coordinate_size - 1);

self_type stride_map(size() * kernel.volume(), m_coordinate_size,
out_tensor_stride, base_type::m_byte_allocator);

auto &out_mmap = stride_map.m_map;

auto ckernel = cpu_kernel_region<coordinate_type>(kernel);
std::vector<coordinate_type> lb(m_coordinate_size), ub(m_coordinate_size),
tmp(m_coordinate_size);
Expand All @@ -221,15 +216,8 @@ class CoordinateMapCPU : public CoordinateMap<coordinate_type, TemplatedAllocato

// For elements in the current region
for (const auto &point : ckernel) {
// If the input coord exists
const auto iter_out = out_mmap.find(point);
// LOG_DEBUG(kernel_ind, ":",
// PtrToString(iter_out->first.data(), m_coordinate_size),
// "->", PtrToString(point.data(), m_coordinate_size));
if (iter_out == out_mmap.end()) {
insert(point, num_used);
++num_used;
}
auto const result = stride_map.insert(point, num_used);
num_used += result.second;
}
}
return stride_map;
Expand Down
22 changes: 20 additions & 2 deletions src/coordinate_map_functors.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -155,11 +155,29 @@ template <typename coordinate_type, typename map_type> struct update_value {
map_type m_map;
};

// Functor that renumbers map entries addressed by backing-storage offsets:
// entry at valid_map_offset[i] gets its mapped value (.second) set to i.
// (The orphaned `template <bool value> struct is_first {` line that preceded
// this struct was a leftover of the previous is_first definition and would
// have nested this struct inside an unterminated declaration; removed.)
template <typename index_type, typename map_type>
struct update_value_with_offset {
  update_value_with_offset(map_type &_map, index_type const *valid_map_offset)
      : m_valid_map_offset(valid_map_offset), m_map{_map} {}

  // Device-side call: overwrite the value slot of the entry at the i-th
  // stored offset with the new sequential index i.
  __device__ void operator()(index_type i) {
    auto &result = m_map.data()[m_valid_map_offset[i]];
    result.second = i;
  }

  index_type const *m_valid_map_offset; // non-owning, device-accessible offsets
  map_type m_map; // copied by value -- presumably a lightweight view over
                  // device storage; verify map_type copy semantics
};

// Predicate functor: true when the first element of a thrust tuple equals the
// stored value. Fix: the flattened diff left two consecutive return
// statements; the first referenced the undefined identifier `value` (leftover
// from the old `template <bool value>` version) and made the second
// unreachable. Only the correct `m_value` comparison is kept.
template <typename T> struct is_first {
  is_first(T value) : m_value(value) {}

  template <typename Tuple>
  inline __device__ bool operator()(Tuple const &item) const {
    return thrust::get<0>(item) == m_value;
  }

  T m_value;
};

template <typename coordinate_type, typename mapped_type>
Expand Down
Loading

0 comments on commit 2114cfa

Please sign in to comment.