diff --git a/aten/src/ATen/Formatting.cpp b/aten/src/ATen/Formatting.cpp
index d6ae4453ff0d4..aab224f0d9d39 100644
--- a/aten/src/ATen/Formatting.cpp
+++ b/aten/src/ATen/Formatting.cpp
@@ -260,10 +260,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
       stream << defaultfloat << tensor.data<double>()[0] << std::endl;
       stream << "[ " << tensor_.pImpl->toString() << "{} ]";
     } else if(tensor.ndimension() == 1) {
-      if (tensor.numel() == 0) {
-        stream << "[ Tensor (empty) ]";
-      }
-      else {
+      if (tensor.numel() > 0) {
         double scale;
         int64_t sz;
         std::tie(scale, sz) = __printFormat(stream, tensor);
@@ -274,18 +271,22 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
         for(int64_t i = 0; i < tensor.size(0); i++) {
           stream << std::setw(sz) << tensor_p[i]/scale << std::endl;
         }
-        stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0) << "} ]";
       }
+      stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0) << "} ]";
     } else if(tensor.ndimension() == 2) {
-      __printMatrix(stream, tensor, linesize, 0);
+      if (tensor.numel() > 0) {
+        __printMatrix(stream, tensor, linesize, 0);
+      }
       stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0) << "," << tensor.size(1) << "} ]";
     } else {
+      if (tensor.numel() > 0) {
       __printTensor(stream, tensor, linesize);
-      stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0);
-      for(int64_t i = 1; i < tensor.ndimension(); i++) {
-        stream << "," << tensor.size(i);
-      }
-      stream << "} ]";
+      }
+      stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0);
+      for(int64_t i = 1; i < tensor.ndimension(); i++) {
+        stream << "," << tensor.size(i);
+      }
+      stream << "} ]";
     }
   }
   return stream;
diff --git a/aten/src/TH/generic/THTensorMath.cpp b/aten/src/TH/generic/THTensorMath.cpp
index 19633cc357813..ad2389f6ee935 100644
--- a/aten/src/TH/generic/THTensorMath.cpp
+++ b/aten/src/TH/generic/THTensorMath.cpp
@@ -288,7 +288,7 @@ void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
 #ifdef DEBUG
   THAssert(numel <= LONG_MAX);
 #endif
-  THLongTensor_resize2d(subscript, numel, tensor->_dim());
+  THLongTensor_resize2d(subscript, numel, tensor->dim());
 
   /* Second pass populates subscripts */
   subscript_data = THLongTensor_data(subscript);
@@ -296,12 +296,12 @@ void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
                   if IS_NONZERO(*tensor_data) {
                     div = 1;
 
-                    for (dim = tensor->_dim() - 1; dim >= 0; dim--) {
+                    for (dim = tensor->dim() - 1; dim >= 0; dim--) {
                       *(subscript_data + dim) = (i/div) % tensor->size[dim];
                       div *= tensor->size[dim];
                     }
 
-                    subscript_data += tensor->_dim();
+                    subscript_data += tensor->dim();
                   }
                   ++i;);
 }
diff --git a/aten/src/THC/generic/THCTensorMath.cu b/aten/src/THC/generic/THCTensorMath.cu
index 83019ec101456..368cdebbeff6a 100644
--- a/aten/src/THC/generic/THCTensorMath.cu
+++ b/aten/src/THC/generic/THCTensorMath.cu
@@ -271,7 +271,7 @@ void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
   self = THCTensor_(newContiguous)(state, self);
   thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
 
-  int num_dim = THCTensor_(_nDimension)(state, self);
+  int num_dim = THCTensor_(nDimension)(state, self);
   int64_t N = THCTensor_(nElement)(state, self);
 
   THCudaLongTensor_resize2d(state, tensor, N, num_dim);
diff --git a/test/test_torch.py b/test/test_torch.py
index 20b8a9ded7517..a1c84c8b0818d 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -6208,6 +6208,17 @@ def test_nonzero(self):
                     for i in range(dst1.size(0)):
                         self.assertNotEqual(tensor[dst1[i, 0], dst1[i, 1], dst1[i, 2]].item(), 0)
 
+    def test_nonzero_empty(self):
+        if not torch._C._use_zero_size_dim():
+            return
+
+        devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
+        for device in devices:
+            x = torch.randn(0, 2, 0, 5, 0, device=device)
+            y = torch.nonzero(x)
+            self.assertEqual(0, y.numel())
+            self.assertEqual(torch.Size([0, 5]), y.shape)
+
     def test_deepcopy(self):
         from copy import deepcopy
         a = torch.randn(5, 5)
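
Usage sketch of the behavior that test_nonzero_empty asserts, assuming a build where the torch._C._use_zero_size_dim() gate is enabled: torch.nonzero on a tensor with zero elements returns a LongTensor with zero rows and one column per input dimension.

import torch

x = torch.randn(0, 2, 0, 5, 0)   # 5-dimensional tensor with zero elements
y = torch.nonzero(x)             # one row per nonzero element, one column per dimension
print(y.numel())                 # 0 -- no nonzero elements to report
print(y.shape)                   # torch.Size([0, 5]) -- still records all 5 input dims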