Prepare for moving 0-sized dimensions in TH/THC. (pytorch#8337)
This does the following:
1) makes nDimension an int64_t (to match ATen)
2) renames the dimension field to dim_ (so we catch direct usages)
3) provides an _dim() accessor that exposes the "old" view (so we can migrate functions one at a time)
4) has code call ->_dim() instead of ->nDimension.
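To make the pattern concrete, here is a minimal sketch of what items 1)-3) amount to in THTensor.hpp, together with the mechanical call-site change from item 4). This is a simplified illustration, not the literal diff (the real struct has additional members):

#include <cstdint>

struct THTensor {
  int64_t *size;
  int64_t *stride;
  // Was `int nDimension`; renamed and widened so that any remaining direct
  // uses stop compiling and get migrated deliberately.
  int64_t dim_;

  // "Old" TH view: zero dimensions means an empty tensor. A later dim() will
  // expose the new view that supports 0-sized dimensions.
  inline int64_t _dim() const { return dim_; }
};

// Call sites change mechanically:
//   before:  int64_t d = tensor->nDimension;
//   after:   int64_t d = tensor->_dim();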
gchanan authored and ezyang committed Jun 12, 2018
1 parent 0cced57 commit 38362fa
Showing 107 changed files with 821 additions and 801 deletions.
2 changes: 0 additions & 2 deletions aten/src/ATen/gen.py
@@ -329,10 +329,8 @@ def generate_storage_type_and_tensor(backend, density, scalar_type, declarations
fm.write(env['Storage'] + ".cpp", STORAGE_DERIVED_CPP, env)
fm.write(env['Storage'] + ".h", STORAGE_DERIVED_H, env)
env['TensorDenseOrSparse'] = TENSOR_DENSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimension'
else:
env['TensorDenseOrSparse'] = TENSOR_SPARSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimensionI + tensor->nDimensionV'

fm.write(env['Type'] + ".cpp", TYPE_DERIVED_CPP, env)
fm.write(env['Type'] + ".h", TYPE_DERIVED_H, env)
2 changes: 1 addition & 1 deletion aten/src/ATen/templates/TensorDense.cpp
@@ -1,7 +1,7 @@
// included as 'TensorDenseOrSparse' in TensorDerived.cpp

IntList ${Tensor}::strides() const {
int64_t d = tensor->nDimension;
int64_t d = tensor->_dim();
if (d != 0) {
return IntList(reinterpret_cast<int64_t*>(tensor->stride),dim());
} else {
4 changes: 2 additions & 2 deletions aten/src/ATen/templates/TensorDerived.cpp
@@ -31,7 +31,7 @@ const char * ${Tensor}::toString() const {
}

IntList ${Tensor}::sizes() const {
int64_t d = ${THTensor_nDimension};
int64_t d = tensor->_dim();
if (d != 0) {
// note: this will return "{}" for a scalar because dim() will return 0 in that case.
return IntList(reinterpret_cast<int64_t*>(tensor->size),dim());
@@ -43,7 +43,7 @@ IntList ${Tensor}::sizes() const {
int64_t ${Tensor}::dim() const {
if(isScalar())
return 0;
int64_t d = ${THTensor_nDimension};
int64_t d = tensor->_dim();
// See Note [Empty versus 0-dim tensors]
if (d != 0)
return d;
8 changes: 7 additions & 1 deletion aten/src/TH/THTensor.hpp
@@ -12,7 +12,7 @@ typedef struct THTensor
{
int64_t *size;
int64_t *stride;
int nDimension;
int64_t dim_;

// Note: storage->size may be greater than the recorded size
// of a tensor
@@ -31,6 +31,12 @@
inline T * unsafe_data() const {
return storage->unsafe_data<T>() + storageOffset;
}

// NOTE: this returns the "old" TH dimension view where no dimensions represents an empty tensor.
// There will be a dim() function that gives the new view that supports 0-sized dimensions.
inline int64_t _dim() const {
return dim_;
}
} THTensor;

#include "generic/THTensorFastGetSet.hpp"
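For context on how the new accessor is meant to be used, a hypothetical helper (not part of this commit; the name is invented for illustration, and it assumes the THTensor definition above) would test the legacy "no dimensions means empty" convention through _dim() rather than reading dim_ directly, which keeps such call sites easy to find once the new dim() semantics land:

// Hypothetical helper, for illustration only.
static inline bool THTensor_isEmptyLegacyView(const THTensor *t) {
  // Old TH view: a tensor with zero dimensions is the empty tensor.
  return t->_dim() == 0;
}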
30 changes: 15 additions & 15 deletions aten/src/TH/THTensorApply.h
@@ -35,18 +35,18 @@
int64_t *TENSOR##_counter = NULL, *TENSOR##_sizes = NULL, *TENSOR##_strides = NULL, *TENSOR##_dimOffset = NULL; \
int64_t TENSOR##_stride = 0, TENSOR##_size = 0, TENSOR##_dim = 0, TENSOR##_i, TENSOR##_n; \
int TENSOR##_contiguous = ALLOW_CONTIGUOUS && DIM < 0; \
TENSOR##_n = (TENSOR->nDimension ? 1 : 0); \
for(TENSOR##_i = 0; TENSOR##_i < TENSOR->nDimension; TENSOR##_i++) \
TENSOR##_n = (TENSOR->_dim() ? 1 : 0); \
for(TENSOR##_i = 0; TENSOR##_i < TENSOR->_dim(); TENSOR##_i++) \
TENSOR##_n *= TENSOR->size[TENSOR##_i]; \
\
if(TENSOR->nDimension == 0) \
if(TENSOR->_dim() == 0) \
TH_TENSOR_APPLY_hasFinished = 1; \
else \
{ \
TENSOR##_data = TENSOR->storage->data<TYPE>()+TENSOR->storageOffset; \
TENSOR##_size = 1; \
TENSOR##_stride = 1; \
for(TENSOR##_i = TENSOR->nDimension-1; TENSOR##_i >= 0; TENSOR##_i--) { \
for(TENSOR##_i = TENSOR->_dim()-1; TENSOR##_i >= 0; TENSOR##_i--) { \
if(TENSOR->size[TENSOR##_i] != 1) { \
if(TENSOR->stride[TENSOR##_i] == TENSOR##_size && TENSOR##_i != DIM) \
TENSOR##_size *= TENSOR->size[TENSOR##_i]; \
@@ -59,7 +59,7 @@
if (!TENSOR##_contiguous) { \
/* Find the dimension of contiguous sections */ \
TENSOR##_dim = 1; \
for(TENSOR##_i = TENSOR->nDimension-2; TENSOR##_i >= 0; TENSOR##_i--) \
for(TENSOR##_i = TENSOR->_dim()-2; TENSOR##_i >= 0; TENSOR##_i--) \
{ \
if(TENSOR->stride[TENSOR##_i] != TENSOR->stride[TENSOR##_i+1] * TENSOR->size[TENSOR##_i+1] || TENSOR##_i == DIM || TENSOR##_i+1 == DIM) \
TENSOR##_dim++; \
@@ -69,19 +69,19 @@
TENSOR##_sizes = TENSOR##_counter + TENSOR##_dim; \
TENSOR##_strides = TENSOR##_counter + 2*TENSOR##_dim; \
TH_TENSOR_dim_index = TENSOR##_dim-1; \
TENSOR##_dimOffset = (DIM == TENSOR->nDimension-1) ? &TENSOR##_i : &TENSOR##_counter[DIM]; \
TENSOR##_sizes[TH_TENSOR_dim_index] = TENSOR->size[TENSOR->nDimension-1]; \
TENSOR##_strides[TH_TENSOR_dim_index] = TENSOR->stride[TENSOR->nDimension-1]; \
TENSOR##_dimOffset = (DIM == TENSOR->_dim()-1) ? &TENSOR##_i : &TENSOR##_counter[DIM]; \
TENSOR##_sizes[TH_TENSOR_dim_index] = TENSOR->size[TENSOR->_dim()-1]; \
TENSOR##_strides[TH_TENSOR_dim_index] = TENSOR->stride[TENSOR->_dim()-1]; \
/* TENSOR##_counter tracks where we are in the storage. The offset into the */ \
/* storage is given by storage_offset + (i * j), where i is the stride */ \
/* vector and j is tensor_counter vector. This sets the starting position for the loop. */ \
for(TENSOR##_i = TENSOR##_dim-1; TENSOR##_i >= 0; --TENSOR##_i) { \
TENSOR##_counter[TENSOR##_i] = 0; \
} \
for(TENSOR##_i = TENSOR->nDimension-2; TENSOR##_i >= 0; --TENSOR##_i) { \
for(TENSOR##_i = TENSOR->_dim()-2; TENSOR##_i >= 0; --TENSOR##_i) { \
if (TENSOR->stride[TENSOR##_i] == TENSOR->stride[TENSOR##_i+1] * TENSOR->size[TENSOR##_i+1] && TENSOR##_i != DIM && TENSOR##_i+1 != DIM) { \
TENSOR##_sizes[TH_TENSOR_dim_index] = TENSOR->size[TENSOR##_i] * TENSOR##_sizes[TH_TENSOR_dim_index]; \
if (DIM != TENSOR->nDimension-1 && TENSOR##_i < DIM) \
if (DIM != TENSOR->_dim()-1 && TENSOR##_i < DIM) \
TENSOR##_dimOffset--; \
} else { \
--TH_TENSOR_dim_index; \
@@ -160,9 +160,9 @@
elements_equal = 0; \
} \
if (elements_equal == 0) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->_dim()); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->_dim()); \
THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->_dim()); \
THError("inconsistent tensor size, expected %s %s, %s %s and %s %s to have the same " \
"number of elements, but got %d, %d and %d elements respectively", \
#TENSOR1, T1buff.str, #TENSOR2, T2buff.str, #TENSOR3, T3buff.str, \
@@ -199,8 +199,8 @@
__TH_TENSOR_APPLYX_PREAMBLE(TYPE2, TENSOR2, DIM, 1) \
\
if(TENSOR1##_n != TENSOR2##_n) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->_dim()); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->_dim()); \
THError("inconsistent tensor size, expected %s %s and %s %s to have the same " \
"number of elements, but got %d and %d elements respectively", \
#TENSOR1, T1buff.str, #TENSOR2, T2buff.str, TENSOR1##_n, TENSOR2##_n); \
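The apply preamble above derives the element count from the legacy dimension view; written as a plain function instead of macro text, the computation is roughly the following (a sketch with an illustrative name, assuming the THTensor definition from THTensor.hpp; not code from the diff):

// Rough equivalent of the TENSOR##_n computation in the apply preamble.
static inline int64_t THTensor_nElementLegacy(const THTensor *t) {
  // Old view: zero dimensions means an empty tensor, hence zero elements;
  // otherwise the element count is the product of the sizes.
  int64_t n = t->_dim() ? 1 : 0;
  for (int64_t i = 0; i < t->_dim(); i++) {
    n *= t->size[i];
  }
  return n;
}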
76 changes: 38 additions & 38 deletions aten/src/TH/THTensorDimApply.h
@@ -9,7 +9,7 @@
#define TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM(TENSOR1, TENSOR2, TENSOR3, DIMENSION) \
{ \
int shape_check_flag = 0; \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->_dim(); TH_TENSOR_DIM_APPLY_i++) \
{ \
if (TH_TENSOR_DIM_APPLY_i == DIMENSION) \
continue; \
@@ -23,9 +23,9 @@
} \
} \
if (shape_check_flag == 1) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->_dim()); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->_dim()); \
THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->_dim()); \
THError("Expected %s %s, %s %s and %s %s to have the same size apart from dimension %d", \
#TENSOR1, T1buff.str, #TENSOR2, T2buff.str, #TENSOR3, T3buff.str, DIMENSION); \
} \
@@ -43,26 +43,26 @@
int TH_TENSOR_DIM_APPLY_hasFinished = 0; \
int TH_TENSOR_DIM_APPLY_i; \
\
if( (DIMENSION < 0) || (DIMENSION >= TENSOR1->nDimension) ) \
THError("invalid dimension %d (expected to be 0 <= dim < %d)", DIMENSION, TENSOR1->nDimension); \
if( (DIMENSION < 0) || (DIMENSION >= TENSOR1->_dim()) ) \
THError("invalid dimension %d (expected to be 0 <= dim < %d)", DIMENSION, TENSOR1->_dim()); \
int same_dims = 1; \
if( TENSOR1->nDimension != TENSOR2->nDimension ) { \
if( TENSOR1->_dim() != TENSOR2->_dim() ) { \
same_dims = 0; \
} \
if( TENSOR1->nDimension != TENSOR3->nDimension ) { \
if( TENSOR1->_dim() != TENSOR3->_dim() ) { \
same_dims = 0; \
} \
if (same_dims == 0) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->_dim()); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->_dim()); \
THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->_dim()); \
THError("inconsistent tensor size, expected %s %s, %s %s and %s %s to have the same " \
"number of dimensions", #TENSOR1, T1buff.str, #TENSOR2, T2buff.str, #TENSOR3, T3buff.str); \
} \
SIZE_CHECK(TENSOR1, TENSOR2, TENSOR3, DIMENSION) \
\
TH_TENSOR_DIM_APPLY_counter = (int64_t*)THAlloc(sizeof(int64_t)*(TENSOR1->nDimension)); \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
TH_TENSOR_DIM_APPLY_counter = (int64_t*)THAlloc(sizeof(int64_t)*(TENSOR1->_dim())); \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->_dim(); TH_TENSOR_DIM_APPLY_i++) \
TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \
\
TENSOR1##_data = (TENSOR1)->storage->data<TYPE1>()+(TENSOR1)->storageOffset; \
@@ -81,14 +81,14 @@
{ \
CODE \
\
if(TENSOR1->nDimension == 1) \
if(TENSOR1->_dim() == 1) \
break; \
\
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->_dim(); TH_TENSOR_DIM_APPLY_i++) \
{ \
if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \
{ \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->_dim()-1) \
{ \
TH_TENSOR_DIM_APPLY_hasFinished = 1; \
break; \
@@ -103,7 +103,7 @@
\
if(TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] == TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) \
{ \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->_dim()-1) \
{ \
TH_TENSOR_DIM_APPLY_hasFinished = 1; \
break; \
@@ -150,29 +150,29 @@
int TH_TENSOR_DIM_APPLY_hasFinished = 0; \
int TH_TENSOR_DIM_APPLY_i; \
\
if( (DIMENSION < 0) || (DIMENSION >= TENSOR1->nDimension) ) \
THError("invalid dimension %d (expected to be 0 <= dim < %d)", DIMENSION, TENSOR1->nDimension); \
if( TENSOR1->nDimension != TENSOR2->nDimension ) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
if( (DIMENSION < 0) || (DIMENSION >= TENSOR1->_dim()) ) \
THError("invalid dimension %d (expected to be 0 <= dim < %d)", DIMENSION, TENSOR1->_dim()); \
if( TENSOR1->_dim() != TENSOR2->_dim() ) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->_dim()); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->_dim()); \
THError("inconsistent tensor size, expected %s %s and %s %s to have the same " \
"number of dimensions", #TENSOR1, T1buff.str, #TENSOR2, T2buff.str); \
} \
TH_UNUSED int shape_check_flag = 0; \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->_dim(); TH_TENSOR_DIM_APPLY_i++) \
{ \
if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \
continue; \
if(TENSOR1->size[TH_TENSOR_DIM_APPLY_i] != TENSOR2->size[TH_TENSOR_DIM_APPLY_i]) { \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->_dim()); \
THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->_dim()); \
THError("Expected %s %s and %s %s to have the same size in dimension %d", \
#TENSOR1, T1buff.str, #TENSOR2, T2buff.str, DIMENSION); \
} \
} \
\
TH_TENSOR_DIM_APPLY_counter = (int64_t*)THAlloc(sizeof(int64_t)*(TENSOR1->nDimension)); \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
TH_TENSOR_DIM_APPLY_counter = (int64_t*)THAlloc(sizeof(int64_t)*(TENSOR1->_dim())); \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->_dim(); TH_TENSOR_DIM_APPLY_i++) \
TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \
\
TENSOR1##_data = (TENSOR1)->storage->data<TYPE1>()+(TENSOR1)->storageOffset; \
@@ -187,14 +187,14 @@
{ \
CODE \
\
if(TENSOR1->nDimension == 1) \
if(TENSOR1->_dim() == 1) \
break; \
\
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->_dim(); TH_TENSOR_DIM_APPLY_i++) \
{ \
if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \
{ \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->_dim()-1) \
{ \
TH_TENSOR_DIM_APPLY_hasFinished = 1; \
break; \
@@ -208,7 +208,7 @@
\
if(TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] == TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) \
{ \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \
if(TH_TENSOR_DIM_APPLY_i == TENSOR1->_dim()-1) \
{ \
TH_TENSOR_DIM_APPLY_hasFinished = 1; \
break; \
@@ -274,33 +274,33 @@
int TH_TENSOR_DIM_APPLY_hasFinished = 0; \
int TH_TENSOR_DIM_APPLY_i; \
\
if( (DIMENSION < 0) || (DIMENSION >= TENSOR->nDimension) ) \
if( (DIMENSION < 0) || (DIMENSION >= TENSOR->_dim()) ) \
THError("invalid dimension"); \
\
TENSOR##_data = (TENSOR)->storage->data<TYPE>()+(TENSOR)->storageOffset; \
TENSOR##_stride = (TENSOR)->stride[DIMENSION]; \
TENSOR##_size = TENSOR->size[DIMENSION]; \
/* Counter stores the indices into the Tensor at any time */ \
TH_TENSOR_DIM_APPLY_counter = (int64_t*)THAlloc(sizeof(int64_t)*(TENSOR->nDimension)); \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR->nDimension; TH_TENSOR_DIM_APPLY_i++) \
TH_TENSOR_DIM_APPLY_counter = (int64_t*)THAlloc(sizeof(int64_t)*(TENSOR->_dim())); \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR->_dim(); TH_TENSOR_DIM_APPLY_i++) \
TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \
\
while(!TH_TENSOR_DIM_APPLY_hasFinished) \
{ \
CODE \
\
if(TENSOR->nDimension == 1) \
if(TENSOR->_dim() == 1) \
break; \
\
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR->nDimension; TH_TENSOR_DIM_APPLY_i++) \
for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR->_dim(); TH_TENSOR_DIM_APPLY_i++) \
{ \
/* Check if the index is equal to DIMENSION. We don't need to update the */ \
/* offset if this is the case, and can consider the next index. However, */ \
/* in the case that the DIMENSION is the last index in the Tensor, then */ \
/* we have parsed the entire tensor and can exit */ \
if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \
{ \
if(TH_TENSOR_DIM_APPLY_i == TENSOR->nDimension-1) \
if(TH_TENSOR_DIM_APPLY_i == TENSOR->_dim()-1) \
{ \
TH_TENSOR_DIM_APPLY_hasFinished = 1; \
break; \
@@ -315,7 +315,7 @@
if(TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] == TENSOR->size[TH_TENSOR_DIM_APPLY_i]) \
{ \
/* Handled TENSOR_size(dim) iterations for DIM_APPLY_i. If this is the last dimension, exit */ \
if(TH_TENSOR_DIM_APPLY_i == TENSOR->nDimension-1) \
if(TH_TENSOR_DIM_APPLY_i == TENSOR->_dim()-1) \
{ \
TH_TENSOR_DIM_APPLY_hasFinished = 1; \
break; \
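The dim-apply macros above advance an odometer-style counter over every dimension except DIMENSION; a simplified sketch of one advance step follows (illustrative only, with an invented function name — the real macros also move the per-tensor data pointers by the corresponding strides and handle multiple tensors at once):

#include <cstdint>

// Returns true when iteration over the tensor is finished.
static bool THDimApply_advance(int64_t *counter, const int64_t *size,
                               int64_t ndim, int64_t dim) {
  for (int64_t i = 0; i < ndim; i++) {
    if (i == dim) {
      if (i == ndim - 1) return true;  // the skipped dim was the last one: done
      continue;
    }
    counter[i]++;
    if (counter[i] == size[i]) {
      if (i == ndim - 1) return true;  // carried past the last dimension: done
      counter[i] = 0;                  // reset this dim and carry into the next
    } else {
      break;                           // no carry needed; this step is complete
    }
  }
  return false;
}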