From 405b39cb7e68c2ec1b4ac8c4b4ca9fa6fcfcb2fc Mon Sep 17 00:00:00 2001 From: chrischoy Date: Wed, 26 Oct 2022 12:11:10 -0700 Subject: [PATCH] docker build fix --- README.md | 2 +- docker/Dockerfile | 16 ++++++++++------ setup.py | 3 +++ src/spmm.cu | 10 ---------- 4 files changed, 14 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index e1359386..04858815 100644 --- a/README.md +++ b/README.md @@ -188,7 +188,7 @@ python setup.py install ``` git clone https://github.com/NVIDIA/MinkowskiEngine cd MinkowskiEngine -docker build -t MinkowskiEngine docker +docker build -t minkowski_engine docker ``` Once the docker is built, check it loads MinkowskiEngine correctly. diff --git a/docker/Dockerfile b/docker/Dockerfile index 6895dbc2..81a7884c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,12 +1,17 @@ -ARG PYTORCH="1.9.0" -ARG CUDA="11.1" +# To use previous versions, modify these variables +# ARG PYTORCH="1.9.0" +# ARG CUDA="11.1" + +ARG PYTORCH="1.12.0" +ARG CUDA="11.3" ARG CUDNN="8" FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel ############################################## # You should modify this to match your GPU compute capability -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" +# ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 6.2 7.0 7.2 7.5 8.0 8.6" ############################################## ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" @@ -21,6 +26,5 @@ RUN rm -rf /var/lib/apt/lists/* # For faster build, use more jobs. 
ENV MAX_JOBS=4 -RUN pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \ - --install-option="--force_cuda" \ - --install-option="--blas=openblas" \ No newline at end of file +RUN git clone --recursive "https://github.com/NVIDIA/MinkowskiEngine" +RUN cd MinkowskiEngine; python setup.py install --force_cuda --blas=openblas diff --git a/setup.py b/setup.py index a05670ff..452ea57a 100644 --- a/setup.py +++ b/setup.py @@ -132,6 +132,9 @@ def _argparse(pattern, argv, is_flag=True, is_list=False): CPU_ONLY = CPU_ONLY or not torch.cuda.is_available() if FORCE_CUDA: + print("--------------------------------") + print("| FORCE_CUDA set |") + print("--------------------------------") CPU_ONLY = False # args with return value diff --git a/src/spmm.cu b/src/spmm.cu index c5c5c79d..8891a566 100644 --- a/src/spmm.cu +++ b/src/spmm.cu @@ -40,16 +40,6 @@ namespace minkowski { #define BLOCK_SIZE 128 -template -__global__ void -unique_row2num_nonzero(const int n, Dtype *__restrict__ d_num_nonzero, - const Itype *__restrict__ unique_row_ptr, - const Dtype *__restrict__ reduced_val_ptr) { - CUDA_KERNEL_LOOP(index, n) { - d_num_nonzero[unique_row_ptr[index]] = reduced_val_ptr[index]; - } -} - template __global__ void inverse_val(const int n, Dtype *__restrict__ d_sorted_val, const Itype *__restrict__ sorted_row,