Skip to content

Commit

Permalink
docker build fix
Browse files Browse the repository at this point in the history
  • Loading branch information
chrischoy committed Oct 26, 2022
1 parent 7867296 commit 405b39c
Show file tree
Hide file tree
Showing 4 changed files with 14 additions and 17 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ python setup.py install
```
git clone https://github.com/NVIDIA/MinkowskiEngine
cd MinkowskiEngine
docker build -t MinkowskiEngine docker
docker build -t minkowski_engine docker
```

Once the Docker image is built, check that it loads MinkowskiEngine correctly.
Expand Down
16 changes: 10 additions & 6 deletions docker/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,12 +1,17 @@
ARG PYTORCH="1.9.0"
ARG CUDA="11.1"
# To use previous versions, modify these variables
# ARG PYTORCH="1.9.0"
# ARG CUDA="11.1"

ARG PYTORCH="1.12.0"
ARG CUDA="11.3"
ARG CUDNN="8"

FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel

##############################################
# You should modify this to match your GPU compute capability
ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
# ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 6.2 7.0 7.2 7.5 8.0 8.6"
##############################################

ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
Expand All @@ -21,6 +26,5 @@ RUN rm -rf /var/lib/apt/lists/*

# For faster build, use more jobs.
ENV MAX_JOBS=4
RUN pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps \
--install-option="--force_cuda" \
--install-option="--blas=openblas"
RUN git clone --recursive "https://github.com/NVIDIA/MinkowskiEngine"
RUN cd MinkowskiEngine; python setup.py install --force_cuda --blas=openblas
3 changes: 3 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,9 @@ def _argparse(pattern, argv, is_flag=True, is_list=False):

CPU_ONLY = CPU_ONLY or not torch.cuda.is_available()
if FORCE_CUDA:
print("--------------------------------")
print("| FORCE_CUDA set |")
print("--------------------------------")
CPU_ONLY = False

# args with return value
Expand Down
10 changes: 0 additions & 10 deletions src/spmm.cu
Original file line number Diff line number Diff line change
Expand Up @@ -40,16 +40,6 @@ namespace minkowski {

#define BLOCK_SIZE 128

// Scatter kernel: for each of the `n` unique-row entries, copy the reduced
// per-row value into the output array at that row's index, i.e.
//   d_num_nonzero[unique_row_ptr[i]] = reduced_val_ptr[i]  for i in [0, n).
// `unique_row_ptr` holds destination row indices; `reduced_val_ptr` holds the
// corresponding reduced values (one per unique row).
// NOTE(review): CUDA_KERNEL_LOOP appears to be the project's Caffe-style
// grid-stride loop macro over [0, n) — confirm against the GPU utility header.
// NOTE(review): assumes unique_row_ptr values are distinct (no write races)
// and in-bounds for d_num_nonzero — verify at the call site.
template <typename Itype, typename Dtype>
__global__ void
unique_row2num_nonzero(const int n, Dtype *__restrict__ d_num_nonzero,
                       const Itype *__restrict__ unique_row_ptr,
                       const Dtype *__restrict__ reduced_val_ptr) {
  CUDA_KERNEL_LOOP(index, n) {
    d_num_nonzero[unique_row_ptr[index]] = reduced_val_ptr[index];
  }
}

template <typename Itype, typename Dtype>
__global__ void inverse_val(const int n, Dtype *__restrict__ d_sorted_val,
const Itype *__restrict__ sorted_row,
Expand Down

0 comments on commit 405b39c

Please sign in to comment.