From 0ed3f766e9eb803e8f37f728af9494d756aec9a7 Mon Sep 17 00:00:00 2001
From: peter
Date: Thu, 10 Jan 2019 06:44:37 -0800
Subject: [PATCH] Unify flags and environmental variable when building LibTorch/PyTorch (#15868)

Summary:
Fixes #15858.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15868

Differential Revision: D13622354

Pulled By: soumith

fbshipit-source-id: bb8c49520ebf926c6194d42db75accba867018c7
---
 setup.py                         | 125 ++-------------------------
 tools/build_libtorch.py          |  34 ++------
 tools/setup_helpers/configure.py | 139 +++++++++++++++++++++++++++++++
 tools/setup_helpers/env.py       |   2 +
 4 files changed, 156 insertions(+), 144 deletions(-)
 create mode 100644 tools/setup_helpers/configure.py

diff --git a/setup.py b/setup.py
index fb743f486b2ba..0af57c342471f 100644
--- a/setup.py
+++ b/setup.py
@@ -145,39 +145,17 @@
 import glob
 import importlib
 
-from tools.setup_helpers.env import (check_env_flag, check_negative_env_flag,
-                                     hotpatch_build_env_vars)
-
-
-hotpatch_build_env_vars()
-
-from tools.setup_helpers.cuda import USE_CUDA, CUDA_HOME, CUDA_VERSION
-from tools.setup_helpers.build import (BUILD_BINARY, BUILD_TEST,
-                                       BUILD_CAFFE2_OPS, USE_LEVELDB,
-                                       USE_LMDB, USE_OPENCV, USE_TENSORRT,
-                                       USE_FFMPEG, USE_FBGEMM)
-from tools.setup_helpers.rocm import USE_ROCM, ROCM_HOME, ROCM_VERSION
-from tools.setup_helpers.cudnn import (USE_CUDNN, CUDNN_LIBRARY,
-                                       CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR)
-from tools.setup_helpers.miopen import (USE_MIOPEN, MIOPEN_LIBRARY,
-                                        MIOPEN_LIB_DIR, MIOPEN_INCLUDE_DIR)
-from tools.setup_helpers.nccl import USE_NCCL, USE_SYSTEM_NCCL, NCCL_LIB_DIR, \
-    NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB
-from tools.setup_helpers.nnpack import USE_NNPACK
-from tools.setup_helpers.qnnpack import USE_QNNPACK
-from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME
+# If you want to modify flags or environment variables that are set when
+# building torch, you should do it in tools/setup_helpers/configure.py.
+# Please don't add them here unless they are only used in PyTorch.
+from tools.setup_helpers.configure import *
 from tools.setup_helpers.generate_code import generate_code
 from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext
-from tools.setup_helpers.dist_check import USE_DISTRIBUTED, \
-    USE_GLOO_IBVERBS
 
 ################################################################################
 # Parameters parsed from environment
 ################################################################################
 
-DEBUG = check_env_flag('DEBUG')
-REL_WITH_DEB_INFO = check_env_flag('REL_WITH_DEB_INFO')
-
 VERBOSE_SCRIPT = True
 # see if the user passed a quiet flag to setup.py arguments and respect
 # that in our parts of the build
@@ -194,38 +172,6 @@ def report(*args):
     def report(*args):
         pass
 
-IS_WINDOWS = (platform.system() == 'Windows')
-IS_DARWIN = (platform.system() == 'Darwin')
-IS_LINUX = (platform.system() == 'Linux')
-IS_PPC = (platform.machine() == 'ppc64le')
-IS_ARM = (platform.machine() == 'aarch64')
-
-BUILD_PYTORCH = check_env_flag('BUILD_PYTORCH')
-# ppc64le and aarch64 do not support MKLDNN
-if IS_PPC or IS_ARM:
-    USE_MKLDNN = check_env_flag('USE_MKLDNN', 'OFF')
-else:
-    USE_MKLDNN = check_env_flag('USE_MKLDNN', 'ON')
-
-USE_CUDA_STATIC_LINK = check_env_flag('USE_CUDA_STATIC_LINK')
-RERUN_CMAKE = True
-
-NUM_JOBS = multiprocessing.cpu_count()
-max_jobs = os.getenv("MAX_JOBS")
-if max_jobs is not None:
-    NUM_JOBS = min(NUM_JOBS, int(max_jobs))
-
-ONNX_NAMESPACE = os.getenv("ONNX_NAMESPACE")
-if not ONNX_NAMESPACE:
-    ONNX_NAMESPACE = "onnx_torch"
-
-# Ninja
-try:
-    import ninja
-    USE_NINJA = True
-except ImportError:
-    USE_NINJA = False
-
 # Constant known variables used throughout this file
 cwd = os.path.dirname(os.path.abspath(__file__))
 lib_path = os.path.join(cwd, "torch", "lib")
@@ -324,8 +270,9 @@ def build_libs(libs):
         build_libs_cmd = ['tools\\build_pytorch_libs.bat']
     else:
         build_libs_cmd = ['bash', os.path.join('..', 'tools', 'build_pytorch_libs.sh')]
-    my_env = os.environ.copy()
-    my_env["PYTORCH_PYTHON"] = sys.executable
+
+    my_env, extra_flags = get_pytorch_env_with_flags()
+    build_libs_cmd.extend(extra_flags)
     my_env["PYTORCH_PYTHON_LIBRARY"] = cmake_python_library
     my_env["PYTORCH_PYTHON_INCLUDE_DIR"] = cmake_python_include_dir
     my_env["PYTORCH_BUILD_VERSION"] = version
@@ -335,64 +282,6 @@ def build_libs(libs):
         cmake_prefix_path = my_env["CMAKE_PREFIX_PATH"] + ";" + cmake_prefix_path
         my_env["CMAKE_PREFIX_PATH"] = cmake_prefix_path
 
-    my_env["NUM_JOBS"] = str(NUM_JOBS)
-    my_env["ONNX_NAMESPACE"] = ONNX_NAMESPACE
-    if not IS_WINDOWS:
-        if USE_NINJA:
-            my_env["CMAKE_GENERATOR"] = '-GNinja'
-            my_env["CMAKE_INSTALL"] = 'ninja install'
-        else:
-            my_env['CMAKE_GENERATOR'] = ''
-            my_env['CMAKE_INSTALL'] = 'make install'
-    if USE_SYSTEM_NCCL:
-        my_env["NCCL_ROOT_DIR"] = NCCL_ROOT_DIR
-        my_env["NCCL_INCLUDE_DIR"] = NCCL_INCLUDE_DIR
-        my_env["NCCL_SYSTEM_LIB"] = NCCL_SYSTEM_LIB
-    if USE_CUDA:
-        my_env["CUDA_BIN_PATH"] = CUDA_HOME
-        build_libs_cmd += ['--use-cuda']
-        if IS_WINDOWS:
-            my_env["NVTOOLEXT_HOME"] = NVTOOLEXT_HOME
-    if USE_CUDA_STATIC_LINK:
-        build_libs_cmd += ['--cuda-static-link']
-    if USE_FBGEMM:
-        build_libs_cmd += ['--use-fbgemm']
-    if USE_ROCM:
-        build_libs_cmd += ['--use-rocm']
-    if USE_NNPACK:
-        build_libs_cmd += ['--use-nnpack']
-    if USE_NUMPY:
-        my_env["NUMPY_INCLUDE_DIR"] = NUMPY_INCLUDE_DIR
-    if USE_CUDNN:
-        my_env["CUDNN_LIB_DIR"] = CUDNN_LIB_DIR
-        my_env["CUDNN_LIBRARY"] = CUDNN_LIBRARY
-        my_env["CUDNN_INCLUDE_DIR"] = CUDNN_INCLUDE_DIR
-    if USE_MIOPEN:
-        my_env["MIOPEN_LIB_DIR"] = MIOPEN_LIB_DIR
-        my_env["MIOPEN_LIBRARY"] = MIOPEN_LIBRARY
-        my_env["MIOPEN_INCLUDE_DIR"] = MIOPEN_INCLUDE_DIR
-    if USE_MKLDNN:
-        build_libs_cmd += ['--use-mkldnn']
-    if USE_QNNPACK:
-        build_libs_cmd += ['--use-qnnpack']
-    if USE_GLOO_IBVERBS:
-        build_libs_cmd += ['--use-gloo-ibverbs']
-    if not RERUN_CMAKE:
-        build_libs_cmd += ['--dont-rerun-cmake']
-
-    my_env["BUILD_TORCH"] = "ON"
-    my_env["BUILD_PYTHON"] = "ON"
-    my_env["BUILD_BINARY"] = "ON" if BUILD_BINARY else "OFF"
-    my_env["BUILD_TEST"] = "ON" if BUILD_TEST else "OFF"
-    my_env["BUILD_CAFFE2_OPS"] = "ON" if BUILD_CAFFE2_OPS else "OFF"
-    my_env["INSTALL_TEST"] = "ON" if BUILD_TEST else "OFF"
-    my_env["USE_LEVELDB"] = "ON" if USE_LEVELDB else "OFF"
-    my_env["USE_LMDB"] = "ON" if USE_LMDB else "OFF"
-    my_env["USE_OPENCV"] = "ON" if USE_OPENCV else "OFF"
-    my_env["USE_TENSORRT"] = "ON" if USE_TENSORRT else "OFF"
-    my_env["USE_FFMPEG"] = "ON" if USE_FFMPEG else "OFF"
-    my_env["USE_DISTRIBUTED"] = "ON" if USE_DISTRIBUTED else "OFF"
-    my_env["USE_SYSTEM_NCCL"] = "ON" if USE_SYSTEM_NCCL else "OFF"
     if VERBOSE_SCRIPT:
         my_env['VERBOSE_SCRIPT'] = '1'
     try:
diff --git a/tools/build_libtorch.py b/tools/build_libtorch.py
index bc0baf7f4f5e9..a03d7e92ff94d 100644
--- a/tools/build_libtorch.py
+++ b/tools/build_libtorch.py
@@ -4,45 +4,27 @@
 import subprocess
 import sys
 
-from setup_helpers.env import check_env_flag, hotpatch_build_env_vars
-
-
-hotpatch_build_env_vars()
-
-from setup_helpers.cuda import USE_CUDA
-from setup_helpers.dist_check import USE_DISTRIBUTED, USE_GLOO_IBVERBS, IS_LINUX
+# If you want to modify flags or environment variables that are set when
+# building torch, you should do it in tools/setup_helpers/configure.py.
+# Please don't add them here unless they are only used in LibTorch.
+from setup_helpers.configure import get_libtorch_env_with_flags
 
 if __name__ == '__main__':
     # Placeholder for future interface. For now just gives a nice -h.
     parser = argparse.ArgumentParser(description='Build libtorch')
     options = parser.parse_args()
 
-    os.environ['BUILD_TORCH'] = 'ON'
-    os.environ['BUILD_TEST'] = 'ON'
-    os.environ['ONNX_NAMESPACE'] = 'onnx_torch'
-    os.environ['PYTORCH_PYTHON'] = sys.executable
-
     tools_path = os.path.dirname(os.path.abspath(__file__))
     if sys.platform == 'win32':
         build_pytorch_libs = os.path.join(tools_path, 'build_pytorch_libs.bat')
     else:
         build_pytorch_libs = os.path.join(tools_path, 'build_pytorch_libs.sh')
 
-    command = [build_pytorch_libs, '--use-nnpack']
-    USE_MKLDNN = check_env_flag('USE_MKLDNN', 'ON')
-    if USE_MKLDNN:
-        command.append('--use-mkldnn')
-    if USE_CUDA:
-        command.append('--use-cuda')
-    if os.environ.get('USE_CUDA_STATIC_LINK', False):
-        command.append('--cuda-static-link')
-    if USE_DISTRIBUTED and IS_LINUX:
-        if USE_GLOO_IBVERBS:
-            command.append('--use-gloo-ibverbs')
-        command.append('--use-distributed')
-
+    command = [build_pytorch_libs]
+    my_env, extra_flags = get_libtorch_env_with_flags()
+    command.extend(extra_flags)
     command.append('caffe2')
 
     sys.stdout.flush()
     sys.stderr.flush()
-    subprocess.check_call(command, universal_newlines=True)
+    subprocess.check_call(command, universal_newlines=True, env=my_env)
diff --git a/tools/setup_helpers/configure.py b/tools/setup_helpers/configure.py
new file mode 100644
index 0000000000000..480000f04e5d7
--- /dev/null
+++ b/tools/setup_helpers/configure.py
@@ -0,0 +1,139 @@
+import os
+import sys
+import multiprocessing
+from .env import (IS_ARM, IS_DARWIN, IS_LINUX, IS_PPC, IS_WINDOWS,
+                  check_env_flag, check_negative_env_flag,
+                  hotpatch_build_env_vars)
+
+
+hotpatch_build_env_vars()
+
+from .build import (BUILD_BINARY, BUILD_CAFFE2_OPS, BUILD_TEST, USE_FBGEMM,
+                    USE_FFMPEG, USE_LEVELDB, USE_LMDB, USE_OPENCV,
+                    USE_TENSORRT)
+from .cuda import CUDA_HOME, CUDA_VERSION, USE_CUDA
+from .cudnn import CUDNN_INCLUDE_DIR, CUDNN_LIB_DIR, CUDNN_LIBRARY, USE_CUDNN
+from .dist_check import USE_DISTRIBUTED, USE_GLOO_IBVERBS
+from .miopen import (MIOPEN_INCLUDE_DIR, MIOPEN_LIB_DIR, MIOPEN_LIBRARY,
+                     USE_MIOPEN)
+from .nccl import (NCCL_INCLUDE_DIR, NCCL_LIB_DIR, NCCL_ROOT_DIR,
+                   NCCL_SYSTEM_LIB, USE_NCCL, USE_SYSTEM_NCCL)
+from .nnpack import USE_NNPACK
+from .nvtoolext import NVTOOLEXT_HOME
+from .qnnpack import USE_QNNPACK
+from .rocm import ROCM_HOME, ROCM_VERSION, USE_ROCM
+
+
+DEBUG = check_env_flag('DEBUG')
+REL_WITH_DEB_INFO = check_env_flag('REL_WITH_DEB_INFO')
+
+BUILD_PYTORCH = check_env_flag('BUILD_PYTORCH')
+# ppc64le and aarch64 do not support MKLDNN
+if IS_PPC or IS_ARM:
+    USE_MKLDNN = check_env_flag('USE_MKLDNN', 'OFF')
+else:
+    USE_MKLDNN = check_env_flag('USE_MKLDNN', 'ON')
+
+USE_CUDA_STATIC_LINK = check_env_flag('USE_CUDA_STATIC_LINK')
+RERUN_CMAKE = True
+
+NUM_JOBS = multiprocessing.cpu_count()
+max_jobs = os.getenv("MAX_JOBS")
+if max_jobs is not None:
+    NUM_JOBS = min(NUM_JOBS, int(max_jobs))
+
+ONNX_NAMESPACE = os.getenv("ONNX_NAMESPACE")
+if not ONNX_NAMESPACE:
+    ONNX_NAMESPACE = "onnx_torch"
+
+# Ninja
+try:
+    import ninja
+    USE_NINJA = True
+except ImportError:
+    USE_NINJA = False
+
+try:
+    import numpy as np
+    NUMPY_INCLUDE_DIR = np.get_include()
+    USE_NUMPY = True
+except ImportError:
+    USE_NUMPY = False
+
+
+def get_common_env_with_flags():
+    extra_flags = []
+    my_env = os.environ.copy()
+    my_env["PYTORCH_PYTHON"] = sys.executable
+    my_env["ONNX_NAMESPACE"] = ONNX_NAMESPACE
+    if USE_SYSTEM_NCCL:
+        my_env["NCCL_ROOT_DIR"] = NCCL_ROOT_DIR
+        my_env["NCCL_INCLUDE_DIR"] = NCCL_INCLUDE_DIR
+        my_env["NCCL_SYSTEM_LIB"] = NCCL_SYSTEM_LIB
+    if USE_CUDA:
+        my_env["CUDA_BIN_PATH"] = CUDA_HOME
+        extra_flags += ['--use-cuda']
+        if IS_WINDOWS:
+            my_env["NVTOOLEXT_HOME"] = NVTOOLEXT_HOME
+    if USE_CUDA_STATIC_LINK:
+        extra_flags += ['--cuda-static-link']
+    if USE_FBGEMM:
+        extra_flags += ['--use-fbgemm']
+    if USE_ROCM:
+        extra_flags += ['--use-rocm']
+    if USE_NNPACK:
+        extra_flags += ['--use-nnpack']
+    if USE_CUDNN:
+        my_env["CUDNN_LIB_DIR"] = CUDNN_LIB_DIR
+        my_env["CUDNN_LIBRARY"] = CUDNN_LIBRARY
+        my_env["CUDNN_INCLUDE_DIR"] = CUDNN_INCLUDE_DIR
+    if USE_MIOPEN:
+        my_env["MIOPEN_LIB_DIR"] = MIOPEN_LIB_DIR
+        my_env["MIOPEN_LIBRARY"] = MIOPEN_LIBRARY
+        my_env["MIOPEN_INCLUDE_DIR"] = MIOPEN_INCLUDE_DIR
+    if USE_MKLDNN:
+        extra_flags += ['--use-mkldnn']
+    if USE_QNNPACK:
+        extra_flags += ['--use-qnnpack']
+    if USE_GLOO_IBVERBS:
+        extra_flags += ['--use-gloo-ibverbs']
+    if not RERUN_CMAKE:
+        extra_flags += ['--dont-rerun-cmake']
+
+    my_env["BUILD_TORCH"] = "ON"
+    my_env["BUILD_TEST"] = "ON" if BUILD_TEST else "OFF"
+    my_env["BUILD_CAFFE2_OPS"] = "ON" if BUILD_CAFFE2_OPS else "OFF"
+    my_env["INSTALL_TEST"] = "ON" if BUILD_TEST else "OFF"
+    my_env["USE_LEVELDB"] = "ON" if USE_LEVELDB else "OFF"
+    my_env["USE_LMDB"] = "ON" if USE_LMDB else "OFF"
+    my_env["USE_OPENCV"] = "ON" if USE_OPENCV else "OFF"
+    my_env["USE_TENSORRT"] = "ON" if USE_TENSORRT else "OFF"
+    my_env["USE_FFMPEG"] = "ON" if USE_FFMPEG else "OFF"
+    my_env["USE_DISTRIBUTED"] = "ON" if USE_DISTRIBUTED else "OFF"
+    my_env["USE_SYSTEM_NCCL"] = "ON" if USE_SYSTEM_NCCL else "OFF"
+
+    return my_env, extra_flags
+
+
+def get_libtorch_env_with_flags():
+    my_env, extra_flags = get_common_env_with_flags()
+
+    return my_env, extra_flags
+
+
+def get_pytorch_env_with_flags():
+    my_env, extra_flags = get_common_env_with_flags()
+    my_env["BUILD_BINARY"] = "ON" if BUILD_BINARY else "OFF"
+    my_env["BUILD_PYTHON"] = "ON"
+    my_env["NUM_JOBS"] = str(NUM_JOBS)
+    if not IS_WINDOWS:
+        if USE_NINJA:
+            my_env["CMAKE_GENERATOR"] = '-GNinja'
+            my_env["CMAKE_INSTALL"] = 'ninja install'
+        else:
+            my_env['CMAKE_GENERATOR'] = ''
+            my_env['CMAKE_INSTALL'] = 'make install'
+    if USE_NUMPY:
+        my_env["NUMPY_INCLUDE_DIR"] = NUMPY_INCLUDE_DIR
+
+    return my_env, extra_flags
diff --git a/tools/setup_helpers/env.py b/tools/setup_helpers/env.py
index 8f88c4279db4b..fe40ebdd679b0 100644
--- a/tools/setup_helpers/env.py
+++ b/tools/setup_helpers/env.py
@@ -7,6 +7,8 @@
 IS_WINDOWS = (platform.system() == 'Windows')
 IS_DARWIN = (platform.system() == 'Darwin')
 IS_LINUX = (platform.system() == 'Linux')
+IS_PPC = (platform.machine() == 'ppc64le')
+IS_ARM = (platform.machine() == 'aarch64')
 IS_CONDA = 'conda' in sys.version or 'Continuum' in sys.version or any([x.startswith('CONDA') for x in os.environ])
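
Usage sketch (illustrative, not part of the patch): the new configure.py
helpers return a copy of os.environ extended with the derived build variables,
plus the list of '--use-*' flags for the build script. A wrapper driving the
LibTorch build by hand could consume them roughly as below; the hard-coded
Unix script path and the 'caffe2' target simply mirror what the updated
tools/build_libtorch.py does and are assumptions for this sketch.

    import os
    import subprocess
    import sys

    # Assumes the current working directory is the repository root, so the
    # package-style import (the one setup.py uses) resolves.
    from tools.setup_helpers.configure import get_libtorch_env_with_flags

    # my_env: os.environ copy with BUILD_TORCH, BUILD_TEST, ONNX_NAMESPACE,
    # cuDNN/MIOpen paths, etc.; extra_flags: e.g. ['--use-cuda', '--use-mkldnn']
    my_env, extra_flags = get_libtorch_env_with_flags()

    command = [os.path.join('tools', 'build_pytorch_libs.sh')] + extra_flags
    command.append('caffe2')
    sys.stdout.flush()
    sys.stderr.flush()
    subprocess.check_call(command, universal_newlines=True, env=my_env)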