
[bazel] make it possible to build the whole world, update CI (pytorch#78870)

Fixes pytorch#77509

This PR supersedes pytorch#77510.
It allows both `bazel query //...` and `bazel build --config=gpu //...` to work.

Concretely, the changes are:
1. Added a "GenerateAten" mnemonic -- this is a convenience so that anybody who uses [Remote Execution](https://bazel.build/docs/remote-execution) can add a

```
build:rbe --strategy=GenerateAten=sandboxed,local
```

line to their `~/.bazelrc` and run this action locally (it doesn't have hermetic dependencies at the moment).

2. Replaced a few `http_archive` repos with the corresponding existing submodules to avoid code drift.
3. Updated `pybind11_bazel` and added `python_version="3"` to `python_configure`. This prevents hard-to-debug errors caused by attempting to build with Python 2 on systems where it is the default python (Ubuntu 18.04, for example).
4. Added `unused_` repos; their purpose is to hide the unwanted submodules of submodules, which often have bazel targets in them.
5. Updated CI to build `//...` -- this is a great step toward preventing regressions not only in targets in the top-level BUILD.bazel file, but in other folders too.
6. Switched the default bazel build to use GPU support (see the command sketch after this list).
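
With these configs in place, typical local invocations would look roughly like the sketch below (not part of the diff; the GPU configuration is now the default and `cpu-only` is the opt-out):

```
# Default build: GPU support is enabled via .bazelrc (expects CUDA_PATH=/usr/local/cuda).
bazel build //...

# CPU-only build: separate config and output directory (platform_suffix=-cpu-only).
bazel build --config=cpu-only //...
```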
Pull Request resolved: pytorch#78870
Approved by: https://github.com/ezyang
vors authored and pytorchmergebot committed Jun 6, 2022
1 parent 97762d3 commit a0a23c6
Showing 6 changed files with 93 additions and 35 deletions.
23 changes: 14 additions & 9 deletions .bazelrc
@@ -13,15 +13,20 @@ build:no-tty --curses no
build:no-tty --progress_report_interval 10
build:no-tty --show_progress_rate_limit 10

# Configuration to build with GPU support
build:gpu --define=cuda=true
# Build with GPU support by default.
build --define=cuda=true
# rules_cuda configuration
build --@rules_cuda//cuda:enable_cuda
build --@rules_cuda//cuda:cuda_targets=sm_52
build --@rules_cuda//cuda:compiler=nvcc
build --repo_env=CUDA_PATH=/usr/local/cuda

# Configuration to build without GPU support
build:cpu-only --define=cuda=false
# define a separate build folder for faster switching between configs
build:gpu --platform_suffix=-gpu
build:cpu-only --platform_suffix=-cpu-only
# See the note on the config-less build for details about why we are
# doing this. We must also do it for the "-gpu" platform suffix.
build --copt=-isystem --copt=bazel-out/k8-fastbuild-gpu/bin
# doing this. We must also do it for the "-cpu-only" platform suffix.
build --copt=-isystem --copt=bazel-out/k8-fastbuild-cpu-only/bin
# rules_cuda configuration
build:gpu --@rules_cuda//cuda:enable_cuda
build:gpu --@rules_cuda//cuda:cuda_targets=sm_52
build:gpu --@rules_cuda//cuda:compiler=nvcc
build:gpu --repo_env=CUDA_PATH=/usr/local/cuda
build:cpu-only --@rules_cuda//cuda:enable_cuda=False
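
A hypothetical `~/.bazelrc` combining these pieces might look like the sketch below; the `rbe` config name comes from the commit message, while defaulting a personal machine to `cpu-only` is purely illustrative:

```
# Run the (non-hermetic) ATen codegen outside of remote execution.
build:rbe --strategy=GenerateAten=sandboxed,local
# Opt out of the new GPU default on machines without CUDA installed.
build --config=cpu-only
```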
8 changes: 4 additions & 4 deletions .jenkins/pytorch/build.sh
@@ -203,10 +203,10 @@ if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then

get_bazel

# first build torch, the Python module, and tests for CPU-only
tools/bazel build --config=no-tty :torch :_C.so :all_tests
# then build everything with CUDA
tools/bazel build --config=no-tty --config=gpu :all
tools/bazel build --config=no-tty //...
# Build torch, the Python module, and tests for CPU-only
tools/bazel build --config=no-tty --config=cpu-only :torch :_C.so :all_tests

else
# check that setup.py would fail with bad arguments
echo "The next three invocations are expected to fail with invalid command error messages."
4 changes: 2 additions & 2 deletions .jenkins/pytorch/test.sh
@@ -491,10 +491,10 @@ test_bazel() {
# Test //c10/... without Google flags and logging libraries. The
# :all_tests target in the subsequent Bazel invocation tests
# //c10/... with the Google libraries.
tools/bazel test --test_timeout=480 --test_output=all --test_tag_filters=-gpu-required --test_filter=-*CUDA \
tools/bazel test --config=cpu-only --test_timeout=480 --test_output=all --test_tag_filters=-gpu-required --test_filter=-*CUDA \
--no//c10:use_gflags --no//c10:use_glog //c10/...

tools/bazel test --test_timeout=480 --test_output=all --test_tag_filters=-gpu-required --test_filter=-*CUDA :all_tests
tools/bazel test --config=cpu-only --test_timeout=480 --test_output=all --test_tag_filters=-gpu-required --test_filter=-*CUDA :all_tests
}

test_benchmarks() {
90 changes: 71 additions & 19 deletions WORKSPACE
@@ -25,26 +25,11 @@ http_archive(
],
)

http_archive(
name = "com_google_googletest",
strip_prefix = "googletest-cd6b9ae3243985d4dc725abd513a874ab4161f3e",
urls = [
"https://github.com/google/googletest/archive/cd6b9ae3243985d4dc725abd513a874ab4161f3e.tar.gz",
],
)

http_archive(
name = "google_benchmark",
sha256 = "6132883bc8c9b0df5375b16ab520fac1a85dc9e4cf5be59480448ece74b278d4",
strip_prefix = "benchmark-1.6.1/",
urls = ["https://github.com/google/benchmark/archive/refs/tags/v1.6.1.tar.gz"],
)

http_archive(
name = "pybind11_bazel",
strip_prefix = "pybind11_bazel-7f397b5d2cc2434bbd651e096548f7b40c128044",
urls = ["https://github.com/pybind/pybind11_bazel/archive/7f397b5d2cc2434bbd651e096548f7b40c128044.zip"],
sha256 = "e4a9536f49d4a88e3c5a09954de49c4a18d6b1632c457a62d6ec4878c27f1b5b",
strip_prefix = "pybind11_bazel-992381ced716ae12122360b0fbadbc3dda436dbf",
urls = ["https://github.com/pybind/pybind11_bazel/archive/992381ced716ae12122360b0fbadbc3dda436dbf.zip"],
sha256 = "3dc6435bd41c058453efe102995ef084d0a86b0176fd6a67a6b7100a2e9a940e",
)

new_local_repository(
@@ -183,7 +168,7 @@ http_archive(
)

load("@pybind11_bazel//:python_configure.bzl", "python_configure")
python_configure(name = "local_config_python")
python_configure(name = "local_config_python", python_version="3")

load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

@@ -209,3 +194,70 @@ local_repository(
name = "com_github_google_flatbuffers",
path = "third_party/flatbuffers",
)

local_repository(
name = "google_benchmark",
path = "third_party/benchmark",
)

local_repository(
name = "com_google_googletest",
path = "third_party/googletest",
)

local_repository(
name = "pthreadpool",
path = "third_party/pthreadpool",
repo_mapping = {"@com_google_benchmark" : "@google_benchmark"}
)

local_repository(
name = "FXdiv",
path = "third_party/FXdiv",
repo_mapping = {"@com_google_benchmark" : "@google_benchmark"}
)

local_repository(
name = "XNNPACK",
path = "third_party/XNNPACK",
repo_mapping = {"@com_google_benchmark" : "@google_benchmark"}
)

local_repository(
name = "gemmlowp",
path = "third_party/gemmlowp/gemmlowp",
)

### Unused repos start

# `unused` repos are defined to hide bazel files from submodules of submodules.
# This allows us to run `bazel build //...` and not worry about the submodules madness.
# Otherwise everything traverses recursively and a lot of submodules of submodules have
# their own bazel build files.

local_repository(
name = "unused_tensorpipe_googletest",
path = "third_party/tensorpipe/third_party/googletest",
)

local_repository(
name = "unused_fbgemm",
path = "third_party/fbgemm",
)

local_repository(
name = "unused_kineto_googletest",
path = "third_party/kineto/libkineto/third_party/googletest",
)

local_repository(
name = "unused_onnx_benchmark",
path = "third_party/onnx/third_party/benchmark",
)

local_repository(
name = "unused_onnx_tensorrt_benchmark",
path = "third_party/onnx-tensorrt/third_party/onnx/third_party/benchmark",
)

### Unused repos end
1 change: 1 addition & 0 deletions aten.bzl
@@ -78,6 +78,7 @@ def generate_aten_impl(ctx):
tools = tool_inputs,
input_manifests = tool_inputs_manifest,
use_default_shell_env = True,
mnemonic = "GenerateAten",
)
return [DefaultInfo(files = depset(outputs))]

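For readers unfamiliar with Starlark mnemonics, a minimal sketch of an action-producing rule is below; the rule, attributes, and `//tools:generator` label are hypothetical, and only the `mnemonic = "GenerateAten"` line mirrors the actual change -- it is the name that a `--strategy=GenerateAten=...` flag matches:

```
# Hypothetical rule sketch -- not PyTorch's actual generate_aten_impl.
def _generate_sources_impl(ctx):
    out = ctx.actions.declare_file(ctx.label.name + ".generated.cpp")
    ctx.actions.run(
        outputs = [out],
        inputs = ctx.files.srcs,
        executable = ctx.executable._generator,
        arguments = ["--output", out.path] + [f.path for f in ctx.files.srcs],
        # The mnemonic is the name that strategy flags and execution logs refer to.
        mnemonic = "GenerateAten",
        use_default_shell_env = True,
    )
    return [DefaultInfo(files = depset([out]))]

generate_sources = rule(
    implementation = _generate_sources_impl,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        "_generator": attr.label(
            default = "//tools:generator",  # hypothetical codegen tool
            executable = True,
            cfg = "exec",
        ),
    },
)
```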
2 changes: 1 addition & 1 deletion third_party/benchmark
Submodule benchmark updated 102 files
