diff --git a/.circleci/cimodel/data/simple/binary_smoketest.py b/.circleci/cimodel/data/simple/binary_smoketest.py index 6d1d421d029cc..a97195024e909 100644 --- a/.circleci/cimodel/data/simple/binary_smoketest.py +++ b/.circleci/cimodel/data/simple/binary_smoketest.py @@ -18,7 +18,6 @@ - binary_linux_libtorch_3_6m_cu90_devtoolset7_static-without-deps_build """ -import cimodel.lib.miniutils as miniutils import cimodel.data.simple.util.branch_filters @@ -65,29 +64,6 @@ def gen_tree(self): WORKFLOW_DATA = [ - SmoketestJob( - "binary_linux_build", - ["manywheel", "3.7m", "cu102", "devtoolset7"], - "pytorch/manylinux-cuda102", - "binary_linux_manywheel_3_7m_cu102_devtoolset7_build", - is_master_only=True, - ), - SmoketestJob( - "binary_linux_build", - ["libtorch", "3.7m", "cpu", "devtoolset7"], - "pytorch/manylinux-cuda102", - "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build", - is_master_only=True, - has_libtorch_variant=True, - ), - SmoketestJob( - "binary_linux_build", - ["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"], - "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest", - "binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build", - is_master_only=False, - has_libtorch_variant=True, - ), SmoketestJob( "binary_mac_build", ["wheel", "3.7", "cpu"], @@ -104,88 +80,9 @@ def gen_tree(self): "binary_macos_libtorch_3_7_cpu_build", is_master_only=True, ), - SmoketestJob( - "binary_windows_build", - ["libtorch", "3.7", "cpu", "debug"], - None, - "binary_windows_libtorch_3_7_cpu_debug_build", - is_master_only=True, - ), - SmoketestJob( - "binary_windows_build", - ["libtorch", "3.7", "cpu", "release"], - None, - "binary_windows_libtorch_3_7_cpu_release_build", - is_master_only=True, - ), - SmoketestJob( - "binary_windows_build", - ["wheel", "3.7", "cu113"], - None, - "binary_windows_wheel_3_7_cu113_build", - is_master_only=True, - ), - SmoketestJob( - "binary_windows_test", - ["libtorch", "3.7", "cpu", "debug"], - None, - "binary_windows_libtorch_3_7_cpu_debug_test", - is_master_only=True, - requires=["binary_windows_libtorch_3_7_cpu_debug_build"], - ), - SmoketestJob( - "binary_windows_test", - ["libtorch", "3.7", "cpu", "release"], - None, - "binary_windows_libtorch_3_7_cpu_release_test", - is_master_only=False, - requires=["binary_windows_libtorch_3_7_cpu_release_build"], - ), - SmoketestJob( - "binary_windows_test", - ["wheel", "3.7", "cu113"], - None, - "binary_windows_wheel_3_7_cu113_test", - is_master_only=True, - requires=["binary_windows_wheel_3_7_cu113_build"], - extra_props={ - "executor": "windows-with-nvidia-gpu", - }, - ), - - SmoketestJob( - "binary_linux_test", - ["manywheel", "3.7m", "cu102", "devtoolset7"], - "pytorch/manylinux-cuda102", - "binary_linux_manywheel_3_7m_cu102_devtoolset7_test", - is_master_only=True, - requires=["binary_linux_manywheel_3_7m_cu102_devtoolset7_build"], - extra_props={ - "resource_class": "gpu.nvidia.small", - "use_cuda_docker_runtime": miniutils.quote((str(1))), - }, - ), - SmoketestJob( - "binary_linux_test", - ["libtorch", "3.7m", "cpu", "devtoolset7"], - "pytorch/manylinux-cuda102", - "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test", - is_master_only=True, - requires=["binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build"], - has_libtorch_variant=True, - ), - SmoketestJob( - "binary_linux_test", - ["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"], - "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest", - "binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test", - 
is_master_only=True, - requires=["binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build"], - has_libtorch_variant=True, - ), ] diff --git a/.circleci/config.yml b/.circleci/config.yml index 57f2fba481373..4c2ea8bb0b509 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2413,32 +2413,6 @@ workflows: - pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build - pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build - pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build - - binary_linux_build: - build_environment: manywheel 3.7m cu102 devtoolset7 - docker_image: pytorch/manylinux-cuda102 - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - name: binary_linux_manywheel_3_7m_cu102_devtoolset7_build - - binary_linux_build: - build_environment: libtorch 3.7m cpu devtoolset7 - docker_image: pytorch/manylinux-cuda102 - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build - - binary_linux_build: - build_environment: libtorch 3.7m cpu gcc5.4_cxx11-abi - docker_image: pytorch/pytorch-binary-docker-image-ubuntu16.04:latest - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build - binary_mac_build: build_environment: wheel 3.7 cpu filters: @@ -2457,101 +2431,6 @@ workflows: - /ci-all\/.*/ - /release\/.*/ name: binary_macos_libtorch_3_7_cpu_build - - binary_windows_build: - build_environment: libtorch 3.7 cpu debug - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - name: binary_windows_libtorch_3_7_cpu_debug_build - - binary_windows_build: - build_environment: libtorch 3.7 cpu release - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - name: binary_windows_libtorch_3_7_cpu_release_build - - binary_windows_build: - build_environment: wheel 3.7 cu113 - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - name: binary_windows_wheel_3_7_cu113_build - - binary_windows_test: - build_environment: libtorch 3.7 cpu debug - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - name: binary_windows_libtorch_3_7_cpu_debug_test - requires: - - binary_windows_libtorch_3_7_cpu_debug_build - - binary_windows_test: - build_environment: libtorch 3.7 cpu release - name: binary_windows_libtorch_3_7_cpu_release_test - requires: - - binary_windows_libtorch_3_7_cpu_release_build - - binary_windows_test: - build_environment: wheel 3.7 cu113 - executor: windows-with-nvidia-gpu - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - name: binary_windows_wheel_3_7_cu113_test - requires: - - binary_windows_wheel_3_7_cu113_build - - binary_linux_test: - build_environment: manywheel 3.7m cu102 devtoolset7 - docker_image: pytorch/manylinux-cuda102 - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - name: binary_linux_manywheel_3_7m_cu102_devtoolset7_test - requires: - - binary_linux_manywheel_3_7m_cu102_devtoolset7_build - resource_class: gpu.nvidia.small - use_cuda_docker_runtime: "1" - - binary_linux_test: - build_environment: libtorch 3.7m cpu devtoolset7 - docker_image: pytorch/manylinux-cuda102 - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test - requires: - - 
binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build - - binary_linux_test: - build_environment: libtorch 3.7m cpu gcc5.4_cxx11-abi - docker_image: pytorch/pytorch-binary-docker-image-ubuntu16.04:latest - filters: - branches: - only: - - master - - /ci-all\/.*/ - - /release\/.*/ - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test - requires: - - binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build - binary_ios_build: build_environment: libtorch-ios-12.5.1-nightly-x86_64-build context: org-member @@ -2849,68 +2728,12 @@ workflows: - pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build - pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build - pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build - - binary_linux_build: - build_environment: manywheel 3.7m cu102 devtoolset7 - docker_image: pytorch/manylinux-cuda102 - name: binary_linux_manywheel_3_7m_cu102_devtoolset7_build - - binary_linux_build: - build_environment: libtorch 3.7m cpu devtoolset7 - docker_image: pytorch/manylinux-cuda102 - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build - - binary_linux_build: - build_environment: libtorch 3.7m cpu gcc5.4_cxx11-abi - docker_image: pytorch/pytorch-binary-docker-image-ubuntu16.04:latest - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build - binary_mac_build: build_environment: wheel 3.7 cpu name: binary_macos_wheel_3_7_cpu_build - binary_mac_build: build_environment: libtorch 3.7 cpu name: binary_macos_libtorch_3_7_cpu_build - - binary_windows_build: - build_environment: libtorch 3.7 cpu debug - name: binary_windows_libtorch_3_7_cpu_debug_build - - binary_windows_build: - build_environment: libtorch 3.7 cpu release - name: binary_windows_libtorch_3_7_cpu_release_build - - binary_windows_build: - build_environment: wheel 3.7 cu113 - name: binary_windows_wheel_3_7_cu113_build - - binary_windows_test: - build_environment: libtorch 3.7 cpu debug - name: binary_windows_libtorch_3_7_cpu_debug_test - requires: - - binary_windows_libtorch_3_7_cpu_debug_build - - binary_windows_test: - build_environment: wheel 3.7 cu113 - executor: windows-with-nvidia-gpu - name: binary_windows_wheel_3_7_cu113_test - requires: - - binary_windows_wheel_3_7_cu113_build - - binary_linux_test: - build_environment: manywheel 3.7m cu102 devtoolset7 - docker_image: pytorch/manylinux-cuda102 - name: binary_linux_manywheel_3_7m_cu102_devtoolset7_test - requires: - - binary_linux_manywheel_3_7m_cu102_devtoolset7_build - resource_class: gpu.nvidia.small - use_cuda_docker_runtime: "1" - - binary_linux_test: - build_environment: libtorch 3.7m cpu devtoolset7 - docker_image: pytorch/manylinux-cuda102 - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test - requires: - - binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build - - binary_linux_test: - build_environment: libtorch 3.7m cpu gcc5.4_cxx11-abi - docker_image: pytorch/pytorch-binary-docker-image-ubuntu16.04:latest - libtorch_variant: shared-with-deps - name: binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test - requires: - - binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build - docker_build_job: name: "docker-pytorch-linux-xenial-py3-clang5-android-ndk-r19c" image_name: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c" diff --git 
a/.github/generated-ciflow-ruleset.json b/.github/generated-ciflow-ruleset.json index 3625512b7a804..c7ae3dfaeff1c 100644 --- a/.github/generated-ciflow-ruleset.json +++ b/.github/generated-ciflow-ruleset.json @@ -12,6 +12,9 @@ "ios-12-5-1-x86-64-coreml", "libtorch-linux-xenial-cuda10.2-py3.7-gcc7", "libtorch-linux-xenial-cuda11.3-py3.7-gcc7", + "linux-binary-libtorch-cxx11-abi", + "linux-binary-libtorch-pre-cxx11", + "linux-binary-manywheel", "linux-bionic-cuda10.2-py3.9-gcc7", "linux-bionic-py3.7-clang9", "linux-bionic-rocm4.5-py3.7", @@ -43,7 +46,10 @@ "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single-full-jit", "pytorch-xla-linux-bionic-py3.7-clang8", "win-vs2019-cpu-py3", - "win-vs2019-cuda11.3-py3" + "win-vs2019-cuda11.3-py3", + "windows-binary-libtorch-debug", + "windows-binary-libtorch-release", + "windows-binary-wheel" ], "ciflow/android": [ "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-build", @@ -255,6 +261,9 @@ "ios-12-5-1-x86-64-coreml", "libtorch-linux-xenial-cuda10.2-py3.7-gcc7", "libtorch-linux-xenial-cuda11.3-py3.7-gcc7", + "linux-binary-libtorch-cxx11-abi", + "linux-binary-libtorch-pre-cxx11", + "linux-binary-manywheel", "linux-bionic-cuda10.2-py3.9-gcc7", "linux-bionic-py3.7-clang9", "linux-bionic-rocm4.5-py3.7", @@ -280,7 +289,10 @@ "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single-full-jit", "pytorch-xla-linux-bionic-py3.7-clang8", "win-vs2019-cpu-py3", - "win-vs2019-cuda11.3-py3" + "win-vs2019-cuda11.3-py3", + "windows-binary-libtorch-debug", + "windows-binary-libtorch-release", + "windows-binary-wheel" ], "ciflow/vulkan": [ "linux-vulkan-bionic-py3.7-clang9" diff --git a/.github/scripts/generate_binary_build_matrix.py b/.github/scripts/generate_binary_build_matrix.py index 90e509d87c276..84a6769a0fb82 100644 --- a/.github/scripts/generate_binary_build_matrix.py +++ b/.github/scripts/generate_binary_build_matrix.py @@ -10,7 +10,7 @@ * Latest ROCM """ -from typing import Dict, List, Tuple +from typing import Dict, List, Tuple, Optional CUDA_ARCHES = ["10.2", "11.3", "11.5"] @@ -112,20 +112,26 @@ def generate_conda_matrix(os: str) -> List[Dict[str, str]]: return ret -def generate_libtorch_matrix(os: str, abi_version: str) -> List[Dict[str, str]]: - libtorch_variants = [ - "shared-with-deps", - "shared-without-deps", - "static-with-deps", - "static-without-deps", - ] +def generate_libtorch_matrix(os: str, abi_version: str, + arches: Optional[List[str]] = None, + libtorch_variants: Optional[List[str]] = None) -> List[Dict[str, str]]: + if arches is None: + arches = ["cpu"] + if os == "linux": + arches += CUDA_ARCHES + elif os == "windows": + # We don't build CUDA 10.2 for window see https://github.com/pytorch/pytorch/issues/65648 + arches += list_without(CUDA_ARCHES, ["10.2"]) + + if libtorch_variants is None: + libtorch_variants = [ + "shared-with-deps", + "shared-without-deps", + "static-with-deps", + "static-without-deps", + ] + ret: List[Dict[str, str]] = [] - arches = ["cpu"] - if os == "linux": - arches += CUDA_ARCHES - elif os == "windows": - # We don't build CUDA 10.2 for window see https://github.com/pytorch/pytorch/issues/65648 - arches += list_without(CUDA_ARCHES, ["10.2"]) for arch_version in arches: for libtorch_variant in libtorch_variants: # We don't currently build libtorch for rocm @@ -156,19 +162,28 @@ def generate_libtorch_matrix(os: str, abi_version: str) -> List[Dict[str, str]]: return ret -def generate_wheels_matrix(os: str) -> List[Dict[str, str]]: - arches = ["cpu"] +def 
generate_wheels_matrix(os: str, + arches: Optional[List[str]] = None, + python_versions: Optional[List[str]] = None) -> List[Dict[str, str]]: package_type = "wheel" - python_versions = FULL_PYTHON_VERSIONS - if os == "linux": - arches += CUDA_ARCHES + ROCM_ARCHES - # NOTE: We only build manywheel packages for linux - package_type = "manywheel" - elif os == "windows": - # We don't build CUDA 10.2 for window see https://github.com/pytorch/pytorch/issues/65648 - arches += list_without(CUDA_ARCHES, ["10.2"]) - elif os == "macos-arm64": - python_versions = list_without(python_versions, ["3.7"]) + + if python_versions is None: + # Define default python version + python_versions = FULL_PYTHON_VERSIONS + if os == "macos-arm64": + python_versions = list_without(python_versions, ["3.7"]) + + if arches is None: + # Define default compute archivectures + arches = ["cpu"] + if os == "linux": + arches += CUDA_ARCHES + ROCM_ARCHES + # NOTE: We only build manywheel packages for linux + package_type = "manywheel" + elif os == "windows": + # We don't build CUDA 10.2 for window see https://github.com/pytorch/pytorch/issues/65648 + arches += list_without(CUDA_ARCHES, ["10.2"]) + ret: List[Dict[str, str]] = [] for python_version in python_versions: for arch_version in arches: diff --git a/.github/scripts/generate_ci_workflows.py b/.github/scripts/generate_ci_workflows.py index dab955d3596e5..bf2d3b9289521 100755 --- a/.github/scripts/generate_ci_workflows.py +++ b/.github/scripts/generate_ci_workflows.py @@ -358,6 +358,7 @@ class BinaryBuildWorkflow: abi_version: str = '' ciflow_config: CIFlowConfig = field(default_factory=CIFlowConfig) is_scheduled: str = '' + branches: str = 'nightly' # Mainly for macos cross_compile_arm64: bool = False xcode_version: str = '' @@ -369,7 +370,7 @@ def __post_init__(self) -> None: self.build_environment = f"{self.os}-binary-{self.package_type}" def generate_workflow_file(self, workflow_template: jinja2.Template) -> None: - output_file_path = GITHUB_DIR / f"workflows/generated-{self.build_environment}.yml" + output_file_path = GITHUB_DIR / f"workflows/generated-{self.build_environment}-{self.branches}.yml" with open(output_file_path, "w") as output_file: GENERATED = "generated" # Note that please keep the variable GENERATED otherwise phabricator will hide the whole file output_file.writelines([f"# @{GENERATED} DO NOT EDIT MANUALLY\n"]) @@ -961,6 +962,40 @@ class OperatingSystem: ), ] +LINUX_BINARY_SMOKE_WORKFLOWS = [ + BinaryBuildWorkflow( + os=OperatingSystem.LINUX, + package_type="manywheel", + build_configs=generate_binary_build_matrix.generate_wheels_matrix( + OperatingSystem.LINUX, + arches=["10.2"], + python_versions=["3.7"]), + branches="master", + ), + BinaryBuildWorkflow( + os=OperatingSystem.LINUX, + package_type="libtorch", + abi_version=generate_binary_build_matrix.CXX11_ABI, + build_configs=generate_binary_build_matrix.generate_libtorch_matrix( + OperatingSystem.LINUX, generate_binary_build_matrix.CXX11_ABI, + arches=["cpu"], + libtorch_variants=["shared-with-deps"], + ), + branches="master", + ), + BinaryBuildWorkflow( + os=OperatingSystem.LINUX, + package_type="libtorch", + abi_version=generate_binary_build_matrix.PRE_CXX11_ABI, + build_configs=generate_binary_build_matrix.generate_libtorch_matrix( + OperatingSystem.LINUX, generate_binary_build_matrix.CXX11_ABI, + arches=["cpu"], + libtorch_variants=["shared-with-deps"], + ), + branches="master", + ), +] + WINDOWS_BINARY_BUILD_WORKFLOWS = [ BinaryBuildWorkflow( os=OperatingSystem.WINDOWS, @@ -1007,6 +1042,39 @@ 
class OperatingSystem: ), ), ] +WINDOWS_BINARY_SMOKE_WORKFLOWS = [ + BinaryBuildWorkflow( + os=OperatingSystem.WINDOWS, + package_type="wheel", + build_configs=generate_binary_build_matrix.generate_wheels_matrix( + OperatingSystem.WINDOWS, + arches=["11.3"], + python_versions=["3.7"]), + branches="master", + ), + BinaryBuildWorkflow( + os=OperatingSystem.WINDOWS, + package_type="libtorch", + abi_version=generate_binary_build_matrix.RELEASE, + build_configs=generate_binary_build_matrix.generate_libtorch_matrix( + OperatingSystem.WINDOWS, generate_binary_build_matrix.RELEASE, + arches=["cpu"], + libtorch_variants=["shared-with-deps"], + ), + branches="master", + ), + BinaryBuildWorkflow( + os=OperatingSystem.WINDOWS, + package_type="libtorch", + abi_version=generate_binary_build_matrix.DEBUG, + build_configs=generate_binary_build_matrix.generate_libtorch_matrix( + OperatingSystem.WINDOWS, generate_binary_build_matrix.DEBUG, + arches=["cpu"], + libtorch_variants=["shared-with-deps"], + ), + branches="master", + ), +] MACOS_BINARY_BUILD_WORKFLOWS = [ BinaryBuildWorkflow( @@ -1090,7 +1158,9 @@ def main() -> None: (jinja_env.get_template("android_ci_full_workflow.yml.j2"), ANDROID_WORKFLOWS), (jinja_env.get_template("android_ci_workflow.yml.j2"), ANDROID_SHORT_WORKFLOWS), (jinja_env.get_template("linux_binary_build_workflow.yml.j2"), LINUX_BINARY_BUILD_WORFKLOWS), + (jinja_env.get_template("linux_binary_build_workflow.yml.j2"), LINUX_BINARY_SMOKE_WORKFLOWS), (jinja_env.get_template("windows_binary_build_workflow.yml.j2"), WINDOWS_BINARY_BUILD_WORKFLOWS), + (jinja_env.get_template("windows_binary_build_workflow.yml.j2"), WINDOWS_BINARY_SMOKE_WORKFLOWS), (jinja_env.get_template("macos_binary_build_workflow.yml.j2"), MACOS_BINARY_BUILD_WORKFLOWS), ] # Delete the existing generated files first, this should align with .gitattributes file description. 
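For reference (illustration only, not part of the patch): the new optional arguments on generate_wheels_matrix and generate_libtorch_matrix are what let the smoke workflows above request a single-configuration matrix, while the existing nightly definitions can keep the full defaults. A minimal sketch of the narrowed calls, assuming a pytorch/pytorch checkout with .github/scripts on sys.path (as generate_ci_workflows.py itself has):

    # Hypothetical usage sketch -- mirrors the LINUX_BINARY_SMOKE_WORKFLOWS entries above.
    import generate_binary_build_matrix as m

    # Smoke wheel matrix: one Python version, one CUDA arch.
    smoke_wheels = m.generate_wheels_matrix(
        "linux", arches=["10.2"], python_versions=["3.7"])

    # Smoke libtorch matrix: cpu only, shared-with-deps only.
    smoke_libtorch = m.generate_libtorch_matrix(
        "linux", m.CXX11_ABI,
        arches=["cpu"], libtorch_variants=["shared-with-deps"])

    # Leaving the new arguments unset falls back to the defaults filled in
    # when they are None, so the nightly matrices are unchanged by this refactor.
    nightly_wheels = m.generate_wheels_matrix("linux")

    print(len(smoke_wheels), len(smoke_libtorch), len(nightly_wheels))

Each call returns a List[Dict[str, str]] of build configurations, which BinaryBuildWorkflow consumes as build_configs when rendering the workflow templates.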
diff --git a/.github/templates/linux_binary_build_workflow.yml.j2 b/.github/templates/linux_binary_build_workflow.yml.j2 index 97f2795f4f640..aab32fafd4a2f 100644 --- a/.github/templates/linux_binary_build_workflow.yml.j2 +++ b/.github/templates/linux_binary_build_workflow.yml.j2 @@ -9,13 +9,17 @@ name: !{{ build_environment }} on: push: + {%- if branches == "nightly" %} # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build + {%- endif %} branches: - - nightly + - !{{ branches }} + {%- if branches == "nightly" %} tags: # NOTE: Binary build pipelines should only get triggered on release candidate builds # Release candidate tags look like: v1.11.0-rc1 - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+ + {%- endif %} {%- for label in ciflow_config.labels | sort %} {%- if label != "ciflow/default" %} - '!{{ label }}/*' @@ -172,5 +176,7 @@ jobs: docker exec -t -w "${PYTORCH_ROOT}" -e OUTPUT_SCRIPT="/run.sh" "${container_name}" bash -c "bash .circleci/scripts/binary_linux_test.sh" docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash -x /run.sh" !{{ common.teardown_ec2_linux("pytorch/") }} + {%- if branches == "nightly" %} !{{ upload.upload_binaries(config) }} + {%- endif %} {%- endfor %} diff --git a/.github/templates/windows_binary_build_workflow.yml.j2 b/.github/templates/windows_binary_build_workflow.yml.j2 index df018fc43919b..46ecc34be1452 100644 --- a/.github/templates/windows_binary_build_workflow.yml.j2 +++ b/.github/templates/windows_binary_build_workflow.yml.j2 @@ -21,13 +21,17 @@ name: !{{ build_environment }} on: push: + {%- if branches == "nightly" %} # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build + {%- endif %} branches: - - nightly + - !{{ branches }} + {%- if branches == "nightly" %} tags: # NOTE: Binary build pipelines should only get triggered on release candidate builds # Release candidate tags look like: v1.11.0-rc1 - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+ + {%- endif %} {%- for label in ciflow_config.labels | sort %} {%- if label != "ciflow/default" %} - '!{{ label }}/*' @@ -107,5 +111,7 @@ jobs: run: | "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh" !{{ common.wait_and_kill_ssh_windows('pytorch') }} + {%- if branches == "nightly" %} !{{ upload.upload_binaries(config, True) }} + {%- endif %} {%- endfor %} diff --git a/.github/workflows/generated-linux-binary-conda.yml b/.github/workflows/generated-linux-binary-conda-nightly.yml similarity index 100% rename from .github/workflows/generated-linux-binary-conda.yml rename to .github/workflows/generated-linux-binary-conda-nightly.yml diff --git a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-master.yml b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-master.yml new file mode 100644 index 0000000000000..0e1c9ef696023 --- /dev/null +++ b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-master.yml @@ -0,0 +1,328 @@ +# @generated DO NOT EDIT MANUALLY + +# Template is at: .github/templates/linux_binary_build_workflow.yml.j2 +# Generation script: .github/scripts/generate_ci_workflows.py +name: linux-binary-libtorch-cxx11-abi + +on: + push: + branches: + - master + - 'ciflow/all/*' + - 'ciflow/trunk/*' + workflow_dispatch: + +env: + # Needed for conda builds + ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" + ANACONDA_USER: pytorch + AWS_DEFAULT_REGION: us-east-1 + BINARY_ENV_FILE: /tmp/env + BUILD_ENVIRONMENT: linux-binary-libtorch-cxx11-abi + BUILDER_ROOT: 
/builder + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IN_CI: 1 + IS_GHA: 1 + PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PYTORCH_FINAL_PACKAGE_DIR: /artifacts + PYTORCH_RETRY_TEST_CASES: 1 + PYTORCH_ROOT: /pytorch + SHA1: ${{ github.event.pull_request.head.sha || github.sha }} + SKIP_ALL_TESTS: 1 +concurrency: + group: linux-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + libtorch-cpu-shared-with-deps-cxx11-abi-build: + if: ${{ github.repository_owner == 'pytorch' }} + runs-on: linux.4xlarge + timeout-minutes: 240 + env: + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_VARIANT: shared-with-deps + DESIRED_DEVTOOLSET: cxx11-abi + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: Log in to ECR + env: + AWS_RETRY_MODE: standard + AWS_MAX_ATTEMPTS: 5 + run: | + AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\") + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \ + --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com" + - name: Chown workspace + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${ALPINE_IMAGE}" + # Ensure the working directory gets chowned back to the current user + docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Clean workspace + run: | + rm -rf "${GITHUB_WORKSPACE}" + mkdir "${GITHUB_WORKSPACE}" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Preserve github env variables for use in docker + run: | + env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Pull Docker image + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${DOCKER_IMAGE}" + - name: Build PyTorch binary + run: | + set -x + mkdir -p artifacts/ + container_name=$(docker run \ + -e BINARY_ENV_FILE \ + -e BUILDER_ROOT \ + -e BUILD_ENVIRONMENT \ + -e BUILD_SPLIT_CUDA \ + -e DESIRED_CUDA \ + -e DESIRED_DEVTOOLSET \ + -e DESIRED_PYTHON \ + -e GPU_ARCH_TYPE \ + -e GPU_ARCH_VERSION \ + -e IS_GHA \ + -e LIBTORCH_VARIANT \ + -e PACKAGE_TYPE \ + -e PYTORCH_FINAL_PACKAGE_DIR \ + -e PYTORCH_ROOT \ + -e SKIP_ALL_TESTS \ + --tty \ + --detach \ + -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \ + -v "${GITHUB_WORKSPACE}/builder:/builder" \ + -v "${RUNNER_TEMP}/artifacts:/artifacts" \ + -w / \ + "${DOCKER_IMAGE}" + ) + docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh" + docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /builder/libtorch/build.sh" + - name: Chown artifacts + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "${RUNNER_TEMP}/artifacts:/v" -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . + - uses: seemethere/upload-artifact-s3@v3 + with: + name: libtorch-cpu-shared-with-deps-cxx11-abi + retention-days: 14 + if-no-files-found: error + path: + ${{ runner.temp }}/artifacts/* + - name: Hold runner for 2 hours or until ssh sessions have drained + working-directory: pytorch/ + # Always hold for active ssh sessions + if: always() + run: .github/scripts/wait_for_ssh_to_drain.sh + - name: Chown workspace + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Kill containers, clean up images + if: always() + run: | + # ignore expansion of "docker ps -q" since it could be empty + # shellcheck disable=SC2046 + docker stop $(docker ps -q) || true + # Prune all of the docker images + docker system prune -af + libtorch-cpu-shared-with-deps-cxx11-abi-test: # Testing + if: ${{ github.repository_owner == 'pytorch' }} + needs: libtorch-cpu-shared-with-deps-cxx11-abi-build + runs-on: linux.4xlarge + timeout-minutes: 240 + env: + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_VARIANT: shared-with-deps + DESIRED_DEVTOOLSET: cxx11-abi + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: Log in to ECR + env: + AWS_RETRY_MODE: standard + AWS_MAX_ATTEMPTS: 5 + run: | + AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\") + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \ + --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com" + - name: Chown workspace + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${ALPINE_IMAGE}" + # Ensure the working directory gets chowned back to the current user + docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Clean workspace + run: | + rm -rf "${GITHUB_WORKSPACE}" + mkdir "${GITHUB_WORKSPACE}" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Preserve github env variables for use in docker + run: | + env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}" + - uses: seemethere/download-artifact-s3@0504774707cbc8603d7dca922e8026eb8bf3b47b + name: Download Build Artifacts + with: + name: libtorch-cpu-shared-with-deps-cxx11-abi + path: "${{ runner.temp }}/artifacts/" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Pull Docker image + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${DOCKER_IMAGE}" + - name: Test PyTorch binary + run: | + set -x + # shellcheck disable=SC2086,SC2090 + container_name=$(docker run \ + ${GPU_FLAG:-} \ + -e BINARY_ENV_FILE \ + -e BUILDER_ROOT \ + -e BUILD_ENVIRONMENT \ + -e BUILD_SPLIT_CUDA \ + -e DESIRED_CUDA \ + -e DESIRED_DEVTOOLSET \ + -e DESIRED_PYTHON \ + -e GPU_ARCH_TYPE \ + -e GPU_ARCH_VERSION \ + -e IS_GHA \ + -e LIBTORCH_VARIANT \ + -e PACKAGE_TYPE \ + -e PYTORCH_FINAL_PACKAGE_DIR \ + -e PYTORCH_ROOT \ + -e SKIP_ALL_TESTS \ + --tty \ + --detach \ + -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \ + -v "${GITHUB_WORKSPACE}/builder:/builder" \ + -v "${RUNNER_TEMP}/artifacts:/final_pkgs" \ + -w / \ + "${DOCKER_IMAGE}" + ) + docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh" + # Generate test script + docker exec -t -w "${PYTORCH_ROOT}" -e OUTPUT_SCRIPT="/run.sh" "${container_name}" bash -c "bash .circleci/scripts/binary_linux_test.sh" + docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash -x /run.sh" + - name: Hold runner for 2 hours or until ssh sessions have drained + working-directory: pytorch/ + # Always hold for active ssh sessions + if: always() + run: .github/scripts/wait_for_ssh_to_drain.sh + - name: Chown workspace + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Kill containers, clean up images + if: always() + run: | + # ignore expansion of "docker ps -q" since it could be empty + # shellcheck disable=SC2046 + docker stop $(docker ps -q) || true + # Prune all of the docker images + docker system prune -af diff --git a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi.yml b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml similarity index 100% rename from .github/workflows/generated-linux-binary-libtorch-cxx11-abi.yml rename to .github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml diff --git a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-master.yml b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-master.yml new file mode 100644 index 0000000000000..74c367b49de1e --- /dev/null +++ b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-master.yml @@ -0,0 +1,328 @@ +# @generated DO NOT EDIT MANUALLY + +# Template is at: .github/templates/linux_binary_build_workflow.yml.j2 +# Generation script: .github/scripts/generate_ci_workflows.py +name: linux-binary-libtorch-pre-cxx11 + +on: + push: + branches: + - master + - 'ciflow/all/*' + - 'ciflow/trunk/*' + workflow_dispatch: + +env: + # Needed for conda builds + ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" + ANACONDA_USER: pytorch + AWS_DEFAULT_REGION: us-east-1 + BINARY_ENV_FILE: /tmp/env + BUILD_ENVIRONMENT: linux-binary-libtorch-pre-cxx11 + BUILDER_ROOT: /builder + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IN_CI: 1 + IS_GHA: 1 + PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PYTORCH_FINAL_PACKAGE_DIR: /artifacts + PYTORCH_RETRY_TEST_CASES: 1 + PYTORCH_ROOT: /pytorch + SHA1: ${{ github.event.pull_request.head.sha || github.sha }} + SKIP_ALL_TESTS: 1 +concurrency: + group: linux-binary-libtorch-pre-cxx11-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + libtorch-cpu-shared-with-deps-cxx11-abi-build: + if: ${{ github.repository_owner == 'pytorch' }} + runs-on: linux.4xlarge + timeout-minutes: 240 + env: + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_VARIANT: shared-with-deps + DESIRED_DEVTOOLSET: cxx11-abi + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: Log in to ECR + env: + AWS_RETRY_MODE: standard + AWS_MAX_ATTEMPTS: 5 + run: | + AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\") + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \ + --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com" + - name: Chown workspace + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull 
"${ALPINE_IMAGE}" + # Ensure the working directory gets chowned back to the current user + docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . + - name: Clean workspace + run: | + rm -rf "${GITHUB_WORKSPACE}" + mkdir "${GITHUB_WORKSPACE}" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Preserve github env variables for use in docker + run: | + env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Pull Docker image + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${DOCKER_IMAGE}" + - name: Build PyTorch binary + run: | + set -x + mkdir -p artifacts/ + container_name=$(docker run \ + -e BINARY_ENV_FILE \ + -e BUILDER_ROOT \ + -e BUILD_ENVIRONMENT \ + -e BUILD_SPLIT_CUDA \ + -e DESIRED_CUDA \ + -e DESIRED_DEVTOOLSET \ + -e DESIRED_PYTHON \ + -e GPU_ARCH_TYPE \ + -e GPU_ARCH_VERSION \ + -e IS_GHA \ + -e LIBTORCH_VARIANT \ + -e PACKAGE_TYPE \ + -e PYTORCH_FINAL_PACKAGE_DIR \ + -e PYTORCH_ROOT \ + -e SKIP_ALL_TESTS \ + --tty \ + --detach \ + -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \ + -v "${GITHUB_WORKSPACE}/builder:/builder" \ + -v "${RUNNER_TEMP}/artifacts:/artifacts" \ + -w / \ + "${DOCKER_IMAGE}" + ) + docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh" + docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /builder/libtorch/build.sh" + - name: Chown artifacts + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "${RUNNER_TEMP}/artifacts:/v" -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . + - uses: seemethere/upload-artifact-s3@v3 + with: + name: libtorch-cpu-shared-with-deps-cxx11-abi + retention-days: 14 + if-no-files-found: error + path: + ${{ runner.temp }}/artifacts/* + - name: Hold runner for 2 hours or until ssh sessions have drained + working-directory: pytorch/ + # Always hold for active ssh sessions + if: always() + run: .github/scripts/wait_for_ssh_to_drain.sh + - name: Chown workspace + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Kill containers, clean up images + if: always() + run: | + # ignore expansion of "docker ps -q" since it could be empty + # shellcheck disable=SC2046 + docker stop $(docker ps -q) || true + # Prune all of the docker images + docker system prune -af + libtorch-cpu-shared-with-deps-cxx11-abi-test: # Testing + if: ${{ github.repository_owner == 'pytorch' }} + needs: libtorch-cpu-shared-with-deps-cxx11-abi-build + runs-on: linux.4xlarge + timeout-minutes: 240 + env: + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_VARIANT: shared-with-deps + DESIRED_DEVTOOLSET: cxx11-abi + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: Log in to ECR + env: + AWS_RETRY_MODE: standard + AWS_MAX_ATTEMPTS: 5 + run: | + AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\") + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \ + --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com" + - name: Chown workspace + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${ALPINE_IMAGE}" + # Ensure the working directory gets chowned back to the current user + docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Clean workspace + run: | + rm -rf "${GITHUB_WORKSPACE}" + mkdir "${GITHUB_WORKSPACE}" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Preserve github env variables for use in docker + run: | + env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}" + - uses: seemethere/download-artifact-s3@0504774707cbc8603d7dca922e8026eb8bf3b47b + name: Download Build Artifacts + with: + name: libtorch-cpu-shared-with-deps-cxx11-abi + path: "${{ runner.temp }}/artifacts/" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Pull Docker image + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${DOCKER_IMAGE}" + - name: Test PyTorch binary + run: | + set -x + # shellcheck disable=SC2086,SC2090 + container_name=$(docker run \ + ${GPU_FLAG:-} \ + -e BINARY_ENV_FILE \ + -e BUILDER_ROOT \ + -e BUILD_ENVIRONMENT \ + -e BUILD_SPLIT_CUDA \ + -e DESIRED_CUDA \ + -e DESIRED_DEVTOOLSET \ + -e DESIRED_PYTHON \ + -e GPU_ARCH_TYPE \ + -e GPU_ARCH_VERSION \ + -e IS_GHA \ + -e LIBTORCH_VARIANT \ + -e PACKAGE_TYPE \ + -e PYTORCH_FINAL_PACKAGE_DIR \ + -e PYTORCH_ROOT \ + -e SKIP_ALL_TESTS \ + --tty \ + --detach \ + -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \ + -v "${GITHUB_WORKSPACE}/builder:/builder" \ + -v "${RUNNER_TEMP}/artifacts:/final_pkgs" \ + -w / \ + "${DOCKER_IMAGE}" + ) + docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh" + # Generate test script + docker exec -t -w "${PYTORCH_ROOT}" -e OUTPUT_SCRIPT="/run.sh" "${container_name}" bash -c "bash .circleci/scripts/binary_linux_test.sh" + docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash -x /run.sh" + - name: Hold runner for 2 hours or until ssh sessions have drained + working-directory: pytorch/ + # Always hold for active ssh sessions + if: always() + run: .github/scripts/wait_for_ssh_to_drain.sh + - name: Chown workspace + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Kill containers, clean up images + if: always() + run: | + # ignore expansion of "docker ps -q" since it could be empty + # shellcheck disable=SC2046 + docker stop $(docker ps -q) || true + # Prune all of the docker images + docker system prune -af diff --git a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11.yml b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml similarity index 100% rename from .github/workflows/generated-linux-binary-libtorch-pre-cxx11.yml rename to .github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml diff --git a/.github/workflows/generated-linux-binary-manywheel-master.yml b/.github/workflows/generated-linux-binary-manywheel-master.yml new file mode 100644 index 0000000000000..5044aa94a359d --- /dev/null +++ b/.github/workflows/generated-linux-binary-manywheel-master.yml @@ -0,0 +1,339 @@ +# @generated DO NOT EDIT MANUALLY + +# Template is at: .github/templates/linux_binary_build_workflow.yml.j2 +# Generation script: .github/scripts/generate_ci_workflows.py +name: linux-binary-manywheel + +on: + push: + branches: + - master + - 'ciflow/all/*' + - 'ciflow/trunk/*' + workflow_dispatch: + +env: + # Needed for conda builds + ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" + ANACONDA_USER: pytorch + AWS_DEFAULT_REGION: us-east-1 + BINARY_ENV_FILE: /tmp/env + BUILD_ENVIRONMENT: linux-binary-manywheel + BUILDER_ROOT: /builder + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IN_CI: 1 + IS_GHA: 1 + PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PYTORCH_FINAL_PACKAGE_DIR: /artifacts + PYTORCH_RETRY_TEST_CASES: 1 + PYTORCH_ROOT: /pytorch + SHA1: ${{ github.event.pull_request.head.sha || github.sha }} + SKIP_ALL_TESTS: 1 +concurrency: + group: linux-binary-manywheel-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + wheel-py3_7-cuda10_2-build: + if: ${{ github.repository_owner == 'pytorch' }} + runs-on: linux.4xlarge + timeout-minutes: 240 + env: + PACKAGE_TYPE: wheel + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cu102 + GPU_ARCH_VERSION: 10.2 + GPU_ARCH_TYPE: cuda + DOCKER_IMAGE: pytorch/manylinux-builder:cuda10.2 + SKIP_ALL_TESTS: 1 + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: Log in to ECR + env: + AWS_RETRY_MODE: standard + AWS_MAX_ATTEMPTS: 5 + run: | + AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\") + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \ + --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com" + - name: Chown workspace + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${ALPINE_IMAGE}" + # Ensure the working directory gets chowned back to the current user + 
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . + - name: Clean workspace + run: | + rm -rf "${GITHUB_WORKSPACE}" + mkdir "${GITHUB_WORKSPACE}" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Preserve github env variables for use in docker + run: | + env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Pull Docker image + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${DOCKER_IMAGE}" + - name: Build PyTorch binary + run: | + set -x + mkdir -p artifacts/ + container_name=$(docker run \ + -e BINARY_ENV_FILE \ + -e BUILDER_ROOT \ + -e BUILD_ENVIRONMENT \ + -e BUILD_SPLIT_CUDA \ + -e DESIRED_CUDA \ + -e DESIRED_DEVTOOLSET \ + -e DESIRED_PYTHON \ + -e GPU_ARCH_TYPE \ + -e GPU_ARCH_VERSION \ + -e IS_GHA \ + -e LIBTORCH_VARIANT \ + -e PACKAGE_TYPE \ + -e PYTORCH_FINAL_PACKAGE_DIR \ + -e PYTORCH_ROOT \ + -e SKIP_ALL_TESTS \ + --tty \ + --detach \ + -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \ + -v "${GITHUB_WORKSPACE}/builder:/builder" \ + -v "${RUNNER_TEMP}/artifacts:/artifacts" \ + -w / \ + "${DOCKER_IMAGE}" + ) + docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh" + docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /builder/wheel/build.sh" + - name: Chown artifacts + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "${RUNNER_TEMP}/artifacts:/v" -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . + - uses: seemethere/upload-artifact-s3@v3 + with: + name: wheel-py3_7-cuda10_2 + retention-days: 14 + if-no-files-found: error + path: + ${{ runner.temp }}/artifacts/* + - name: Hold runner for 2 hours or until ssh sessions have drained + working-directory: pytorch/ + # Always hold for active ssh sessions + if: always() + run: .github/scripts/wait_for_ssh_to_drain.sh + - name: Chown workspace + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Kill containers, clean up images + if: always() + run: | + # ignore expansion of "docker ps -q" since it could be empty + # shellcheck disable=SC2046 + docker stop $(docker ps -q) || true + # Prune all of the docker images + docker system prune -af + wheel-py3_7-cuda10_2-test: # Testing + if: ${{ github.repository_owner == 'pytorch' }} + needs: wheel-py3_7-cuda10_2-build + runs-on: linux.4xlarge.nvidia.gpu + timeout-minutes: 240 + env: + PACKAGE_TYPE: wheel + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cu102 + GPU_ARCH_VERSION: 10.2 + GPU_ARCH_TYPE: cuda + DOCKER_IMAGE: pytorch/manylinux-builder:cuda10.2 + SKIP_ALL_TESTS: 1 + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: Log in to ECR + env: + AWS_RETRY_MODE: standard + AWS_MAX_ATTEMPTS: 5 + run: | + AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\") + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \ + --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com" + - name: Chown workspace + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${ALPINE_IMAGE}" + # Ensure the working directory gets chowned back to the current user + docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Clean workspace + run: | + rm -rf "${GITHUB_WORKSPACE}" + mkdir "${GITHUB_WORKSPACE}" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Preserve github env variables for use in docker + run: | + env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}" + - uses: seemethere/download-artifact-s3@0504774707cbc8603d7dca922e8026eb8bf3b47b + name: Download Build Artifacts + with: + name: wheel-py3_7-cuda10_2 + path: "${{ runner.temp }}/artifacts/" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - uses: nick-fields/retry@71062288b76e2b6214ebde0e673ce0de1755740a + name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG + with: + timeout_minutes: 10 + max_attempts: 3 + command: | + set -ex + pushd pytorch + bash .github/scripts/install_nvidia_utils_linux.sh + echo "GPU_FLAG=--gpus all" >> "${GITHUB_ENV}" + popd + - name: Pull Docker image + run: | + retry () { + "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") + } + retry docker pull "${DOCKER_IMAGE}" + - name: Test PyTorch binary + run: | + set -x + # shellcheck disable=SC2086,SC2090 + container_name=$(docker run \ + ${GPU_FLAG:-} \ + -e BINARY_ENV_FILE \ + -e BUILDER_ROOT \ + -e BUILD_ENVIRONMENT \ + -e BUILD_SPLIT_CUDA \ + -e DESIRED_CUDA \ + -e DESIRED_DEVTOOLSET \ + -e DESIRED_PYTHON \ + -e GPU_ARCH_TYPE \ + -e GPU_ARCH_VERSION \ + -e IS_GHA \ + -e LIBTORCH_VARIANT \ + -e PACKAGE_TYPE \ + -e PYTORCH_FINAL_PACKAGE_DIR \ + -e PYTORCH_ROOT \ + -e SKIP_ALL_TESTS \ + --tty \ + --detach \ + -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \ + -v "${GITHUB_WORKSPACE}/builder:/builder" \ + -v "${RUNNER_TEMP}/artifacts:/final_pkgs" \ + -w / \ + "${DOCKER_IMAGE}" + ) + docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh" + # Generate test script + docker exec -t -w "${PYTORCH_ROOT}" -e OUTPUT_SCRIPT="/run.sh" "${container_name}" bash -c "bash .circleci/scripts/binary_linux_test.sh" + docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash -x /run.sh" + - name: Hold runner for 2 hours or until ssh sessions have drained + working-directory: pytorch/ + # Always hold for active ssh sessions + if: always() + run: .github/scripts/wait_for_ssh_to_drain.sh + - name: Chown workspace + if: always() + run: | + # Ensure the working directory gets chowned back to the current user + docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" . 
+ - name: Kill containers, clean up images + if: always() + run: | + # ignore expansion of "docker ps -q" since it could be empty + # shellcheck disable=SC2046 + docker stop $(docker ps -q) || true + # Prune all of the docker images + docker system prune -af diff --git a/.github/workflows/generated-linux-binary-manywheel.yml b/.github/workflows/generated-linux-binary-manywheel-nightly.yml similarity index 100% rename from .github/workflows/generated-linux-binary-manywheel.yml rename to .github/workflows/generated-linux-binary-manywheel-nightly.yml diff --git a/.github/workflows/generated-macos-arm64-binary-conda.yml b/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml similarity index 100% rename from .github/workflows/generated-macos-arm64-binary-conda.yml rename to .github/workflows/generated-macos-arm64-binary-conda-nightly.yml diff --git a/.github/workflows/generated-macos-arm64-binary-wheel.yml b/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml similarity index 100% rename from .github/workflows/generated-macos-arm64-binary-wheel.yml rename to .github/workflows/generated-macos-arm64-binary-wheel-nightly.yml diff --git a/.github/workflows/generated-macos-binary-conda.yml b/.github/workflows/generated-macos-binary-conda-nightly.yml similarity index 100% rename from .github/workflows/generated-macos-binary-conda.yml rename to .github/workflows/generated-macos-binary-conda-nightly.yml diff --git a/.github/workflows/generated-macos-binary-libtorch-cxx11-abi.yml b/.github/workflows/generated-macos-binary-libtorch-cxx11-abi-nightly.yml similarity index 100% rename from .github/workflows/generated-macos-binary-libtorch-cxx11-abi.yml rename to .github/workflows/generated-macos-binary-libtorch-cxx11-abi-nightly.yml diff --git a/.github/workflows/generated-macos-binary-libtorch-pre-cxx11.yml b/.github/workflows/generated-macos-binary-libtorch-pre-cxx11-nightly.yml similarity index 100% rename from .github/workflows/generated-macos-binary-libtorch-pre-cxx11.yml rename to .github/workflows/generated-macos-binary-libtorch-pre-cxx11-nightly.yml diff --git a/.github/workflows/generated-macos-binary-wheel.yml b/.github/workflows/generated-macos-binary-wheel-nightly.yml similarity index 100% rename from .github/workflows/generated-macos-binary-wheel.yml rename to .github/workflows/generated-macos-binary-wheel-nightly.yml diff --git a/.github/workflows/generated-windows-binary-libtorch-debug-master.yml b/.github/workflows/generated-windows-binary-libtorch-debug-master.yml new file mode 100644 index 0000000000000..dc1f68d3b6594 --- /dev/null +++ b/.github/workflows/generated-windows-binary-libtorch-debug-master.yml @@ -0,0 +1,223 @@ +# @generated DO NOT EDIT MANUALLY + +# Template is at: .github/templates/windows_binary_build_workflow.yml.j2 +# Generation script: .github/scripts/generate_ci_workflows.py +name: windows-binary-libtorch-debug + +on: + push: + branches: + - master + - 'ciflow/all/*' + - 'ciflow/trunk/*' + workflow_dispatch: + +env: + # Needed for conda builds + ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" + ANACONDA_USER: pytorch + AWS_DEFAULT_REGION: us-east-1 + BUILD_ENVIRONMENT: windows-binary-libtorch-debug + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IN_CI: 1 + IS_GHA: 1 + PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PYTORCH_RETRY_TEST_CASES: 1 + SHA1: ${{ github.event.pull_request.head.sha || github.sha }} + SKIP_ALL_TESTS: 1 +concurrency: + group: 
windows-binary-libtorch-debug-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + libtorch-cpu-shared-with-deps-debug-build: + runs-on: windows.4xlarge + timeout-minutes: 240 + env: + PYTORCH_ROOT: ${{ github.workspace }}/pytorch + BUILDER_ROOT: ${{ github.workspace }}/builder + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_CONFIG: debug + LIBTORCH_VARIANT: shared-with-deps + # This is a dummy value for libtorch to work correctly with our batch scripts + # without this value pip does not get installed for some reason + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # NOTE: These environment variables are put here so that they can be applied on every job equally + # They are also here because setting them at a workflow level doesn't give us access to the + # runner.temp variable, which we need. + - name: Populate binary env + shell: bash + run: | + echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" + echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" + echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Populate binary env + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" + - name: Build PyTorch binary + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh" + - uses: seemethere/upload-artifact-s3@v3 + if: always() + with: + name: libtorch-cpu-shared-with-deps-debug + retention-days: 14 + if-no-files-found: error + path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" + - name: Wait until all sessions have drained + shell: powershell + working-directory: pytorch + if: always() + timeout-minutes: 120 + run: | + .github\scripts\wait_for_ssh_to_drain.ps1 + - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) + shell: powershell + working-directory: pytorch + if: always() + run: | + .github\scripts\kill_active_ssh_sessions.ps1 + libtorch-cpu-shared-with-deps-debug-test: # Testing + if: ${{ 
github.repository_owner == 'pytorch' }} + needs: libtorch-cpu-shared-with-deps-debug-build + runs-on: windows.4xlarge + timeout-minutes: 240 + env: + PYTORCH_ROOT: ${{ github.workspace }}/pytorch + BUILDER_ROOT: ${{ github.workspace }}/builder + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_CONFIG: debug + LIBTORCH_VARIANT: shared-with-deps + # This is a dummy value for libtorch to work correctly with our batch scripts + # without this value pip does not get installed for some reason + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # NOTE: These environment variables are put here so that they can be applied on every job equally + # They are also here because setting them at a workflow level doesn't give us access to the + # runner.temp variable, which we need. + - name: Populate binary env + shell: bash + run: | + echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" + echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" + echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" + - uses: seemethere/download-artifact-s3@0504774707cbc8603d7dca922e8026eb8bf3b47b + name: Download Build Artifacts + with: + name: libtorch-cpu-shared-with-deps-debug + path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Populate binary env + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" + - name: Test PyTorch binary + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh" + - name: Wait until all sessions have drained + shell: powershell + working-directory: pytorch + if: always() + timeout-minutes: 120 + run: | + .github\scripts\wait_for_ssh_to_drain.ps1 + - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) + shell: powershell + working-directory: pytorch + if: always() + run: | + .github\scripts\kill_active_ssh_sessions.ps1 diff --git a/.github/workflows/generated-windows-binary-libtorch-debug.yml b/.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml similarity index 100% rename 
from .github/workflows/generated-windows-binary-libtorch-debug.yml rename to .github/workflows/generated-windows-binary-libtorch-debug-nightly.yml diff --git a/.github/workflows/generated-windows-binary-libtorch-release-master.yml b/.github/workflows/generated-windows-binary-libtorch-release-master.yml new file mode 100644 index 0000000000000..670817020010a --- /dev/null +++ b/.github/workflows/generated-windows-binary-libtorch-release-master.yml @@ -0,0 +1,223 @@ +# @generated DO NOT EDIT MANUALLY + +# Template is at: .github/templates/windows_binary_build_workflow.yml.j2 +# Generation script: .github/scripts/generate_ci_workflows.py +name: windows-binary-libtorch-release + +on: + push: + branches: + - master + - 'ciflow/all/*' + - 'ciflow/trunk/*' + workflow_dispatch: + +env: + # Needed for conda builds + ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" + ANACONDA_USER: pytorch + AWS_DEFAULT_REGION: us-east-1 + BUILD_ENVIRONMENT: windows-binary-libtorch-release + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IN_CI: 1 + IS_GHA: 1 + PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PYTORCH_RETRY_TEST_CASES: 1 + SHA1: ${{ github.event.pull_request.head.sha || github.sha }} + SKIP_ALL_TESTS: 1 +concurrency: + group: windows-binary-libtorch-release-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + libtorch-cpu-shared-with-deps-release-build: + runs-on: windows.4xlarge + timeout-minutes: 240 + env: + PYTORCH_ROOT: ${{ github.workspace }}/pytorch + BUILDER_ROOT: ${{ github.workspace }}/builder + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_CONFIG: release + LIBTORCH_VARIANT: shared-with-deps + # This is a dummy value for libtorch to work correctly with our batch scripts + # without this value pip does not get installed for some reason + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # NOTE: These environment variables are put here so that they can be applied on every job equally + # They are also here because setting them at a workflow level doesn't give us access to the + # runner.temp variable, which we need. 
+ - name: Populate binary env + shell: bash + run: | + echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" + echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" + echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Populate binary env + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" + - name: Build PyTorch binary + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh" + - uses: seemethere/upload-artifact-s3@v3 + if: always() + with: + name: libtorch-cpu-shared-with-deps-release + retention-days: 14 + if-no-files-found: error + path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" + - name: Wait until all sessions have drained + shell: powershell + working-directory: pytorch + if: always() + timeout-minutes: 120 + run: | + .github\scripts\wait_for_ssh_to_drain.ps1 + - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) + shell: powershell + working-directory: pytorch + if: always() + run: | + .github\scripts\kill_active_ssh_sessions.ps1 + libtorch-cpu-shared-with-deps-release-test: # Testing + if: ${{ github.repository_owner == 'pytorch' }} + needs: libtorch-cpu-shared-with-deps-release-build + runs-on: windows.4xlarge + timeout-minutes: 240 + env: + PYTORCH_ROOT: ${{ github.workspace }}/pytorch + BUILDER_ROOT: ${{ github.workspace }}/builder + PACKAGE_TYPE: libtorch + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu + SKIP_ALL_TESTS: 1 + LIBTORCH_CONFIG: release + LIBTORCH_VARIANT: shared-with-deps + # This is a dummy value for libtorch to work correctly with our batch scripts + # without this value pip does not get installed for some reason + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # NOTE: These environment variables are put here so that they can be applied on every job equally + # They are also here because setting them at a workflow level doesn't give us access to the + # runner.temp variable, which we need. 
+ - name: Populate binary env + shell: bash + run: | + echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" + echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" + echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" + - uses: seemethere/download-artifact-s3@0504774707cbc8603d7dca922e8026eb8bf3b47b + name: Download Build Artifacts + with: + name: libtorch-cpu-shared-with-deps-release + path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Populate binary env + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" + - name: Test PyTorch binary + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh" + - name: Wait until all sessions have drained + shell: powershell + working-directory: pytorch + if: always() + timeout-minutes: 120 + run: | + .github\scripts\wait_for_ssh_to_drain.ps1 + - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) + shell: powershell + working-directory: pytorch + if: always() + run: | + .github\scripts\kill_active_ssh_sessions.ps1 diff --git a/.github/workflows/generated-windows-binary-libtorch-release.yml b/.github/workflows/generated-windows-binary-libtorch-release-nightly.yml similarity index 100% rename from .github/workflows/generated-windows-binary-libtorch-release.yml rename to .github/workflows/generated-windows-binary-libtorch-release-nightly.yml diff --git a/.github/workflows/generated-windows-binary-wheel-master.yml b/.github/workflows/generated-windows-binary-wheel-master.yml new file mode 100644 index 0000000000000..53a98ecc62298 --- /dev/null +++ b/.github/workflows/generated-windows-binary-wheel-master.yml @@ -0,0 +1,217 @@ +# @generated DO NOT EDIT MANUALLY + +# Template is at: .github/templates/windows_binary_build_workflow.yml.j2 +# Generation script: .github/scripts/generate_ci_workflows.py +name: windows-binary-wheel + +on: + push: + branches: + - master + - 'ciflow/all/*' + - 'ciflow/trunk/*' + workflow_dispatch: + +env: + # Needed for conda builds + ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" + ANACONDA_USER: pytorch + AWS_DEFAULT_REGION: us-east-1 + BUILD_ENVIRONMENT: windows-binary-wheel + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IN_CI: 1 + IS_GHA: 1 + PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PYTORCH_RETRY_TEST_CASES: 1 + SHA1: ${{ github.event.pull_request.head.sha || github.sha }} + SKIP_ALL_TESTS: 1 +concurrency: + group: windows-binary-wheel-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + wheel-py3_7-cuda11_3-build: + runs-on: windows.4xlarge + timeout-minutes: 240 + env: + PYTORCH_ROOT: ${{ github.workspace 
}}/pytorch + BUILDER_ROOT: ${{ github.workspace }}/builder + PACKAGE_TYPE: wheel + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cu113 + GPU_ARCH_VERSION: 11.3 + GPU_ARCH_TYPE: cuda + SKIP_ALL_TESTS: 1 + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # NOTE: These environment variables are put here so that they can be applied on every job equally + # They are also here because setting them at a workflow level doesn't give us access to the + # runner.temp variable, which we need. + - name: Populate binary env + shell: bash + run: | + echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" + echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" + echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Populate binary env + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" + - name: Build PyTorch binary + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh" + - uses: seemethere/upload-artifact-s3@v3 + if: always() + with: + name: wheel-py3_7-cuda11_3 + retention-days: 14 + if-no-files-found: error + path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" + - name: Wait until all sessions have drained + shell: powershell + working-directory: pytorch + if: always() + timeout-minutes: 120 + run: | + .github\scripts\wait_for_ssh_to_drain.ps1 + - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) + shell: powershell + working-directory: pytorch + if: always() + run: | + .github\scripts\kill_active_ssh_sessions.ps1 + wheel-py3_7-cuda11_3-test: # Testing + if: ${{ github.repository_owner == 'pytorch' }} + needs: wheel-py3_7-cuda11_3-build + runs-on: windows.8xlarge.nvidia.gpu + timeout-minutes: 240 + env: + PYTORCH_ROOT: ${{ github.workspace }}/pytorch + BUILDER_ROOT: ${{ github.workspace }}/builder + PACKAGE_TYPE: wheel + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cu113 + GPU_ARCH_VERSION: 11.3 + GPU_ARCH_TYPE: cuda + SKIP_ALL_TESTS: 1 + DESIRED_PYTHON: "3.7" + steps: + - name: Display EC2 
information + shell: bash + run: | + set -euo pipefail + function get_ec2_metadata() { + # Pulled from instance metadata endpoint for EC2 + # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + category=$1 + curl -fsSL "http://169.254.169.254/latest/meta-data/${category}" + } + echo "ami-id: $(get_ec2_metadata ami-id)" + echo "instance-id: $(get_ec2_metadata instance-id)" + echo "instance-type: $(get_ec2_metadata instance-type)" + - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" + uses: seemethere/add-github-ssh-key@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # NOTE: These environment variables are put here so that they can be applied on every job equally + # They are also here because setting them at a workflow level doesn't give us access to the + # runner.temp variable, which we need. + - name: Populate binary env + shell: bash + run: | + echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" + echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" + echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" + - uses: seemethere/download-artifact-s3@0504774707cbc8603d7dca922e8026eb8bf3b47b + name: Download Build Artifacts + with: + name: wheel-py3_7-cuda11_3 + path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" + - name: Checkout PyTorch + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + submodules: recursive + path: pytorch + - name: Clean PyTorch checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: pytorch + - name: Checkout pytorch/builder + uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9 + with: + ref: main + submodules: recursive + repository: pytorch/builder + path: builder + - name: Clean pytorch/builder checkout + run: | + # Remove any artifacts from the previous checkouts + git clean -fxd + working-directory: builder + - name: Populate binary env + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" + - name: Test PyTorch binary + shell: bash + run: | + "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh" + - name: Wait until all sessions have drained + shell: powershell + working-directory: pytorch + if: always() + timeout-minutes: 120 + run: | + .github\scripts\wait_for_ssh_to_drain.ps1 + - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) + shell: powershell + working-directory: pytorch + if: always() + run: | + .github\scripts\kill_active_ssh_sessions.ps1 diff --git a/.github/workflows/generated-windows-binary-wheel.yml b/.github/workflows/generated-windows-binary-wheel-nightly.yml similarity index 100% rename from .github/workflows/generated-windows-binary-wheel.yml rename to .github/workflows/generated-windows-binary-wheel-nightly.yml
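Every Windows job above repeats the same "Populate binary env" preamble, and the NOTE comments explain why: runner.temp is only resolvable inside a job, so values derived from it are appended to the file named by GITHUB_ENV, which the Actions runner folds into the environment of every later step in that job. Below is a minimal sketch of that mechanism; the := fallbacks exist only so it can be executed outside CI and are not part of the workflow.

    #!/usr/bin/env bash
    # Persist values for later steps of the same job by appending KEY=VALUE
    # lines to the file named by GITHUB_ENV. RUNNER_TEMP and GITHUB_ENV are
    # provided by the Actions runner; the fallbacks below are for local runs.
    set -euo pipefail

    : "${RUNNER_TEMP:=/tmp}"
    : "${GITHUB_ENV:=/dev/stdout}"

    {
      echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env"
      echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts"
    } >> "${GITHUB_ENV}"

The same mechanism is what makes the earlier Linux test job's `echo "GPU_FLAG=--gpus all" >> "${GITHUB_ENV}"` visible to its later `docker run` step.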