Skip to content

Commit

Permalink
Delete torch::deploy from pytorch core (pytorch#85953)
Browse files Browse the repository at this point in the history
As we have migrated torch::deploy over to https://github.com/pytorch/multipy, we can now delete it from pytorch core as ongoing development will happen there.

This PR was created due to syncing issues with pytorch#85443 which is where the review history can be found.
Pull Request resolved: pytorch#85953
Approved by: https://github.com/seemethere, https://github.com/malfet
  • Loading branch information
PaliC authored and pytorchmergebot committed Oct 6, 2022
1 parent 27c3fb0 commit 936e930
Show file tree
Hide file tree
Showing 82 changed files with 9 additions and 8,172 deletions.
20 changes: 0 additions & 20 deletions .github/workflows/pull.yml
Original file line number Diff line number Diff line change
Expand Up @@ -302,26 +302,6 @@ jobs:
docker-image-name: pytorch-linux-focal-py3.7-gcc7
build-generates-artifacts: false

linux-bionic-cuda11_6-py3_10-gcc7-deploy-build:
name: linux-bionic-cuda11_6-py3_10-gcc7-deploy
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda11.6-py3.10-gcc7-deploy
docker-image-name: pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7
test-matrix: |
{ include: [
{ config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
]}
deploy-linux-bionic-cuda11_6-py3_10-gcc7-test:
name: linux-bionic-cuda11_6-py3_10-gcc7-deploy
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda11_6-py3_10-gcc7-deploy-build
with:
build-environment: linux-bionic-cuda11.6-py3.10-gcc7-deploy
docker-image: ${{ needs.linux-bionic-cuda11_6-py3_10-gcc7-deploy-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda11_6-py3_10-gcc7-deploy-build.outputs.test-matrix }}

linux-focal-rocm5_2-py3_7-build:
# don't run build twice on master
if: github.event_name == 'pull_request'
Expand Down
4 changes: 0 additions & 4 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,6 @@ torch/testing/_internal/generated/annotated_fn_args.py
torch/testing/_internal/data/*.pt
torch/csrc/api/include/torch/version.h
torch/csrc/cudnn/cuDNN.cpp
torch/csrc/deploy/example/generated
torch/csrc/deploy/interpreter/cpython
torch/csrc/deploy/interpreter/frozen
torch/csrc/deploy/interpreter/third_party/typing_extensions.py
torch/csrc/generated
torch/csrc/generic/TensorMethods.cpp
torch/csrc/jit/generated/*
Expand Down
6 changes: 0 additions & 6 deletions .jenkins/pytorch/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,6 @@ if [[ "$BUILD_ENVIRONMENT" == *-mobile-*build* ]]; then
exec "$(dirname "${BASH_SOURCE[0]}")/build-mobile.sh" "$@"
fi

if [[ "$BUILD_ENVIRONMENT" == *deploy* ]]; then
# Enabling DEPLOY build (embedded torch python interpreter, experimental)
# only on one config for now, can expand later
export USE_DEPLOY=ON
fi

echo "Python version:"
python --version

Expand Down
15 changes: 1 addition & 14 deletions .jenkins/pytorch/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -655,16 +655,6 @@ test_dynamo() {
popd
}

test_torch_deploy() {
python torch/csrc/deploy/example/generate_examples.py
ln -sf "$TORCH_LIB_DIR"/libtorch* "$TORCH_BIN_DIR"
ln -sf "$TORCH_LIB_DIR"/libshm* "$TORCH_BIN_DIR"
ln -sf "$TORCH_LIB_DIR"/libc10* "$TORCH_BIN_DIR"
"$TORCH_BIN_DIR"/test_deploy
"$TORCH_BIN_DIR"/test_deploy_gpu
assert_git_not_dirty
}

test_docs_test() {
.jenkins/pytorch/docs-test.sh
}
Expand All @@ -673,10 +663,7 @@ if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-baze
(cd test && python -c "import torch; print(torch.__config__.show())")
(cd test && python -c "import torch; print(torch.__config__.parallel_info())")
fi
if [[ "${TEST_CONFIG}" == *deploy* ]]; then
install_torchdynamo
test_torch_deploy
elif [[ "${TEST_CONFIG}" == *backward* ]]; then
if [[ "${TEST_CONFIG}" == *backward* ]]; then
test_forward_backward_compatibility
# Do NOT add tests after bc check tests, see its comment.
elif [[ "${TEST_CONFIG}" == *xla* ]]; then
Expand Down
7 changes: 0 additions & 7 deletions .lintrunner.toml
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,6 @@ command = [
[[linter]]
code = 'CLANGTIDY'
include_patterns = [
'torch/csrc/deploy/**/*.cpp',
'torch/csrc/fx/**/*.cpp',
'torch/csrc/generic/**/*.cpp',
'torch/csrc/onnx/**/*.cpp',
Expand All @@ -183,7 +182,6 @@ exclude_patterns = [
# FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed
# in a follow up PR.
# /torch/csrc/generic/*.cpp is excluded because those files aren't actually built.
# deploy/interpreter files are excluded due to using macros and other techniques
# that are not easily converted to accepted c++
'torch/csrc/jit/passes/onnx/helper.cpp',
'torch/csrc/jit/passes/onnx/shape_type_inference.cpp',
Expand All @@ -197,11 +195,6 @@ exclude_patterns = [
'torch/csrc/autograd/FunctionsManual.cpp',
'torch/csrc/generic/*.cpp',
'torch/csrc/jit/codegen/cuda/runtime/*',
'torch/csrc/deploy/interactive_embedded_interpreter.cpp',
'torch/csrc/deploy/interpreter/**',
'torch/csrc/deploy/test_deploy_python_ext.cpp',
'torch/csrc/deploy/test_deploy_missing_interpreter.cpp',
'torch/csrc/deploy/test_deploy_gpu.cpp',
'torch/csrc/utils/disable_torch_function.cpp',
]
init_command = [
Expand Down
1 change: 0 additions & 1 deletion BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -1748,7 +1748,6 @@ cc_library(
# Torch integration tests rely on a labeled data set from the MNIST database.
# http://yann.lecun.com/exdb/mnist/

# imethod.cpp is excluded since torch/csrc/deploy* build is not yet supported.
cpp_api_tests = glob(
["test/cpp/api/*.cpp"],
exclude = [
Expand Down
8 changes: 0 additions & 8 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -345,9 +345,6 @@ cmake_dependent_option(
option(ONNX_ML "Enable traditional ONNX ML API." ON)
option(HAVE_SOVERSION "Whether to add SOVERSION to the shared objects" OFF)
option(BUILD_LIBTORCH_CPU_WITH_DEBUG "Enable RelWithDebInfo for libtorch_cpu target only" OFF)
cmake_dependent_option(
USE_DEPLOY "Build embedded torch::deploy interpreter. See torch/csrc/deploy/README.md for more info." OFF
"BUILD_PYTHON" OFF)
cmake_dependent_option(USE_CCACHE "Attempt using CCache to wrap the compilation" ON "UNIX" OFF)
option(WERROR "Build with -Werror supported by the compiler" OFF)
option(USE_COREML_DELEGATE "Use the CoreML backend through delegate APIs" OFF)
Expand Down Expand Up @@ -1177,11 +1174,6 @@ endif()
include(cmake/Summary.cmake)
caffe2_print_configuration_summary()

# ---[ Torch Deploy
if(USE_DEPLOY)
add_subdirectory(torch/csrc/deploy)
endif()

if(BUILD_FUNCTORCH)
add_subdirectory(functorch)
endif()
6 changes: 0 additions & 6 deletions caffe2/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -1154,12 +1154,6 @@ install(FILES
"${TORCH_SRC_DIR}/library.h"
"${TORCH_SRC_DIR}/custom_class_detail.h"
DESTINATION ${TORCH_INSTALL_INCLUDE_DIR}/torch)
if(USE_DEPLOY)
install(FILES
"${TORCH_SRC_DIR}/deploy.h"
DESTINATION ${TORCH_INSTALL_INCLUDE_DIR}/torch)
endif()

if(BUILD_TEST)
if(BUILD_LITE_INTERPRETER)
add_subdirectory(
Expand Down
1 change: 0 additions & 1 deletion cmake/Summary.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,6 @@ function(caffe2_print_configuration_summary)
if(NOT "${SELECTED_OP_LIST}" STREQUAL "")
message(STATUS " SELECTED_OP_LIST : ${SELECTED_OP_LIST}")
endif()
message(STATUS " USE_DEPLOY : ${USE_DEPLOY}")
message(STATUS " Public Dependencies : ${Caffe2_PUBLIC_DEPENDENCY_LIBS}")
message(STATUS " Private Dependencies : ${Caffe2_DEPENDENCY_LIBS}")
# coreml
Expand Down
Loading

0 comments on commit 936e930

Please sign in to comment.