
[ci] two procs for parallelization (pytorch#85985)
Hitting OOMs on Linux CUDA, so use 2 procs instead of 3.

pytorch#85939
Pull Request resolved: pytorch#85985
Approved by: https://github.com/huydhn
clee2000 authored and pytorchmergebot committed Sep 30, 2022
1 parent e73e3e3 commit 401a358
Showing 2 changed files with 2 additions and 2 deletions.
test/run_test.py (1 addition, 1 deletion)

@@ -699,7 +699,7 @@ def run_test_ops(test_module, test_directory, options):
         return run_test(test_module, test_directory, copy.deepcopy(options),
                         extra_unittest_args=["--use-pytest", '-vv', '-x', '--reruns=2', '-rfEX'],
                         )
-    NUM_PROCS = 3
+    NUM_PROCS = 2
     return_codes = []
     os.environ["NUM_PARALLEL_PROCS"] = str(NUM_PROCS)
     pool = get_context("spawn").Pool(NUM_PROCS)
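
For context, the spawn-context pool above is what fans the test modules out across NUM_PROCS worker processes. The following is a minimal, self-contained sketch of that pattern; run_one_module and the module list are hypothetical placeholders, not the actual helpers in run_test.py.

import os
from multiprocessing import get_context

NUM_PROCS = 2  # dropped from 3 because 3 concurrent CUDA test procs hit OOMs


def run_one_module(test_module):
    # Placeholder worker: the real worker in run_test.py runs pytest on the
    # module; here we just report success for the given module name.
    return (test_module, 0)


if __name__ == "__main__":
    # Children read this to set their per-process CUDA memory fraction.
    os.environ["NUM_PARALLEL_PROCS"] = str(NUM_PROCS)
    modules = ["test_ops", "test_ops_gradients"]  # hypothetical shard list
    with get_context("spawn").Pool(NUM_PROCS) as pool:
        return_codes = pool.map(run_one_module, modules)
    print(return_codes)  # e.g. [('test_ops', 0), ('test_ops_gradients', 0)]

With two workers instead of three, fewer CUDA test processes share the GPU at once, which is what relieves the OOMs mentioned in the commit message.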
torch/testing/_internal/common_utils.py (1 addition, 1 deletion)

@@ -908,7 +908,7 @@ def _check_module_exists(name: str) -> bool:


 if TEST_CUDA and 'NUM_PARALLEL_PROCS' in os.environ:
-    num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "3"))
+    num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "2"))
     # other libraries take up about 11% of space per process
     torch.cuda.set_per_process_memory_fraction(round(1 / num_procs - .11, 2))
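
As a worked example (illustrative only, not part of the diff), the fraction passed to torch.cuda.set_per_process_memory_fraction above budgets roughly 11% of GPU memory per process for non-PyTorch libraries and splits the rest across the workers:

# Per-process CUDA memory cap for each worker count.
for num_procs in (3, 2):
    fraction = round(1 / num_procs - 0.11, 2)
    print(f"{num_procs} procs -> per-process memory fraction {fraction}")
# 3 procs -> per-process memory fraction 0.22
# 2 procs -> per-process memory fraction 0.39

So moving from 3 to 2 workers raises each process's cap on the CUDA caching allocator from 0.22 to 0.39 of device memory, trading some parallelism for per-process headroom.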
