Skip to content

Commit

Permalink
Fix gpu_info PADDLE_ENFORCE_GT when fraction_of_gpu_memory_to_use=1.0 (
Browse files Browse the repository at this point in the history
…PaddlePaddle#18950)

* fix gpu_info, test=develop

* fix reserving gpu memory calculation bug, add fraction=1 unittest, test=develop

* fix bug again for reserving size, test=develop
  • Loading branch information
sneaxiy authored Aug 1, 2019
1 parent 3ab1866 commit 08fa98f
Show file tree
Hide file tree
Showing 2 changed files with 32 additions and 4 deletions.
25 changes: 25 additions & 0 deletions paddle/fluid/memory/detail/buddy_allocator_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,31 @@ TEST(BuddyAllocator, AllocFromAvailable) {
TestBuddyAllocator(&buddy_allocator, 10 << 10);
TestBuddyAllocator(&buddy_allocator, 10 << 20);
TestBuddyAllocator(&buddy_allocator, static_cast<size_t>(1 << 30));

if (p) {
EXPECT_TRUE(cudaFree(p) == cudaSuccess);
}
}

TEST(BuddyAllocator, AllocFromAvailableWhenFractionIsOne) {
  // Ask for the whole GPU: fraction is 1.0 and both explicit MB flags are
  // disabled, so the allocator sizes itself purely from available memory.
  FLAGS_fraction_of_gpu_memory_to_use = 1.0;
  FLAGS_initial_gpu_memory_in_mb = 0;
  FLAGS_reallocate_gpu_memory_in_mb = 0;

  // Occupy 4 GiB up-front so only part of the device memory remains free.
  void* preallocated = nullptr;
  EXPECT_TRUE(cudaMalloc(&preallocated, static_cast<size_t>(4) << 30) ==
              cudaSuccess);

  // Even with fraction == 1.0, the BuddyAllocator must be able to serve
  // requests out of whatever GPU memory is still available.
  BuddyAllocator buddy_allocator(
      std::unique_ptr<SystemAllocator>(new GPUAllocator(TEST_GPU_ID)),
      platform::GpuMinChunkSize(), platform::GpuMaxChunkSize());

  TestBuddyAllocator(&buddy_allocator, static_cast<size_t>(1) << 30);
  TestBuddyAllocator(&buddy_allocator, static_cast<size_t>(5) << 30);

  // Release the memory we grabbed so later tests see a clean device.
  if (preallocated != nullptr) {
    EXPECT_TRUE(cudaFree(preallocated) == cudaSuccess);
  }
}

#endif
Expand Down
11 changes: 7 additions & 4 deletions paddle/fluid/platform/gpu_info.cc
Original file line number Diff line number Diff line change
Expand Up @@ -230,12 +230,15 @@ void GpuMemoryUsage(size_t *available, size_t *total) {
size_t GpuAvailableMemToAlloc() {
size_t total = 0;
size_t available = 0;
size_t reserving = static_cast<size_t>(fraction_reserve_gpu_memory * total);
GpuMemoryUsage(&available, &total);
size_t reserving =
static_cast<size_t>(fraction_reserve_gpu_memory * available);
// If available size is less than minimum chunk size, no usable memory exists
size_t available_to_alloc = available - reserving;
size_t min_chunk_size = GpuMinChunkSize();
size_t available_to_alloc =
std::min(available > min_chunk_size ? available : 0, total - reserving);
if (available_to_alloc < min_chunk_size) {
available_to_alloc = 0;
}
VLOG(10) << "GPU usage " << (available >> 20) << "M/" << (total >> 20)
<< "M, " << (available_to_alloc >> 20) << "M available to allocate";
return available_to_alloc;
Expand All @@ -255,7 +258,7 @@ static size_t GpuAllocSize(bool realloc) {
size_t alloc_bytes =
(flag_mb > 0ul ? flag_mb << 20 : available_to_alloc *
FLAGS_fraction_of_gpu_memory_to_use);
PADDLE_ENFORCE_GT(available_to_alloc, alloc_bytes,
PADDLE_ENFORCE_GE(available_to_alloc, alloc_bytes,
"No enough available GPU memory");
VLOG(10) << "Alloc size is " << (alloc_bytes >> 20)
<< " MiB, is it Re-alloc: " << realloc;
Expand Down

0 comments on commit 08fa98f

Please sign in to comment.