Skip to content

Commit

Permalink
Revert "GWP-ASan: Teach allocator about PartitionAlloc"
Browse files Browse the repository at this point in the history
This reverts commit 3c600ba.

Reason for revert: This change is causing test failures on the Windows/
macOS ASan bots.

Original change's description:
> GWP-ASan: Teach allocator about PartitionAlloc
> 
> A core security guarantee of PartitionAlloc is that allocations of
> different types are never given overlapping allocations. In order to
> maintain this security guarantee, the GuardedPageAllocator needs to be
> taught to know when it is used to back PartitionAlloc, and passed in the
> types on Allocate(). In those cases it maintains a PartitionAlloc
> specific free list that is aware of what types have been previously used
> for particular slots.
> 
> Bug: 956824
> Change-Id: I77e596f29d29cc7f88b9e838b8bae891037bafcb
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1622477
> Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
> Commit-Queue: Vitaly Buka <vitalybuka@chromium.org>
> Auto-Submit: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#661994}

TBR=vtsyrklevich@chromium.org,vitalybuka@chromium.org

Change-Id: I0e72a0e081899ccd2705418e068360051f3dfb92
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 956824
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1623669
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Reviewed-by: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#662074}
  • Loading branch information
vlad902 authored and Commit Bot committed May 22, 2019
1 parent 99fb68a commit ad6074c
Show file tree
Hide file tree
Showing 7 changed files with 64 additions and 194 deletions.
80 changes: 15 additions & 65 deletions components/gwp_asan/client/guarded_page_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -65,16 +65,12 @@ void GuardedPageAllocator::SimpleFreeList<T>::Initialize(T max_entries) {
}

template <typename T>
bool GuardedPageAllocator::SimpleFreeList<T>::Allocate(T* out,
const char* type) {
if (num_used_entries_ < max_entries_) {
*out = num_used_entries_++;
return true;
}
T GuardedPageAllocator::SimpleFreeList<T>::Allocate() {
if (num_used_entries_ < max_entries_)
return num_used_entries_++;

DCHECK_LE(free_list_.size(), max_entries_);
*out = RandomEviction(&free_list_);
return true;
return RandomEviction(&free_list_);
}

template <typename T>
Expand All @@ -83,47 +79,12 @@ void GuardedPageAllocator::SimpleFreeList<T>::Free(T entry) {
free_list_.push_back(entry);
}

// Defaulted out-of-line: the class owns non-trivial members (std::vector,
// std::map — see the header), so the constructor/destructor definitions are
// anchored in this translation unit.
GuardedPageAllocator::PartitionAllocSlotFreeList::PartitionAllocSlotFreeList() =
default;
GuardedPageAllocator::PartitionAllocSlotFreeList::
~PartitionAllocSlotFreeList() = default;

// Prepares the free list to hand out slot indices in [0, max_entries).
// Performs all allocation up front so later calls only index into
// already-sized storage.
void GuardedPageAllocator::PartitionAllocSlotFreeList::Initialize(
    AllocatorState::SlotIdx max_entries) {
  max_entries_ = max_entries;
  // resize(), not reserve(): Allocate() writes type_mapping_[i] via
  // operator[], which is only valid for i < size(). reserve() raises
  // capacity but leaves size() == 0, making every such write an
  // out-of-bounds access (UB, and exactly what ASan flags).
  type_mapping_.resize(max_entries);
}

// Reserves a slot for an allocation of |type| and writes its index to |*out|.
// Fresh slots are handed out in order until max_entries_ have been used;
// after that a slot is only reused for the same |type| it previously held,
// preserving PartitionAlloc's no-overlapping-types guarantee. Returns false
// if no slot bound to |type| is free.
bool GuardedPageAllocator::PartitionAllocSlotFreeList::Allocate(
AllocatorState::SlotIdx* out,
const char* type) {
// Fresh slot: bind it to |type| before handing it out.
// NOTE(review): this operator[] write requires
// type_mapping_.size() > num_used_entries_, but Initialize() only calls
// reserve(), which does not change size() — out-of-bounds write (UB);
// Initialize() should resize(). Confirm against the ASan bot failures.
if (num_used_entries_ < max_entries_) {
type_mapping_[num_used_entries_] = type;
*out = num_used_entries_++;
return true;
}

// All slots used at least once: only a slot previously bound to |type|
// may be recycled.
if (!free_list_.count(type) || free_list_[type].empty())
return false;

DCHECK_LE(free_list_[type].size(), max_entries_);
// RandomEviction (per its name, presumably random choice — see its
// definition) picks which free slot of this type to reuse.
*out = RandomEviction(&free_list_[type]);
return true;
}

// Returns |entry| to the free list, keyed by the type that slot last
// backed, so Allocate() can only hand it out again for the same type.
void GuardedPageAllocator::PartitionAllocSlotFreeList::Free(
    AllocatorState::SlotIdx entry) {
  // Only indices previously handed out by Allocate() may come back.
  DCHECK_LT(entry, num_used_entries_);
  const char* bound_type = type_mapping_[entry];
  free_list_[bound_type].push_back(entry);
}

GuardedPageAllocator::GuardedPageAllocator() {}

void GuardedPageAllocator::Init(size_t max_alloced_pages,
size_t num_metadata,
size_t total_pages,
OutOfMemoryCallback oom_callback,
bool is_partition_alloc) {
OutOfMemoryCallback oom_callback) {
CHECK_GT(max_alloced_pages, 0U);
CHECK_LE(max_alloced_pages, num_metadata);
CHECK_LE(num_metadata, AllocatorState::kMaxMetadata);
Expand All @@ -133,7 +94,6 @@ void GuardedPageAllocator::Init(size_t max_alloced_pages,
state_.num_metadata = num_metadata;
state_.total_pages = total_pages;
oom_callback_ = std::move(oom_callback);
is_partition_alloc_ = is_partition_alloc;

state_.page_size = base::GetPageSize();

Expand All @@ -150,11 +110,7 @@ void GuardedPageAllocator::Init(size_t max_alloced_pages,
// there should be no risk of a race here.
base::AutoLock lock(lock_);
free_metadata_.Initialize(num_metadata);
if (is_partition_alloc_)
free_slots_ = std::make_unique<PartitionAllocSlotFreeList>();
else
free_slots_ = std::make_unique<SimpleFreeList<AllocatorState::SlotIdx>>();
free_slots_->Initialize(total_pages);
free_slots_.Initialize(total_pages);
}

slot_to_metadata_idx_.resize(total_pages);
Expand All @@ -173,12 +129,7 @@ GuardedPageAllocator::~GuardedPageAllocator() {
UnmapRegion();
}

void* GuardedPageAllocator::Allocate(size_t size,
size_t align,
const char* type) {
if (!is_partition_alloc_)
DCHECK_EQ(type, nullptr);

void* GuardedPageAllocator::Allocate(size_t size, size_t align) {
if (!size || size > state_.page_size || align > state_.page_size)
return nullptr;

Expand All @@ -192,7 +143,7 @@ void* GuardedPageAllocator::Allocate(size_t size,

AllocatorState::SlotIdx free_slot;
AllocatorState::MetadataIdx free_metadata;
if (!ReserveSlotAndMetadata(&free_slot, &free_metadata, type))
if (!ReserveSlotAndMetadata(&free_slot, &free_metadata))
return nullptr;

uintptr_t free_page = state_.SlotToAddr(free_slot);
Expand Down Expand Up @@ -281,13 +232,11 @@ size_t GuardedPageAllocator::RegionSize() const {

bool GuardedPageAllocator::ReserveSlotAndMetadata(
AllocatorState::SlotIdx* slot,
AllocatorState::MetadataIdx* metadata_idx,
const char* type) {
AllocatorState::MetadataIdx* metadata_idx) {
base::AutoLock lock(lock_);
if (num_alloced_pages_ == max_alloced_pages_ ||
!free_slots_->Allocate(slot, type)) {
if (!oom_hit_) {
if (++consecutive_failed_allocations_ == kOutOfMemoryCount) {
if (num_alloced_pages_ == max_alloced_pages_) {
if (++consecutive_failed_allocations_ == kOutOfMemoryCount) {
if (!oom_hit_) {
oom_hit_ = true;
base::AutoUnlock unlock(lock_);
std::move(oom_callback_).Run(total_allocations_ - kOutOfMemoryCount);
Expand All @@ -296,7 +245,8 @@ bool GuardedPageAllocator::ReserveSlotAndMetadata(
return false;
}

CHECK(free_metadata_.Allocate(metadata_idx, nullptr));
*slot = free_slots_.Allocate();
*metadata_idx = free_metadata_.Allocate();
if (metadata_[*metadata_idx].alloc_ptr) {
// Overwrite the outdated slot_to_metadata_idx mapping from the previous use
// of this metadata if it's still valid.
Expand All @@ -319,7 +269,7 @@ void GuardedPageAllocator::FreeSlotAndMetadata(
DCHECK_LT(metadata_idx, state_.num_metadata);

base::AutoLock lock(lock_);
free_slots_->Free(slot);
free_slots_.Free(slot);
free_metadata_.Free(metadata_idx);

DCHECK_GT(num_alloced_pages_, 0U);
Expand Down
69 changes: 10 additions & 59 deletions components/gwp_asan/client/guarded_page_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
#define COMPONENTS_GWP_ASAN_CLIENT_GUARDED_PAGE_ALLOCATOR_H_

#include <atomic>
#include <map>
#include <memory>
#include <string>
#include <vector>
Expand Down Expand Up @@ -55,8 +54,7 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
void Init(size_t max_alloced_pages,
size_t num_metadata,
size_t total_pages,
OutOfMemoryCallback oom_callback,
bool is_partition_alloc);
OutOfMemoryCallback oom_callback);

// On success, returns a pointer to size bytes of page-guarded memory. On
// failure, returns nullptr. The allocation is not guaranteed to be
Expand All @@ -67,10 +65,8 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
// It must be less than or equal to the allocation size. If it's left as zero
// it will default to the default alignment the allocator chooses.
//
// The type parameter should only be set for PartitionAlloc allocations.
//
// Preconditions: Init() must have been called.
void* Allocate(size_t size, size_t align = 0, const char* type = nullptr);
void* Allocate(size_t size, size_t align = 0);

// Deallocates memory pointed to by ptr. ptr must have been previously
// returned by a call to Allocate.
Expand All @@ -90,29 +86,17 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
}

private:
// Virtual base class representing a free list of entries T. Concrete
// implementations decide the reuse policy; access must be externally
// synchronized.
template <typename T>
class FreeList {
public:
FreeList() = default;
virtual ~FreeList() = default;
// Prepares the list to hand out up to |max_entries| entries.
virtual void Initialize(T max_entries) = 0;
// On success, writes a free entry to |*out| and returns true. |type| is
// only meaningful to type-aware implementations; others may ignore it.
virtual bool Allocate(T* out, const char* type) = 0;
// Returns |entry| to the list for reuse.
virtual void Free(T entry) = 0;
};

// Manages a free list of slot or metadata indices in the range
// [0, max_entries). Access to SimpleFreeList objects must be synchronized.
//
// SimpleFreeList is specifically designed to pre-allocate data in Initialize
// so that it never recurses into malloc/free during Allocate/Free.
template <typename T>
class SimpleFreeList : public FreeList<T> {
class SimpleFreeList {
public:
~SimpleFreeList() final = default;
void Initialize(T max_entries) final;
bool Allocate(T* out, const char* type) final;
void Free(T entry) final;
void Initialize(T max_entries);
T Allocate();
void Free(T entry);

private:
std::vector<T> free_list_;
Expand All @@ -123,36 +107,6 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
T max_entries_ = 0;
};

// Manages a free list of slot indices especially for PartitionAlloc.
// Allocate() is type-aware so that once a page has been used to allocate
// a given partition, it never reallocates an object of a different type on
// that page. Access to this object must be synchronized.
//
// PartitionAllocSlotFreeList can perform malloc/free during Allocate/Free,
// so it is not safe to use with malloc hooks!
//
// TODO(vtsyrklevich): Right now we allocate slots to partitions on a
// first-come first-serve basis, this makes it likely that all slots will be
// used up by common types first. Set aside a fixed amount of slots (~5%) for
// one-off partitions so that we make sure to sample rare types as well.
class PartitionAllocSlotFreeList : public FreeList<AllocatorState::SlotIdx> {
public:
PartitionAllocSlotFreeList();
~PartitionAllocSlotFreeList() final;
void Initialize(AllocatorState::SlotIdx max_entries) final;
bool Allocate(AllocatorState::SlotIdx* out, const char* type) final;
void Free(AllocatorState::SlotIdx entry) final;

private:
// Maps slot index -> the type last allocated in that slot. Entries are
// meaningful for indices in [0, num_used_entries_).
std::vector<const char*> type_mapping_;
// Per-type free lists of recyclable slots. Keyed by the type pointer
// itself (default const char* comparison), not the string contents.
std::map<const char*, std::vector<AllocatorState::SlotIdx>> free_list_;

// Number of used entries. This counter ensures all free entries are used
// before starting to use random eviction.
AllocatorState::SlotIdx num_used_entries_ = 0;
// Upper bound on slot indices handed out; set by Initialize().
AllocatorState::SlotIdx max_entries_ = 0;
};

// Unmaps memory allocated by this class, if Init was called.
~GuardedPageAllocator();

Expand All @@ -173,8 +127,8 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
// On success, returns true and writes the reserved indices to |slot| and
// |metadata_idx|. Otherwise returns false if no allocations are available.
bool ReserveSlotAndMetadata(AllocatorState::SlotIdx* slot,
AllocatorState::MetadataIdx* metadata_idx,
const char* type) LOCKS_EXCLUDED(lock_);
AllocatorState::MetadataIdx* metadata_idx)
LOCKS_EXCLUDED(lock_);

// Marks the specified slot and metadata as unreserved.
void FreeSlotAndMetadata(AllocatorState::SlotIdx slot,
Expand All @@ -196,8 +150,7 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
// Lock that synchronizes allocating/freeing slots between threads.
base::Lock lock_;

std::unique_ptr<FreeList<AllocatorState::SlotIdx>> free_slots_
GUARDED_BY(lock_);
SimpleFreeList<AllocatorState::SlotIdx> free_slots_ GUARDED_BY(lock_);
SimpleFreeList<AllocatorState::MetadataIdx> free_metadata_ GUARDED_BY(lock_);

// Number of currently-allocated pages.
Expand All @@ -220,10 +173,8 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
bool oom_hit_ GUARDED_BY(lock_) = false;
OutOfMemoryCallback oom_callback_;

bool is_partition_alloc_ = false;

friend class BaseGpaTest;
friend class CrashAnalyzerTest;
friend class GuardedPageAllocatorTest;
FRIEND_TEST_ALL_PREFIXES(CrashAnalyzerTest, InternalError);
FRIEND_TEST_ALL_PREFIXES(CrashAnalyzerTest, StackTraceCollection);

Expand Down
Loading

0 comments on commit ad6074c

Please sign in to comment.