Skip to content

Commit

Permalink
Fixed mmap usage and (re)-implemented most parts of memory_posix.cc
Browse files Browse the repository at this point in the history
  • Loading branch information
guccigang420 committed Oct 1, 2023
1 parent f6b5424 commit e675bf3
Showing 1 changed file with 68 additions and 3 deletions.
71 changes: 68 additions & 3 deletions src/xenia/base/memory_posix.cc
Original file line number Diff line number Diff line change
Expand Up @@ -81,13 +81,44 @@ uint32_t ToPosixProtectFlags(PageAccess access) {

// POSIX mmap permits PROT_WRITE | PROT_EXEC mappings, so W^X is not
// enforced by this platform layer.
bool IsWritableExecutableMemorySupported() {
  return true;
}

// One live file-backed mapping, as the byte range [region_begin, region_end).
// region_end is exclusive: it is computed as base + length at the call sites.
struct MappedFileRange {
  size_t region_begin;
  size_t region_end;
};

// Bookkeeping for every view created by MapFileView. AllocFixed and
// DeallocFixed consult this list so they never munmap or re-mmap pages that
// belong to a file mapping (which would destroy the view).
// NOTE(review): access to this vector is not synchronized — presumably the
// callers serialize mapping operations; confirm before using from multiple
// threads.
std::vector<MappedFileRange> mapped_file_ranges;

void* AllocFixed(void* base_address, size_t length,
AllocationType allocation_type, PageAccess access) {
// mmap does not support reserve / commit, so ignore allocation_type.
uint32_t prot = ToPosixProtectFlags(access);
void* result = mmap(base_address, length, prot,
MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);

int flags = MAP_PRIVATE | MAP_ANONYMOUS;
if (base_address != nullptr) {
flags |= MAP_FIXED_NOREPLACE;
}
void* result = mmap(base_address, length, prot, flags, -1, 0);

if (result == MAP_FAILED) {
// If the address is within this range, the mmap failed because we have
// already mapped this memory.
size_t region_begin = (size_t)base_address;
size_t region_end = (size_t)base_address + length;
for (const auto mapped_range : mapped_file_ranges) {
// Check if the allocation is within this range...
if (region_begin >= mapped_range.region_begin &&
region_end <= mapped_range.region_end) {
bool should_protect = (((uint8_t)allocation_type & 2) == 2);

if (should_protect) {
if (Protect(base_address, length, access)) {
return base_address;
}
} else if ((((uint8_t)allocation_type & 1) == 1)) {
return base_address;
}
}
}
return nullptr;
} else {
return result;
Expand All @@ -96,6 +127,15 @@ void* AllocFixed(void* base_address, size_t length,

// Releases [base_address, base_address + length). mmap has no separate
// decommit concept, so deallocation_type is ignored.
//
// If the range lies entirely inside a live file mapping we must not munmap
// it — that would punch a hole in the view. Instead the pages are stripped
// of all access so they behave as released while the mapping stays intact.
bool DeallocFixed(void* base_address, size_t length,
                  DeallocationType deallocation_type) {
  const size_t region_begin = reinterpret_cast<size_t>(base_address);
  const size_t region_end = region_begin + length;
  // Iterate by const reference — the original copied each range per step.
  for (const auto& mapped_range : mapped_file_ranges) {
    if (region_begin >= mapped_range.region_begin &&
        region_end <= mapped_range.region_end) {
      return Protect(base_address, length, PageAccess::kNoAccess);
    }
  }

  return munmap(base_address, length) == 0;
}

Expand Down Expand Up @@ -178,12 +218,37 @@ void CloseFileMappingHandle(FileMappingHandle handle,
// Maps a view of the file mapping `handle` at (optionally) base_address.
// The view is MAP_SHARED so stores are visible through every view of the
// same mapping. On success the range is recorded in mapped_file_ranges so
// AllocFixed/DeallocFixed treat it as file-backed; returns nullptr on
// failure.
void* MapFileView(FileMappingHandle handle, void* base_address, size_t length,
                  PageAccess access, size_t file_offset) {
  uint32_t prot = ToPosixProtectFlags(access);

  int flags = MAP_SHARED;
  if (base_address != nullptr) {
    // Honor the requested address, but fail instead of silently clobbering
    // an existing mapping (plain MAP_FIXED would replace it).
    flags |= MAP_FIXED_NOREPLACE;
  }

  // NOTE(review): file_offset is size_t but mmap takes off_t; on a target
  // with 32-bit off_t large offsets would truncate — confirm build targets
  // or use mmap64 / _FILE_OFFSET_BITS=64.
  void* result = mmap(base_address, length, prot, flags, handle, file_offset);
  if (result == MAP_FAILED) {
    return nullptr;
  }
  mapped_file_ranges.push_back(
      {reinterpret_cast<size_t>(result),
       reinterpret_cast<size_t>(result) + length});
  return result;
}

// Unmaps a view previously created by MapFileView. Only exact-range
// unmapping is supported: the range must match a recorded entry, which is
// then dropped from the bookkeeping list before the pages are released.
bool UnmapFileView(FileMappingHandle handle, void* base_address,
                   size_t length) {
  const size_t view_begin = (size_t)base_address;
  const size_t view_end = view_begin + length;
  for (auto it = mapped_file_ranges.begin(); it != mapped_file_ranges.end();
       ++it) {
    if (it->region_begin == view_begin && it->region_end == view_end) {
      mapped_file_ranges.erase(it);
      return munmap(base_address, length) == 0;
    }
  }
  // TODO: Implement partial file unmapping.
  assert_always("Error: Partial unmapping of files not yet supported.");
  return munmap(base_address, length) == 0;
}

Expand Down

0 comments on commit e675bf3

Please sign in to comment.