Fixed mmap usage and (re)-implemented most parts of memory_posix.cc
guccigang420 committed Oct 14, 2023
1 parent f6b5424 commit 7f0a8b0
159 changes: 153 additions & 6 deletions src/xenia/base/memory_posix.cc
@@ -13,6 +13,9 @@
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <fstream>
#include <mutex>
#include <sstream>
#include <vector>

#include "xenia/base/math.h"
#include "xenia/base/platform.h"
@@ -79,14 +82,53 @@ uint32_t ToPosixProtectFlags(PageAccess access) {
}
}

// Converts the first three characters of a /proc/self/maps permission
// field (e.g. "rw-p") to the corresponding PageAccess value.
PageAccess ToXeniaProtectFlags(char* protection) {
if (protection[0] == 'r' && protection[1] == 'w' && protection[2] == 'x') {
return PageAccess::kExecuteReadWrite;
} else if (protection[0] == 'r' && protection[1] == '-' &&
protection[2] == 'x') {
return PageAccess::kExecuteReadOnly;
} else if (protection[0] == 'r' && protection[1] == 'w' &&
protection[2] == '-') {
return PageAccess::kReadWrite;
} else if (protection[0] == 'r' && protection[1] == '-' &&
protection[2] == '-') {
return PageAccess::kReadOnly;
} else {
return PageAccess::kNoAccess;
}
}

bool IsWritableExecutableMemorySupported() { return true; }

struct MappedFileRange {
uintptr_t region_begin;
uintptr_t region_end;
};

// Views created by MapFileView, tracked so that DeallocFixed and
// UnmapFileView can tell file-backed mappings apart from anonymous memory.
std::vector<MappedFileRange> mapped_file_ranges;
std::mutex g_mapped_file_ranges_mutex;

void* AllocFixed(void* base_address, size_t length,
AllocationType allocation_type, PageAccess access) {
// mmap does not support reserve / commit, so ignore allocation_type.
uint32_t prot = ToPosixProtectFlags(access);
- void* result = mmap(base_address, length, prot,
- MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
int flags = MAP_PRIVATE | MAP_ANONYMOUS;

if (base_address != nullptr) {
// Committing a region that was already reserved at base_address only
// requires updating its protection; no new mapping is needed.
bool should_protect = allocation_type == AllocationType::kCommit;
if (should_protect) {
if (Protect(base_address, length, access)) {
return base_address;
} else {
return nullptr;
}
}
flags |= MAP_FIXED_NOREPLACE;
}

void* result = mmap(base_address, length, prot, flags, -1, 0);

if (result == MAP_FAILED) {
return nullptr;
} else {
@@ -96,20 +138,97 @@ void* AllocFixed(void* base_address, size_t length,

bool DeallocFixed(void* base_address, size_t length,
DeallocationType deallocation_type) {
- return munmap(base_address, length) == 0;
const uintptr_t region_begin = (uintptr_t)base_address;
const uintptr_t region_end = (uintptr_t)base_address + length;

std::lock_guard<std::mutex> guard(g_mapped_file_ranges_mutex);
for (const auto& mapped_range : mapped_file_ranges) {
if (region_begin >= mapped_range.region_begin &&
region_end <= mapped_range.region_end) {

switch (deallocation_type) {
case DeallocationType::kDecommit:
return Protect(base_address, length, PageAccess::kNoAccess);
case DeallocationType::kRelease:
assert_always("Error: Tried to release mapped memory!");
return false;
default:
assert_unhandled_case(deallocation_type);
return false;
}

}
}

switch (deallocation_type) {
case DeallocationType::kDecommit:
return Protect(base_address, length, PageAccess::kNoAccess);
case DeallocationType::kRelease:
return munmap(base_address, length) == 0;
default:
assert_unhandled_case(deallocation_type);
return false;
}
}
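
Since mmap has no separate reserve/commit step, DeallocFixed above emulates kDecommit by dropping all access via Protect and kRelease by unmapping. A standalone sketch of that emulation against raw POSIX calls, assuming a 4 KiB page and ignoring error details (illustrative only, not part of this commit):

    // Sketch: emulate decommit/release for one anonymous page with POSIX calls.
    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page = 4096;  // Assumed page size for the example.
      void* region = mmap(nullptr, page, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (region == MAP_FAILED) return 1;
      // "Decommit": keep the address range reserved but make it inaccessible.
      if (mprotect(region, page, PROT_NONE) != 0) return 1;
      // "Release": hand the address range back to the kernel.
      if (munmap(region, page) != 0) return 1;
      std::printf("decommit/release emulation succeeded\n");
      return 0;
    }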

bool Protect(void* base_address, size_t length, PageAccess access,
PageAccess* out_old_access) {
- // Linux does not have a syscall to query memory permissions.
- assert_null(out_old_access);
if (out_old_access) {
size_t length_copy = length;
QueryProtect(base_address, length_copy, *out_old_access);
}

uint32_t prot = ToPosixProtectFlags(access);
return mprotect(base_address, length, prot) == 0;
}

bool QueryProtect(void* base_address, size_t& length, PageAccess& access_out) {
// There is no generic POSIX way to query page protections. Parsing
// /proc/self/maps should work on any Linux-kernel-based OS, including Android.
#if XE_PLATFORM_LINUX
std::ifstream memory_maps;
memory_maps.open("/proc/self/maps", std::ios_base::in);
std::string maps_entry_string;

while (std::getline(memory_maps, maps_entry_string)) {
std::stringstream entry_stream(maps_entry_string);
uintptr_t map_region_begin, map_region_end;
char separator, protection[5];  // Permission field, e.g. "rw-p", plus terminator.

entry_stream >> std::hex >> map_region_begin >> separator >>
map_region_end >> protection;

if (map_region_begin <= (uintptr_t)base_address &&
map_region_end > (uintptr_t)base_address) {
length = map_region_end - reinterpret_cast<uintptr_t>(base_address);

access_out = ToXeniaProtectFlags(protection);

// Coalesce consecutive mappings that share the same protection.
while (std::getline(memory_maps, maps_entry_string)) {
std::stringstream next_entry_stream(maps_entry_string);
uintptr_t next_map_region_begin, next_map_region_end;
char next_protection[5];

next_entry_stream >> std::hex >> next_map_region_begin >> separator >>
next_map_region_end >> next_protection;
if (map_region_end == next_map_region_begin &&
access_out == ToXeniaProtectFlags(next_protection)) {
length =
next_map_region_end - reinterpret_cast<uintptr_t>(base_address);
// Keep extending through further contiguous mappings.
map_region_end = next_map_region_end;
continue;
} else {
break;
}
}

memory_maps.close();
return true;
}
}

memory_maps.close();
return false;
#else
return false;
#endif
}
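
QueryProtect depends on the /proc/self/maps format, where each line begins with "begin-end perms", for example "7f2c4a600000-7f2c4a621000 rw-p ...". The same lookup can be sketched in isolation; the helper name LookupProtection is hypothetical and Linux-only, not part of this commit:

    // Sketch: find the /proc/self/maps entry containing an address and return
    // its permission field (e.g. "rw-p").
    #include <cstdint>
    #include <fstream>
    #include <sstream>
    #include <string>

    bool LookupProtection(uintptr_t address, std::string& protection_out) {
      std::ifstream maps("/proc/self/maps");
      std::string line;
      while (std::getline(maps, line)) {
        std::stringstream entry(line);
        uintptr_t begin, end;
        char separator;
        std::string protection;
        // Each entry starts with "begin-end perms", both addresses in hex.
        entry >> std::hex >> begin >> separator >> end >> protection;
        if (begin <= address && address < end) {
          protection_out = protection;
          return true;
        }
      }
      return false;
    }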

FileMappingHandle CreateFileMappingHandle(const std::filesystem::path& path,
@@ -178,12 +297,40 @@ void CloseFileMappingHandle(FileMappingHandle handle,
void* MapFileView(FileMappingHandle handle, void* base_address, size_t length,
PageAccess access, size_t file_offset) {
uint32_t prot = ToPosixProtectFlags(access);
- return mmap64(base_address, length, prot, MAP_PRIVATE | MAP_ANONYMOUS, handle,

int flags = MAP_SHARED;
if (base_address != nullptr) {
flags |= MAP_FIXED_NOREPLACE;
}

void* result = mmap(base_address, length, prot, flags, handle,
file_offset);

if (result == MAP_FAILED) {
return nullptr;
} else {
std::lock_guard<std::mutex> guard(g_mapped_file_ranges_mutex);
mapped_file_ranges.push_back(
{(uintptr_t)result, (uintptr_t)result + length});
return result;
}
}
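
Both AllocFixed and MapFileView now pass MAP_FIXED_NOREPLACE when a base address is requested, so an existing mapping makes the call fail instead of being silently replaced as with plain MAP_FIXED. A minimal probe of that behaviour, assuming Linux 4.17+ and a glibc that exposes the flag (illustrative only, not part of this commit):

    // Sketch: MAP_FIXED_NOREPLACE reports EEXIST rather than clobbering an
    // existing mapping at the requested address.
    #include <sys/mman.h>
    #include <cerrno>
    #include <cstdio>

    int main() {
      void* first = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (first == MAP_FAILED) return 1;
      // Ask for the same address again; the existing mapping must be preserved.
      void* second = mmap(first, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                          -1, 0);
      if (second == MAP_FAILED && errno == EEXIST) {
        std::printf("existing mapping at %p was preserved\n", first);
      }
      munmap(first, 4096);
      return 0;
    }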

bool UnmapFileView(FileMappingHandle handle, void* base_address,
size_t length) {
std::lock_guard<std::mutex> guard(g_mapped_file_ranges_mutex);
for (auto mapped_range = mapped_file_ranges.begin();
mapped_range != mapped_file_ranges.end();) {
if (mapped_range->region_begin == (uintptr_t)base_address &&
mapped_range->region_end == (uintptr_t)base_address + length) {
mapped_file_ranges.erase(mapped_range);
return munmap(base_address, length) == 0;
} else {
mapped_range++;
}
}
// TODO: Implement partial file unmapping.
assert_always("Error: Partial unmapping of files not yet supported.");
return munmap(base_address, length) == 0;
}
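
UnmapFileView only removes a recorded view when the requested range matches it exactly; anything else falls through to the partial-unmap assert. The bookkeeping can be sketched on its own (the Range, RecordView, and ForgetView names are illustrative, not the commit's code):

    // Sketch: record mapped views and forget them only on an exact range match.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Range {
      uintptr_t begin;
      uintptr_t end;
    };
    std::vector<Range> g_views;

    void RecordView(void* base, size_t length) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(base);
      g_views.push_back({begin, begin + length});
    }

    // Returns true if an exactly matching view was found and removed.
    bool ForgetView(void* base, size_t length) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(base);
      uintptr_t end = begin + length;
      for (auto it = g_views.begin(); it != g_views.end(); ++it) {
        if (it->begin == begin && it->end == end) {
          g_views.erase(it);
          return true;
        }
      }
      return false;  // Partial ranges are not handled, mirroring the TODO above.
    }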

