Refactor memory management into wibo::heap

- Removes blockUpper2GB hack; we now start early in the process and reserve all (available) space in the lower 2GB address space, leaving the upper 2GB untouched for host code
- All virtual memory operations flow through wibo::heap for bookkeeping
- All guest code uses a guest mimalloc area + thread-local heaps reserved in the guest address space
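
The new src/heap.h itself is not included in this excerpt, so the sketch below is only a rough reconstruction of the wibo::heap interface inferred from the call sites visible in the hunks that follow. The names are the ones the diff actually uses; every signature, parameter type, and every VmStatus enumerator other than Success is an assumption.

// Hypothetical sketch of the wibo::heap interface (not the real src/heap.h).
// Windows-style types (DWORD, NTSTATUS, MEMORY_BASIC_INFORMATION) are assumed to come
// from the project's common headers; mi_heap_t comes from mimalloc.
#include <cstddef>
#include <cstdint>
#include <mimalloc.h>

namespace wibo::heap {

// Only Success is confirmed by the diff; the other enumerators are guesses.
enum class VmStatus { Success, InvalidAddress, InvalidParameter, NotEnoughMemory, NotSupported };

// Guest C-runtime allocations (used by crt.cpp, msvcrt.cpp, processenv.cpp).
void *guestMalloc(std::size_t size);
void *guestCalloc(std::size_t count, std::size_t size);
void *guestRealloc(void *ptr, std::size_t newSize);
void guestFree(void *ptr);

// mimalloc heaps placed inside the reserved lower-2GB guest address space.
mi_heap_t *getGuestHeap();
mi_heap_t *createGuestHeap();

std::size_t systemPageSize();

// Bookkeeping entry points behind VirtualAlloc/VirtualFree/VirtualProtect/VirtualQuery.
VmStatus virtualAlloc(void **base, std::size_t *size, DWORD allocationType, DWORD protect);
VmStatus virtualFree(void *base, std::size_t size, DWORD freeType);
VmStatus virtualProtect(void *base, std::size_t size, DWORD newProtect, DWORD *oldProtect);
VmStatus virtualQuery(const void *address, MEMORY_BASIC_INFORMATION *info);

// Address ranges handed out to MapViewOfFile and released on UnmapViewOfFile.
// The two middle parameters of reserveViewRange are passed as 0 in the diff; their meaning is assumed.
VmStatus reserveViewRange(std::size_t length, std::uintptr_t minAddress, std::uintptr_t maxAddress, void **candidate);
void registerViewRange(void *base, std::size_t length, DWORD allocationProtect, DWORD viewProtect);
void releaseViewRange(void *base);

// Error-code translation used by the kernel32 and ntdll shims.
DWORD win32ErrorFromVmStatus(VmStatus status);
NTSTATUS ntStatusFromVmStatus(VmStatus status);

} // namespace wibo::heap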
.gitignore (vendored)

@@ -19,3 +19,4 @@ venv/
 *.pyc
 *.pyo
 *.pyd
+.ruff_cache/
@@ -174,6 +174,7 @@ add_executable(wibo
 src/files.cpp
 src/handles.cpp
 src/loader.cpp
+src/heap.cpp
 src/main.cpp
 src/modules.cpp
 src/processes.cpp
@@ -184,9 +185,10 @@ add_executable(wibo
 target_compile_definitions(wibo PRIVATE _GNU_SOURCE _FILE_OFFSET_BITS=64 _TIME_BITS=64)
 target_compile_features(wibo PRIVATE cxx_std_20)
 target_compile_options(wibo PRIVATE -Wall -Wextra)
+target_link_options(wibo PRIVATE -no-pie -Wl,--image-base=0x90000000)
 if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
 target_compile_options(wibo PRIVATE -fno-pie -maccumulate-outgoing-args)
-target_link_options(wibo PRIVATE -no-pie -maccumulate-outgoing-args)
+target_link_options(wibo PRIVATE -maccumulate-outgoing-args)
 endif()
 target_include_directories(wibo PRIVATE dll src ${WIBO_GENERATED_HEADER_DIR})
 target_link_libraries(wibo PRIVATE mimalloc-obj atomic)
@@ -3,6 +3,7 @@
 #include "common.h"
 #include "context.h"
 #include "crt_trampolines.h"
+#include "heap.h"
 #include "kernel32/internal.h"
 #include "modules.h"
 
@@ -180,25 +181,25 @@ const char *CDECL strrchr(const char *str, int ch) {
 void *CDECL malloc(SIZE_T size) {
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("malloc(%zu)\n", size);
-return ::malloc(size);
+return wibo::heap::guestMalloc(size);
 }
 
 void *CDECL calloc(SIZE_T count, SIZE_T size) {
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("calloc(%zu, %zu)\n", count, size);
-return ::calloc(count, size);
+return wibo::heap::guestCalloc(count, size);
 }
 
 void *CDECL realloc(void *ptr, SIZE_T newSize) {
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("realloc(%p, %zu)\n", ptr, newSize);
-return ::realloc(ptr, newSize);
+return wibo::heap::guestRealloc(ptr, newSize);
 }
 
 void CDECL free(void *ptr) {
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("free(%p)\n", ptr);
-::free(ptr);
+wibo::heap::guestFree(ptr);
 }
 
 void *CDECL memcpy(void *dest, const void *src, SIZE_T count) {
@@ -1,4 +1,5 @@
 #include "heapapi.h"
+#include "heap.h"
 
 #include "common.h"
 #include "context.h"
@@ -22,8 +23,7 @@ HeapObject *g_processHeapRecord = nullptr;
 
 void ensureProcessHeapInitialized() {
 std::call_once(g_processHeapInitFlag, []() {
-mi_heap_t *heap = mi_heap_get_default();
-auto record = make_pin<HeapObject>(heap);
+auto record = make_pin<HeapObject>(nullptr);
 if (!record) {
 return;
 }
@@ -38,9 +38,13 @@ bool isExecutableHeap(const HeapObject *record) {
 }
 
 LPVOID heapAllocFromRecord(HeapObject *record, DWORD dwFlags, SIZE_T dwBytes) {
-if (!record || !record->heap) {
+if (!record) {
 return nullptr;
 }
+auto *heap = record->heap;
+if (!heap && record->isProcessHeap) {
+heap = wibo::heap::getGuestHeap();
+}
 if ((record->createFlags | dwFlags) & HEAP_GENERATE_EXCEPTIONS) {
 DEBUG_LOG("HeapAlloc: HEAP_GENERATE_EXCEPTIONS not supported\n");
 kernel32::setLastError(ERROR_INVALID_PARAMETER);
@@ -48,7 +52,7 @@ LPVOID heapAllocFromRecord(HeapObject *record, DWORD dwFlags, SIZE_T dwBytes) {
 }
 const bool zeroMemory = (dwFlags & HEAP_ZERO_MEMORY) != 0;
 const SIZE_T requestSize = std::max<SIZE_T>(1, dwBytes);
-void *mem = zeroMemory ? mi_heap_zalloc(record->heap, requestSize) : mi_heap_malloc(record->heap, requestSize);
+void *mem = zeroMemory ? mi_heap_zalloc(heap, requestSize) : mi_heap_malloc(heap, requestSize);
 if (!mem) {
 kernel32::setLastError(ERROR_NOT_ENOUGH_MEMORY);
 return nullptr;
@@ -63,9 +67,7 @@ LPVOID heapAllocFromRecord(HeapObject *record, DWORD dwFlags, SIZE_T dwBytes) {
 
 HeapObject::~HeapObject() {
 if (heap) {
-if (!isProcessHeap) {
 mi_heap_destroy(heap);
-}
 heap = nullptr;
 }
 if (isProcessHeap) {
@@ -84,7 +86,7 @@ HANDLE WINAPI HeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximum
 return nullptr;
 }
 
-mi_heap_t *heap = mi_heap_new();
+mi_heap_t *heap = wibo::heap::createGuestHeap();
 if (!heap) {
 setLastError(ERROR_NOT_ENOUGH_MEMORY);
 return nullptr;
@@ -211,7 +213,11 @@ LPVOID WINAPI HeapReAlloc(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem, SIZE_T dwBy
 return lpMem;
 }
 
-void *ret = mi_heap_realloc(record->heap, lpMem, requestSize);
+auto *heap = record->heap;
+if (!heap && record->isProcessHeap) {
+heap = wibo::heap::getGuestHeap();
+}
+void *ret = mi_heap_realloc(heap, lpMem, requestSize);
 if (!ret) {
 setLastError(ERROR_NOT_ENOUGH_MEMORY);
 return nullptr;
@@ -157,7 +157,7 @@ struct HeapObject : public ObjectBase {
 ~HeapObject() override;
 
 [[nodiscard]] inline bool isOwner() const { return pthread_equal(owner, pthread_self()); }
-[[nodiscard]] inline bool canAccess() const { return (isProcessHeap || isOwner()) && heap != nullptr; }
+[[nodiscard]] inline bool canAccess() const { return isProcessHeap || (isOwner() && heap != nullptr); }
 };
 
 inline constexpr uintptr_t kPseudoCurrentProcessHandleValue = static_cast<uintptr_t>(-1);
@@ -4,8 +4,8 @@
 #include "context.h"
 #include "errors.h"
 #include "handles.h"
+#include "heap.h"
 #include "internal.h"
-#include "modules.h"
 #include "strutil.h"
 
 #include <cerrno>
@@ -18,7 +18,6 @@
 #include <sys/mman.h>
 #include <unistd.h>
 #include <utility>
-#include <vector>
 
 namespace {
 
@@ -55,32 +54,12 @@ struct ViewInfo {
 DWORD protect = PAGE_NOACCESS;
 DWORD allocationProtect = PAGE_NOACCESS;
 DWORD type = MEM_PRIVATE;
+bool managed = false;
 };
 
 std::map<uintptr_t, ViewInfo> g_viewInfo;
 std::mutex g_viewInfoMutex;
 
-struct VirtualAllocation {
-uintptr_t base = 0;
-size_t size = 0;
-DWORD allocationProtect = 0;
-std::vector<DWORD> pageProtect;
-};
-
-std::map<uintptr_t, VirtualAllocation> g_virtualAllocations;
-std::mutex g_virtualAllocMutex;
-
-size_t systemPageSize() {
-static size_t cached = []() {
-long detected = sysconf(_SC_PAGESIZE);
-if (detected <= 0) {
-return static_cast<size_t>(4096);
-}
-return static_cast<size_t>(detected);
-}();
-return cached;
-}
-
 uintptr_t alignDown(uintptr_t value, size_t alignment) {
 const uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
 return value & ~mask;
@@ -97,66 +76,6 @@ uintptr_t alignUp(uintptr_t value, size_t alignment) {
 return (value + mask) & ~mask;
 }
 
-bool addOverflows(uintptr_t base, size_t amount) {
-return base > std::numeric_limits<uintptr_t>::max() - static_cast<uintptr_t>(amount);
-}
-
-uintptr_t regionEnd(const VirtualAllocation &region) { return region.base + region.size; }
-
-bool rangeOverlapsLocked(uintptr_t base, size_t length) {
-if (length == 0) {
-return false;
-}
-if (addOverflows(base, length - 1)) {
-return true;
-}
-uintptr_t end = base + length;
-auto next = g_virtualAllocations.lower_bound(base);
-if (next != g_virtualAllocations.begin()) {
-auto prev = std::prev(next);
-if (regionEnd(prev->second) > base) {
-return true;
-}
-}
-if (next != g_virtualAllocations.end() && next->second.base < end) {
-return true;
-}
-return false;
-}
-
-std::map<uintptr_t, VirtualAllocation>::iterator findRegionIterator(uintptr_t address) {
-auto it = g_virtualAllocations.upper_bound(address);
-if (it == g_virtualAllocations.begin()) {
-return g_virtualAllocations.end();
-}
---it;
-if (address >= regionEnd(it->second)) {
-return g_virtualAllocations.end();
-}
-return it;
-}
-
-VirtualAllocation *lookupRegion(uintptr_t address) {
-auto it = findRegionIterator(address);
-if (it == g_virtualAllocations.end()) {
-return nullptr;
-}
-return &it->second;
-}
-
-bool rangeWithinRegion(const VirtualAllocation &region, uintptr_t start, size_t length) {
-if (length == 0) {
-return start >= region.base && start <= regionEnd(region);
-}
-if (start < region.base) {
-return false;
-}
-if (addOverflows(start, length)) {
-return false;
-}
-return (start + length) <= regionEnd(region);
-}
-
 DWORD desiredAccessToProtect(DWORD desiredAccess, DWORD mappingProtect) {
 DWORD access = desiredAccess;
 if ((access & FILE_MAP_ALL_ACCESS) == FILE_MAP_ALL_ACCESS) {
@@ -219,93 +138,12 @@ DWORD desiredAccessToProtect(DWORD desiredAccess, DWORD mappingProtect) {
 return protect;
 }
 
-void markCommitted(VirtualAllocation &region, uintptr_t start, size_t length, DWORD protect) {
-if (length == 0) {
-return;
-}
-const size_t pageSize = systemPageSize();
-const size_t firstPage = (start - region.base) / pageSize;
-const size_t pageCount = length / pageSize;
-for (size_t i = 0; i < pageCount; ++i) {
-region.pageProtect[firstPage + i] = protect;
-}
-}
-
-void markDecommitted(VirtualAllocation &region, uintptr_t start, size_t length) {
-if (length == 0) {
-return;
-}
-const size_t pageSize = systemPageSize();
-const size_t firstPage = (start - region.base) / pageSize;
-const size_t pageCount = length / pageSize;
-for (size_t i = 0; i < pageCount; ++i) {
-region.pageProtect[firstPage + i] = 0;
-}
-}
-
-bool moduleRegionForAddress(uintptr_t pageBase, MEMORY_BASIC_INFORMATION &info) {
-if (pageBase == 0) {
-return false;
-}
-wibo::ModuleInfo *module = wibo::moduleInfoFromAddress(reinterpret_cast<void *>(pageBase));
-if (!module || !module->executable) {
-return false;
-}
-const auto &sections = module->executable->sections;
-if (sections.empty()) {
-return false;
-}
-size_t matchIndex = sections.size();
-for (size_t i = 0; i < sections.size(); ++i) {
-const auto &section = sections[i];
-if (pageBase >= section.base && pageBase < section.base + section.size) {
-matchIndex = i;
-break;
-}
-}
-if (matchIndex == sections.size()) {
-return false;
-}
-uintptr_t blockStart = sections[matchIndex].base;
-uintptr_t blockEnd = sections[matchIndex].base + sections[matchIndex].size;
-DWORD blockProtect = sections[matchIndex].protect;
-for (size_t prev = matchIndex; prev > 0;) {
---prev;
-const auto &section = sections[prev];
-if (section.base + section.size != blockStart) {
-break;
-}
-if (section.protect != blockProtect) {
-break;
-}
-blockStart = section.base;
-}
-for (size_t next = matchIndex + 1; next < sections.size(); ++next) {
-const auto &section = sections[next];
-if (section.base != blockEnd) {
-break;
-}
-if (section.protect != blockProtect) {
-break;
-}
-blockEnd = section.base + section.size;
-}
-info.BaseAddress = reinterpret_cast<void *>(blockStart);
-info.AllocationBase = module->executable->imageBase;
-info.AllocationProtect = blockProtect;
-info.RegionSize = blockEnd > blockStart ? blockEnd - blockStart : 0;
-info.State = MEM_COMMIT;
-info.Protect = blockProtect;
-info.Type = MEM_IMAGE;
-return true;
-}
-
 bool mappedViewRegionForAddress(uintptr_t request, uintptr_t pageBase, MEMORY_BASIC_INFORMATION &info) {
 std::lock_guard guard(g_viewInfoMutex);
 if (g_viewInfo.empty()) {
 return false;
 }
-const size_t pageSize = systemPageSize();
+const size_t pageSize = wibo::heap::systemPageSize();
 for (const auto &entry : g_viewInfo) {
 const ViewInfo &view = entry.second;
 if (view.viewLength == 0) {
@@ -335,130 +173,6 @@ bool mappedViewRegionForAddress(uintptr_t request, uintptr_t pageBase, MEMORY_BA
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool virtualAllocationRegionForAddress(uintptr_t pageBase, MEMORY_BASIC_INFORMATION &info) {
|
|
||||||
const size_t pageSize = systemPageSize();
|
|
||||||
std::unique_lock lk(g_virtualAllocMutex);
|
|
||||||
VirtualAllocation *region = lookupRegion(pageBase);
|
|
||||||
if (!region) {
|
|
||||||
uintptr_t regionStart = pageBase;
|
|
||||||
uintptr_t regionEnd = regionStart;
|
|
||||||
auto next = g_virtualAllocations.lower_bound(pageBase);
|
|
||||||
if (next != g_virtualAllocations.end()) {
|
|
||||||
regionEnd = next->second.base;
|
|
||||||
} else {
|
|
||||||
regionEnd = kProcessAddressLimit;
|
|
||||||
}
|
|
||||||
if (regionEnd <= regionStart) {
|
|
||||||
regionEnd = regionStart + pageSize;
|
|
||||||
}
|
|
||||||
lk.unlock();
|
|
||||||
info.BaseAddress = reinterpret_cast<void *>(regionStart);
|
|
||||||
info.AllocationBase = nullptr;
|
|
||||||
info.AllocationProtect = 0;
|
|
||||||
info.RegionSize = regionEnd - regionStart;
|
|
||||||
info.State = MEM_FREE;
|
|
||||||
info.Protect = PAGE_NOACCESS;
|
|
||||||
info.Type = 0;
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
const uintptr_t regionLimit = region->base + region->size;
|
|
||||||
const size_t pageIndex = (pageBase - region->base) / pageSize;
|
|
||||||
if (pageIndex >= region->pageProtect.size()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
const DWORD pageProtect = region->pageProtect[pageIndex];
|
|
||||||
const bool committed = pageProtect != 0;
|
|
||||||
uintptr_t blockStart = pageBase;
|
|
||||||
uintptr_t blockEnd = pageBase + pageSize;
|
|
||||||
while (blockStart > region->base) {
|
|
||||||
size_t idx = (blockStart - region->base) / pageSize - 1;
|
|
||||||
DWORD protect = region->pageProtect[idx];
|
|
||||||
bool pageCommitted = protect != 0;
|
|
||||||
if (pageCommitted != committed) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (committed && protect != pageProtect) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
blockStart -= pageSize;
|
|
||||||
}
|
|
||||||
while (blockEnd < regionLimit) {
|
|
||||||
size_t idx = (blockEnd - region->base) / pageSize;
|
|
||||||
if (idx >= region->pageProtect.size()) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
DWORD protect = region->pageProtect[idx];
|
|
||||||
bool pageCommitted = protect != 0;
|
|
||||||
if (pageCommitted != committed) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (committed && protect != pageProtect) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
blockEnd += pageSize;
|
|
||||||
}
|
|
||||||
uintptr_t allocationBase = region->base;
|
|
||||||
DWORD allocationProtect = region->allocationProtect != 0 ? region->allocationProtect : PAGE_NOACCESS;
|
|
||||||
DWORD finalProtect = committed ? pageProtect : PAGE_NOACCESS;
|
|
||||||
lk.unlock();
|
|
||||||
info.BaseAddress = reinterpret_cast<void *>(blockStart);
|
|
||||||
info.AllocationBase = reinterpret_cast<void *>(allocationBase);
|
|
||||||
info.AllocationProtect = allocationProtect;
|
|
||||||
info.RegionSize = blockEnd - blockStart;
|
|
||||||
info.State = committed ? MEM_COMMIT : MEM_RESERVE;
|
|
||||||
info.Protect = finalProtect;
|
|
||||||
info.Type = MEM_PRIVATE;
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void *alignedReserve(size_t length, int prot, int flags) {
|
|
||||||
const size_t granularity = kVirtualAllocationGranularity;
|
|
||||||
const size_t request = length + granularity;
|
|
||||||
void *raw = mmap(nullptr, request, prot, flags, -1, 0);
|
|
||||||
if (raw == MAP_FAILED) {
|
|
||||||
return MAP_FAILED;
|
|
||||||
}
|
|
||||||
uintptr_t rawAddr = reinterpret_cast<uintptr_t>(raw);
|
|
||||||
uintptr_t aligned = alignUp(rawAddr, granularity);
|
|
||||||
size_t front = aligned - rawAddr;
|
|
||||||
size_t back = (rawAddr + request) - (aligned + length);
|
|
||||||
if (front != 0) {
|
|
||||||
if (munmap(raw, front) != 0) {
|
|
||||||
munmap(raw, request);
|
|
||||||
return MAP_FAILED;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (back != 0) {
|
|
||||||
if (munmap(reinterpret_cast<void *>(aligned + length), back) != 0) {
|
|
||||||
munmap(reinterpret_cast<void *>(aligned), length);
|
|
||||||
return MAP_FAILED;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return reinterpret_cast<void *>(aligned);
|
|
||||||
}
|
|
||||||
|
|
||||||
int translateProtect(DWORD flProtect) {
|
|
||||||
switch (flProtect) {
|
|
||||||
case PAGE_NOACCESS:
|
|
||||||
return PROT_NONE;
|
|
||||||
case PAGE_READONLY:
|
|
||||||
return PROT_READ;
|
|
||||||
case PAGE_READWRITE:
|
|
||||||
case PAGE_WRITECOPY:
|
|
||||||
return PROT_READ | PROT_WRITE;
|
|
||||||
case PAGE_EXECUTE:
|
|
||||||
return PROT_EXEC;
|
|
||||||
case PAGE_EXECUTE_READ:
|
|
||||||
return PROT_READ | PROT_EXEC;
|
|
||||||
case PAGE_EXECUTE_READWRITE:
|
|
||||||
case PAGE_EXECUTE_WRITECOPY:
|
|
||||||
return PROT_READ | PROT_WRITE | PROT_EXEC;
|
|
||||||
default:
|
|
||||||
DEBUG_LOG("Unhandled flProtect: %u, defaulting to RW\n", flProtect);
|
|
||||||
return PROT_READ | PROT_WRITE;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
namespace kernel32 {
|
namespace kernel32 {
|
||||||
@@ -581,7 +295,7 @@ static LPVOID mapViewOfFileInternal(Pin<MappingObject> mapping, DWORD dwDesiredA
 }
 
 int flags = (mapping->anonymous ? MAP_ANONYMOUS : 0) | (wantCopy ? MAP_PRIVATE : MAP_SHARED);
-const size_t pageSize = systemPageSize();
+const size_t pageSize = wibo::heap::systemPageSize();
 off_t alignedOffset = mapping->anonymous ? 0 : static_cast<off_t>(offset & ~static_cast<uint64_t>(pageSize - 1));
 size_t offsetDelta = static_cast<size_t>(offset - static_cast<uint64_t>(alignedOffset));
 uint64_t requestedLength = length + offsetDelta;
@@ -598,6 +312,7 @@ static LPVOID mapViewOfFileInternal(Pin<MappingObject> mapping, DWORD dwDesiredA
 int mmapFd = mapping->anonymous ? -1 : mapping->fd;
 void *requestedBase = nullptr;
 int mapFlags = flags;
+bool reservedMapping = false;
 if (baseAddress) {
 uintptr_t baseAddr = reinterpret_cast<uintptr_t>(baseAddress);
 if (baseAddr == 0 || (baseAddr % kVirtualAllocationGranularity) != 0) {
@@ -619,6 +334,16 @@ static LPVOID mapViewOfFileInternal(Pin<MappingObject> mapping, DWORD dwDesiredA
 #else
 mapFlags |= MAP_FIXED;
 #endif
+} else {
+void *candidate = nullptr;
+wibo::heap::VmStatus reserveStatus = wibo::heap::reserveViewRange(mapLength, 0, 0, &candidate);
+if (reserveStatus != wibo::heap::VmStatus::Success) {
+setLastError(wibo::heap::win32ErrorFromVmStatus(reserveStatus));
+return nullptr;
+}
+reservedMapping = true;
+requestedBase = candidate;
+mapFlags |= MAP_FIXED;
 }
 
 errno = 0;
@@ -630,12 +355,18 @@ static LPVOID mapViewOfFileInternal(Pin<MappingObject> mapping, DWORD dwDesiredA
 } else {
 setLastError(wibo::winErrorFromErrno(err));
 }
+if (reservedMapping) {
+wibo::heap::releaseViewRange(requestedBase);
+}
 return nullptr;
 }
 void *viewPtr = static_cast<uint8_t *>(mapBase) + offsetDelta;
 if (baseAddress && viewPtr != baseAddress) {
 munmap(mapBase, mapLength);
 setLastError(ERROR_INVALID_ADDRESS);
+if (reservedMapping) {
+wibo::heap::releaseViewRange(requestedBase);
+}
 return nullptr;
 }
 uintptr_t viewLength = static_cast<uintptr_t>(length);
@@ -653,6 +384,10 @@ static LPVOID mapViewOfFileInternal(Pin<MappingObject> mapping, DWORD dwDesiredA
 view.protect = desiredAccessToProtect(dwDesiredAccess, protect);
 view.allocationProtect = protect;
 view.type = MEM_MAPPED;
+view.managed = reservedMapping;
+if (reservedMapping) {
+wibo::heap::registerViewRange(mapBase, mapLength, protect, view.protect);
+}
 {
 std::lock_guard guard(g_viewInfoMutex);
 g_viewInfo.emplace(view.viewBase, std::move(view));
@@ -701,11 +436,15 @@ BOOL WINAPI UnmapViewOfFile(LPCVOID lpBaseAddress) {
 }
 void *base = reinterpret_cast<void *>(it->second.allocationBase);
 size_t length = it->second.allocationLength;
+bool managed = it->second.managed;
 g_viewInfo.erase(it);
 lk.unlock();
 if (length != 0) {
 munmap(base, length);
 }
+if (managed) {
+wibo::heap::releaseViewRange(base);
+}
 return TRUE;
 }
 
@@ -755,7 +494,7 @@ BOOL WINAPI FlushViewOfFile(LPCVOID lpBaseAddress, SIZE_T dwNumberOfBytesToFlush
 
 uintptr_t flushStart = address;
 uintptr_t flushEnd = flushStart + bytesToFlush;
-const size_t pageSize = systemPageSize();
+const size_t pageSize = wibo::heap::systemPageSize();
 uintptr_t alignedStart = alignDown(flushStart, pageSize);
 uintptr_t alignedEnd = alignUp(flushEnd, pageSize);
 if (alignedEnd == std::numeric_limits<uintptr_t>::max()) {
@@ -788,336 +527,42 @@ LPVOID WINAPI VirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationTy
 HOST_CONTEXT_GUARD();
 DEBUG_LOG("VirtualAlloc(%p, %zu, %u, %u)\n", lpAddress, dwSize, flAllocationType, flProtect);
 
-if (dwSize == 0) {
-setLastError(ERROR_INVALID_PARAMETER);
+void *base = lpAddress;
+std::size_t size = static_cast<std::size_t>(dwSize);
+wibo::heap::VmStatus status = wibo::heap::virtualAlloc(&base, &size, flAllocationType, flProtect);
+if (status != wibo::heap::VmStatus::Success) {
+DWORD err = wibo::heap::win32ErrorFromVmStatus(status);
+DEBUG_LOG("-> failed (status=%u, err=%u)\n", static_cast<unsigned>(status), err);
+setLastError(err);
 return nullptr;
 }
-
-DWORD unsupportedFlags = flAllocationType & (MEM_WRITE_WATCH | MEM_PHYSICAL | MEM_LARGE_PAGES | MEM_RESET_UNDO);
-if (unsupportedFlags != 0) {
-DEBUG_LOG("VirtualAlloc unsupported flags: 0x%x\n", unsupportedFlags);
-setLastError(ERROR_NOT_SUPPORTED);
-return nullptr;
-}
-
-bool reserve = (flAllocationType & MEM_RESERVE) != 0;
-bool commit = (flAllocationType & MEM_COMMIT) != 0;
-bool reset = (flAllocationType & MEM_RESET) != 0;
-
-if (!reserve && commit && lpAddress == nullptr) {
-reserve = true;
-}
-
-if (reset) {
-if (reserve || commit) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-if (!lpAddress) {
-setLastError(ERROR_INVALID_ADDRESS);
-return nullptr;
-}
-const size_t pageSize = systemPageSize();
-uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
-if (addOverflows(request, static_cast<size_t>(dwSize))) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-uintptr_t start = alignDown(request, pageSize);
-uintptr_t end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
-size_t length = static_cast<size_t>(end - start);
-std::unique_lock lk(g_virtualAllocMutex);
-VirtualAllocation *region = lookupRegion(start);
-if (!region || !rangeWithinRegion(*region, start, length)) {
-setLastError(ERROR_INVALID_ADDRESS);
-return nullptr;
-}
-#ifdef MADV_FREE
-int advice = MADV_FREE;
-#else
-int advice = MADV_DONTNEED;
-#endif
-if (madvise(reinterpret_cast<void *>(start), length, advice) != 0) {
-setLastErrorFromErrno();
-return nullptr;
-}
-return reinterpret_cast<LPVOID>(start);
-}
-
-if (!reserve && !commit) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-
-const size_t pageSize = systemPageSize();
-std::unique_lock lk(g_virtualAllocMutex);
-
-if (reserve) {
-uintptr_t base = 0;
-size_t length = 0;
-if (lpAddress) {
-uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
-base = alignDown(request, kVirtualAllocationGranularity);
-size_t offset = static_cast<size_t>(request - base);
-if (addOverflows(offset, static_cast<size_t>(dwSize))) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-size_t span = static_cast<size_t>(dwSize) + offset;
-uintptr_t alignedSpan = alignUp(span, pageSize);
-if (alignedSpan == std::numeric_limits<uintptr_t>::max()) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-length = static_cast<size_t>(alignedSpan);
-if (length == 0 || rangeOverlapsLocked(base, length)) {
-setLastError(ERROR_INVALID_ADDRESS);
-return nullptr;
-}
-} else {
-uintptr_t aligned = alignUp(static_cast<uintptr_t>(dwSize), pageSize);
-if (aligned == std::numeric_limits<uintptr_t>::max() || aligned == 0) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-length = static_cast<size_t>(aligned);
-}
-const int prot = commit ? translateProtect(flProtect) : PROT_NONE;
-int flags = MAP_PRIVATE | MAP_ANONYMOUS;
-if (!commit) {
-flags |= MAP_NORESERVE;
-}
-void *result = MAP_FAILED;
-if (lpAddress) {
-#ifdef MAP_FIXED_NOREPLACE
-flags |= MAP_FIXED_NOREPLACE;
-#else
-flags |= MAP_FIXED;
-#endif
-result = mmap(reinterpret_cast<void *>(base), length, prot, flags, -1, 0);
-} else {
-result = alignedReserve(length, prot, flags);
-}
-if (result == MAP_FAILED) {
-setLastErrorFromErrno();
-return nullptr;
-}
-if (reinterpret_cast<uintptr_t>(result) >= 0x80000000) {
-munmap(result, length);
-setLastError(ERROR_NOT_ENOUGH_MEMORY);
-return nullptr;
-}
-uintptr_t actualBase = reinterpret_cast<uintptr_t>(result);
-VirtualAllocation allocation{};
-allocation.base = actualBase;
-allocation.size = length;
-allocation.allocationProtect = flProtect;
-allocation.pageProtect.assign(length / pageSize, commit ? flProtect : 0);
-g_virtualAllocations[actualBase] = std::move(allocation);
-return result;
-}
-
-uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
-if (addOverflows(request, static_cast<size_t>(dwSize))) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-uintptr_t start = alignDown(request, pageSize);
-uintptr_t end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
-size_t length = static_cast<size_t>(end - start);
-if (length == 0) {
-setLastError(ERROR_INVALID_PARAMETER);
-return nullptr;
-}
-VirtualAllocation *region = lookupRegion(start);
-if (!region || !rangeWithinRegion(*region, start, length)) {
-setLastError(ERROR_INVALID_ADDRESS);
-return nullptr;
-}
-const size_t pageCount = length / pageSize;
-std::vector<std::pair<uintptr_t, size_t>> committedRuns;
-committedRuns.reserve(pageCount);
-for (size_t i = 0; i < pageCount; ++i) {
-size_t pageIndex = ((start - region->base) / pageSize) + i;
-if (pageIndex >= region->pageProtect.size()) {
-setLastError(ERROR_INVALID_ADDRESS);
-return nullptr;
-}
-if (region->pageProtect[pageIndex] != 0) {
-continue;
-}
-uintptr_t runBase = start + i * pageSize;
-size_t runLength = pageSize;
-while (i + 1 < pageCount) {
-size_t nextIndex = ((start - region->base) / pageSize) + i + 1;
-if (region->pageProtect[nextIndex] != 0) {
-break;
-}
-++i;
-runLength += pageSize;
-}
-committedRuns.emplace_back(runBase, runLength);
-}
-for (const auto &run : committedRuns) {
-void *result = mmap(reinterpret_cast<void *>(run.first), run.second, translateProtect(flProtect),
-MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-if (result == MAP_FAILED) {
-setLastErrorFromErrno();
-return nullptr;
-}
-markCommitted(*region, run.first, run.second, flProtect);
-}
-DEBUG_LOG("VirtualAlloc commit success -> %p\n", reinterpret_cast<void *>(start));
-return reinterpret_cast<LPVOID>(start);
+return base;
 }
 
 BOOL WINAPI VirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
 HOST_CONTEXT_GUARD();
 DEBUG_LOG("VirtualFree(%p, %zu, %u)\n", lpAddress, dwSize, dwFreeType);
-if (!lpAddress) {
-setLastError(ERROR_INVALID_ADDRESS);
+wibo::heap::VmStatus status = wibo::heap::virtualFree(lpAddress, static_cast<std::size_t>(dwSize), dwFreeType);
+if (status != wibo::heap::VmStatus::Success) {
+DWORD err = wibo::heap::win32ErrorFromVmStatus(status);
+DEBUG_LOG("-> failed (status=%u, err=%u)\n", static_cast<unsigned>(status), err);
+setLastError(err);
 return FALSE;
 }
-
-if ((dwFreeType & (MEM_COALESCE_PLACEHOLDERS | MEM_PRESERVE_PLACEHOLDER)) != 0) {
-setLastError(ERROR_NOT_SUPPORTED);
-return FALSE;
-}
-
-const bool release = (dwFreeType & MEM_RELEASE) != 0;
-const bool decommit = (dwFreeType & MEM_DECOMMIT) != 0;
-if (release == decommit) {
-setLastError(ERROR_INVALID_PARAMETER);
-return FALSE;
-}
-
-const size_t pageSize = systemPageSize();
-std::unique_lock lk(g_virtualAllocMutex);
-
-if (release) {
-uintptr_t base = reinterpret_cast<uintptr_t>(lpAddress);
-auto exact = g_virtualAllocations.find(base);
-if (exact == g_virtualAllocations.end()) {
-auto containing = findRegionIterator(base);
-if (dwSize != 0 && containing != g_virtualAllocations.end()) {
-setLastError(ERROR_INVALID_PARAMETER);
-} else {
-setLastError(ERROR_INVALID_ADDRESS);
-}
-return FALSE;
-}
-if (dwSize != 0) {
-setLastError(ERROR_INVALID_PARAMETER);
-return FALSE;
-}
-size_t length = exact->second.size;
-g_virtualAllocations.erase(exact);
-lk.unlock();
-if (munmap(lpAddress, length) != 0) {
-setLastErrorFromErrno();
-return FALSE;
-}
-return TRUE;
-}
-
-uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
-auto regionIt = findRegionIterator(request);
-if (regionIt == g_virtualAllocations.end()) {
-setLastError(ERROR_INVALID_ADDRESS);
-return FALSE;
-}
-VirtualAllocation &region = regionIt->second;
-uintptr_t start = alignDown(request, pageSize);
-uintptr_t end = 0;
-if (dwSize == 0) {
-if (request != region.base) {
-setLastError(ERROR_INVALID_PARAMETER);
-return FALSE;
-}
-start = region.base;
-end = region.base + region.size;
-} else {
-if (addOverflows(request, static_cast<size_t>(dwSize))) {
-setLastError(ERROR_INVALID_PARAMETER);
-return FALSE;
-}
-end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
-}
-if (end <= start) {
-setLastError(ERROR_INVALID_PARAMETER);
-return FALSE;
-}
-size_t length = static_cast<size_t>(end - start);
-if (!rangeWithinRegion(region, start, length)) {
-setLastError(ERROR_INVALID_ADDRESS);
-return FALSE;
-}
-void *result = mmap(reinterpret_cast<void *>(start), length, PROT_NONE,
-MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE, -1, 0);
-if (result == MAP_FAILED) {
-setLastErrorFromErrno();
-return FALSE;
-}
-markDecommitted(region, start, length);
 return TRUE;
 }
 
 BOOL WINAPI VirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect) {
 HOST_CONTEXT_GUARD();
 DEBUG_LOG("VirtualProtect(%p, %zu, %u)\n", lpAddress, dwSize, flNewProtect);
-if (!lpAddress || dwSize == 0) {
-setLastError(ERROR_INVALID_PARAMETER);
+wibo::heap::VmStatus status =
+wibo::heap::virtualProtect(lpAddress, static_cast<std::size_t>(dwSize), flNewProtect, lpflOldProtect);
+if (status != wibo::heap::VmStatus::Success) {
+DWORD err = wibo::heap::win32ErrorFromVmStatus(status);
+DEBUG_LOG("-> failed (status=%u, err=%u)\n", static_cast<unsigned>(status), err);
+setLastError(err);
 return FALSE;
 }
-
-const size_t pageSize = systemPageSize();
-uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
-uintptr_t start = alignDown(request, pageSize);
-uintptr_t end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
-if (end <= start) {
-setLastError(ERROR_INVALID_PARAMETER);
-return FALSE;
-}
-
-std::unique_lock lk(g_virtualAllocMutex);
-VirtualAllocation *region = lookupRegion(start);
-if (!region || !rangeWithinRegion(*region, start, static_cast<size_t>(end - start))) {
-setLastError(ERROR_INVALID_ADDRESS);
-return FALSE;
-}
-
-const size_t firstPage = (start - region->base) / pageSize;
-const size_t pageCount = (end - start) / pageSize;
-if (pageCount == 0) {
-setLastError(ERROR_INVALID_PARAMETER);
-return FALSE;
-}
-
-DWORD previousProtect = region->pageProtect[firstPage];
-if (previousProtect == 0) {
-setLastError(ERROR_NOACCESS);
-return FALSE;
-}
-for (size_t i = 0; i < pageCount; ++i) {
-if (region->pageProtect[firstPage + i] == 0) {
-setLastError(ERROR_NOACCESS);
-return FALSE;
-}
-}
-
-int prot = translateProtect(flNewProtect);
-if (mprotect(reinterpret_cast<void *>(start), end - start, prot) != 0) {
-setLastErrorFromErrno();
-return FALSE;
-}
-for (size_t i = 0; i < pageCount; ++i) {
-region->pageProtect[firstPage + i] = flNewProtect;
-}
-lk.unlock();
-
-if (lpflOldProtect) {
-*lpflOldProtect = previousProtect;
-}
 return TRUE;
 }
 
@@ -1131,7 +576,7 @@ SIZE_T WINAPI VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer
 }
 
 std::memset(lpBuffer, 0, sizeof(MEMORY_BASIC_INFORMATION));
-const size_t pageSize = systemPageSize();
+const size_t pageSize = wibo::heap::systemPageSize();
 uintptr_t request = lpAddress ? reinterpret_cast<uintptr_t>(lpAddress) : 0;
 uintptr_t pageBase = alignDown(request, pageSize);
 if (pageBase >= kProcessAddressLimit) {
@@ -1141,21 +586,20 @@ SIZE_T WINAPI VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer
 }
 
 MEMORY_BASIC_INFORMATION info{};
-if (moduleRegionForAddress(pageBase, info)) {
-*lpBuffer = info;
-return sizeof(MEMORY_BASIC_INFORMATION);
-}
 if (mappedViewRegionForAddress(request, pageBase, info)) {
 *lpBuffer = info;
 return sizeof(MEMORY_BASIC_INFORMATION);
 }
-if (virtualAllocationRegionForAddress(pageBase, info)) {
+wibo::heap::VmStatus status = wibo::heap::virtualQuery(lpAddress, &info);
+if (status == wibo::heap::VmStatus::Success) {
 *lpBuffer = info;
 return sizeof(MEMORY_BASIC_INFORMATION);
 }
 
-setLastError(ERROR_INVALID_ADDRESS);
-DEBUG_LOG("-> ERROR_INVALID_ADDRESS\n");
+DEBUG_LOG("VirtualQuery fallback failed status=%u\n", static_cast<unsigned>(status));
+setLastError(wibo::heap::win32ErrorFromVmStatus(status));
+DEBUG_LOG("-> VirtualQuery failed (status=%u)\n", static_cast<unsigned>(status));
 return 0;
 }
 
@@ -1173,8 +617,7 @@ BOOL WINAPI GetProcessWorkingSetSize(HANDLE hProcess, PSIZE_T lpMinimumWorkingSe
 return TRUE;
 }
 
-BOOL WINAPI SetProcessWorkingSetSize(HANDLE hProcess, SIZE_T dwMinimumWorkingSetSize,
-SIZE_T dwMaximumWorkingSetSize) {
+BOOL WINAPI SetProcessWorkingSetSize(HANDLE hProcess, SIZE_T dwMinimumWorkingSetSize, SIZE_T dwMaximumWorkingSetSize) {
 HOST_CONTEXT_GUARD();
 DEBUG_LOG("SetProcessWorkingSetSize(%p, %zu, %zu)\n", hProcess, dwMinimumWorkingSetSize, dwMaximumWorkingSetSize);
 (void)hProcess;
@@ -3,18 +3,6 @@
 #include "types.h"
 #include "minwinbase.h"
 
-struct MEMORY_BASIC_INFORMATION {
-PVOID BaseAddress;
-PVOID AllocationBase;
-DWORD AllocationProtect;
-SIZE_T RegionSize;
-DWORD State;
-DWORD Protect;
-DWORD Type;
-};
-
-using PMEMORY_BASIC_INFORMATION = MEMORY_BASIC_INFORMATION *;
-
 namespace kernel32 {
 
 HANDLE WINAPI CreateFileMappingA(HANDLE hFile, LPSECURITY_ATTRIBUTES lpFileMappingAttributes, DWORD flProtect,
@@ -3,6 +3,7 @@
 #include "context.h"
 #include "errors.h"
 #include "files.h"
+#include "heap.h"
 #include "internal.h"
 #include "strutil.h"
 
@@ -78,7 +79,7 @@ LPCH WINAPI GetEnvironmentStrings() {
 }
 bufSize++;
 
-char *buffer = static_cast<char *>(mi_malloc(bufSize));
+char *buffer = static_cast<char *>(wibo::heap::guestMalloc(bufSize));
 if (!buffer) {
 setLastError(ERROR_NOT_ENOUGH_MEMORY);
 return nullptr;
@@ -111,7 +112,7 @@ LPWCH WINAPI GetEnvironmentStringsW() {
 }
 bufSizeW++;
 
-uint16_t *buffer = static_cast<uint16_t *>(mi_malloc(bufSizeW * sizeof(uint16_t)));
+uint16_t *buffer = static_cast<uint16_t *>(wibo::heap::guestMalloc(bufSizeW * sizeof(uint16_t)));
 if (!buffer) {
 setLastError(ERROR_NOT_ENOUGH_MEMORY);
 return nullptr;
@@ -148,7 +148,6 @@ void *threadTrampoline(void *param) {
 DEBUG_LOG("Calling thread entry %p with userData %p\n", data.entry, data.userData);
 DWORD result = 0;
 if (data.entry) {
-GUEST_CONTEXT_GUARD(threadTib);
 result = call_LPTHREAD_START_ROUTINE(data.entry, data.userData);
 }
 DEBUG_LOG("Thread exiting with code %u\n", result);
@@ -4,6 +4,7 @@
 #include "context.h"
 #include "errors.h"
 #include "files.h"
+#include "heap.h"
 #include "internal.h"
 #include "modules.h"
 #include "strutil.h"
@@ -169,7 +170,7 @@ void *doAlloc(UINT dwBytes, bool zero) {
 if (dwBytes == 0) {
 dwBytes = 1;
 }
-void *ret = mi_malloc_aligned(dwBytes, 8);
+void *ret = mi_heap_malloc_aligned(wibo::heap::getGuestHeap(), dwBytes, 8);
 if (ret && zero) {
 std::memset(ret, 0, mi_usable_size(ret));
 }
@@ -181,7 +182,7 @@ void *doRealloc(void *mem, UINT dwBytes, bool zero) {
 dwBytes = 1;
 }
 size_t oldSize = mi_usable_size(mem);
-void *ret = mi_realloc_aligned(mem, dwBytes, 8);
+void *ret = mi_heap_realloc_aligned(wibo::heap::getGuestHeap(), mem, dwBytes, 8);
 size_t newSize = mi_usable_size(ret);
 if (ret && zero && newSize > oldSize) {
 std::memset(static_cast<char *>(ret) + oldSize, 0, newSize - oldSize);
@@ -3,6 +3,7 @@
 #include "common.h"
 #include "context.h"
 #include "files.h"
+#include "heap.h"
 #include "kernel32/internal.h"
 #include "modules.h"
 #include "msvcrt_trampolines.h"
@@ -1382,7 +1383,7 @@ namespace msvcrt {
 std::strcpy(absPath, winPath.c_str());
 return absPath;
 }
-char *result = static_cast<char *>(std::malloc(winPath.size() + 1));
+char *result = static_cast<char *>(wibo::heap::guestMalloc(winPath.size() + 1));
 if (!result) {
 errno = ENOMEM;
 return nullptr;
@@ -1512,7 +1513,7 @@ namespace msvcrt {
 }
 
 SIZE_T value_len = match->length;
-auto *copy = static_cast<uint16_t *>(malloc((value_len + 1) * sizeof(uint16_t)));
+auto *copy = static_cast<uint16_t *>(wibo::heap::guestMalloc((value_len + 1) * sizeof(uint16_t)));
 if (!copy) {
 DEBUG_LOG("_wdupenv_s: allocation failed\n");
 errno = ENOMEM;
@@ -1686,7 +1687,7 @@ namespace msvcrt {
 }
 
 SIZE_T length = ::strlen(strSource);
-auto *copy = static_cast<char *>(std::malloc(length + 1));
+auto *copy = static_cast<char *>(wibo::heap::guestMalloc(length + 1));
 if (!copy) {
 return nullptr;
 }
@@ -1704,25 +1705,25 @@ namespace msvcrt {
 void* CDECL malloc(SIZE_T size){
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("malloc(%zu)\n", size);
-return std::malloc(size);
+return wibo::heap::guestMalloc(size);
 }
 
 void* CDECL calloc(SIZE_T count, SIZE_T size){
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("calloc(%zu, %zu)\n", count, size);
-return std::calloc(count, size);
+return wibo::heap::guestCalloc(count, size);
 }
 
 void* CDECL realloc(void *ptr, SIZE_T size) {
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("realloc(%p, %zu)\n", ptr, size);
-return std::realloc(ptr, size);
+return wibo::heap::guestRealloc(ptr, size);
 }
 
 void* CDECL _malloc_crt(SIZE_T size) {
 HOST_CONTEXT_GUARD();
 VERBOSE_LOG("_malloc_crt(%zu)\n", size);
-return std::malloc(size);
+return wibo::heap::guestMalloc(size);
 }
 
 void CDECL _lock(int locknum) {
@@ -2479,7 +2480,7 @@ namespace msvcrt {
 if(!strSource) return nullptr;
 SIZE_T strLen = wstrlen(strSource);
 
-auto *dup = static_cast<uint16_t *>(malloc((strLen + 1) * sizeof(uint16_t)));
+auto *dup = static_cast<uint16_t *>(wibo::heap::guestMalloc((strLen + 1) * sizeof(uint16_t)));
 if(!dup) return nullptr;
 
 for(SIZE_T i = 0; i <= strLen; i++){
@@ -3000,7 +3001,7 @@ namespace msvcrt {
 return absPath;
 } else {
 // Windows behavior: if absPath == NULL, allocate new
-auto *newBuf = new uint16_t[wResolved.size() + 1];
+auto *newBuf = static_cast<uint16_t*>(wibo::heap::guestMalloc((wResolved.size() + 1) * sizeof(uint16_t)));
 std::copy(wResolved.begin(), wResolved.end(), newBuf);
 newBuf[wResolved.size()] = 0;
 
@@ -5,6 +5,7 @@
|
|||||||
#include "errors.h"
|
#include "errors.h"
|
||||||
#include "files.h"
|
#include "files.h"
|
||||||
#include "handles.h"
|
#include "handles.h"
|
||||||
|
#include "heap.h"
|
||||||
#include "kernel32/internal.h"
|
#include "kernel32/internal.h"
|
||||||
#include "kernel32/processthreadsapi.h"
|
#include "kernel32/processthreadsapi.h"
|
||||||
#include "modules.h"
|
#include "modules.h"
|
||||||
@@ -14,7 +15,6 @@
|
|||||||
|
|
||||||
#include <cerrno>
|
#include <cerrno>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <sys/mman.h>
|
|
||||||
#include <sys/stat.h>
|
#include <sys/stat.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
|
|
||||||
@@ -267,35 +267,23 @@ NTSTATUS WINAPI NtAllocateVirtualMemory(HANDLE ProcessHandle, PVOID *BaseAddress
 	HOST_CONTEXT_GUARD();
 	DEBUG_LOG("NtAllocateVirtualMemory(%p, %p, %lu, %p, %lu, %lu) ", ProcessHandle, BaseAddress, ZeroBits, RegionSize,
 			  AllocationType, Protect);
-	assert(ProcessHandle == (HANDLE)-1);
-	assert(ZeroBits == 0);
-
-	int prot = 0;
-	if (Protect & PAGE_NOACCESS)
-		prot |= PROT_NONE;
-	if (Protect & PAGE_READONLY)
-		prot |= PROT_READ;
-	if (Protect & PAGE_READWRITE)
-		prot |= PROT_READ | PROT_WRITE;
-	if (Protect & PAGE_WRITECOPY)
-		prot |= PROT_READ | PROT_WRITE;
-	if (Protect & PAGE_EXECUTE)
-		prot |= PROT_EXEC;
-	if (Protect & PAGE_EXECUTE_READ)
-		prot |= PROT_EXEC | PROT_READ;
-	if (Protect & PAGE_EXECUTE_READWRITE)
-		prot |= PROT_EXEC | PROT_READ | PROT_WRITE;
-	assert(!(Protect & PAGE_EXECUTE_WRITECOPY));
-	assert(!(Protect & PAGE_GUARD));
-	assert(!(Protect & PAGE_NOCACHE));
-	assert(!(Protect & PAGE_WRITECOMBINE));
-
-	void *addr = mmap(*BaseAddress, *RegionSize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	if (addr == MAP_FAILED) {
-		perror("mmap");
-		return STATUS_NOT_SUPPORTED;
+	if (ProcessHandle != (HANDLE)-1) {
+		DEBUG_LOG("-> 0x%x\n", STATUS_INVALID_HANDLE);
+		return STATUS_INVALID_HANDLE;
+	}
+	if (ZeroBits != 0 || BaseAddress == nullptr || RegionSize == nullptr) {
+		DEBUG_LOG("-> 0x%x\n", STATUS_INVALID_PARAMETER);
+		return STATUS_INVALID_PARAMETER;
+	}
+
+	wibo::heap::VmStatus vmStatus =
+		wibo::heap::virtualAlloc(reinterpret_cast<void **>(BaseAddress), reinterpret_cast<std::size_t *>(RegionSize),
+								 static_cast<DWORD>(AllocationType), static_cast<DWORD>(Protect));
+	if (vmStatus != wibo::heap::VmStatus::Success) {
+		NTSTATUS status = wibo::heap::ntStatusFromVmStatus(vmStatus);
+		DEBUG_LOG("-> 0x%x\n", status);
+		return status;
 	}
-	*BaseAddress = addr;
-
+
 	DEBUG_LOG("-> 0x%x\n", STATUS_SUCCESS);
 	return STATUS_SUCCESS;
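Note: the NT entry point is now just argument validation plus a call into wibo::heap, with ntStatusFromVmStatus translating the result. A Win32-level caller can follow the same shape using win32ErrorFromVmStatus; the wrapper below is illustrative only and not part of this commit:

// Sketch of a VirtualAlloc-style caller built on the helpers declared in src/heap.h.
static void *exampleAlloc(void *hint, std::size_t size, DWORD allocationType, DWORD protect) {
	void *base = hint;
	std::size_t region = size; // in/out: the heap layer may adjust it (e.g. rounding to granularity)
	wibo::heap::VmStatus st = wibo::heap::virtualAlloc(&base, &region, allocationType, protect);
	if (st != wibo::heap::VmStatus::Success) {
		kernel32::setLastError(wibo::heap::win32ErrorFromVmStatus(st));
		return nullptr;
	}
	return base;
}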
@@ -306,38 +294,25 @@ NTSTATUS WINAPI NtProtectVirtualMemory(HANDLE ProcessHandle, PVOID *BaseAddress,
 	HOST_CONTEXT_GUARD();
 	DEBUG_LOG("NtProtectVirtualMemory(%p, %p, %p, %lu, %p) ", ProcessHandle, BaseAddress, NumberOfBytesToProtect,
 			  NewAccessProtection, OldAccessProtection);
-	assert(ProcessHandle == (HANDLE)-1);
-	assert(NumberOfBytesToProtect != nullptr);
-
-	int prot = 0;
-	if (NewAccessProtection & PAGE_NOACCESS)
-		prot |= PROT_NONE;
-	if (NewAccessProtection & PAGE_READONLY)
-		prot |= PROT_READ;
-	if (NewAccessProtection & PAGE_READWRITE)
-		prot |= PROT_READ | PROT_WRITE;
-	if (NewAccessProtection & PAGE_WRITECOPY)
-		prot |= PROT_READ | PROT_WRITE;
-	if (NewAccessProtection & PAGE_EXECUTE)
-		prot |= PROT_EXEC;
-	if (NewAccessProtection & PAGE_EXECUTE_READ)
-		prot |= PROT_EXEC | PROT_READ;
-	if (NewAccessProtection & PAGE_EXECUTE_READWRITE)
-		prot |= PROT_EXEC | PROT_READ | PROT_WRITE;
-	assert(!(NewAccessProtection & PAGE_EXECUTE_WRITECOPY));
-	assert(!(NewAccessProtection & PAGE_GUARD));
-	assert(!(NewAccessProtection & PAGE_NOCACHE));
-	assert(!(NewAccessProtection & PAGE_WRITECOMBINE));
-
-	int ret = mprotect(*BaseAddress, *NumberOfBytesToProtect, prot);
-	if (ret != 0) {
-		perror("mprotect");
-		return STATUS_NOT_SUPPORTED;
+	if (ProcessHandle != (HANDLE)-1) {
+		DEBUG_LOG("-> 0x%x\n", STATUS_INVALID_HANDLE);
+		return STATUS_INVALID_HANDLE;
+	}
+	if (BaseAddress == nullptr || NumberOfBytesToProtect == nullptr) {
+		DEBUG_LOG("-> 0x%x\n", STATUS_INVALID_PARAMETER);
+		return STATUS_INVALID_PARAMETER;
 	}
 
-	if (OldAccessProtection) {
-		*OldAccessProtection = 0; // stub
+	void *base = *BaseAddress;
+	std::size_t length = static_cast<std::size_t>(*NumberOfBytesToProtect);
+	wibo::heap::VmStatus vmStatus =
+		wibo::heap::virtualProtect(base, length, static_cast<DWORD>(NewAccessProtection), OldAccessProtection);
+	if (vmStatus != wibo::heap::VmStatus::Success) {
+		NTSTATUS status = wibo::heap::ntStatusFromVmStatus(vmStatus);
+		DEBUG_LOG("-> 0x%x\n", status);
+		return status;
 	}
 
 	DEBUG_LOG("-> 0x%x\n", STATUS_SUCCESS);
 	return STATUS_SUCCESS;
 }
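Note: the old code always reported *OldAccessProtection as 0; virtualProtect now returns the previous protection from the bookkeeping layer, so callers can restore it afterwards. An illustrative round trip (the page pointer and the patching are hypothetical):

// Sketch only: make a committed guest page writable, patch it, then restore the old protection.
static bool patchFirstByte(void *page, std::size_t size, unsigned char value) {
	DWORD oldProtect = 0;
	if (wibo::heap::virtualProtect(page, size, PAGE_READWRITE, &oldProtect) != wibo::heap::VmStatus::Success)
		return false;
	static_cast<unsigned char *>(page)[0] = value;
	return wibo::heap::virtualProtect(page, size, oldProtect, nullptr) == wibo::heap::VmStatus::Success;
}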
@@ -2,6 +2,7 @@
 
 #include "common.h"
 #include "context.h"
+#include "heap.h"
 #include "modules.h"
 
 #include <cstdarg>
@@ -125,7 +126,7 @@ RPC_STATUS WINAPI RpcStringBindingComposeW(RPC_WSTR objUuid, RPC_WSTR protSeq, R
 
 	if (stringBinding) {
 		size_t length = encoded.size();
-		auto *buffer = static_cast<char16_t *>(std::malloc((length + 1) * sizeof(char16_t)));
+		auto *buffer = static_cast<char16_t *>(wibo::heap::guestMalloc((length + 1) * sizeof(char16_t)));
 		if (!buffer) {
			return RPC_S_OUT_OF_MEMORY;
		}

@@ -1,4 +1,3 @@
 #pragma once
 
 #define HOST_CONTEXT_GUARD()
-#define GUEST_CONTEXT_GUARD(tibPtr)

@@ -5,3 +5,9 @@
 typedef VOID(_CC_CDECL *EntryProc)();
 typedef BOOL(_CC_STDCALL *DllEntryProc)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved);
 typedef VOID(_CC_STDCALL *PIMAGE_TLS_CALLBACK)(PVOID DllHandle, DWORD Reason, PVOID Reserved);
+
+namespace entry {
+
+void CDECL stubBase(SIZE_T index);
+
+} // namespace entry

1229 src/heap.cpp (new file)
File diff suppressed because it is too large
53 src/heap.h (new file)
@@ -0,0 +1,53 @@
+#pragma once
+
+#include "types.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+
+struct mi_heap_s;
+typedef struct mi_heap_s mi_heap_t;
+
+namespace wibo::heap {
+
+bool initialize();
+uintptr_t systemPageSize();
+uintptr_t allocationGranularity();
+mi_heap_t *getGuestHeap();
+mi_heap_t *createGuestHeap();
+
+enum class VmStatus : uint32_t {
+	Success = 0,
+	InvalidParameter,
+	InvalidAddress,
+	NoAccess,
+	NotSupported,
+	NoMemory,
+	Rejected,
+	UnknownError,
+};
+
+// Guest heap memory allocation helpers
+void *guestMalloc(std::size_t size);
+void *guestCalloc(std::size_t count, std::size_t size);
+void *guestRealloc(void *ptr, std::size_t newSize);
+void guestFree(void *ptr);
+
+VmStatus virtualAlloc(void **baseAddress, std::size_t *regionSize, DWORD allocationType, DWORD protect,
+					  DWORD type = MEM_PRIVATE);
+VmStatus virtualFree(void *baseAddress, std::size_t regionSize, DWORD freeType);
+VmStatus virtualProtect(void *baseAddress, std::size_t regionSize, DWORD newProtect, DWORD *oldProtect);
+VmStatus virtualQuery(const void *address, MEMORY_BASIC_INFORMATION *outInfo);
+VmStatus virtualReset(void *baseAddress, std::size_t regionSize);
+
+VmStatus reserveViewRange(std::size_t regionSize, uintptr_t minAddr, uintptr_t maxAddr, void **baseAddress);
+void registerViewRange(void *baseAddress, std::size_t regionSize, DWORD allocationProtect, DWORD protect);
+void releaseViewRange(void *baseAddress);
+
+DWORD win32ErrorFromVmStatus(VmStatus status);
+NTSTATUS ntStatusFromVmStatus(VmStatus status);
+
+bool reserveGuestStack(std::size_t stackSizeBytes, void **outStackLimit, void **outStackBase);
+
+} // namespace wibo::heap
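Note: the implementation in src/heap.cpp is suppressed above, so the following is only a sketch of how guestMalloc and guestFree could sit on top of mimalloc, assuming mimalloc's standard mi_heap_* API and a guest heap created over pages reserved in the low 2GB (per the commit message):

#include <mimalloc.h>
#include "heap.h"

// Sketch, not the actual heap.cpp: serve guest allocations from a dedicated mimalloc heap.
static void *guestMallocSketch(std::size_t size) {
	mi_heap_t *heap = wibo::heap::getGuestHeap(); // declared above; assumed thread-appropriate
	return heap ? mi_heap_malloc(heap, size) : nullptr;
}

static void guestFreeSketch(void *ptr) {
	// mi_free works regardless of which mi_heap_t the block came from.
	if (ptr)
		mi_free(ptr);
}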
@@ -1,12 +1,13 @@
 #include "common.h"
 #include "errors.h"
+#include "heap.h"
 #include "kernel32/internal.h"
 #include "modules.h"
+#include "types.h"
 
 #include <algorithm>
 #include <cstring>
 #include <strings.h>
-#include <sys/mman.h>
 #include <sys/syscall.h>
 #include <unistd.h>
 
@@ -183,7 +184,7 @@ uint32_t read32(FILE *file) {
 
 wibo::Executable::~Executable() {
 	if (imageBase) {
-		munmap(imageBase, imageSize);
+		wibo::heap::virtualFree(imageBase, 0, MEM_RELEASE);
 		imageBase = nullptr;
 	}
 }
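Note: the 0 size in virtualFree(imageBase, 0, MEM_RELEASE) mirrors the VirtualFree contract, where MEM_RELEASE takes a zero size and releases the entire allocation made at that base address. An illustrative pairing under that assumption:

// Sketch only: reserve and commit a region, then release the whole reservation with size 0.
static void reserveAndRelease() {
	void *base = nullptr;
	std::size_t size = 0x10000;
	if (wibo::heap::virtualAlloc(&base, &size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE) ==
		wibo::heap::VmStatus::Success) {
		wibo::heap::virtualFree(base, 0, MEM_RELEASE);
	}
}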
@@ -239,20 +240,28 @@ bool wibo::Executable::loadPE(FILE *file, bool exec) {
 
 	// Build buffer
 	imageSize = header32.sizeOfImage;
-	int prot = PROT_READ | PROT_WRITE;
-	if (exec)
-		prot |= PROT_EXEC;
-	void *preferredBase = (void *)(uintptr_t)header32.imageBase;
-	imageBase = mmap(preferredBase, header32.sizeOfImage, prot, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-	if (imageBase == MAP_FAILED) {
-		imageBase = mmap(nullptr, header32.sizeOfImage, prot, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	DWORD initialProtect = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+	void *preferredBase = reinterpret_cast<void *>(static_cast<uintptr_t>(header32.imageBase));
+	void *allocatedBase = preferredBase;
+	std::size_t allocationSize = static_cast<std::size_t>(header32.sizeOfImage);
+	wibo::heap::VmStatus allocStatus = wibo::heap::virtualAlloc(
+		&allocatedBase, &allocationSize, MEM_RESERVE | MEM_COMMIT, initialProtect, MEM_IMAGE);
+	if (allocStatus != wibo::heap::VmStatus::Success) {
+		DEBUG_LOG("loadPE: preferred base allocation failed (status=%u), retrying anywhere\n",
+				  static_cast<unsigned>(allocStatus));
+		allocatedBase = nullptr;
+		allocationSize = static_cast<std::size_t>(header32.sizeOfImage);
+		allocStatus = wibo::heap::virtualAlloc(
+			&allocatedBase, &allocationSize, MEM_RESERVE | MEM_COMMIT, initialProtect, MEM_IMAGE);
 	}
-	if (imageBase == MAP_FAILED) {
-		perror("Image mapping failed!");
+	if (allocStatus != wibo::heap::VmStatus::Success) {
+		DEBUG_LOG("Image mapping failed (status=%u)\n", static_cast<unsigned>(allocStatus));
 		imageBase = nullptr;
 		return false;
 	}
-	relocationDelta = (intptr_t)((uintptr_t)imageBase - (uintptr_t)header32.imageBase);
+	imageBase = allocatedBase;
+	relocationDelta =
+		static_cast<intptr_t>(reinterpret_cast<uintptr_t>(imageBase) - static_cast<uintptr_t>(header32.imageBase));
 	memset(imageBase, 0, header32.sizeOfImage);
 	sections.clear();
 	uintptr_t imageBaseAddr = reinterpret_cast<uintptr_t>(imageBase);
@@ -315,7 +324,7 @@ bool wibo::Executable::loadPE(FILE *file, bool exec) {
 	if (exec && relocationDelta != 0) {
 		if (relocationDirectoryRVA == 0 || relocationDirectorySize == 0) {
 			DEBUG_LOG("Relocation required but no relocation directory present\n");
-			munmap(imageBase, imageSize);
+			wibo::heap::virtualFree(imageBase, 0, MEM_RELEASE);
 			imageBase = nullptr;
 			return false;
 		}
@@ -361,9 +370,34 @@ bool wibo::Executable::loadPE(FILE *file, bool exec) {
 }
 
 bool wibo::Executable::resolveImports() {
+	auto finalizeSections = [this]() -> bool {
+		if (!execMapped || sectionsProtected) {
+			return true;
+		}
+		for (const auto &section : sections) {
+			if (section.size == 0) {
+				continue;
+			}
+			void *sectionAddress = reinterpret_cast<void *>(section.base);
+			wibo::heap::VmStatus status =
+				wibo::heap::virtualProtect(sectionAddress, section.size, section.protect, nullptr);
+			if (status != wibo::heap::VmStatus::Success) {
+				DEBUG_LOG("resolveImports: failed to set section protection at %p (size=%zu, protect=0x%x) status=%u\n",
+						  sectionAddress, section.size, section.protect, static_cast<unsigned>(status));
+				kernel32::setLastError(wibo::heap::win32ErrorFromVmStatus(status));
+				return false;
+			}
+		}
+		sectionsProtected = true;
+		return true;
+	};
+
 	if (importsResolved || !execMapped) {
 		importsResolved = true;
 		importsResolving = false;
+		if (!finalizeSections()) {
+			return false;
+		}
 		return true;
 	}
 	if (importsResolving) {
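Note: finalizeSections applies each section's recorded protection once imports are resolved, which is why the image is first mapped PAGE_READWRITE (or PAGE_EXECUTE_READWRITE) in the loadPE hunk above. The per-section protect value is not computed in this hunk; a typical mapping from PE section characteristics, shown only as an illustration, looks like this:

// Sketch only: derive a PAGE_* constant from IMAGE_SCN_MEM_* section flags.
static DWORD protectFromCharacteristics(uint32_t characteristics) {
	const bool r = characteristics & 0x40000000; // IMAGE_SCN_MEM_READ
	const bool w = characteristics & 0x80000000; // IMAGE_SCN_MEM_WRITE
	const bool x = characteristics & 0x20000000; // IMAGE_SCN_MEM_EXECUTE
	if (x)
		return w ? PAGE_EXECUTE_READWRITE : (r ? PAGE_EXECUTE_READ : PAGE_EXECUTE);
	if (w)
		return PAGE_READWRITE;
	return r ? PAGE_READONLY : PAGE_NOACCESS;
}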
@@ -374,6 +408,9 @@ bool wibo::Executable::resolveImports() {
 	if (!importDirectoryRVA) {
 		importsResolved = true;
 		importsResolving = false;
+		if (!finalizeSections()) {
+			return false;
+		}
 		return true;
 	}
 
@@ -381,6 +418,9 @@ bool wibo::Executable::resolveImports() {
 	if (!dir) {
 		importsResolved = true;
 		importsResolving = false;
+		if (!finalizeSections()) {
+			return false;
+		}
 		return true;
 	}
 
@@ -464,5 +504,9 @@ bool wibo::Executable::resolveImports() {
 
 	importsResolved = true;
 	importsResolving = false;
+	if (!finalizeSections()) {
+		importsResolved = false;
+		return false;
+	}
 	return true;
 }

@@ -3,6 +3,7 @@
 #include "entry.h"
 #include "entry_trampolines.h"
 #include "files.h"
+#include "heap.h"
 #include "modules.h"
 #include "processes.h"
 #include "strutil.h"
@@ -501,10 +502,7 @@ int main(int argc, char **argv) {
 	kernel32::setLastError(0);
 
 	// Invoke the damn thing
-	{
-		GUEST_CONTEXT_GUARD(&tib);
 	call_EntryProc(entryPoint);
-	}
 	DEBUG_LOG("We came back\n");
 	wibo::shutdownModuleRegistry();
 	wibo::tls::cleanupTib(&tib);

@@ -1,11 +1,11 @@
 #include "modules.h"
 
 #include "common.h"
-#include "context.h"
 #include "entry.h"
 #include "entry_trampolines.h"
 #include "errors.h"
 #include "files.h"
+#include "heap.h"
 #include "kernel32/internal.h"
 #include "msvcrt.h"
 #include "msvcrt_trampolines.h"
@@ -87,16 +87,11 @@ std::string makeStubKey(const char *dllName, const char *funcName) {
 	return key;
 }
 
-void stubBase(size_t index) {
-	const char *func = stubFuncNames[index].empty() ? "<unknown>" : stubFuncNames[index].c_str();
-	const char *dll = stubDlls[index].empty() ? "<unknown>" : stubDlls[index].c_str();
-	fprintf(stderr, "wibo: call reached missing import %s from %s\n", func, dll);
-	fflush(stderr);
-	abort();
+template <size_t Index> void stubThunk() {
+	// Call the appropriate entry trampoline
+	thunk_entry_stubBase(Index);
 }
 
-template <size_t Index> void stubThunk() { stubBase(Index); }
-
 template <size_t... Indices>
 constexpr std::array<void (*)(void), sizeof...(Indices)> makeStubTable(std::index_sequence<Indices...>) {
 	return {{stubThunk<Indices>...}};
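Note: the missing-import stubs remain a compile-time table of thunks; each stubThunk<Index> now enters guest-callable code through the generated thunk_entry_stubBase trampoline instead of calling stubBase directly. The table construction itself is unchanged; a reduced, self-contained illustration of the index_sequence pattern (names are demo-only):

#include <array>
#include <cstddef>
#include <cstdio>
#include <utility>

template <std::size_t Index> void demoThunk() { std::printf("stub %zu hit\n", Index); }

template <std::size_t... Indices>
constexpr std::array<void (*)(), sizeof...(Indices)> makeDemoTable(std::index_sequence<Indices...>) {
	return {{demoThunk<Indices>...}};
}

// 64 distinct function pointers, one per import slot, generated at compile time.
constexpr auto demoTable = makeDemoTable(std::make_index_sequence<64>());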
@@ -293,7 +288,7 @@ bool allocateModuleTlsForThread(wibo::ModuleInfo &module, TEB *tib) {
 	void *block = nullptr;
 	const size_t allocationSize = info.allocationSize;
 	if (allocationSize > 0) {
-		block = std::malloc(allocationSize);
+		block = wibo::heap::guestMalloc(allocationSize);
 		if (!block) {
 			DEBUG_LOG(" allocateModuleTlsForThread: failed to allocate %zu bytes for %s\n", allocationSize,
 					  module.originalName.c_str());
@@ -716,6 +711,19 @@ void ensureExportsInitialized(wibo::ModuleInfo &info) {
 
 } // namespace
 
+namespace entry {
+
+// Trampoline target for missing imports
+void stubBase(SIZE_T index) {
+	const char *func = stubFuncNames[index].empty() ? "<unknown>" : stubFuncNames[index].c_str();
+	const char *dll = stubDlls[index].empty() ? "<unknown>" : stubDlls[index].c_str();
+	fprintf(stderr, "wibo: call reached missing import %s from %s\n", func, dll);
+	fflush(stderr);
+	abort();
+}
+
+} // namespace entry
+
 namespace wibo {
 
 void initializeModuleRegistry() { registry(); }

@@ -62,6 +62,7 @@ class Executable {
 	bool execMapped = false;
 	bool importsResolved = false;
 	bool importsResolving = false;
+	bool sectionsProtected = false;
 	std::vector<SectionInfo> sections;
 };
 
10 src/types.h
@@ -436,3 +436,13 @@ static_assert(offsetof(TEB, LastErrorValue) == 0x34, "LastErrorValue offset mism
 static_assert(offsetof(TEB, GdiTebBatch) == 0x1FC, "GdiTebBatch offset mismatch");
 static_assert(offsetof(TEB, DeallocationStack) == 0xE0C, "DeallocationStack offset mismatch");
 static_assert(offsetof(TEB, TlsSlots) == 0xE10, "TLS slots offset mismatch");
+
+typedef struct _MEMORY_BASIC_INFORMATION {
+	PVOID BaseAddress;
+	PVOID AllocationBase;
+	DWORD AllocationProtect;
+	SIZE_T RegionSize;
+	DWORD State;
+	DWORD Protect;
+	DWORD Type;
+} MEMORY_BASIC_INFORMATION, *PMEMORY_BASIC_INFORMATION;

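Note: MEMORY_BASIC_INFORMATION is the out-structure for wibo::heap::virtualQuery declared in src/heap.h, i.e. what a VirtualQuery-style API would fill in. A hypothetical caller, for illustration only:

// Sketch only: ask the bookkeeping layer what it knows about an address.
static void dumpRegion(const void *addr) {
	MEMORY_BASIC_INFORMATION info = {};
	if (wibo::heap::virtualQuery(addr, &info) != wibo::heap::VmStatus::Success)
		return;
	DEBUG_LOG("base=%p alloc=%p size=%zu state=0x%x protect=0x%x type=0x%x\n", info.BaseAddress,
			  info.AllocationBase, static_cast<size_t>(info.RegionSize), static_cast<unsigned>(info.State),
			  static_cast<unsigned>(info.Protect), static_cast<unsigned>(info.Type));
}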
@@ -30,6 +30,7 @@ from clang.cindex import (
     CursorKind,
     Index,
     TranslationUnit,
+    Type,
     TypeKind,
     conf,
 )
@@ -97,7 +98,7 @@ class ArgInfo:
     slot_size: int
     primitive: bool
     sign_extended: bool
-    type_str: str
+    type: Type
 
 
 @dataclass
@@ -108,6 +109,7 @@ class FuncInfo:
     source_cc: CallingConv
     target_cc: CallingConv
     variadic: bool
+    return_type: Type
     args: List[ArgInfo] = field(default_factory=list)
 
 
@@ -117,7 +119,7 @@ class TypedefInfo:
     source_cc: CallingConv
     target_cc: CallingConv
     variadic: bool
-    return_type: str
+    return_type: Type
     args: List[ArgInfo] = field(default_factory=list)
 
 
@@ -233,7 +235,7 @@ def _collect_args(func_type: CXType) -> List[ArgInfo]:
                 slot_size=slot_size,
                 primitive=is_primitive,
                 sign_extended=is_sign_extended,
-                type_str=_type_to_string(t),
+                type=t,
             )
         )
     return args
@@ -258,10 +260,11 @@ def collect_functions(tu: TranslationUnit, ns_filter: Optional[str]) -> List[Fun
                     qualified_ns="::".join(ns_parts),
                     name=name,
                     mangled=node.mangled_name or name,
-                    args=_collect_args(node.type),
                     source_cc=source_cc,
                     target_cc=_get_function_calling_conv(node.type),
                     variadic=node.type.is_function_variadic(),
+                    return_type=node.type.get_result(),
+                    args=_collect_args(node.type),
                 )
 
         # Recurse into children
@@ -519,8 +522,42 @@ def emit_header_mapping(
 
     # Guest-to-host thunk functions
     for f in funcs:
+        # Generate best-effort function prototype so that simple thunks can be called directly
+        # in special cases (e.g. thunk_entry_stubBase)
+        def _is_opaque(t: Type) -> bool:
+            if (
+                t.kind == TypeKind.RECORD
+                or t.kind == TypeKind.ENUM
+                or t.kind == TypeKind.FUNCTIONPROTO
+                or t.kind == TypeKind.FUNCTIONNOPROTO
+            ):
+                return True
+            return t.kind == TypeKind.POINTER and _is_opaque(
+                t.get_pointee().get_canonical()
+            )
+
+        def _canonical_type_str(t: Type) -> str:
+            c = t.get_canonical()
+            if _is_opaque(c):
+                return "void *"
+            return c.spelling
+
         thunk = f"thunk_{dll}_{f.name}"
-        lines.append(f"void {thunk}(void);")
+        args = []
+        for i, arg in enumerate(f.args):
+            type_str = _canonical_type_str(arg.type)
+            args.append(f"{type_str} arg{i}")
+        param_list = ", ".join(args)
+        return_type = _canonical_type_str(f.return_type)
+        if f.source_cc == CallingConv.X86_STDCALL:
+            cc_attr = "__attribute__((stdcall))"
+        elif f.source_cc == CallingConv.C:
+            cc_attr = "__attribute__((cdecl))"
+        else:
+            raise NotImplementedError(
+                f"Unsupported calling convention {f.source_cc} for function {f.name}"
+            )
+        lines.append(f"{cc_attr} {return_type} {thunk}({param_list});")
 
     # Host-to-guest thunk functions
     for td in typedefs:
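Note: with these best-effort prototypes, the generated header declares thunks that host code can call directly by name instead of the previous opaque void thunk_<dll>_<name>(void); form. As an illustration of the emitter's output (the exact spelling depends on the canonical type; this line is an assumption, not copied from the generated file), the trampoline used by stubThunk above would come out roughly as:

__attribute__((cdecl)) void thunk_entry_stubBase(unsigned int arg0);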
@@ -530,7 +567,8 @@ def emit_header_mapping(
 
         params = [f"{td.name} fn"]
         for i, arg in enumerate(td.args):
-            params.append(f"{arg.type_str} arg{i}")
+            type_str = _type_to_string(arg.type)
+            params.append(f"{type_str} arg{i}")
 
         param_list = ", ".join(params)
         lines.append(f"{td.return_type} {thunk}({param_list});")