diff --git a/CMakeLists.txt b/CMakeLists.txt
index dd86d78..e3c442c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -174,6 +174,17 @@ if(BUILD_TESTING)
       ${CMAKE_CURRENT_SOURCE_DIR}/test/test_time.c
       ${CMAKE_CURRENT_SOURCE_DIR}/test/test_assert.h)
 
+  add_custom_command(
+    OUTPUT ${WIBO_TEST_BIN_DIR}/test_virtualalloc.exe
+    COMMAND ${WIBO_MINGW_CC} -Wall -Wextra -O2
+      -I${CMAKE_CURRENT_SOURCE_DIR}/test
+      -o test_virtualalloc.exe
+      ${CMAKE_CURRENT_SOURCE_DIR}/test/test_virtualalloc.c
+    WORKING_DIRECTORY ${WIBO_TEST_BIN_DIR}
+    DEPENDS
+      ${CMAKE_CURRENT_SOURCE_DIR}/test/test_virtualalloc.c
+      ${CMAKE_CURRENT_SOURCE_DIR}/test/test_assert.h)
+
   add_custom_target(wibo_test_fixtures
     DEPENDS
       ${WIBO_TEST_BIN_DIR}/external_exports.dll
@@ -183,7 +194,8 @@ if(BUILD_TESTING)
       ${WIBO_TEST_BIN_DIR}/test_threading.exe
       ${WIBO_TEST_BIN_DIR}/test_heap.exe
      ${WIBO_TEST_BIN_DIR}/test_overlapped_io.exe
-      ${WIBO_TEST_BIN_DIR}/test_time.exe)
+      ${WIBO_TEST_BIN_DIR}/test_time.exe
+      ${WIBO_TEST_BIN_DIR}/test_virtualalloc.exe)
 
   if(CMAKE_CONFIGURATION_TYPES)
     set(_wibo_fixture_build_command
@@ -236,6 +248,12 @@ if(BUILD_TESTING)
     set_tests_properties(wibo.test_time PROPERTIES
       WORKING_DIRECTORY ${WIBO_TEST_BIN_DIR}
       DEPENDS wibo.build_fixtures)
+
+    add_test(NAME wibo.test_virtualalloc
+      COMMAND $<TARGET_FILE:wibo> ${WIBO_TEST_BIN_DIR}/test_virtualalloc.exe)
+    set_tests_properties(wibo.test_virtualalloc PROPERTIES
+      WORKING_DIRECTORY ${WIBO_TEST_BIN_DIR}
+      DEPENDS wibo.build_fixtures)
   endif()
  endif()
 endif()
diff --git a/dll/kernel32.cpp b/dll/kernel32.cpp
index 22b32db..196bc40 100644
--- a/dll/kernel32.cpp
+++ b/dll/kernel32.cpp
@@ -37,6 +37,8 @@
 #include
 #include
 #include
+#include <map>
+#include <mutex>
 
 namespace advapi32 {
 void releaseToken(void *tokenPtr);
@@ -4195,6 +4197,170 @@ namespace kernel32 {
 		return TRUE;
 	}
 
+	constexpr DWORD MEM_COMMIT = 0x00001000;
+	constexpr DWORD MEM_RESERVE = 0x00002000;
+	constexpr DWORD MEM_DECOMMIT = 0x00004000;
+	constexpr DWORD MEM_RELEASE = 0x00008000;
+	constexpr DWORD MEM_RESET = 0x00080000;
+	constexpr DWORD MEM_RESET_UNDO = 0x01000000;
+	constexpr DWORD MEM_TOP_DOWN = 0x00100000;
+	constexpr DWORD MEM_WRITE_WATCH = 0x00200000;
+	constexpr DWORD MEM_PHYSICAL = 0x00400000;
+	constexpr DWORD MEM_PRIVATE = 0x00020000;
+	constexpr DWORD MEM_LARGE_PAGES = 0x20000000;
+	constexpr size_t VIRTUAL_ALLOCATION_GRANULARITY = 64 * 1024;
+	constexpr DWORD MEM_COALESCE_PLACEHOLDERS = 0x00000001;
+	constexpr DWORD MEM_PRESERVE_PLACEHOLDER = 0x00000002;
+
+	namespace {
+	struct VirtualAllocation {
+		uintptr_t base = 0;
+		size_t size = 0;
+		DWORD allocationProtect = 0;
+		std::vector<DWORD> pageProtect; // 0 indicates reserved/uncommitted
+	};
+
+	static std::map<uintptr_t, VirtualAllocation> g_virtualAllocations;
+	static std::mutex g_virtualAllocMutex;
+
+	static size_t systemPageSize() {
+		static size_t cached = []() {
+			long detected = sysconf(_SC_PAGESIZE);
+			if (detected <= 0) {
+				return static_cast<size_t>(4096);
+			}
+			return static_cast<size_t>(detected);
+		}();
+		return cached;
+	}
+
+	static uintptr_t alignDown(uintptr_t value, size_t alignment) {
+		const uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
+		return value & ~mask;
+	}
+
+	static uintptr_t alignUp(uintptr_t value, size_t alignment) {
+		const uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
+		if (mask == std::numeric_limits<uintptr_t>::max()) {
+			return value;
+		}
+		if (value > std::numeric_limits<uintptr_t>::max() - mask) {
+			return std::numeric_limits<uintptr_t>::max();
+		}
+		return (value + mask) & ~mask;
+	}
+
+	static bool addOverflows(uintptr_t base, size_t amount) {
+		return base > std::numeric_limits<uintptr_t>::max() - static_cast<uintptr_t>(amount);
+	}
+
+	static uintptr_t regionEnd(const VirtualAllocation &region) { return region.base + region.size; }
+
+	static bool rangeOverlapsLocked(uintptr_t base, size_t length) {
+		if (length == 0) {
+			return false;
+		}
+		if (addOverflows(base, length - 1)) {
+			return true;
+		}
+		uintptr_t end = base + length;
+		auto next = g_virtualAllocations.lower_bound(base);
+		if (next != g_virtualAllocations.begin()) {
+			auto prev = std::prev(next);
+			if (regionEnd(prev->second) > base) {
+				return true;
+			}
+		}
+		if (next != g_virtualAllocations.end() && next->second.base < end) {
+			return true;
+		}
+		return false;
+	}
+
+	static std::map<uintptr_t, VirtualAllocation>::iterator findRegionIterator(uintptr_t address) {
+		auto it = g_virtualAllocations.upper_bound(address);
+		if (it == g_virtualAllocations.begin()) {
+			return g_virtualAllocations.end();
+		}
+		--it;
+		if (address >= regionEnd(it->second)) {
+			return g_virtualAllocations.end();
+		}
+		return it;
+	}
+
+	static VirtualAllocation *lookupRegion(uintptr_t address) {
+		auto it = findRegionIterator(address);
+		if (it == g_virtualAllocations.end()) {
+			return nullptr;
+		}
+		return &it->second;
+	}
+
+	static bool rangeWithinRegion(const VirtualAllocation &region, uintptr_t start, size_t length) {
+		if (length == 0) {
+			return start >= region.base && start <= regionEnd(region);
+		}
+		if (start < region.base) {
+			return false;
+		}
+		if (addOverflows(start, length)) {
+			return false;
+		}
+		return (start + length) <= regionEnd(region);
+	}
+
+	static void markCommitted(VirtualAllocation &region, uintptr_t start, size_t length, DWORD protect) {
+		if (length == 0) {
+			return;
+		}
+		const size_t pageSize = systemPageSize();
+		const size_t firstPage = (start - region.base) / pageSize;
+		const size_t pageCount = length / pageSize;
+		for (size_t i = 0; i < pageCount; ++i) {
+			region.pageProtect[firstPage + i] = protect;
+		}
+	}
+
+	static void markDecommitted(VirtualAllocation &region, uintptr_t start, size_t length) {
+		if (length == 0) {
+			return;
+		}
+		const size_t pageSize = systemPageSize();
+		const size_t firstPage = (start - region.base) / pageSize;
+		const size_t pageCount = length / pageSize;
+		for (size_t i = 0; i < pageCount; ++i) {
+			region.pageProtect[firstPage + i] = 0;
+		}
+	}
+
+	static void *alignedReserve(size_t length, int prot, int flags) {
+		const size_t granularity = VIRTUAL_ALLOCATION_GRANULARITY;
+		const size_t request = length + granularity;
+		void *raw = mmap(nullptr, request, prot, flags, -1, 0);
+		if (raw == MAP_FAILED) {
+			return MAP_FAILED;
+		}
+		uintptr_t rawAddr = reinterpret_cast<uintptr_t>(raw);
+		uintptr_t aligned = alignUp(rawAddr, granularity);
+		size_t front = aligned - rawAddr;
+		size_t back = (rawAddr + request) - (aligned + length);
+		if (front != 0) {
+			if (munmap(raw, front) != 0) {
+				munmap(raw, request);
+				return MAP_FAILED;
+			}
+		}
+		if (back != 0) {
+			if (munmap(reinterpret_cast<void *>(aligned + length), back) != 0) {
+				munmap(reinterpret_cast<void *>(aligned), length);
+				return MAP_FAILED;
+			}
+		}
+		return reinterpret_cast<void *>(aligned);
+	}
+	} // namespace
+
 	static int translateProtect(DWORD flProtect) {
 		switch (flProtect) {
 		case 0x01: /* PAGE_NOACCESS */
@@ -4219,31 +4385,290 @@ namespace kernel32 {
 		}
 	}
 
-	void *WIN_FUNC VirtualAlloc(void *lpAddress, unsigned int dwSize, unsigned int flAllocationType, unsigned int flProtect) {
+	void *WIN_FUNC VirtualAlloc(void *lpAddress, unsigned int dwSize, unsigned int flAllocationType,
+								unsigned int flProtect) {
 		DEBUG_LOG("VirtualAlloc(%p, %u, %u, %u)\n", lpAddress, dwSize, flAllocationType, flProtect);
-		int prot = translateProtect(flProtect);
-
-		int flags = MAP_PRIVATE | MAP_ANONYMOUS; // MAP_ANONYMOUS ensures the memory is zeroed out
-		if (lpAddress != NULL) {
-			flags |= MAP_FIXED;
+		if (dwSize == 0) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
+			return nullptr;
 		}
-		void* result = mmap(lpAddress, dwSize, prot, flags, -1, 0);
-		// Windows only fences off the lower 2GB of the 32-bit address space for the private use of processes.
-		assert(result < (void*)0x80000000);
-		if (result == MAP_FAILED) {
-			DEBUG_LOG("mmap failed\n");
-			return NULL;
+
+		const DWORD unsupportedFlags =
+			flAllocationType & (MEM_WRITE_WATCH | MEM_PHYSICAL | MEM_LARGE_PAGES | MEM_RESET_UNDO);
+		if (unsupportedFlags != 0) {
+			DEBUG_LOG("VirtualAlloc unsupported flags: 0x%x\n", unsupportedFlags);
+			wibo::lastError = ERROR_NOT_SUPPORTED;
+			return nullptr;
 		}
-		else {
-			DEBUG_LOG("-> %p\n", result);
+
+		bool reserve = (flAllocationType & MEM_RESERVE) != 0;
+		bool commit = (flAllocationType & MEM_COMMIT) != 0;
+		const bool reset = (flAllocationType & MEM_RESET) != 0;
+
+		if (!reserve && commit && lpAddress == nullptr) {
+			reserve = true;
+		}
+
+		if (reset) {
+			if (reserve || commit) {
+				wibo::lastError = ERROR_INVALID_PARAMETER;
+				return nullptr;
+			}
+			if (!lpAddress) {
+				wibo::lastError = ERROR_INVALID_ADDRESS;
+				return nullptr;
+			}
+			const size_t pageSize = systemPageSize();
+			uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
+			if (addOverflows(request, static_cast<size_t>(dwSize))) {
+				wibo::lastError = ERROR_INVALID_PARAMETER;
+				return nullptr;
+			}
+			uintptr_t start = alignDown(request, pageSize);
+			uintptr_t end = alignUp(request + static_cast<size_t>(dwSize), pageSize);
+			size_t length = static_cast<size_t>(end - start);
+			std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
+			VirtualAllocation *region = lookupRegion(start);
+			if (!region || !rangeWithinRegion(*region, start, length)) {
+				wibo::lastError = ERROR_INVALID_ADDRESS;
+				return nullptr;
+			}
+#ifdef MADV_FREE
+			int advice = MADV_FREE;
+#else
+			int advice = MADV_DONTNEED;
+#endif
+			if (madvise(reinterpret_cast<void *>(start), length, advice) != 0) {
+				wibo::lastError = wibo::winErrorFromErrno(errno);
+				return nullptr;
+			}
+			wibo::lastError = ERROR_SUCCESS;
+			return reinterpret_cast<void *>(start);
+		}
+
+		if (!reserve && !commit) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
+			return nullptr;
+		}
+
+		const size_t pageSize = systemPageSize();
+		std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
+
+		if (reserve) {
+			uintptr_t base = 0;
+			size_t length = 0;
+			if (lpAddress) {
+				uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
+				base = alignDown(request, VIRTUAL_ALLOCATION_GRANULARITY);
+				size_t offset = static_cast<size_t>(request - base);
+				if (addOverflows(offset, static_cast<size_t>(dwSize))) {
+					wibo::lastError = ERROR_INVALID_PARAMETER;
+					return nullptr;
+				}
+				size_t span = static_cast<size_t>(dwSize) + offset;
+				uintptr_t alignedSpan = alignUp(span, pageSize);
+				if (alignedSpan == std::numeric_limits<uintptr_t>::max()) {
+					wibo::lastError = ERROR_INVALID_PARAMETER;
+					return nullptr;
+				}
+				length = static_cast<size_t>(alignedSpan);
+				if (length == 0 || rangeOverlapsLocked(base, length)) {
+					wibo::lastError = ERROR_INVALID_ADDRESS;
+					return nullptr;
+				}
+			} else {
+				uintptr_t aligned = alignUp(static_cast<uintptr_t>(dwSize), pageSize);
+				if (aligned == std::numeric_limits<uintptr_t>::max() || aligned == 0) {
+					wibo::lastError = ERROR_INVALID_PARAMETER;
+					return nullptr;
+				}
+				length = static_cast<size_t>(aligned);
+			}
+			const int prot = commit ? translateProtect(flProtect) : PROT_NONE;
+			int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+			if (!commit) {
+				flags |= MAP_NORESERVE;
+			}
+			void *result = MAP_FAILED;
+			if (lpAddress) {
+#ifdef MAP_FIXED_NOREPLACE
+				flags |= MAP_FIXED_NOREPLACE;
+#else
+				flags |= MAP_FIXED;
+#endif
+				result = mmap(reinterpret_cast<void *>(base), length, prot, flags, -1, 0);
+			} else {
+				result = alignedReserve(length, prot, flags);
+			}
+			if (result == MAP_FAILED) {
+				wibo::lastError = wibo::winErrorFromErrno(errno);
+				return nullptr;
+			}
+			if (reinterpret_cast<uintptr_t>(result) >= 0x80000000) {
+				munmap(result, length);
+				wibo::lastError = ERROR_NOT_ENOUGH_MEMORY;
+				return nullptr;
+			}
+			uintptr_t actualBase = reinterpret_cast<uintptr_t>(result);
+			VirtualAllocation allocation{};
+			allocation.base = actualBase;
+			allocation.size = length;
+			allocation.allocationProtect = flProtect;
+			allocation.pageProtect.assign(length / pageSize, commit ? flProtect : 0);
+			g_virtualAllocations[actualBase] = std::move(allocation);
+			wibo::lastError = ERROR_SUCCESS;
 			return result;
 		}
+
+		uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
+		if (addOverflows(request, static_cast<size_t>(dwSize))) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
+			return nullptr;
+		}
+		uintptr_t start = alignDown(request, pageSize);
+		uintptr_t end = alignUp(request + static_cast<size_t>(dwSize), pageSize);
+		size_t length = static_cast<size_t>(end - start);
+		if (length == 0) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
+			return nullptr;
+		}
+		VirtualAllocation *region = lookupRegion(start);
+		if (!region || !rangeWithinRegion(*region, start, length)) {
+			wibo::lastError = ERROR_INVALID_ADDRESS;
+			return nullptr;
+		}
+		const int prot = translateProtect(flProtect);
+		const size_t firstPage = (start - region->base) / pageSize;
+		const size_t pageCount = length / pageSize;
+		DEBUG_LOG("VirtualAlloc commit within region base=%p size=%zu firstPage=%zu pageCount=%zu vectorSize=%zu\n",
+				  reinterpret_cast<void *>(region->base), region->size, firstPage, pageCount,
+				  region->pageProtect.size());
+		std::vector<std::pair<uintptr_t, size_t>> committedRuns;
+		committedRuns.reserve(pageCount);
+		size_t localIndex = 0;
+		while (localIndex < pageCount) {
+			size_t absoluteIndex = firstPage + localIndex;
+			if (region->pageProtect[absoluteIndex] != 0) {
+				++localIndex;
+				continue;
+			}
+			size_t runBeginLocal = localIndex;
+			while (localIndex < pageCount && region->pageProtect[firstPage + localIndex] == 0) {
+				++localIndex;
+			}
+			size_t runPages = localIndex - runBeginLocal;
+			uintptr_t runBase = region->base + (firstPage + runBeginLocal) * pageSize;
+			size_t runLength = runPages * pageSize;
+			void *mapped = mmap(reinterpret_cast<void *>(runBase), runLength, prot,
+								MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+			if (mapped == MAP_FAILED) {
+				for (const auto &run : committedRuns) {
+					mmap(reinterpret_cast<void *>(run.first), run.second, PROT_NONE,
+						 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE, -1, 0);
+					markDecommitted(*region, run.first, run.second);
+				}
+				wibo::lastError = wibo::winErrorFromErrno(errno);
+				return nullptr;
+			}
+			committedRuns.emplace_back(runBase, runLength);
+			markCommitted(*region, runBase, runLength, flProtect);
+		}
+		wibo::lastError = ERROR_SUCCESS;
+		DEBUG_LOG("VirtualAlloc commit success -> %p\n", reinterpret_cast<void *>(start));
+		return reinterpret_cast<void *>(start);
 	}
 
 	unsigned int WIN_FUNC VirtualFree(void *lpAddress, unsigned int dwSize, int dwFreeType) {
-		DEBUG_LOG("STUB: VirtualFree(%p, %u, %i)\n", lpAddress, dwSize, dwFreeType);
+		DEBUG_LOG("VirtualFree(%p, %u, %i)\n", lpAddress, dwSize, dwFreeType);
+		if (!lpAddress) {
+			wibo::lastError = ERROR_INVALID_ADDRESS;
+			return 0;
+		}
+
+		DWORD freeType = static_cast<DWORD>(dwFreeType);
+		if ((freeType & (MEM_COALESCE_PLACEHOLDERS | MEM_PRESERVE_PLACEHOLDER)) != 0) {
+			wibo::lastError = ERROR_NOT_SUPPORTED;
+			return 0;
+		}
+
+		const bool release = (freeType & MEM_RELEASE) != 0;
+		const bool decommit = (freeType & MEM_DECOMMIT) != 0;
+		if (release == decommit) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
+			return 0;
+		}
+
+		const size_t pageSize = systemPageSize();
+		std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
+
+		if (release) {
+			uintptr_t base = reinterpret_cast<uintptr_t>(lpAddress);
+			auto exact = g_virtualAllocations.find(base);
+			if (exact == g_virtualAllocations.end()) {
+				auto containing = findRegionIterator(base);
+				if (dwSize != 0 && containing != g_virtualAllocations.end()) {
+					wibo::lastError = ERROR_INVALID_PARAMETER;
+				} else {
+					wibo::lastError = ERROR_INVALID_ADDRESS;
+				}
+				return 0;
+			}
+			if (dwSize != 0) {
+				wibo::lastError = ERROR_INVALID_PARAMETER;
+				return 0;
+			}
+			size_t length = exact->second.size;
+			g_virtualAllocations.erase(exact);
+			lock.unlock();
+			if (munmap(lpAddress, length) != 0) {
+				wibo::lastError = wibo::winErrorFromErrno(errno);
+				return 0;
+			}
+			wibo::lastError = ERROR_SUCCESS;
+			return 1;
+		}
+
+		uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
+		auto regionIt = findRegionIterator(request);
+		if (regionIt == g_virtualAllocations.end()) {
+			wibo::lastError = ERROR_INVALID_ADDRESS;
+			return 0;
+		}
+		VirtualAllocation &region = regionIt->second;
+		uintptr_t start = alignDown(request, pageSize);
+		uintptr_t end = 0;
+		if (dwSize == 0) {
+			if (request != region.base) {
+				wibo::lastError = ERROR_INVALID_PARAMETER;
+				return 0;
+			}
+			start = region.base;
+			end = region.base + region.size;
+		} else {
+			if (addOverflows(request, static_cast<size_t>(dwSize))) {
+				wibo::lastError = ERROR_INVALID_PARAMETER;
+				return 0;
+			}
+			end = alignUp(request + static_cast<size_t>(dwSize), pageSize);
+		}
+		if (end <= start) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
+			return 0;
+		}
+		size_t length = static_cast<size_t>(end - start);
+		if (!rangeWithinRegion(region, start, length)) {
+			wibo::lastError = ERROR_INVALID_ADDRESS;
+			return 0;
+		}
+		void *result = mmap(reinterpret_cast<void *>(start), length, PROT_NONE,
+							MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE, -1, 0);
+		if (result == MAP_FAILED) {
+			wibo::lastError = wibo::winErrorFromErrno(errno);
+			return 0;
+		}
+		markDecommitted(region, start, length);
+		wibo::lastError = ERROR_SUCCESS;
 		return 1;
 	}
 
@@ -4253,16 +4678,56 @@ namespace kernel32 {
 			wibo::lastError = ERROR_INVALID_PARAMETER;
 			return FALSE;
 		}
-		if (lpflOldProtect)
-			*lpflOldProtect = flNewProtect;
-		size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
-		uintptr_t base = reinterpret_cast<uintptr_t>(lpAddress) & ~(pageSize - 1);
-		size_t length = ((reinterpret_cast<uintptr_t>(lpAddress) + dwSize) - base + pageSize - 1) & ~(pageSize - 1);
-		int prot = translateProtect(flNewProtect);
-		if (mprotect(reinterpret_cast<void *>(base), length, prot) != 0) {
-			perror("VirtualProtect/mprotect");
+
+		const size_t pageSize = systemPageSize();
+		uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
+		uintptr_t start = alignDown(request, pageSize);
+		uintptr_t end = alignUp(request + static_cast<size_t>(dwSize), pageSize);
+		if (end <= start) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
 			return FALSE;
 		}
+
+		std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
+		VirtualAllocation *region = lookupRegion(start);
+		if (!region || !rangeWithinRegion(*region, start, static_cast<size_t>(end - start))) {
+			wibo::lastError = ERROR_INVALID_ADDRESS;
+			return FALSE;
+		}
+
+		const size_t firstPage = (start - region->base) / pageSize;
+		const size_t pageCount = (end - start) / pageSize;
+		if (pageCount == 0) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
+			return FALSE;
+		}
+
+		DWORD previousProtect = region->pageProtect[firstPage];
+		if (previousProtect == 0) {
+			wibo::lastError = ERROR_NOACCESS;
+			return FALSE;
+		}
+		for (size_t i = 0; i < pageCount; ++i) {
+			if (region->pageProtect[firstPage + i] == 0) {
+				wibo::lastError = ERROR_NOACCESS;
+				return FALSE;
+			}
+		}
+
+		int prot = translateProtect(flNewProtect);
+		if (mprotect(reinterpret_cast<void *>(start), end - start, prot) != 0) {
+			wibo::lastError = wibo::winErrorFromErrno(errno);
+			return FALSE;
+		}
+		for (size_t i = 0; i < pageCount; ++i) {
+			region->pageProtect[firstPage + i] = flNewProtect;
+		}
+		lock.unlock();
+
+		if (lpflOldProtect) {
+			*lpflOldProtect = previousProtect;
+		}
+		wibo::lastError = ERROR_SUCCESS;
 		return TRUE;
 	}
 
@@ -4278,17 +4743,58 @@ namespace kernel32 {
 	SIZE_T WIN_FUNC VirtualQuery(const void *lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength) {
 		DEBUG_LOG("VirtualQuery(%p, %p, %zu)\n", lpAddress, lpBuffer, dwLength);
-		if (!lpBuffer || dwLength < sizeof(MEMORY_BASIC_INFORMATION)) {
+		if (!lpBuffer || dwLength < sizeof(MEMORY_BASIC_INFORMATION) || !lpAddress) {
+			wibo::lastError = ERROR_INVALID_PARAMETER;
 			return 0;
 		}
-		memset(lpBuffer, 0, sizeof(MEMORY_BASIC_INFORMATION));
-		lpBuffer->BaseAddress = const_cast<void *>(lpAddress);
-		lpBuffer->AllocationBase = lpBuffer->BaseAddress;
-		lpBuffer->AllocationProtect = 0x04; // PAGE_READWRITE
-		lpBuffer->RegionSize = static_cast<SIZE_T>(sysconf(_SC_PAGESIZE));
-		lpBuffer->State = 0x1000; // MEM_COMMIT
-		lpBuffer->Protect = 0x04; // PAGE_READWRITE
-		lpBuffer->Type = 0x20000; // MEM_PRIVATE
+
+		std::memset(lpBuffer, 0, sizeof(MEMORY_BASIC_INFORMATION));
+		const size_t pageSize = systemPageSize();
+		uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
+		uintptr_t pageBase = alignDown(request, pageSize);
+
+		std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
+		VirtualAllocation *region = lookupRegion(pageBase);
+		if (!region) {
+			wibo::lastError = ERROR_INVALID_ADDRESS;
+			return 0;
+		}
+
+		const size_t pageIndex = (pageBase - region->base) / pageSize;
+		if (pageIndex >= region->pageProtect.size()) {
+			wibo::lastError = ERROR_INVALID_ADDRESS;
+			return 0;
+		}
+		const bool committed = region->pageProtect[pageIndex] != 0;
+		uintptr_t blockStart = pageBase;
+		uintptr_t blockEnd = pageBase + pageSize;
+		while (blockStart > region->base) {
+			size_t idx = (blockStart - region->base) / pageSize - 1;
+			const bool pageCommitted = region->pageProtect[idx] != 0;
+			if (pageCommitted != committed) {
+				break;
+			}
+			blockStart -= pageSize;
+		}
+		while (blockEnd < region->base + region->size) {
+			size_t idx = (blockEnd - region->base) / pageSize;
+			const bool pageCommitted = region->pageProtect[idx] != 0;
+			if (pageCommitted != committed) {
+				break;
+			}
+			blockEnd += pageSize;
+		}
+
+		lpBuffer->BaseAddress = reinterpret_cast<void *>(blockStart);
+		lpBuffer->AllocationBase = reinterpret_cast<void *>(region->base);
+		lpBuffer->AllocationProtect =
+			region->allocationProtect != 0 ? region->allocationProtect : 0x01; // PAGE_NOACCESS fallback
+		lpBuffer->RegionSize = blockEnd - blockStart;
+		lpBuffer->State = committed ? MEM_COMMIT : MEM_RESERVE;
+		lpBuffer->Protect = committed ? region->pageProtect[pageIndex] : 0;
+		lpBuffer->Type = MEM_PRIVATE;
+		lock.unlock();
+		wibo::lastError = ERROR_SUCCESS;
 		return sizeof(MEMORY_BASIC_INFORMATION);
 	}
 
@@ -5935,8 +6441,6 @@ static void *resolveByName(const char *name) {
 	if (strcmp(name, "DecodePointer") == 0) return (void *) kernel32::DecodePointer;
 	if (strcmp(name, "SetDllDirectoryA") == 0) return (void *) kernel32::SetDllDirectoryA;
 	if (strcmp(name, "Sleep") == 0) return (void *) kernel32::Sleep;
-	if (strcmp(name, "VirtualProtect") == 0) return (void *) kernel32::VirtualProtect;
-	if (strcmp(name, "VirtualQuery") == 0) return (void *) kernel32::VirtualQuery;
 
 	// processenv.h
 	if (strcmp(name, "GetCommandLineA") == 0) return (void *) kernel32::GetCommandLineA;
@@ -6058,6 +6562,8 @@ static void *resolveByName(const char *name) {
 	// memoryapi.h
 	if (strcmp(name, "VirtualAlloc") == 0) return (void *) kernel32::VirtualAlloc;
 	if (strcmp(name, "VirtualFree") == 0) return (void *) kernel32::VirtualFree;
+	if (strcmp(name, "VirtualProtect") == 0) return (void *) kernel32::VirtualProtect;
+	if (strcmp(name, "VirtualQuery") == 0) return (void *) kernel32::VirtualQuery;
 	if (strcmp(name, "GetProcessWorkingSetSize") == 0) return (void *) kernel32::GetProcessWorkingSetSize;
 	if (strcmp(name, "SetProcessWorkingSetSize") == 0) return (void *) kernel32::SetProcessWorkingSetSize;
 
diff --git a/errors.h b/errors.h
index 5e24563..b1a29a9 100644
--- a/errors.h
+++ b/errors.h
@@ -11,6 +11,8 @@
 #define ERROR_NO_MORE_FILES 18
 #define ERROR_READ_FAULT 30
 #define ERROR_HANDLE_EOF 38
+#define ERROR_INVALID_ADDRESS 487
+#define ERROR_NOACCESS 998
 #define ERROR_BROKEN_PIPE 109
 #define ERROR_NOT_SUPPORTED 50
 #define ERROR_INVALID_PARAMETER 87
diff --git a/test/test_virtualalloc.c b/test/test_virtualalloc.c
new file mode 100644
index 0000000..4a52e01
--- /dev/null
+++ b/test/test_virtualalloc.c
@@ -0,0 +1,106 @@
+#include <windows.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "test_assert.h"
+
+static SIZE_T query_page_size(void) {
+	SYSTEM_INFO info;
+	GetSystemInfo(&info);
+	return info.dwPageSize;
+}
+
+int main(void) {
+	const SIZE_T page = query_page_size();
+
+	uint8_t *reserved = (uint8_t *)VirtualAlloc(NULL, page * 2, MEM_RESERVE, PAGE_READWRITE);
+	TEST_CHECK(reserved != NULL);
+	TEST_CHECK(((uintptr_t)reserved % (64 * 1024)) == 0);
+
+	uint8_t *reserveCommit = (uint8_t *)VirtualAlloc(NULL, page, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+	TEST_CHECK(reserveCommit != NULL);
+	TEST_CHECK(((uintptr_t)reserveCommit % (64 * 1024)) == 0);
+	reserveCommit[page - 1] = 0x77;
+	TEST_CHECK(VirtualFree(reserveCommit, 0, MEM_RELEASE));
+
+	uint8_t *directCommit = (uint8_t *)VirtualAlloc(NULL, page, MEM_COMMIT, PAGE_READWRITE);
+	TEST_CHECK(directCommit != NULL);
+	TEST_CHECK(((uintptr_t)directCommit % (64 * 1024)) == 0);
+	directCommit[0] = 0x55;
+	directCommit[page - 1] = 0x66;
+	TEST_CHECK(VirtualFree(directCommit, 0, MEM_RELEASE));
+
+	MEMORY_BASIC_INFORMATION mbi;
+	TEST_CHECK_EQ(sizeof(mbi), VirtualQuery(reserved, &mbi, sizeof(mbi)));
+	TEST_CHECK_EQ((uintptr_t)reserved, (uintptr_t)mbi.BaseAddress);
+	TEST_CHECK_EQ((uintptr_t)reserved, (uintptr_t)mbi.AllocationBase);
+	TEST_CHECK_EQ(PAGE_READWRITE, mbi.AllocationProtect);
+	TEST_CHECK_EQ(page * 2, mbi.RegionSize);
+	TEST_CHECK_EQ(MEM_RESERVE, mbi.State);
+	TEST_CHECK(mbi.Protect == 0 || mbi.Protect == PAGE_NOACCESS);
+	TEST_CHECK_EQ(MEM_PRIVATE, mbi.Type);
+
+	uint8_t *first = (uint8_t *)VirtualAlloc(reserved, page, MEM_COMMIT, PAGE_READWRITE);
+	TEST_CHECK(first == reserved);
+	first[0] = 0xAB;
+	first[page - 1] = 0xCD;
+
+	TEST_CHECK_EQ(sizeof(mbi), VirtualQuery(reserved, &mbi, sizeof(mbi)));
+	TEST_CHECK_EQ((uintptr_t)reserved, (uintptr_t)mbi.BaseAddress);
+	TEST_CHECK_EQ(MEM_COMMIT, mbi.State);
+	TEST_CHECK_EQ(page, mbi.RegionSize);
+	TEST_CHECK_EQ(PAGE_READWRITE, mbi.Protect);
+
+	TEST_CHECK_EQ(sizeof(mbi), VirtualQuery(reserved + page, &mbi, sizeof(mbi)));
+	TEST_CHECK_EQ((uintptr_t)(reserved + page), (uintptr_t)mbi.BaseAddress);
+	TEST_CHECK_EQ(MEM_RESERVE, mbi.State);
+	TEST_CHECK_EQ(page, mbi.RegionSize);
+	TEST_CHECK(mbi.Protect == 0 || mbi.Protect == PAGE_NOACCESS);
+
+	uint8_t *second = (uint8_t *)VirtualAlloc(reserved + page, page, MEM_COMMIT, PAGE_READONLY);
+	TEST_CHECK(second == reserved + page);
+
+	TEST_CHECK_EQ(sizeof(mbi), VirtualQuery(reserved + page, &mbi, sizeof(mbi)));
+	TEST_CHECK_EQ(MEM_COMMIT, mbi.State);
+	TEST_CHECK_EQ(PAGE_READONLY, mbi.Protect);
+
+	DWORD oldProtect = 0;
+	TEST_CHECK(VirtualProtect(second, page, PAGE_READWRITE, &oldProtect));
+	TEST_CHECK_EQ(PAGE_READONLY, oldProtect);
+	TEST_CHECK_EQ(sizeof(mbi), VirtualQuery(second, &mbi, sizeof(mbi)));
+	TEST_CHECK_EQ(PAGE_READWRITE, mbi.Protect);
+
+	TEST_CHECK(VirtualFree(second, page, MEM_DECOMMIT));
+
+	SetLastError(0);
+	TEST_CHECK(!VirtualProtect(second, page, PAGE_READWRITE, NULL));
+	TEST_CHECK_EQ(ERROR_NOACCESS, GetLastError());
+
+	TEST_CHECK_EQ(sizeof(mbi), VirtualQuery(second, &mbi, sizeof(mbi)));
+	TEST_CHECK_EQ(MEM_RESERVE, mbi.State);
+	TEST_CHECK(mbi.Protect == 0 || mbi.Protect == PAGE_NOACCESS);
+
+	TEST_CHECK(VirtualFree(first, page, MEM_DECOMMIT));
+
+	uint8_t *recommit = (uint8_t *)VirtualAlloc(reserved, page, MEM_COMMIT, PAGE_READWRITE);
+	TEST_CHECK(recommit == reserved);
+	for (SIZE_T i = 0; i < page; ++i) {
+		TEST_CHECK(recommit[i] == 0);
+	}
+
+	SetLastError(0);
+	TEST_CHECK(!VirtualFree(reserved + page, page, MEM_RELEASE));
+	TEST_CHECK_EQ(ERROR_INVALID_PARAMETER, GetLastError());
+
+	SetLastError(0);
+	TEST_CHECK(!VirtualFree(reserved + page, 0, MEM_RELEASE));
+	TEST_CHECK_EQ(ERROR_INVALID_ADDRESS, GetLastError());
+
+	TEST_CHECK(VirtualFree(reserved, 0, MEM_RELEASE));
+
+	SetLastError(0);
+	TEST_CHECK(VirtualAlloc(reserved, page, MEM_COMMIT, PAGE_READWRITE) == NULL);
+	TEST_CHECK_EQ(ERROR_INVALID_ADDRESS, GetLastError());
+
+	return EXIT_SUCCESS;
+}