#include "memoryapi.h"
|
|
#include "common.h"
|
|
#include "errors.h"
|
|
#include "files.h"
|
|
#include "handles.h"
|
|
#include "internal.h"
|
|
#include "strutil.h"
|
|
|
|
#include <algorithm>
|
|
#include <cerrno>
|
|
#include <fcntl.h>
|
|
#include <iterator>
|
|
#include <limits>
|
|
#include <map>
|
|
#include <mutex>
|
|
#include <sys/mman.h>
|
|
#include <unistd.h>
|
|
#include <unordered_map>
|
|
#include <utility>
|
|
#include <vector>
|
|
|
|
namespace kernel32 {
int64_t getFileSize(HANDLE hFile);
}

namespace {

constexpr size_t kVirtualAllocationGranularity = 64 * 1024;

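// Backing state for a file-mapping object created by CreateFileMappingA/W.
// Anonymous mappings keep fd == -1; file-backed mappings own a dup'd descriptor.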
struct MappingObject {
	int fd = -1;
	size_t maxSize = 0;
	DWORD protect = 0;
	bool anonymous = false;
	bool closed = false;
	size_t refCount = 0;
};

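// Bookkeeping for each view returned by MapViewOfFile, so UnmapViewOfFile can
// munmap the full page-aligned mapping and drop the owner's reference.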
struct ViewInfo {
	void *mapBase = nullptr;
	size_t mapLength = 0;
	MappingObject *owner = nullptr;
};

std::unordered_map<void *, ViewInfo> g_viewInfo;

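// A mapping object stays alive until its handle is closed and every view is
// unmapped; these helpers release the descriptor and object once both hold.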
void closeMappingIfPossible(MappingObject *mapping) {
	if (!mapping) {
		return;
	}
	if (mapping->fd != -1) {
		close(mapping->fd);
		mapping->fd = -1;
	}
	delete mapping;
}

void tryReleaseMapping(MappingObject *mapping) {
	if (!mapping) {
		return;
	}
	if (mapping->closed && mapping->refCount == 0) {
		closeMappingIfPossible(mapping);
	}
}

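// One entry per VirtualAlloc reservation. pageProtect records the Win32
// protection of each page; 0 means the page is reserved but not committed.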
struct VirtualAllocation {
	uintptr_t base = 0;
	size_t size = 0;
	DWORD allocationProtect = 0;
	std::vector<DWORD> pageProtect;
};

std::map<uintptr_t, VirtualAllocation> g_virtualAllocations;
std::mutex g_virtualAllocMutex;

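// Host page size, cached after the first query; falls back to 4 KiB if
// sysconf(_SC_PAGESIZE) fails.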
size_t systemPageSize() {
	static size_t cached = []() {
		long detected = sysconf(_SC_PAGESIZE);
		if (detected <= 0) {
			return static_cast<size_t>(4096);
		}
		return static_cast<size_t>(detected);
	}();
	return cached;
}

uintptr_t alignDown(uintptr_t value, size_t alignment) {
	const uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
	return value & ~mask;
}

uintptr_t alignUp(uintptr_t value, size_t alignment) {
	const uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
	if (mask == std::numeric_limits<uintptr_t>::max()) {
		return value;
	}
	if (value > std::numeric_limits<uintptr_t>::max() - mask) {
		return std::numeric_limits<uintptr_t>::max();
	}
	return (value + mask) & ~mask;
}

bool addOverflows(uintptr_t base, size_t amount) {
	return base > std::numeric_limits<uintptr_t>::max() - static_cast<uintptr_t>(amount);
}

uintptr_t regionEnd(const VirtualAllocation &region) { return region.base + region.size; }

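// Returns true if [base, base + length) intersects any tracked reservation.
// Caller must hold g_virtualAllocMutex.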
bool rangeOverlapsLocked(uintptr_t base, size_t length) {
	if (length == 0) {
		return false;
	}
	if (addOverflows(base, length - 1)) {
		return true;
	}
	uintptr_t end = base + length;
	auto next = g_virtualAllocations.lower_bound(base);
	if (next != g_virtualAllocations.begin()) {
		auto prev = std::prev(next);
		if (regionEnd(prev->second) > base) {
			return true;
		}
	}
	if (next != g_virtualAllocations.end() && next->second.base < end) {
		return true;
	}
	return false;
}

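// Locate the reservation that contains `address`, if any. Callers must hold
// g_virtualAllocMutex.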
std::map<uintptr_t, VirtualAllocation>::iterator findRegionIterator(uintptr_t address) {
	auto it = g_virtualAllocations.upper_bound(address);
	if (it == g_virtualAllocations.begin()) {
		return g_virtualAllocations.end();
	}
	--it;
	if (address >= regionEnd(it->second)) {
		return g_virtualAllocations.end();
	}
	return it;
}

VirtualAllocation *lookupRegion(uintptr_t address) {
	auto it = findRegionIterator(address);
	if (it == g_virtualAllocations.end()) {
		return nullptr;
	}
	return &it->second;
}

bool rangeWithinRegion(const VirtualAllocation &region, uintptr_t start, size_t length) {
	if (length == 0) {
		return start >= region.base && start <= regionEnd(region);
	}
	if (start < region.base) {
		return false;
	}
	if (addOverflows(start, length)) {
		return false;
	}
	return (start + length) <= regionEnd(region);
}

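// Update the per-page protection bookkeeping for a committed or decommitted
// span; start/length are expected to be page-aligned and inside the region.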
void markCommitted(VirtualAllocation &region, uintptr_t start, size_t length, DWORD protect) {
	if (length == 0) {
		return;
	}
	const size_t pageSize = systemPageSize();
	const size_t firstPage = (start - region.base) / pageSize;
	const size_t pageCount = length / pageSize;
	for (size_t i = 0; i < pageCount; ++i) {
		region.pageProtect[firstPage + i] = protect;
	}
}

void markDecommitted(VirtualAllocation &region, uintptr_t start, size_t length) {
	if (length == 0) {
		return;
	}
	const size_t pageSize = systemPageSize();
	const size_t firstPage = (start - region.base) / pageSize;
	const size_t pageCount = length / pageSize;
	for (size_t i = 0; i < pageCount; ++i) {
		region.pageProtect[firstPage + i] = 0;
	}
}

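// Reserve an address range aligned to the 64 KiB allocation granularity by
// over-mapping and trimming the unaligned head and tail.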
void *alignedReserve(size_t length, int prot, int flags) {
	const size_t granularity = kVirtualAllocationGranularity;
	const size_t request = length + granularity;
	void *raw = mmap(nullptr, request, prot, flags, -1, 0);
	if (raw == MAP_FAILED) {
		return MAP_FAILED;
	}
	uintptr_t rawAddr = reinterpret_cast<uintptr_t>(raw);
	uintptr_t aligned = alignUp(rawAddr, granularity);
	size_t front = aligned - rawAddr;
	size_t back = (rawAddr + request) - (aligned + length);
	if (front != 0) {
		if (munmap(raw, front) != 0) {
			munmap(raw, request);
			return MAP_FAILED;
		}
	}
	if (back != 0) {
		if (munmap(reinterpret_cast<void *>(aligned + length), back) != 0) {
			munmap(reinterpret_cast<void *>(aligned), length);
			return MAP_FAILED;
		}
	}
	return reinterpret_cast<void *>(aligned);
}

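// Translate a Win32 page-protection constant to POSIX mmap/mprotect bits.
// Write-copy variants are approximated with plain read/write.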
int translateProtect(DWORD flProtect) {
	switch (flProtect) {
	case PAGE_NOACCESS:
		return PROT_NONE;
	case PAGE_READONLY:
		return PROT_READ;
	case PAGE_READWRITE:
	case PAGE_WRITECOPY:
		return PROT_READ | PROT_WRITE;
	case PAGE_EXECUTE:
		return PROT_EXEC;
	case PAGE_EXECUTE_READ:
		return PROT_READ | PROT_EXEC;
	case PAGE_EXECUTE_READWRITE:
	case PAGE_EXECUTE_WRITECOPY:
		return PROT_READ | PROT_WRITE | PROT_EXEC;
	default:
		DEBUG_LOG("Unhandled flProtect: %u, defaulting to RW\n", flProtect);
		return PROT_READ | PROT_WRITE;
	}
}

} // namespace

namespace kernel32 {

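// Creates a mapping object backed either by anonymous memory (hFile ==
// INVALID_HANDLE_VALUE) or by a dup of the file's descriptor. Named mappings
// and security attributes are accepted but ignored.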
HANDLE WIN_FUNC CreateFileMappingA(HANDLE hFile, LPSECURITY_ATTRIBUTES lpFileMappingAttributes, DWORD flProtect,
		DWORD dwMaximumSizeHigh, DWORD dwMaximumSizeLow, LPCSTR lpName) {
	DEBUG_LOG("CreateFileMappingA(%p, %p, %u, %u, %u, %s)\n", hFile, lpFileMappingAttributes, flProtect,
			  dwMaximumSizeHigh, dwMaximumSizeLow, lpName ? lpName : "(null)");
	(void)lpFileMappingAttributes;
	(void)lpName;

	auto *mapping = new MappingObject();
	mapping->protect = flProtect;

	uint64_t size = (static_cast<uint64_t>(dwMaximumSizeHigh) << 32) | dwMaximumSizeLow;
	if (flProtect != PAGE_READONLY && flProtect != PAGE_READWRITE && flProtect != PAGE_WRITECOPY) {
		DEBUG_LOG("CreateFileMappingA: unsupported protection 0x%x\n", flProtect);
		wibo::lastError = ERROR_INVALID_PARAMETER;
		closeMappingIfPossible(mapping);
		return nullptr;
	}

	if (hFile == INVALID_HANDLE_VALUE) {
		mapping->anonymous = true;
		mapping->fd = -1;
		if (size == 0) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			closeMappingIfPossible(mapping);
			return nullptr;
		}
		mapping->maxSize = size;
	} else {
		FILE *fp = files::fpFromHandle(hFile);
		if (!fp) {
			wibo::lastError = ERROR_INVALID_HANDLE;
			closeMappingIfPossible(mapping);
			return nullptr;
		}
		int originalFd = fileno(fp);
		if (originalFd == -1) {
			setLastErrorFromErrno();
			closeMappingIfPossible(mapping);
			return nullptr;
		}
		int dupFd = fcntl(originalFd, F_DUPFD_CLOEXEC, 0);
		if (dupFd == -1) {
			setLastErrorFromErrno();
			closeMappingIfPossible(mapping);
			return nullptr;
		}
		mapping->fd = dupFd;
		if (size == 0) {
			int64_t fileSize = getFileSize(hFile);
			if (fileSize < 0) {
				closeMappingIfPossible(mapping);
				return nullptr;
			}
			size = static_cast<uint64_t>(fileSize);
		}
		mapping->maxSize = size;
	}

	wibo::lastError = ERROR_SUCCESS;
	return handles::allocDataHandle({handles::TYPE_MAPPED, mapping, static_cast<size_t>(mapping->maxSize)});
}

HANDLE WIN_FUNC CreateFileMappingW(HANDLE hFile, LPSECURITY_ATTRIBUTES lpFileMappingAttributes, DWORD flProtect,
		DWORD dwMaximumSizeHigh, DWORD dwMaximumSizeLow, LPCWSTR lpName) {
	DEBUG_LOG("CreateFileMappingW -> ");
	std::string name = wideStringToString(lpName);
	return CreateFileMappingA(hFile, lpFileMappingAttributes, flProtect, dwMaximumSizeHigh, dwMaximumSizeLow,
			lpName ? name.c_str() : nullptr);
}

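// Maps a view of a mapping object. The file offset is aligned down to the host
// page size for mmap, and the returned pointer is offset back into the mapping
// so it matches the caller's requested byte offset.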
LPVOID WIN_FUNC MapViewOfFile(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh,
		DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap) {
	DEBUG_LOG("MapViewOfFile(%p, 0x%x, %u, %u, %zu)\n", hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh,
			  dwFileOffsetLow, dwNumberOfBytesToMap);

	handles::Data data = handles::dataFromHandle(hFileMappingObject, false);
	if (data.type != handles::TYPE_MAPPED) {
		wibo::lastError = ERROR_INVALID_HANDLE;
		return nullptr;
	}
	auto *mapping = reinterpret_cast<MappingObject *>(data.ptr);
	if (!mapping) {
		wibo::lastError = ERROR_INVALID_HANDLE;
		return nullptr;
	}
	if (mapping->closed) {
		wibo::lastError = ERROR_INVALID_HANDLE;
		return nullptr;
	}

	uint64_t offset = (static_cast<uint64_t>(dwFileOffsetHigh) << 32) | dwFileOffsetLow;
	if (mapping->anonymous && offset != 0) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}
	size_t maxSize = mapping->maxSize;
	uint64_t length = static_cast<uint64_t>(dwNumberOfBytesToMap);
	if (length == 0) {
		if (maxSize == 0) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			return nullptr;
		}
		if (offset > maxSize) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			return nullptr;
		}
		length = maxSize - offset;
	}
	if (length == 0) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}
	if (maxSize != 0 && offset + length > maxSize) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}

	int prot = PROT_READ;
	bool wantWrite = (dwDesiredAccess & FILE_MAP_WRITE) != 0;
	bool wantExecute = (dwDesiredAccess & FILE_MAP_EXECUTE) != 0;

	if (mapping->protect == PAGE_READWRITE) {
		if (wantWrite) {
			prot |= PROT_WRITE;
		}
	} else {
		if (wantWrite && (dwDesiredAccess & FILE_MAP_COPY) == 0) {
			wibo::lastError = ERROR_ACCESS_DENIED;
			return nullptr;
		}
	}
	if (wantExecute) {
		prot |= PROT_EXEC;
	}

	int flags = 0;
	if (mapping->anonymous) {
		flags |= MAP_ANONYMOUS;
	}
	flags |= (dwDesiredAccess & FILE_MAP_COPY) ? MAP_PRIVATE : MAP_SHARED;

	size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
	off_t alignedOffset = mapping->anonymous ? 0 : static_cast<off_t>(offset & ~static_cast<uint64_t>(pageSize - 1));
	size_t offsetDelta = static_cast<size_t>(offset - static_cast<uint64_t>(alignedOffset));
	uint64_t requestedLength = length + offsetDelta;
	if (requestedLength < length) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}
	size_t mapLength = static_cast<size_t>(requestedLength);
	if (static_cast<uint64_t>(mapLength) != requestedLength) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}

	int mmapFd = mapping->anonymous ? -1 : mapping->fd;
	void *mapBase = mmap(nullptr, mapLength, prot, flags, mmapFd, alignedOffset);
	if (mapBase == MAP_FAILED) {
		setLastErrorFromErrno();
		return nullptr;
	}
	void *viewPtr = static_cast<uint8_t *>(mapBase) + offsetDelta;
	g_viewInfo[viewPtr] = ViewInfo{mapBase, mapLength, mapping};
	mapping->refCount++;
	wibo::lastError = ERROR_SUCCESS;
	return viewPtr;
}

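// Unmaps a previously returned view and drops its reference on the owning
// mapping object, destroying the mapping if its handle is already closed.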
BOOL WIN_FUNC UnmapViewOfFile(LPCVOID lpBaseAddress) {
	DEBUG_LOG("UnmapViewOfFile(%p)\n", lpBaseAddress);
	auto it = g_viewInfo.find(const_cast<void *>(lpBaseAddress));
	if (it == g_viewInfo.end()) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return FALSE;
	}
	ViewInfo info = it->second;
	g_viewInfo.erase(it);
	if (info.mapBase && info.mapLength) {
		munmap(info.mapBase, info.mapLength);
	}
	if (info.owner && info.owner->refCount > 0) {
		info.owner->refCount--;
		tryReleaseMapping(info.owner);
	}
	wibo::lastError = ERROR_SUCCESS;
	return TRUE;
}

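// Marks the mapping object closed (its handle has been released) and frees it
// immediately if no views remain.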
bool closeFileMappingHandle(void *mappingPtr) {
	auto *mapping = reinterpret_cast<MappingObject *>(mappingPtr);
	if (!mapping) {
		return false;
	}
	mapping->closed = true;
	tryReleaseMapping(mapping);
	return true;
}

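// Reserve and/or commit address space. Reservations are tracked in
// g_virtualAllocations with per-page protection state; MEM_RESET is handled
// via madvise, and committing within an existing reservation remaps only the
// not-yet-committed runs of pages.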
LPVOID WIN_FUNC VirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
	DEBUG_LOG("VirtualAlloc(%p, %zu, %u, %u)\n", lpAddress, dwSize, flAllocationType, flProtect);

	if (dwSize == 0) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}

	DWORD unsupportedFlags = flAllocationType & (MEM_WRITE_WATCH | MEM_PHYSICAL | MEM_LARGE_PAGES | MEM_RESET_UNDO);
	if (unsupportedFlags != 0) {
		DEBUG_LOG("VirtualAlloc unsupported flags: 0x%x\n", unsupportedFlags);
		wibo::lastError = ERROR_NOT_SUPPORTED;
		return nullptr;
	}

	bool reserve = (flAllocationType & MEM_RESERVE) != 0;
	bool commit = (flAllocationType & MEM_COMMIT) != 0;
	bool reset = (flAllocationType & MEM_RESET) != 0;

	if (!reserve && commit && lpAddress == nullptr) {
		reserve = true;
	}

	if (reset) {
		if (reserve || commit) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			return nullptr;
		}
		if (!lpAddress) {
			wibo::lastError = ERROR_INVALID_ADDRESS;
			return nullptr;
		}
		const size_t pageSize = systemPageSize();
		uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
		if (addOverflows(request, static_cast<size_t>(dwSize))) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			return nullptr;
		}
		uintptr_t start = alignDown(request, pageSize);
		uintptr_t end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
		size_t length = static_cast<size_t>(end - start);
		std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
		VirtualAllocation *region = lookupRegion(start);
		if (!region || !rangeWithinRegion(*region, start, length)) {
			wibo::lastError = ERROR_INVALID_ADDRESS;
			return nullptr;
		}
#ifdef MADV_FREE
		int advice = MADV_FREE;
#else
		int advice = MADV_DONTNEED;
#endif
		if (madvise(reinterpret_cast<void *>(start), length, advice) != 0) {
			wibo::lastError = wibo::winErrorFromErrno(errno);
			return nullptr;
		}
		wibo::lastError = ERROR_SUCCESS;
		return reinterpret_cast<LPVOID>(start);
	}

	if (!reserve && !commit) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}

	const size_t pageSize = systemPageSize();
	std::unique_lock<std::mutex> lock(g_virtualAllocMutex);

	if (reserve) {
		uintptr_t base = 0;
		size_t length = 0;
		if (lpAddress) {
			uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
			base = alignDown(request, kVirtualAllocationGranularity);
			size_t offset = static_cast<size_t>(request - base);
			if (addOverflows(offset, static_cast<size_t>(dwSize))) {
				wibo::lastError = ERROR_INVALID_PARAMETER;
				return nullptr;
			}
			size_t span = static_cast<size_t>(dwSize) + offset;
			uintptr_t alignedSpan = alignUp(span, pageSize);
			if (alignedSpan == std::numeric_limits<uintptr_t>::max()) {
				wibo::lastError = ERROR_INVALID_PARAMETER;
				return nullptr;
			}
			length = static_cast<size_t>(alignedSpan);
			if (length == 0 || rangeOverlapsLocked(base, length)) {
				wibo::lastError = ERROR_INVALID_ADDRESS;
				return nullptr;
			}
		} else {
			uintptr_t aligned = alignUp(static_cast<uintptr_t>(dwSize), pageSize);
			if (aligned == std::numeric_limits<uintptr_t>::max() || aligned == 0) {
				wibo::lastError = ERROR_INVALID_PARAMETER;
				return nullptr;
			}
			length = static_cast<size_t>(aligned);
		}
		const int prot = commit ? translateProtect(flProtect) : PROT_NONE;
		int flags = MAP_PRIVATE | MAP_ANONYMOUS;
		if (!commit) {
			flags |= MAP_NORESERVE;
		}
		void *result = MAP_FAILED;
		if (lpAddress) {
#ifdef MAP_FIXED_NOREPLACE
			flags |= MAP_FIXED_NOREPLACE;
#else
			flags |= MAP_FIXED;
#endif
			result = mmap(reinterpret_cast<void *>(base), length, prot, flags, -1, 0);
		} else {
			result = alignedReserve(length, prot, flags);
		}
		if (result == MAP_FAILED) {
			wibo::lastError = wibo::winErrorFromErrno(errno);
			return nullptr;
		}
		if (reinterpret_cast<uintptr_t>(result) >= 0x80000000) {
			munmap(result, length);
			wibo::lastError = ERROR_NOT_ENOUGH_MEMORY;
			return nullptr;
		}
		uintptr_t actualBase = reinterpret_cast<uintptr_t>(result);
		VirtualAllocation allocation{};
		allocation.base = actualBase;
		allocation.size = length;
		allocation.allocationProtect = flProtect;
		allocation.pageProtect.assign(length / pageSize, commit ? flProtect : 0);
		g_virtualAllocations[actualBase] = std::move(allocation);
		wibo::lastError = ERROR_SUCCESS;
		return result;
	}

	uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
	if (addOverflows(request, static_cast<size_t>(dwSize))) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}
	uintptr_t start = alignDown(request, pageSize);
	uintptr_t end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
	size_t length = static_cast<size_t>(end - start);
	if (length == 0) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return nullptr;
	}
	VirtualAllocation *region = lookupRegion(start);
	if (!region || !rangeWithinRegion(*region, start, length)) {
		wibo::lastError = ERROR_INVALID_ADDRESS;
		return nullptr;
	}
	const size_t pageCount = length / pageSize;
	std::vector<std::pair<uintptr_t, size_t>> committedRuns;
	committedRuns.reserve(pageCount);
	for (size_t i = 0; i < pageCount; ++i) {
		size_t pageIndex = ((start - region->base) / pageSize) + i;
		if (pageIndex >= region->pageProtect.size()) {
			wibo::lastError = ERROR_INVALID_ADDRESS;
			return nullptr;
		}
		if (region->pageProtect[pageIndex] != 0) {
			continue;
		}
		uintptr_t runBase = start + i * pageSize;
		size_t runLength = pageSize;
		while (i + 1 < pageCount) {
			size_t nextIndex = ((start - region->base) / pageSize) + i + 1;
			if (region->pageProtect[nextIndex] != 0) {
				break;
			}
			++i;
			runLength += pageSize;
		}
		committedRuns.emplace_back(runBase, runLength);
	}
	for (const auto &run : committedRuns) {
		void *result = mmap(reinterpret_cast<void *>(run.first), run.second, translateProtect(flProtect),
				MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (result == MAP_FAILED) {
			wibo::lastError = wibo::winErrorFromErrno(errno);
			return nullptr;
		}
		markCommitted(*region, run.first, run.second, flProtect);
	}
	wibo::lastError = ERROR_SUCCESS;
	DEBUG_LOG("VirtualAlloc commit success -> %p\n", reinterpret_cast<void *>(start));
	return reinterpret_cast<LPVOID>(start);
}

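// MEM_RELEASE requires the exact reservation base and dwSize == 0; MEM_DECOMMIT
// replaces the pages with an inaccessible PROT_NONE mapping and clears their
// committed state while keeping the reservation in place.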
BOOL WIN_FUNC VirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
	DEBUG_LOG("VirtualFree(%p, %zu, %u)\n", lpAddress, dwSize, dwFreeType);
	if (!lpAddress) {
		wibo::lastError = ERROR_INVALID_ADDRESS;
		return FALSE;
	}

	if ((dwFreeType & (MEM_COALESCE_PLACEHOLDERS | MEM_PRESERVE_PLACEHOLDER)) != 0) {
		wibo::lastError = ERROR_NOT_SUPPORTED;
		return FALSE;
	}

	const bool release = (dwFreeType & MEM_RELEASE) != 0;
	const bool decommit = (dwFreeType & MEM_DECOMMIT) != 0;
	if (release == decommit) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return FALSE;
	}

	const size_t pageSize = systemPageSize();
	std::unique_lock<std::mutex> lock(g_virtualAllocMutex);

	if (release) {
		uintptr_t base = reinterpret_cast<uintptr_t>(lpAddress);
		auto exact = g_virtualAllocations.find(base);
		if (exact == g_virtualAllocations.end()) {
			auto containing = findRegionIterator(base);
			if (dwSize != 0 && containing != g_virtualAllocations.end()) {
				wibo::lastError = ERROR_INVALID_PARAMETER;
			} else {
				wibo::lastError = ERROR_INVALID_ADDRESS;
			}
			return FALSE;
		}
		if (dwSize != 0) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			return FALSE;
		}
		size_t length = exact->second.size;
		g_virtualAllocations.erase(exact);
		lock.unlock();
		if (munmap(lpAddress, length) != 0) {
			wibo::lastError = wibo::winErrorFromErrno(errno);
			return FALSE;
		}
		wibo::lastError = ERROR_SUCCESS;
		return TRUE;
	}

	uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
	auto regionIt = findRegionIterator(request);
	if (regionIt == g_virtualAllocations.end()) {
		wibo::lastError = ERROR_INVALID_ADDRESS;
		return FALSE;
	}
	VirtualAllocation &region = regionIt->second;
	uintptr_t start = alignDown(request, pageSize);
	uintptr_t end = 0;
	if (dwSize == 0) {
		if (request != region.base) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			return FALSE;
		}
		start = region.base;
		end = region.base + region.size;
	} else {
		if (addOverflows(request, static_cast<size_t>(dwSize))) {
			wibo::lastError = ERROR_INVALID_PARAMETER;
			return FALSE;
		}
		end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
	}
	if (end <= start) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return FALSE;
	}
	size_t length = static_cast<size_t>(end - start);
	if (!rangeWithinRegion(region, start, length)) {
		wibo::lastError = ERROR_INVALID_ADDRESS;
		return FALSE;
	}
	void *result = mmap(reinterpret_cast<void *>(start), length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE, -1, 0);
	if (result == MAP_FAILED) {
		wibo::lastError = wibo::winErrorFromErrno(errno);
		return FALSE;
	}
	markDecommitted(region, start, length);
	wibo::lastError = ERROR_SUCCESS;
	return TRUE;
}

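// Changes protection on committed pages of a tracked reservation and reports
// the previous protection of the first page; touching uncommitted pages fails
// with ERROR_NOACCESS.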
BOOL WIN_FUNC VirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect) {
	DEBUG_LOG("VirtualProtect(%p, %zu, %u)\n", lpAddress, dwSize, flNewProtect);
	if (!lpAddress || dwSize == 0) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return FALSE;
	}

	const size_t pageSize = systemPageSize();
	uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
	uintptr_t start = alignDown(request, pageSize);
	uintptr_t end = alignUp(request + static_cast<uintptr_t>(dwSize), pageSize);
	if (end <= start) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return FALSE;
	}

	std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
	VirtualAllocation *region = lookupRegion(start);
	if (!region || !rangeWithinRegion(*region, start, static_cast<size_t>(end - start))) {
		wibo::lastError = ERROR_INVALID_ADDRESS;
		return FALSE;
	}

	const size_t firstPage = (start - region->base) / pageSize;
	const size_t pageCount = (end - start) / pageSize;
	if (pageCount == 0) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return FALSE;
	}

	DWORD previousProtect = region->pageProtect[firstPage];
	if (previousProtect == 0) {
		wibo::lastError = ERROR_NOACCESS;
		return FALSE;
	}
	for (size_t i = 0; i < pageCount; ++i) {
		if (region->pageProtect[firstPage + i] == 0) {
			wibo::lastError = ERROR_NOACCESS;
			return FALSE;
		}
	}

	int prot = translateProtect(flNewProtect);
	if (mprotect(reinterpret_cast<void *>(start), end - start, prot) != 0) {
		wibo::lastError = wibo::winErrorFromErrno(errno);
		return FALSE;
	}
	for (size_t i = 0; i < pageCount; ++i) {
		region->pageProtect[firstPage + i] = flNewProtect;
	}
	lock.unlock();

	if (lpflOldProtect) {
		*lpflOldProtect = previousProtect;
	}
	wibo::lastError = ERROR_SUCCESS;
	return TRUE;
}

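// Describes the block of pages containing lpAddress: it scans outward from the
// queried page to find the largest run with the same commit state inside the
// owning reservation.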
SIZE_T WIN_FUNC VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength) {
	DEBUG_LOG("VirtualQuery(%p, %p, %zu)\n", lpAddress, lpBuffer, dwLength);
	if (!lpBuffer || dwLength < sizeof(MEMORY_BASIC_INFORMATION) || !lpAddress) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return 0;
	}

	std::memset(lpBuffer, 0, sizeof(MEMORY_BASIC_INFORMATION));
	const size_t pageSize = systemPageSize();
	uintptr_t request = reinterpret_cast<uintptr_t>(lpAddress);
	uintptr_t pageBase = alignDown(request, pageSize);

	std::unique_lock<std::mutex> lock(g_virtualAllocMutex);
	VirtualAllocation *region = lookupRegion(pageBase);
	if (!region) {
		wibo::lastError = ERROR_INVALID_ADDRESS;
		return 0;
	}

	const size_t pageIndex = (pageBase - region->base) / pageSize;
	if (pageIndex >= region->pageProtect.size()) {
		wibo::lastError = ERROR_INVALID_ADDRESS;
		return 0;
	}
	const bool committed = region->pageProtect[pageIndex] != 0;
	uintptr_t blockStart = pageBase;
	uintptr_t blockEnd = pageBase + pageSize;
	while (blockStart > region->base) {
		size_t idx = (blockStart - region->base) / pageSize - 1;
		bool pageCommitted = region->pageProtect[idx] != 0;
		if (pageCommitted != committed) {
			break;
		}
		blockStart -= pageSize;
	}
	while (blockEnd < region->base + region->size) {
		size_t idx = (blockEnd - region->base) / pageSize;
		bool pageCommitted = region->pageProtect[idx] != 0;
		if (pageCommitted != committed) {
			break;
		}
		blockEnd += pageSize;
	}

	lpBuffer->BaseAddress = reinterpret_cast<PVOID>(blockStart);
	lpBuffer->AllocationBase = reinterpret_cast<PVOID>(region->base);
	lpBuffer->AllocationProtect = region->allocationProtect != 0 ? region->allocationProtect : PAGE_NOACCESS;
	lpBuffer->RegionSize = blockEnd - blockStart;
	lpBuffer->State = committed ? MEM_COMMIT : MEM_RESERVE;
	lpBuffer->Protect = committed ? region->pageProtect[pageIndex] : 0;
	lpBuffer->Type = MEM_PRIVATE;
	lock.unlock();
	wibo::lastError = ERROR_SUCCESS;
	return sizeof(MEMORY_BASIC_INFORMATION);
}

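// Working-set sizes are not tracked; these stubs report fixed values and
// accept any request so callers that tune the working set keep running.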
BOOL WIN_FUNC GetProcessWorkingSetSize(HANDLE hProcess, PSIZE_T lpMinimumWorkingSetSize,
		PSIZE_T lpMaximumWorkingSetSize) {
	DEBUG_LOG("GetProcessWorkingSetSize(%p, %p, %p)\n", hProcess, lpMinimumWorkingSetSize, lpMaximumWorkingSetSize);
	(void)hProcess;
	if (!lpMinimumWorkingSetSize || !lpMaximumWorkingSetSize) {
		wibo::lastError = ERROR_INVALID_PARAMETER;
		return FALSE;
	}
	*lpMinimumWorkingSetSize = 32 * 1024 * 1024;  // 32 MiB stub
	*lpMaximumWorkingSetSize = 128 * 1024 * 1024; // 128 MiB stub
	wibo::lastError = ERROR_SUCCESS;
	return TRUE;
}

BOOL WIN_FUNC SetProcessWorkingSetSize(HANDLE hProcess, SIZE_T dwMinimumWorkingSetSize,
		SIZE_T dwMaximumWorkingSetSize) {
	DEBUG_LOG("SetProcessWorkingSetSize(%p, %zu, %zu)\n", hProcess, dwMinimumWorkingSetSize, dwMaximumWorkingSetSize);
	(void)hProcess;
	(void)dwMinimumWorkingSetSize;
	(void)dwMaximumWorkingSetSize;
	wibo::lastError = ERROR_SUCCESS;
	return TRUE;
}

} // namespace kernel32