--- /dev/null
+/*
+ * Multi-platform virtual memory functions for UAE.
+ * Copyright (C) 2015 Frode Solheim
+ *
+ * Licensed under the terms of the GNU General Public License version 2.
+ * See the file 'COPYING' for full license text.
+ */
+
+#ifndef UAE_VM_H
+#define UAE_VM_H
+
+#include "uae/types.h"
+
+/* Individual protection bits; see the note below before combining them. */
+#define UAE_VM_WRITE 2
+#define UAE_VM_EXECUTE 4
+
+/* Allocation flag: request memory in the low 32-bit address space. */
+#define UAE_VM_32BIT (1 << 8)
+#define UAE_VM_ALLOC_FAILED NULL
+
+/* Even though it looks like you can OR together vm protection values,
+ * do not do this. Not all combinations are supported (on Windows), and only
+ * a few combinations are implemented. Only use the following predefined
+ * constants to be safe. */
+
+#define UAE_VM_NO_ACCESS 0
+#define UAE_VM_READ 1
+#define UAE_VM_READ_WRITE (UAE_VM_READ | UAE_VM_WRITE)
+#define UAE_VM_READ_EXECUTE (UAE_VM_READ | UAE_VM_EXECUTE)
+#define UAE_VM_READ_WRITE_EXECUTE (UAE_VM_READ | UAE_VM_WRITE | UAE_VM_EXECUTE)
+
+#if 0
+void *uae_vm_alloc(uae_u32 size);
+void *uae_vm_alloc(uae_u32 size, int flags);
+#endif
+/* Allocate 'size' bytes with the given UAE_VM_* flags and protection.
+ * Returns UAE_VM_ALLOC_FAILED (NULL) on failure. */
+void *uae_vm_alloc(uae_u32 size, int flags, int protect);
+/* Change page protection of a region returned by uae_vm_alloc. */
+void uae_vm_protect(void *address, int size, int protect);
+/* Release a region; 'size' should match the original allocation size. */
+void uae_vm_free(void *address, int size);
+
+#if 0
+/* Replacement functions for mman - implement later */
+void *uae_vm_reserve(uae_u32 size, int flags);
+void *uae_vm_commit(void *address, uae_u32 size, int protect);
+void *uae_vm_decommit(uae_u32 size, int flags);
+#endif
+
+/* Host VM page size in bytes (queried once, then cached). */
+int uae_vm_page_size(void);
+
+// void *uae_vm_alloc_with_flags(uae_u32 size, int protect, int flags);
+
+#endif /* UAE_VM_H */
cpuid(uae_u32 op, uae_u32 *eax, uae_u32 *ebx, uae_u32 *ecx, uae_u32 *edx)
{
const int CPUID_SPACE = 4096;
-#ifdef UAE
- uae_u8* cpuid_space = (uae_u8 *)cache_alloc(CPUID_SPACE);
- if (cpuid_space == 0) {
-#else
uae_u8* cpuid_space = (uae_u8 *)vm_acquire(CPUID_SPACE);
if (cpuid_space == VM_MAP_FAILED)
-#endif
jit_abort("Could not allocate cpuid_space");
-#ifdef UAE
- }
-#else
vm_protect(cpuid_space, CPUID_SPACE, VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE);
-#endif
static uae_u32 s_op, s_eax, s_ebx, s_ecx, s_edx;
uae_u8* tmp=get_target();
if (ecx != NULL) *ecx = s_ecx;
if (edx != NULL) *edx = s_edx;
-#ifdef UAE
- cache_free (cpuid_space);
-#else
vm_release(cpuid_space, CPUID_SPACE);
-#endif
}
static void
#ifdef UAE
#include "uae/log.h"
+#include "uae/vm.h"
+#define vm_acquire(size) uae_vm_alloc(size, UAE_VM_32BIT, UAE_VM_READ_WRITE)
+#define vm_protect(address, size, protect) uae_vm_protect(address, size, protect)
+#define vm_release(address, size) uae_vm_free(address, size)
+#define VM_PAGE_READ UAE_VM_READ
+#define VM_PAGE_WRITE UAE_VM_WRITE
+#define VM_PAGE_EXECUTE UAE_VM_EXECUTE
+#define VM_MAP_FAILED UAE_VM_ALLOC_FAILED
+
#define UNUSED(x)
/* FIXME: Looks like HAVE_GET_WORD_UNSWAPPED should be defined for little-endian / ARAnyM */
#define HAVE_GET_WORD_UNSWAPPED
while (currentPool) {
Pool * deadPool = currentPool;
currentPool = currentPool->next;
+#ifdef UAE
+ vm_release(deadPool, sizeof(Pool));
+#else
free(deadPool);
+#endif
}
}
if (!mChunks) {
// There is no chunk left, allocate a new pool and link the
// chunks into the free list
+#ifdef UAE
+ Pool * newPool = (Pool *) vm_acquire(sizeof(Pool));
+#else
Pool * newPool = (Pool *)malloc(sizeof(Pool));
+#endif
for (T * chunk = &newPool->chunk[0]; chunk < &newPool->chunk[kPoolSize]; chunk++) {
chunk->next = mChunks;
mChunks = chunk;
const int CODE_ALLOC_MAX_ATTEMPTS = 10;
const int CODE_ALLOC_BOUNDARIES = 128 * 1024; // 128 KB
+static uae_u8 *do_alloc_code(uae_u32 size, int depth)
+{
#ifdef UAE
-
+ return (uae_u8*) vm_acquire(size);
#else
-static uint8 *do_alloc_code(uint32 size, int depth)
-{
#if defined(__linux__) && 0
/*
This is a really awful hack that is known to work on Linux at
uint8 *code = (uint8 *)vm_acquire(size, VM_MAP_DEFAULT | VM_MAP_32BIT);
return code == VM_MAP_FAILED ? NULL : code;
#endif
+#endif
}
-static inline uint8 *alloc_code(uint32 size)
+static inline uae_u8 *alloc_code(uae_u32 size)
{
- uint8 *ptr = do_alloc_code(size, 0);
+ uae_u8 *ptr = do_alloc_code(size, 0);
/* allocated code must fit in 32-bit boundaries */
assert((uintptr)ptr <= 0xffffffff);
return ptr;
}
-#endif
void alloc_cache(void)
{
+#ifdef JIT_EXCEPTION_HANDLER
+ if (veccode == NULL) {
+ veccode = alloc_code(256);
+ vm_protect(veccode, 256, VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE);
+ }
+#endif
if (compiled_code) {
flush_icache_hard(0, 3);
- cache_free(compiled_code);
+ vm_release(compiled_code, cache_size * 1024);
compiled_code = 0;
}
-#ifdef JIT_EXCEPTION_HANDLER
- if (veccode == NULL)
- veccode = cache_alloc (256);
-#endif
compiled_code = NULL;
if (cache_size == 0)
return;
while (!compiled_code && cache_size) {
- if ((compiled_code = cache_alloc(cache_size * 1024)) == NULL) {
+ if ((compiled_code = alloc_code(cache_size * 1024)) == NULL) {
compiled_code = 0;
cache_size /= 2;
}
}
+ vm_protect(compiled_code, cache_size * 1024, VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE);
+
if (compiled_code) {
max_compile_start = compiled_code + cache_size*1024 - BYTES_PER_INST;
current_compile_p=compiled_code;
int i,r;
#ifdef UAE
- if (popallspace == NULL)
- popallspace = cache_alloc(POPALLSPACE_SIZE);
-#else
+ if (popallspace == NULL) {
+#endif
if ((popallspace = alloc_code(POPALLSPACE_SIZE)) == NULL) {
write_log("FATAL: Could not allocate popallspace!\n");
abort();
}
vm_protect(popallspace, POPALLSPACE_SIZE, VM_PAGE_READ | VM_PAGE_WRITE);
+#ifdef UAE
+ }
#endif
int stack_space = STACK_OFFSET;
}
raw_jmp(uae_p32(check_checksum));
-#ifdef UAE
- /* FIXME: write-protect popallspace? */
#ifdef USE_UDIS86
UDISFN(pushall_call_handler, get_target());
#endif
-#else
// no need to further write into popallspace
vm_protect(popallspace, POPALLSPACE_SIZE, VM_PAGE_READ | VM_PAGE_EXECUTE);
flush_cpu_icache((void *)popallspace, (void *)target);
-#endif
}
static inline void reset_lists(void)
<ClCompile Include="..\..\tabletlibrary.cpp" />
<ClCompile Include="..\..\test_card.cpp" />
<ClCompile Include="..\..\uaenative.cpp" />
+ <ClCompile Include="..\..\vm.cpp" />
<ClCompile Include="..\..\x86.cpp" />
<ClCompile Include="..\ahidsound_dsonly.cpp" />
<ClCompile Include="..\ahidsound_new.cpp" />
<ClCompile Include="..\..\slirp_uae.cpp">
<Filter>slirp</Filter>
</ClCompile>
+ <ClCompile Include="..\..\vm.cpp">
+ <Filter>common</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="..\resources\35floppy.ico">
--- /dev/null
+/*
+ * Multi-platform virtual memory functions for UAE.
+ * Copyright (C) 2015 Frode Solheim
+ *
+ * Licensed under the terms of the GNU General Public License version 2.
+ * See the file 'COPYING' for full license text.
+ */
+
+#include "sysconfig.h"
+#include "sysdeps.h"
+#include "uae/vm.h"
+#include "uae/log.h"
+#ifdef _WIN32
+
+#else
+#include <sys/mman.h>
+#endif
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#if defined(__APPLE__)
+#include <sys/sysctl.h>
+#endif
+
+// #define TRACK_ALLOCATIONS
+
+#ifdef TRACK_ALLOCATIONS
+
+struct alloc_size {
+ void *address;
+ uae_u32 size;
+};
+
+#define MAX_ALLOCATIONS 2048
+/* A bit inefficient, but good enough for few and rare allocs. Storing
+ * the size at the start of the allocated memory would be better, but this
+ * could be awkward if/when you want to allocate page-aligned memory. */
+static struct alloc_size alloc_sizes[MAX_ALLOCATIONS];
+
+static void add_allocation(void *address, uae_u32 size)
+{
+	/* Remember the size of a new allocation in the first free table slot
+	 * (a NULL address marks a free slot). Aborts when the table is full. */
+	uae_log("add_allocation %p (%d)\n", address, size);
+	int slot = 0;
+	while (slot < MAX_ALLOCATIONS) {
+		if (alloc_sizes[slot].address == NULL) {
+			alloc_sizes[slot].address = address;
+			alloc_sizes[slot].size = size;
+			return;
+		}
+		slot += 1;
+	}
+	/* No free slot left - fatal for this debug facility. */
+	abort();
+}
+
+static uae_u32 find_allocation(void *address)
+{
+	/* Return the recorded size for 'address'; aborts if it was never
+	 * registered via add_allocation. */
+	for (int slot = 0; slot < MAX_ALLOCATIONS; slot++) {
+		if (alloc_sizes[slot].address != address) {
+			continue;
+		}
+		return alloc_sizes[slot].size;
+	}
+	abort();
+}
+
+static uae_u32 remove_allocation(void *address)
+{
+	/* Forget a tracked allocation and return its recorded size; aborts
+	 * if 'address' is not in the table. */
+	for (int slot = 0; slot < MAX_ALLOCATIONS; slot++) {
+		if (alloc_sizes[slot].address != address) {
+			continue;
+		}
+		uae_u32 recorded_size = alloc_sizes[slot].size;
+		/* Mark the slot free again (NULL address is the free marker). */
+		alloc_sizes[slot].address = NULL;
+		alloc_sizes[slot].size = 0;
+		return recorded_size;
+	}
+	abort();
+}
+
+#endif /* TRACK_ALLOCATIONS */
+
+static int protect_to_native(int protect)
+{
+	/* Translate a UAE_VM_* protection constant into the host API value
+	 * (Win32 PAGE_* or POSIX PROT_* flags). Only the predefined
+	 * combinations from uae/vm.h are accepted; anything else is logged
+	 * and mapped to no-access. */
+#ifdef _WIN32
+	switch (protect) {
+	case UAE_VM_NO_ACCESS: return PAGE_NOACCESS;
+	case UAE_VM_READ: return PAGE_READONLY;
+	case UAE_VM_READ_WRITE: return PAGE_READWRITE;
+	case UAE_VM_READ_EXECUTE: return PAGE_EXECUTE_READ;
+	case UAE_VM_READ_WRITE_EXECUTE: return PAGE_EXECUTE_READWRITE;
+	}
+	uae_log("ERROR: invalid protect value %d\n", protect);
+	return PAGE_NOACCESS;
+#else
+	switch (protect) {
+	case UAE_VM_NO_ACCESS: return PROT_NONE;
+	case UAE_VM_READ: return PROT_READ;
+	case UAE_VM_READ_WRITE: return PROT_READ | PROT_WRITE;
+	case UAE_VM_READ_EXECUTE: return PROT_READ | PROT_EXEC;
+	case UAE_VM_READ_WRITE_EXECUTE: return PROT_READ | PROT_WRITE | PROT_EXEC;
+	}
+	uae_log("ERROR: invalid protect value %d\n", protect);
+	return PROT_NONE;
+#endif
+}
+
+int uae_vm_page_size(void)
+{
+	/* Return the host's VM page size in bytes. The value is queried from
+	 * the OS on the first call and cached in a function-local static.
+	 * NOTE(review): not thread-safe on first call - assumed to be first
+	 * invoked from a single thread; confirm against callers. */
+	static int page_size = 0;
+	if (page_size == 0) {
+#ifdef _WIN32
+		SYSTEM_INFO si;
+		GetSystemInfo(&si);
+		page_size = si.dwPageSize;
+#else
+		page_size = sysconf(_SC_PAGESIZE);
+#endif
+	}
+	return page_size;
+}
+
+static void *uae_vm_alloc_with_flags(uae_u32 size, int flags, int protect)
+{
+	/* Allocate 'size' bytes of virtual memory with the requested
+	 * protection. With UAE_VM_32BIT on 64-bit hosts, asks for memory in
+	 * the low 32-bit address space (Linux MAP_32BIT on the POSIX path).
+	 * Returns NULL on failure. */
+	void *address = NULL;
+	uae_log("uae_vm_alloc(%u, %d, %d)\n", size, flags, protect);
+#ifdef _WIN32
+	int va_type = MEM_COMMIT;
+	int va_protect = protect_to_native(protect);
+	address = VirtualAlloc(NULL, size, va_type, va_protect);
+	if (address == NULL) {
+		/* Bail out before the tracking code below records a NULL
+		 * address (NULL is the free-slot marker in alloc_sizes). */
+		uae_log("uae_vm_alloc(%u, %d, %d) VirtualAlloc failed (%d)\n",
+				size, flags, protect, GetLastError());
+		return NULL;
+	}
+#else
+	//size = size < uae_vm_page_size() ? uae_vm_page_size() : size;
+	int mmap_flags = MAP_PRIVATE | MAP_ANON;
+	int mmap_prot = protect_to_native(protect);
+#ifdef CPU_64_BIT
+	if (flags & UAE_VM_32BIT) {
+		mmap_flags |= MAP_32BIT;
+	}
+#endif
+	address = mmap(0, size, mmap_prot, mmap_flags, -1, 0);
+	if (address == MAP_FAILED) {
+		uae_log("uae_vm_alloc(%u, %d, %d) mmap failed (%d)\n",
+				size, flags, protect, errno);
+		return NULL;
+	}
+#endif
+#ifdef TRACK_ALLOCATIONS
+	add_allocation(address, size);
+#endif
+	return address;
+}
+
+#if 0
+
+void *uae_vm_alloc(uae_u32 size)
+{
+ return uae_vm_alloc_with_flags(size, UAE_VM_32BIT, UAE_VM_READ_WRITE);
+}
+
+void *uae_vm_alloc(uae_u32 size, int flags)
+{
+ return uae_vm_alloc_with_flags(size, flags, UAE_VM_READ_WRITE);
+}
+
+#endif
+
+void *uae_vm_alloc(uae_u32 size, int flags, int protect)
+{
+	/* Public allocation entry point declared in uae/vm.h; delegates to
+	 * uae_vm_alloc_with_flags. Returns NULL on failure. */
+	return uae_vm_alloc_with_flags(size, flags, protect);
+}
+
+void uae_vm_protect(void *address, int size, int protect)
+{
+	/* Change the page protection of a region previously returned by
+	 * uae_vm_alloc. Failures are logged but not propagated to the
+	 * caller. */
+	uae_log("uae_vm_protect(%p, %d, %d)\n", address, size, protect);
+#ifdef TRACK_ALLOCATIONS
+	/* When tracking is enabled, only whole allocations may be
+	 * re-protected - 'size' must match the recorded allocation size. */
+	uae_u32 allocated_size = find_allocation(address);
+	assert(allocated_size == size);
+#endif
+#ifdef _WIN32
+	DWORD old;
+	if (VirtualProtect(address, size, protect_to_native(protect), &old) == 0) {
+		uae_log("uae_vm_protect(%p, %d, %d) VirtualProtect failed (%d)\n",
+				address, size, protect, GetLastError());
+	}
+#else
+	if (mprotect(address, size, protect_to_native(protect)) != 0) {
+		uae_log("uae_vm_protect(%p, %d, %d) mprotect failed (%d)\n",
+				address, size, protect, errno);
+	}
+#endif
+}
+
+void uae_vm_free(void *address, int size)
+{
+	/* Release a region previously allocated with uae_vm_alloc. 'size'
+	 * must match the original allocation (checked when allocation
+	 * tracking is enabled). Failures are logged but not propagated. */
+	uae_log("uae_vm_free(%p, %d)\n", address, size);
+#ifdef TRACK_ALLOCATIONS
+	uae_u32 allocated_size = remove_allocation(address);
+	assert(allocated_size == size);
+#endif
+#ifdef _WIN32
+	/* Check and log failure here too, for parity with the munmap error
+	 * path below. MEM_RELEASE requires a zero size argument. */
+	if (VirtualFree(address, 0, MEM_RELEASE) == 0) {
+		uae_log("uae_vm_free(%p, %d) VirtualFree failed (%d)\n",
+				address, size, GetLastError());
+	}
+#else
+	if (munmap(address, size) != 0) {
+		uae_log("uae_vm_free(%p, %d) munmap failed (%d)\n",
+				address, size, errno);
+	}
+#endif
+}