cfgfile_write_bool (f, _T("immediate_blits"), p->immediate_blits);
cfgfile_dwrite_str (f, _T("waiting_blits"), waitblits[p->waiting_blits]);
+ cfgfile_dwrite (f, _T("blitter_throttle"), _T("%.8f"), p->blitter_speed_throttle);
cfgfile_write_bool (f, _T("ntsc"), p->ntscmode);
cfgfile_write_bool(f, _T("genlock"), p->genlock);
cfgfile_dwrite_bool(f, _T("genlock_alpha"), p->genlock_alpha);
}
cfgfile_write_bool (f, _T("cpu_compatible"), p->cpu_compatible);
cfgfile_write_bool (f, _T("cpu_24bit_addressing"), p->address_space_24);
+ cfgfile_write_bool (f, _T("cpu_data_cache"), p->cpu_data_cache);
/* do not reorder end */
cfgfile_dwrite_bool(f, _T("cpu_reset_pause"), p->reset_delay);
cfgfile_dwrite_bool(f, _T("cpu_threaded"), p->cpu_thread);
|| cfgfile_yesno(option, value, _T("genlock_alpha"), &p->genlock_alpha)
|| cfgfile_yesno(option, value, _T("genlock_aspect"), &p->genlock_aspect)
|| cfgfile_yesno(option, value, _T("cpu_compatible"), &p->cpu_compatible)
+ || cfgfile_yesno(option, value, _T("cpu_data_cache"), &p->cpu_data_cache)
|| cfgfile_yesno(option, value, _T("cpu_threaded"), &p->cpu_thread)
|| cfgfile_yesno(option, value, _T("cpu_24bit_addressing"), &p->address_space_24)
|| cfgfile_yesno(option, value, _T("cpu_reset_pause"), &p->reset_delay)
p->m68k_speed = -1;
return 1;
}
+ if (cfgfile_doubleval(option, value, _T("blitter_throttle"), &p->blitter_speed_throttle)) {
+ return 1;
+ }
if (cfgfile_intval (option, value, _T("dongle"), &p->dongle, 1)) {
if (p->dongle == 0)
int vga = -1;
for (int i = 0; i < MAX_RTG_BOARDS; i++) {
struct rtgboardconfig *rbc = &p->rtgboards[i];
- if (rbc->rtgmem_type == GFXBOARD_A2410 || rbc->rtgmem_type == GFXBOARD_RESOLVER) {
+ if (rbc->rtgmem_type == GFXBOARD_A2410) {
if (a2410 >= 0) {
rbc->rtgmem_size = 0;
rbc->rtgmem_type = 0;
#include "cpummu.h"
#include "debug.h"
-#define MMUDUMP 0
+#define MMUDUMP 1
#define DBG_MMU_VERBOSE 1
#define DBG_MMU_SANITY 1
bool mmu_pagesize_8k;
int mmu_pageshift, mmu_pageshift1m;
uae_u8 mmu_cache_state;
+uae_u8 cache_default_ins, cache_default_data;
int mmu060_state;
uae_u16 mmu_opcode;
from_addr = ttr & MMU_TTR_LOGICAL_BASE;
to_addr = (ttr & MMU_TTR_LOGICAL_MASK) << 8;
- write_log(_T("%s: [%08x] %08x - %08x enabled=%d supervisor=%d wp=%d cm=%02d\n"),
+ console_out_f(_T("%s: [%08x] %08x - %08x enabled=%d supervisor=%d wp=%d cm=%02d\n"),
label, ttr,
from_addr, to_addr,
ttr & MMU_TTR_BIT_ENABLED ? 1 : 0,
/* {{{ mmu_dump_tables */
void mmu_dump_tables(void)
{
- write_log(_T("URP: %08x SRP: %08x MMUSR: %x TC: %x\n"), regs.urp, regs.srp, regs.mmusr, regs.tcr);
+ console_out_f(_T("URP: %08x SRP: %08x MMUSR: %x TC: %x\n"), regs.urp, regs.srp, regs.mmusr, regs.tcr);
mmu_dump_ttr(_T("DTT0"), regs.dtt0);
mmu_dump_ttr(_T("DTT1"), regs.dtt1);
mmu_dump_ttr(_T("ITT0"), regs.itt0);
mmu_dump_atc();
#if MMUDUMP
mmu_dump_table("SRP", regs.srp);
+ if (regs.urp != regs.srp)
+ mmu_dump_table("URP", regs.urp);
#endif
}
/* }}} */
regs.mmu_fault_addr = addr;
#if 0
- if (m68k_getpc () == 0x0004B0AC) {
- write_log (_T("*"));
-#if 0
- extern void activate_debugger(void);
- activate_debugger ();
-#endif
- }
+ activate_debugger ();
#endif
+
+ cache_default_data &= ~CACHE_DISABLE_ALLOCATE;
+
THROW(2);
}
return TTR_NO_MATCH;
}
}
- if (ttr & MMU_TTR_CACHE_DISABLE)
+ if (ttr & MMU_TTR_CACHE_DISABLE) {
mmu_cache_state = CACHE_DISABLE_MMU;
+ } else {
+ mmu_cache_state = CACHE_ENABLE_ALL;
+ if (ttr & MMU_TTR_CACHE_MODE) {
+ mmu_cache_state |= CACHE_ENABLE_COPYBACK;
+ }
+ }
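+ // Cache state derived from the TTR: the disable bit forces a cache-inhibited
+ // access; otherwise the access is cacheable and MMU_TTR_CACHE_MODE (assumed
+ // here to be the copyback bit of the CM field) additionally selects copyback
+ // over writethrough.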
return (ttr & MMU_TTR_BIT_WRITE_PROTECT) ? TTR_NO_WRITE : TTR_OK_MATCH;
}
}
return res;
}
+// Descriptor read accesses can use the data cache but never allocate new cache lines.
+static uae_u32 desc_get_long(uaecptr addr)
+{
+ mmu_cache_state = ce_cachable[addr >> 16] | CACHE_DISABLE_ALLOCATE;
+ return x_phys_get_long(addr);
+}
+// Write accesses are probably always pushed to memory.
+static void desc_put_long(uaecptr addr, uae_u32 v)
+{
+ mmu_cache_state = CACHE_DISABLE_MMU;
+ x_phys_put_long(addr, v);
+}
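+// These helpers stand in for plain phys_get_long/phys_put_long in the table
+// walk below, so descriptor fetches go through x_phys_* with a cache state of
+// "cacheable but non-allocating" for reads and "cache-inhibited" for writes.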
+
/*
* Lookup the address by walking the page table and updating
* the page descriptors accordingly. Returns the found descriptor
SAVE_EXCEPTION;
TRY(prb) {
- desc = phys_get_long(desc_addr);
+ desc = desc_get_long(desc_addr);
if ((desc & 2) == 0) {
#if MMUDEBUG > 1
write_log(_T("MMU: invalid root descriptor %s for %x desc at %x desc=%x\n"), super ? _T("srp"):_T("urp"),
}
wp |= desc;
- if ((desc & MMU_DES_USED) == 0)
- phys_put_long(desc_addr, desc | MMU_DES_USED);
+ if ((desc & MMU_DES_USED) == 0) {
+ desc_put_long(desc_addr, desc | MMU_DES_USED);
+ }
/* fetch pointer table descriptor */
i = (addr >> 16) & 0x1fc;
desc_addr = (desc & MMU_ROOT_PTR_ADDR_MASK) | i;
- desc = phys_get_long(desc_addr);
+ desc = desc_get_long(desc_addr);
if ((desc & 2) == 0) {
#if MMUDEBUG > 1
write_log(_T("MMU: invalid ptr descriptor %s for %x desc at %x desc=%x\n"), super ? _T("srp"):_T("urp"),
}
wp |= desc;
if ((desc & MMU_DES_USED) == 0)
- phys_put_long(desc_addr, desc | MMU_DES_USED);
+ desc_put_long(desc_addr, desc | MMU_DES_USED);
/* fetch page table descriptor */
if (mmu_pagesize_8k) {
desc_addr = (desc & MMU_PTR_PAGE_ADDR_MASK_4) + i;
}
- desc = phys_get_long(desc_addr);
+ desc = desc_get_long(desc_addr);
if ((desc & 3) == 2) {
/* indirect */
desc_addr = desc & MMU_PAGE_INDIRECT_MASK;
- desc = phys_get_long(desc_addr);
+ desc = desc_get_long(desc_addr);
}
if ((desc & 1) == 1) {
wp |= desc;
if ((wp & MMU_DES_WP) || ((desc & MMU_DES_SUPER) && !super)) {
if ((desc & MMU_DES_USED) == 0) {
desc |= MMU_DES_USED;
- phys_put_long(desc_addr, desc);
+ desc_put_long(desc_addr, desc);
}
} else if ((desc & (MMU_DES_USED|MMU_DES_MODIFIED)) !=
(MMU_DES_USED|MMU_DES_MODIFIED)) {
desc |= MMU_DES_USED|MMU_DES_MODIFIED;
- phys_put_long(desc_addr, desc);
+ desc_put_long(desc_addr, desc);
}
} else {
if ((desc & MMU_DES_USED) == 0) {
desc |= MMU_DES_USED;
- phys_put_long(desc_addr, desc);
+ desc_put_long(desc_addr, desc);
}
}
desc |= wp & MMU_DES_WP;
if (idx2 < MMUFASTCACHE_ENTRIES - 1) {
atc_data_cache_write[idx2].log = idx1;
atc_data_cache_write[idx2].phys = phys;
+ atc_data_cache_write[idx2].cache_state = mmu_cache_state;
}
} else {
if (idx2 < MMUFASTCACHE_ENTRIES - 1) {
atc_data_cache_read[idx2].log = idx1;
atc_data_cache_read[idx2].phys = phys;
+ atc_data_cache_read[idx2].cache_state = mmu_cache_state;
}
}
#endif
// save way for next access (likely in same page)
mmu_atc_ways[data] = way;
- if (l->status & MMU_MMUSR_CM_DISABLE)
+ if (l->status & MMU_MMUSR_CM_DISABLE) {
mmu_cache_state = CACHE_DISABLE_MMU;
+ } else {
+ mmu_cache_state = CACHE_ENABLE_ALL;
+ if (l->status & MMU_MMUSR_CM_MODE) {
+ mmu_cache_state |= CACHE_ENABLE_COPYBACK;
+ }
+ }
mmu_add_cache(addr, l->phys, super, data, write);
{
uae_u16 res;
- res = (uae_u16)mmu_get_user_byte(addr, regs.s != 0, true, sz_word) << 8;
+ res = (uae_u16)mmu_get_user_byte(addr, regs.s != 0, true, sz_word, true) << 8;
SAVE_EXCEPTION;
TRY(prb) {
- res |= mmu_get_user_byte(addr + 1, regs.s != 0, true, sz_word);
+ res |= mmu_get_user_byte(addr + 1, regs.s != 0, true, sz_word, true);
RESTORE_EXCEPTION;
}
CATCH(prb) {
uae_u32 res;
if (likely(!(addr & 1))) {
- res = (uae_u32)mmu_get_user_word(addr, regs.s != 0, true, sz_long) << 16;
+ res = (uae_u32)mmu_get_user_word(addr, regs.s != 0, true, sz_long, true) << 16;
SAVE_EXCEPTION;
TRY(prb) {
- res |= mmu_get_user_word(addr + 2, regs.s != 0, true, sz_long);
+ res |= mmu_get_user_word(addr + 2, regs.s != 0, true, sz_long, true);
RESTORE_EXCEPTION;
}
CATCH(prb) {
THROW_AGAIN(prb);
} ENDTRY
} else {
- res = (uae_u32)mmu_get_user_byte(addr, regs.s != 0, true, sz_long) << 8;
+ res = (uae_u32)mmu_get_user_byte(addr, regs.s != 0, true, sz_long, true) << 8;
SAVE_EXCEPTION;
TRY(prb) {
- res = (res | mmu_get_user_byte(addr + 1, regs.s != 0, true, sz_long)) << 8;
- res = (res | mmu_get_user_byte(addr + 2, regs.s != 0, true, sz_long)) << 8;
- res |= mmu_get_user_byte(addr + 3, regs.s != 0, true, sz_long);
+ res = (res | mmu_get_user_byte(addr + 1, regs.s != 0, true, sz_long, true)) << 8;
+ res = (res | mmu_get_user_byte(addr + 2, regs.s != 0, true, sz_long, true)) << 8;
+ res |= mmu_get_user_byte(addr + 3, regs.s != 0, true, sz_long, true);
RESTORE_EXCEPTION;
}
CATCH(prb) {
return res;
}
+
+static void REGPARAM2 mmu_put_lrmw_long_unaligned(uaecptr addr, uae_u32 val)
+{
+ SAVE_EXCEPTION;
+ TRY(prb) {
+ if (likely(!(addr & 1))) {
+ mmu_put_user_word(addr, val >> 16, regs.s != 0, sz_long, true);
+ mmu_put_user_word(addr + 2, val, regs.s != 0, sz_long, true);
+ } else {
+ mmu_put_user_byte(addr, val >> 24, regs.s != 0, sz_long, true);
+ mmu_put_user_byte(addr + 1, val >> 16, regs.s != 0, sz_long, true);
+ mmu_put_user_byte(addr + 2, val >> 8, regs.s != 0, sz_long, true);
+ mmu_put_user_byte(addr + 3, val, regs.s != 0, sz_long, true);
+ }
+ RESTORE_EXCEPTION;
+ }
+ CATCH(prb) {
+ RESTORE_EXCEPTION;
+ regs.wb3_data = val;
+ misalignednotfirstcheck(addr);
+ THROW_AGAIN(prb);
+ } ENDTRY
+}
+
+static void REGPARAM2 mmu_put_lrmw_word_unaligned(uaecptr addr, uae_u16 val)
+{
+ SAVE_EXCEPTION;
+ TRY(prb) {
+ mmu_put_user_byte(addr, val >> 8, regs.s != 0, sz_word, true);
+ mmu_put_user_byte(addr + 1, val, regs.s != 0, sz_word, true);
+ RESTORE_EXCEPTION;
+ }
+ CATCH(prb) {
+ RESTORE_EXCEPTION;
+ regs.wb3_data = val;
+ misalignednotfirstcheck(addr);
+ THROW_AGAIN(prb);
+ } ENDTRY
+}
+
+
+
void REGPARAM2 mmu_put_long_unaligned(uaecptr addr, uae_u32 val, bool data)
{
SAVE_EXCEPTION;
ismoves = true;
if (likely(!is_unaligned(addr, 4))) {
- res = mmu_get_user_long(addr, super, false, sz_long);
+ res = mmu_get_user_long(addr, super, false, sz_long, false);
} else {
if (likely(!(addr & 1))) {
- res = (uae_u32)mmu_get_user_word(addr, super, false, sz_long) << 16;
+ res = (uae_u32)mmu_get_user_word(addr, super, false, sz_long, false) << 16;
SAVE_EXCEPTION;
TRY(prb) {
- res |= mmu_get_user_word(addr + 2, super, false, sz_long);
+ res |= mmu_get_user_word(addr + 2, super, false, sz_long, false);
RESTORE_EXCEPTION;
}
CATCH(prb) {
THROW_AGAIN(prb);
} ENDTRY
} else {
- res = (uae_u32)mmu_get_user_byte(addr, super, false, sz_long) << 8;
+ res = (uae_u32)mmu_get_user_byte(addr, super, false, sz_long, false) << 8;
SAVE_EXCEPTION;
TRY(prb) {
- res = (res | mmu_get_user_byte(addr + 1, super, false, sz_long)) << 8;
- res = (res | mmu_get_user_byte(addr + 2, super, false, sz_long)) << 8;
- res |= mmu_get_user_byte(addr + 3, super, false, sz_long);
+ res = (res | mmu_get_user_byte(addr + 1, super, false, sz_long, false)) << 8;
+ res = (res | mmu_get_user_byte(addr + 2, super, false, sz_long, false)) << 8;
+ res |= mmu_get_user_byte(addr + 3, super, false, sz_long, false);
RESTORE_EXCEPTION;
}
CATCH(prb) {
ismoves = true;
if (likely(!is_unaligned(addr, 2))) {
- res = mmu_get_user_word(addr, super, false, sz_word);
+ res = mmu_get_user_word(addr, super, false, sz_word, false);
} else {
- res = (uae_u16)mmu_get_user_byte(addr, super, false, sz_word) << 8;
+ res = (uae_u16)mmu_get_user_byte(addr, super, false, sz_word, false) << 8;
SAVE_EXCEPTION;
TRY(prb) {
- res |= mmu_get_user_byte(addr + 1, super, false, sz_word);
+ res |= mmu_get_user_byte(addr + 1, super, false, sz_word, false);
RESTORE_EXCEPTION;
}
CATCH(prb) {
uae_u8 res;
ismoves = true;
- res = mmu_get_user_byte(addr, super, false, sz_byte);
+ res = mmu_get_user_byte(addr, super, false, sz_byte, false);
ismoves = false;
return res;
}
SAVE_EXCEPTION;
TRY(prb) {
if (likely(!is_unaligned(addr, 4))) {
- mmu_put_user_long(addr, val, super, sz_long);
+ mmu_put_user_long(addr, val, super, sz_long, false);
} else if (likely(!(addr & 1))) {
- mmu_put_user_word(addr, val >> 16, super, sz_long);
- mmu_put_user_word(addr + 2, val, super, sz_long);
+ mmu_put_user_word(addr, val >> 16, super, sz_long, false);
+ mmu_put_user_word(addr + 2, val, super, sz_long, false);
} else {
- mmu_put_user_byte(addr, val >> 24, super, sz_long);
- mmu_put_user_byte(addr + 1, val >> 16, super, sz_long);
- mmu_put_user_byte(addr + 2, val >> 8, super, sz_long);
- mmu_put_user_byte(addr + 3, val, super, sz_long);
+ mmu_put_user_byte(addr, val >> 24, super, sz_long, false);
+ mmu_put_user_byte(addr + 1, val >> 16, super, sz_long, false);
+ mmu_put_user_byte(addr + 2, val >> 8, super, sz_long, false);
+ mmu_put_user_byte(addr + 3, val, super, sz_long, false);
}
RESTORE_EXCEPTION;
}
SAVE_EXCEPTION;
TRY(prb) {
if (likely(!is_unaligned(addr, 2))) {
- mmu_put_user_word(addr, val, super, sz_word);
+ mmu_put_user_word(addr, val, super, sz_word, false);
} else {
- mmu_put_user_byte(addr, val >> 8, super, sz_word);
- mmu_put_user_byte(addr + 1, val, super, sz_word);
+ mmu_put_user_byte(addr, val >> 8, super, sz_word, false);
+ mmu_put_user_byte(addr + 1, val, super, sz_word, false);
}
RESTORE_EXCEPTION;
}
ismoves = true;
SAVE_EXCEPTION;
TRY(prb) {
- mmu_put_user_byte(addr, val, super, sz_byte);
+ mmu_put_user_byte(addr, val, super, sz_byte, false);
RESTORE_EXCEPTION;
}
CATCH(prb) {
if ((!mmu_ttr_enabled || mmu_match_ttr(addr,super,data) == TTR_NO_MATCH) && regs.mmu_enabled) {
addr = mmu_translate(addr, 0, super, data, false, size);
}
+ // MOVE16 read and cache miss: do not allocate new cache line
+ mmu_cache_state |= CACHE_DISABLE_ALLOCATE;
for (int i = 0; i < 4; i++) {
- v[i] = phys_get_long(addr + i * 4);
+ v[i] = x_phys_get_long(addr + i * 4);
}
}
if ((!mmu_ttr_enabled || mmu_match_ttr_write(addr,super,data,val[0],size) == TTR_NO_MATCH) && regs.mmu_enabled) {
addr = mmu_translate(addr, val[0], super, data, true, size);
}
+ // MOVE16 write invalidates existing line and also does not allocate new cache lines.
+ mmu_cache_state = CACHE_DISABLE_MMU;
for (int i = 0; i < 4; i++) {
- phys_put_long(addr + i * 4, val[i]);
+ x_phys_put_long(addr + i * 4, val[i]);
}
}
if (currprefs.cpu_memory_cycle_exact || currprefs.cpu_compatible) {
x_phys_get_iword = get_word_icache040;
x_phys_get_ilong = get_long_icache040;
- x_phys_get_byte = get_byte_cache_040;
- x_phys_get_word = get_word_cache_040;
- x_phys_get_long = get_long_cache_040;
- x_phys_put_byte = put_byte_cache_040;
- x_phys_put_word = put_word_cache_040;
- x_phys_put_long = put_long_cache_040;
+ if (currprefs.cpu_data_cache) {
+ x_phys_get_byte = get_byte_cache_040;
+ x_phys_get_word = get_word_cache_040;
+ x_phys_get_long = get_long_cache_040;
+ x_phys_put_byte = put_byte_cache_040;
+ x_phys_put_word = put_word_cache_040;
+ x_phys_put_long = put_long_cache_040;
+ } else if (currprefs.cpu_memory_cycle_exact) {
+ x_phys_get_byte = mem_access_delay_byte_read_c040;
+ x_phys_get_word = mem_access_delay_word_read_c040;
+ x_phys_get_long = mem_access_delay_long_read_c040;
+ x_phys_put_byte = mem_access_delay_byte_write_c040;
+ x_phys_put_word = mem_access_delay_word_write_c040;
+ x_phys_put_long = mem_access_delay_long_write_c040;
+ } else {
+ x_phys_get_byte = phys_get_byte;
+ x_phys_get_word = phys_get_word;
+ x_phys_get_long = phys_get_long;
+ x_phys_put_byte = phys_put_byte;
+ x_phys_put_word = phys_put_word;
+ x_phys_put_long = phys_put_long;
+ }
} else {
x_phys_get_iword = phys_get_word;
x_phys_get_ilong = phys_get_long;
mmu_pageshift = mmu_pagesize_8k ? 13 : 12;
mmu_pageshift1m = mmu_pageshift - 1;
+ cache_default_ins = CACHE_ENABLE_ALL;
+ cache_default_data = CACHE_ENABLE_ALL;
+ if (currprefs.mmu_model == 68060) {
+ int dc = (tc >> 3) & 3;
+ cache_default_ins = 0;
+ if (!(dc & 2))
+ cache_default_ins = CACHE_ENABLE_ALL;
+ dc = (tc >> 8) & 3;
+ cache_default_data = 0;
+ if (!(dc & 2))
+ cache_default_data = (dc & 1) ? CACHE_ENABLE_COPYBACK | CACHE_ENABLE_ALL : CACHE_ENABLE_ALL;
+ }
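+ // The two 2-bit fields extracted from TC above act as the 68060 default
+ // cache modes: a value with bit 1 set is treated as cache-inhibited,
+ // otherwise the access is cacheable, and for data accesses bit 0 is assumed
+ // to select copyback instead of writethrough.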
+
mmu_flush_atc_all(true);
write_log(_T("%d MMU: enabled=%d page8k=%d PC=%08x\n"), currprefs.mmu_model, regs.mmu_enabled, mmu_pagesize_8k, m68k_getpc());
-#if MMUDUMP
- if (regs.mmu_enabled)
- mmu_dump_tables();
-#endif
}
void REGPARAM2 mmu_set_super(bool super)
{
locked_rmw_cycle = true;
if (size == sz_byte) {
- mmu_put_byte(addr, v, true, sz_byte);
+ mmu_put_user_byte(addr, v, regs.s, sz_byte, true);
} else if (size == sz_word) {
if (unlikely(is_unaligned(addr, 2))) {
- mmu_put_word_unaligned(addr, v, true);
+ mmu_put_lrmw_word_unaligned(addr, v);
} else {
- mmu_put_word(addr, v, true, sz_word);
+ mmu_put_user_word(addr, v, regs.s != 0, sz_word, true);
}
} else {
if (unlikely(is_unaligned(addr, 4)))
- mmu_put_long_unaligned(addr, v, true);
+ mmu_put_lrmw_long_unaligned(addr, v);
else
- mmu_put_long(addr, v, true, sz_long);
+ mmu_put_user_long(addr, v, regs.s, sz_long, true);
}
locked_rmw_cycle = false;
}
uae_u32 v;
locked_rmw_cycle = true;
if (size == sz_byte) {
- v = mmu_get_user_byte(addr, regs.s != 0, true, sz_byte);
+ v = mmu_get_user_byte(addr, regs.s != 0, true, sz_byte, true);
} else if (size == sz_word) {
if (unlikely(is_unaligned(addr, 2))) {
v = mmu_get_lrmw_word_unaligned(addr);
} else {
- v = mmu_get_user_word(addr, regs.s != 0, true, sz_word);
+ v = mmu_get_user_word(addr, regs.s != 0, true, sz_word, true);
}
} else {
if (unlikely(is_unaligned(addr, 4)))
v = mmu_get_lrmw_long_unaligned(addr);
else
- v = mmu_get_user_long(addr, regs.s != 0, true, sz_long);
+ v = mmu_get_user_long(addr, regs.s != 0, true, sz_long, true);
}
locked_rmw_cycle = false;
return v;
static uae_u32 get_dcache_byte(uaecptr addr)
{
- return read_dcache030(addr, 0, (regs.s ? 4 : 0) | 1);
+ return read_dcache030_bget(addr, (regs.s ? 4 : 0) | 1);
}
static uae_u32 get_dcache_word(uaecptr addr)
{
- return read_dcache030(addr, 1, (regs.s ? 4 : 0) | 1);
+ return read_dcache030_wget(addr, (regs.s ? 4 : 0) | 1);
}
static uae_u32 get_dcache_long(uaecptr addr)
{
- return read_dcache030(addr, 2, (regs.s ? 4 : 0) | 1);
+ return read_dcache030_lget(addr, (regs.s ? 4 : 0) | 1);
}
static void put_dcache_byte(uaecptr addr, uae_u32 v)
{
- write_dcache030(addr, v, 0, (regs.s ? 4 : 0) | 1);
+ write_dcache030_bput(addr, v, (regs.s ? 4 : 0) | 1);
}
static void put_dcache_word(uaecptr addr, uae_u32 v)
{
- write_dcache030(addr, v, 1, (regs.s ? 4 : 0) | 1);
+ write_dcache030_wput(addr, v, (regs.s ? 4 : 0) | 1);
}
static void put_dcache_long(uaecptr addr, uae_u32 v)
{
- write_dcache030(addr, v, 2, (regs.s ? 4 : 0) | 1);
+ write_dcache030_lput(addr, v, (regs.s ? 4 : 0) | 1);
}
/* MMU Reset */
}
}
-
void m68k_do_rte_mmu030 (uaecptr a7)
{
// Restore access error exception state
_T(" c Dump state of the CIA, disk drives and custom registers.\n")
_T(" r Dump state of the CPU.\n")
_T(" r <reg> <value> Modify CPU registers (Dx,Ax,USP,ISP,VBR,...).\n")
+ _T(" rc[d] Show CPU instruction or data cache contents.\n")
_T(" m <address> [<lines>] Memory dump starting at <address>.\n")
_T(" a <address> Assembler.\n")
_T(" d <address> [<lines>] Disassembly starting at <address>.\n")
if (debug_mmu_mode & 1) {
v = mmu_get_iword(addr, sz_byte);
} else {
- v = mmu_get_user_byte (addr, regs.s != 0, false, sz_byte);
+ v = mmu_get_user_byte (addr, regs.s != 0, false, sz_byte, false);
}
}
} CATCH(p) {
if (debug_mmu_mode & 1) {
v = mmu_get_iword(addr, sz_word);
} else {
- v = mmu_get_user_word (addr, regs.s != 0, false, sz_word);
+ v = mmu_get_user_word (addr, regs.s != 0, false, sz_word, false);
}
}
} CATCH(p) {
if (debug_mmu_mode & 1) {
v = mmu_get_ilong(addr, sz_long);
} else {
- v = mmu_get_user_long (addr, regs.s != 0, false, sz_long);
+ v = mmu_get_user_long (addr, regs.s != 0, false, sz_long, false);
}
}
} CATCH(p) {
} UaeMemoryMap;
#endif
+static const TCHAR *bankmodes[] = { _T("F32"), _T("C16"), _T("C32"), _T("CIA"), _T("F16"), _T("F16X") };
+
static void memory_map_dump_3(UaeMemoryMap *map, int log)
{
bool imold;
size_out /= 1024;
size_ext = 'M';
}
-#if 1
- _stprintf (txt, _T("%08X %7d%c/%d = %7d%c %s"), (j << 16) | bankoffset, size_out, size_ext,
- mirrored, mirrored ? size_out / mirrored : size_out, size_ext, name);
-#endif
+ _stprintf (txt, _T("%08X %7d%c/%d = %7d%c %s%s %s %s"), (j << 16) | bankoffset, size_out, size_ext,
+ mirrored, mirrored ? size_out / mirrored : size_out, size_ext,
+ (a1->flags & ABFLAG_CACHE_ENABLE_INS) ? _T("I") : _T("-"),
+ (a1->flags & ABFLAG_CACHE_ENABLE_DATA) ? _T("D") : _T("-"),
+ bankmodes[ce_banktype[j]],
+ name);
tmp[0] = 0;
if ((a1->flags & ABFLAG_ROM) && mirrored) {
TCHAR *p = txt + _tcslen (txt);
map->num_regions += 1;
}
}
-#if 1
_tcscat (txt, _T("\n"));
if (log > 0)
write_log (_T("%s"), txt);
else if (log == 0)
console_out (tmp);
}
-#endif
if (!sb)
break;
bankoffset = bankoffset2;
case 'e': dump_custom_regs (tolower(*inptr) == 'a'); break;
case 'r':
{
- if (*inptr == 'c')
- m68k_dumpcache ();
- else if (more_params(&inptr))
+ if (*inptr == 'c') {
+ next_char(&inptr);
+ m68k_dumpcache (*inptr == 'd');
+ } else if (more_params(&inptr)) {
m68k_modify (&inptr);
- else
+ } else {
m68k_dumpstate (&nextpc);
+ }
}
break;
case 'D': deepcheatsearch (&inptr); break;
if (*inptr == 'm' && inptr[1] == 'u') {
if (currprefs.mmu_model) {
inptr += 2;
- if (more_params (&inptr))
- debug_mmu_mode = readint (&inptr);
- else
- debug_mmu_mode = 0;
- console_out_f (_T("MMU translation function code = %d\n"), debug_mmu_mode);
+ if (inptr[0] == 'd') {
+ if (currprefs.mmu_model >= 68040)
+ mmu_dump_tables();
+ } else {
+ if (more_params (&inptr))
+ debug_mmu_mode = readint (&inptr);
+ else
+ debug_mmu_mode = 0;
+ console_out_f (_T("MMU translation function code = %d\n"), debug_mmu_mode);
+ }
}
break;
}
fastmem0_lput, fastmem0_wput, fastmem0_bput,
fastmem0_xlate, fastmem0_check, NULL, _T("*"), _T("Fast memory"),
fastmem0_lget, fastmem0_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
},
{
fastmem1_lget, fastmem1_wget, fastmem1_bget,
fastmem1_lput, fastmem1_wput, fastmem1_bput,
fastmem1_xlate, fastmem1_check, NULL, _T("*"), _T("Fast memory 2"),
fastmem1_lget, fastmem1_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
},
{
fastmem2_lget, fastmem2_wget, fastmem2_bget,
fastmem2_lput, fastmem2_wput, fastmem2_bput,
fastmem2_xlate, fastmem2_check, NULL, _T("*"), _T("Fast memory 3"),
fastmem2_lget, fastmem2_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
},
{
fastmem3_lget, fastmem3_wget, fastmem3_bget,
fastmem3_lput, fastmem3_wput, fastmem3_bput,
fastmem3_xlate, fastmem3_check, NULL, _T("*"), _T("Fast memory 4"),
fastmem3_lget, fastmem3_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
}
};
z3fastmem0_lput, z3fastmem0_wput, z3fastmem0_bput,
z3fastmem0_xlate, z3fastmem0_check, NULL, _T("*"), _T("Zorro III Fast RAM"),
z3fastmem0_lget, z3fastmem0_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
},
{
z3fastmem1_lget, z3fastmem1_wget, z3fastmem1_bget,
z3fastmem1_lput, z3fastmem1_wput, z3fastmem1_bput,
z3fastmem1_xlate, z3fastmem1_check, NULL, _T("*"), _T("Zorro III Fast RAM #2"),
z3fastmem1_lget, z3fastmem1_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
},
{
z3fastmem2_lget, z3fastmem2_wget, z3fastmem2_bget,
z3fastmem2_lput, z3fastmem2_wput, z3fastmem2_bput,
z3fastmem2_xlate, z3fastmem2_check, NULL, _T("*"), _T("Zorro III Fast RAM #3"),
z3fastmem2_lget, z3fastmem2_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
},
{
z3fastmem3_lget, z3fastmem3_wget, z3fastmem3_bget,
z3fastmem3_lput, z3fastmem3_wput, z3fastmem3_bput,
z3fastmem3_xlate, z3fastmem3_check, NULL, _T("*"), _T("Zorro III Fast RAM #4"),
z3fastmem3_lget, z3fastmem3_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
}
};
z3chipmem_lput, z3chipmem_wput, z3chipmem_bput,
z3chipmem_xlate, z3chipmem_check, NULL, _T("*"), _T("MegaChipRAM"),
z3chipmem_lget, z3chipmem_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
};
/* ********************************************************** */
};
static const struct cpuboardsubtype blizzardboard_sub[] = {
{
- _T("Blizzard 1230 II"),
+ _T("Blizzard 1230 I/II"),
_T("Blizzard1230II"),
ROMTYPE_CB_B1230MK2, 0,
cpuboard_ncr9x_add_scsi_unit, EXPANSIONTYPE_SCSI,
k->file_pos += actual;
}
xfree (buf);
- flush_dcache (addr, size);
size = 0;
}
}
PUT_PCK_RES1 (packet, actual);
k->file_pos += actual;
}
- flush_dcache (addr, size);
}
TRACE((_T("=%d\n"), actual));
0x00000000, 0x00200000, 0x00200000, 0x10000, 0, 0, 2, false,
0, 0xc1, &a2410_func
},
+#if 0
+ {
+ _T("Resolver"), _T("DMI"), _T("Resolver"),
+ 2129, 1, 0,
+ 0x00000000, 0x00200000, 0x00200000, 0x10000, 0, 0, 2, false,
+ 0, 0xc1, &a2410_func
+ },
+#endif
{
_T("x86 bridgeboard VGA"), _T("x86"), _T("VGA"),
0, 0, 0,
gfxboard_lput_mem, gfxboard_wput_mem, gfxboard_bput_mem,
gfxboard_xlate, gfxboard_check, NULL, NULL, NULL,
gfxboard_lget_mem, gfxboard_wget_mem,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
};
static const addrbank tmpl_gfxboard_bank_memory_nojit = {
gfxboard_lput_mem_nojit, gfxboard_wput_mem_nojit, gfxboard_bput_mem_nojit,
gfxboard_xlate, gfxboard_check, NULL, NULL, NULL,
gfxboard_lget_mem_nojit, gfxboard_wget_mem_nojit,
- ABFLAG_RAM | ABFLAG_THREADSAFE, S_READ, S_WRITE
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, S_READ, S_WRITE
};
static const addrbank tmpl_gfxboard_bank_wbsmemory = {
gfxboard_lput_wbsmem, gfxboard_wput_wbsmem, gfxboard_bput_wbsmem,
gfxboard_xlate, gfxboard_check, NULL, NULL, NULL,
gfxboard_lget_wbsmem, gfxboard_wget_wbsmem,
- ABFLAG_RAM | ABFLAG_THREADSAFE, S_READ, S_WRITE
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, S_READ, S_WRITE
};
static const addrbank tmpl_gfxboard_bank_lbsmemory = {
gfxboard_lput_lbsmem, gfxboard_wput_lbsmem, gfxboard_bput_lbsmem,
gfxboard_xlate, gfxboard_check, NULL, NULL, NULL,
gfxboard_lget_lbsmem, gfxboard_wget_lbsmem,
- ABFLAG_RAM | ABFLAG_THREADSAFE, S_READ, S_WRITE
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, S_READ, S_WRITE
};
static const addrbank tmpl_gfxboard_bank_nbsmemory = {
gfxboard_lput_nbsmem, gfxboard_wput_nbsmem, gfxboard_bput_bsmem,
gfxboard_xlate, gfxboard_check, NULL, NULL, _T("Picasso IV banked VRAM"),
gfxboard_lget_nbsmem, gfxboard_wget_nbsmem,
- ABFLAG_RAM | ABFLAG_THREADSAFE, S_READ, S_WRITE
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, S_READ, S_WRITE
};
static const addrbank tmpl_gfxboard_bank_registers = {
STATIC_INLINE void put_long_030(uaecptr addr, uae_u32 v)
{
- write_dcache030(addr, v, 2, (regs.s ? 4 : 0) | 1);
+ write_data_030_lput(addr, v);
}
STATIC_INLINE void put_word_030(uaecptr addr, uae_u32 v)
{
- write_dcache030(addr, v, 1, (regs.s ? 4 : 0) | 1);
+ write_data_030_wput(addr, v);
}
STATIC_INLINE void put_byte_030(uaecptr addr, uae_u32 v)
{
- write_dcache030(addr, v, 0, (regs.s ? 4 : 0) | 1);
+ write_data_030_bput(addr, v);
}
STATIC_INLINE uae_u32 get_long_030(uaecptr addr)
{
- return read_dcache030(addr, 2, (regs.s ? 4 : 0) | 1);
+ return read_data_030_lget(addr);
}
STATIC_INLINE uae_u32 get_word_030(uaecptr addr)
{
- return read_dcache030(addr, 1, (regs.s ? 4 : 0) | 1);
+ return read_data_030_wget(addr);
}
STATIC_INLINE uae_u32 get_byte_030(uaecptr addr)
{
- return read_dcache030(addr, 0, (regs.s ? 4 : 0) | 1);
+ return read_data_030_bget(addr);
}
STATIC_INLINE uae_u32 get_long_030_prefetch(int o)
STATIC_INLINE void put_long_ce030 (uaecptr addr, uae_u32 v)
{
- write_dcache030 (addr, v, 2, (regs.s ? 4 : 0) | 1);
+ write_dcache030_lput(addr, v, (regs.s ? 4 : 0) | 1);
}
STATIC_INLINE void put_word_ce030 (uaecptr addr, uae_u32 v)
{
- write_dcache030 (addr, v, 1, (regs.s ? 4 : 0) | 1);
+ write_dcache030_wput(addr, v, (regs.s ? 4 : 0) | 1);
}
STATIC_INLINE void put_byte_ce030 (uaecptr addr, uae_u32 v)
{
- write_dcache030 (addr, v, 0, (regs.s ? 4 : 0) | 1);
+ write_dcache030_bput(addr, v, (regs.s ? 4 : 0) | 1);
}
STATIC_INLINE uae_u32 get_long_ce030 (uaecptr addr)
{
- return read_dcache030 (addr, 2, (regs.s ? 4 : 0) | 1);
+ return read_dcache030_lget(addr, (regs.s ? 4 : 0) | 1);
}
STATIC_INLINE uae_u32 get_word_ce030 (uaecptr addr)
{
- return read_dcache030 (addr, 1, (regs.s ? 4 : 0) | 1);
+ return read_dcache030_wget(addr, (regs.s ? 4 : 0) | 1);
}
STATIC_INLINE uae_u32 get_byte_ce030 (uaecptr addr)
{
- return read_dcache030 (addr, 0, (regs.s ? 4 : 0) | 1);
+ return read_dcache030_bget(addr, (regs.s ? 4 : 0) | 1);
}
STATIC_INLINE uae_u32 get_long_ce030_prefetch (int o)
extern bool mmu_ttr_enabled, mmu_ttr_enabled_ins, mmu_ttr_enabled_data;
extern bool rmw_cycle;
extern uae_u8 mmu_cache_state;
+extern uae_u8 cache_default_ins, cache_default_data;
extern void mmu_dump_tables(void);
{
uae_u32 log;
uae_u32 phys;
+ uae_u8 cache_state;
};
extern struct mmufastcache atc_data_cache_read[MMUFASTCACHE_ENTRIES];
extern struct mmufastcache atc_data_cache_write[MMUFASTCACHE_ENTRIES];
extern int mmu_data_write_hit, mmu_data_write_miss;
#endif
-STATIC_INLINE void cacheablecheck(uaecptr addr)
-{
- if (mmu_cache_state == CACHE_ENABLE_ALL) {
- // MMU didn't inhibit caches, use hardware cache state
- mmu_cache_state = ce_cachable[addr >> 16];
- }
-}
static ALWAYS_INLINE uae_u32 mmu_get_ilong(uaecptr addr, int size)
{
- mmu_cache_state = CACHE_ENABLE_ALL;
+ mmu_cache_state = cache_default_ins;
if ((!mmu_ttr_enabled_ins || mmu_match_ttr_ins(addr,regs.s!=0) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_IPAGECACHE
if (((addr & mmu_pagemaski) | regs.s) == atc_last_ins_laddr) {
}
#endif
}
- cacheablecheck(addr);
return x_phys_get_ilong(addr);
}
static ALWAYS_INLINE uae_u16 mmu_get_iword(uaecptr addr, int size)
{
- mmu_cache_state = CACHE_ENABLE_ALL;
+ mmu_cache_state = cache_default_ins;
if ((!mmu_ttr_enabled_ins || mmu_match_ttr_ins(addr,regs.s!=0) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_IPAGECACHE
if (((addr & mmu_pagemaski) | regs.s) == atc_last_ins_laddr) {
}
#endif
}
- cacheablecheck(addr);
return x_phys_get_iword(addr);
}
static ALWAYS_INLINE uae_u32 mmu_get_long(uaecptr addr, bool data, int size)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr(addr,regs.s!=0,data) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | regs.s;
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_read[idx2].log == idx1) {
addr = atc_data_cache_read[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_read[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_read_hit++;
#endif
}
#endif
}
- return phys_get_long(addr);
+ return x_phys_get_long(addr);
}
static ALWAYS_INLINE uae_u16 mmu_get_word(uaecptr addr, bool data, int size)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr(addr,regs.s!=0,data) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | regs.s;
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_read[idx2].log == idx1) {
addr = atc_data_cache_read[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_read[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_read_hit++;
#endif
}
#endif
}
- return phys_get_word(addr);
+ return x_phys_get_word(addr);
}
static ALWAYS_INLINE uae_u8 mmu_get_byte(uaecptr addr, bool data, int size)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr(addr,regs.s!=0,data) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | regs.s;
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_read[idx2].log == idx1) {
addr = atc_data_cache_read[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_read[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_read_hit++;
#endif
}
#endif
}
- return phys_get_byte(addr);
+ return x_phys_get_byte(addr);
}
static ALWAYS_INLINE void mmu_put_long(uaecptr addr, uae_u32 val, bool data, int size)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_write(addr,regs.s!=0,data,val,size) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | regs.s;
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_write[idx2].log == idx1) {
addr = atc_data_cache_write[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_write[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_write_hit++;
#endif
}
#endif
}
- phys_put_long(addr, val);
+ x_phys_put_long(addr, val);
}
static ALWAYS_INLINE void mmu_put_word(uaecptr addr, uae_u16 val, bool data, int size)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_write(addr,regs.s!=0,data,val,size) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | regs.s;
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_write[idx2].log == idx1) {
addr = atc_data_cache_write[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_write[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_write_hit++;
#endif
}
#endif
}
- phys_put_word(addr, val);
+ x_phys_put_word(addr, val);
}
static ALWAYS_INLINE void mmu_put_byte(uaecptr addr, uae_u8 val, bool data, int size)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_write(addr,regs.s!=0,data,val,size) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | regs.s;
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_write[idx2].log == idx1) {
addr = atc_data_cache_write[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_write[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_write_hit++;
#endif
}
#endif
}
- phys_put_byte(addr, val);
+ x_phys_put_byte(addr, val);
}
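+// The new 'ci' argument on the user-mode get/put helpers below marks
+// cache-inhibited accesses: the locked read-modify-write paths pass true,
+// which overrides whatever cache state the lookup produced with
+// CACHE_DISABLE_MMU before the physical access is issued.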
-static ALWAYS_INLINE uae_u32 mmu_get_user_long(uaecptr addr, bool super, bool write, int size)
+static ALWAYS_INLINE uae_u32 mmu_get_user_long(uaecptr addr, bool super, bool write, int size, bool ci)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_maybe_write(addr,super,true,size,write) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | (super ? 1 : 0);
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_read[idx2].log == idx1) {
addr = atc_data_cache_read[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_read[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_read_hit++;
#endif
}
#endif
}
- return phys_get_long(addr);
+ if (ci)
+ mmu_cache_state = CACHE_DISABLE_MMU;
+ return x_phys_get_long(addr);
}
-static ALWAYS_INLINE uae_u16 mmu_get_user_word(uaecptr addr, bool super, bool write, int size)
+static ALWAYS_INLINE uae_u16 mmu_get_user_word(uaecptr addr, bool super, bool write, int size, bool ci)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_maybe_write(addr,super,true,size,write) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | (super ? 1 : 0);
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_read[idx2].log == idx1) {
addr = atc_data_cache_read[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_read[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_read_hit++;
#endif
}
#endif
}
- return phys_get_word(addr);
+ if (ci)
+ mmu_cache_state = CACHE_DISABLE_MMU;
+ return x_phys_get_word(addr);
}
-static ALWAYS_INLINE uae_u8 mmu_get_user_byte(uaecptr addr, bool super, bool write, int size)
+static ALWAYS_INLINE uae_u8 mmu_get_user_byte(uaecptr addr, bool super, bool write, int size, bool ci)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_maybe_write(addr,super,true,size,write) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | (super ? 1 : 0);
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_read[idx2].log == idx1) {
addr = atc_data_cache_read[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_read[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_read_hit++;
#endif
}
#endif
}
- return phys_get_byte(addr);
+ if (ci)
+ mmu_cache_state = CACHE_DISABLE_MMU;
+ return x_phys_get_byte(addr);
}
-static ALWAYS_INLINE void mmu_put_user_long(uaecptr addr, uae_u32 val, bool super, int size)
+static ALWAYS_INLINE void mmu_put_user_long(uaecptr addr, uae_u32 val, bool super, int size, bool ci)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_write(addr,super,true,val,size) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | (super ? 1 : 0);
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_write[idx2].log == idx1) {
addr = atc_data_cache_write[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_write[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_write_hit++;
#endif
}
#endif
}
- phys_put_long(addr, val);
+ if (ci)
+ mmu_cache_state = CACHE_DISABLE_MMU;
+ x_phys_put_long(addr, val);
}
-static ALWAYS_INLINE void mmu_put_user_word(uaecptr addr, uae_u16 val, bool super, int size)
+static ALWAYS_INLINE void mmu_put_user_word(uaecptr addr, uae_u16 val, bool super, int size, bool ci)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_write(addr,super,true,val,size) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | (super ? 1 : 0);
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_write[idx2].log == idx1) {
addr = atc_data_cache_write[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_write[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_write_hit++;
#endif
}
#endif
}
- phys_put_word(addr, val);
+ if (ci)
+ mmu_cache_state = CACHE_DISABLE_MMU;
+ x_phys_put_word(addr, val);
}
-static ALWAYS_INLINE void mmu_put_user_byte(uaecptr addr, uae_u8 val, bool super, int size)
+static ALWAYS_INLINE void mmu_put_user_byte(uaecptr addr, uae_u8 val, bool super, int size, bool ci)
{
+ mmu_cache_state = cache_default_data;
if ((!mmu_ttr_enabled || mmu_match_ttr_write(addr,super,true,val,size) == TTR_NO_MATCH) && regs.mmu_enabled) {
#if MMU_DPAGECACHE
uae_u32 idx1 = ((addr & mmu_pagemaski) >> mmu_pageshift1m) | (super ? 1 : 0);
uae_u32 idx2 = idx1 & (MMUFASTCACHE_ENTRIES - 1);
if (atc_data_cache_write[idx2].log == idx1) {
addr = atc_data_cache_write[idx2].phys | (addr & mmu_pagemask);
+ mmu_cache_state = atc_data_cache_write[idx2].cache_state;
#if CACHE_HIT_COUNT
mmu_data_write_hit++;
#endif
}
#endif
}
- phys_put_byte(addr, val);
+ if (ci)
+ mmu_cache_state = CACHE_DISABLE_MMU;
+ x_phys_put_byte(addr, val);
}
#if MMUDEBUG > 2
write_log(_T("sfc030_get_long: FC = %i\n"),fc);
#endif
- return read_dcache030(addr, 2, regs.sfc);
+ return read_dcache030_lget(addr, regs.sfc);
}
static ALWAYS_INLINE uae_u16 sfc030c_get_word(uaecptr addr)
#if MMUDEBUG > 2
write_log(_T("sfc030_get_word: FC = %i\n"),fc);
#endif
- return read_dcache030(addr, 1, regs.sfc);
+ return read_dcache030_wget(addr, regs.sfc);
}
static ALWAYS_INLINE uae_u8 sfc030c_get_byte(uaecptr addr)
#if MMUDEBUG > 2
write_log(_T("sfc030_get_byte: FC = %i\n"),fc);
#endif
- return read_dcache030(addr, 0, regs.sfc);
+ return read_dcache030_bget(addr, regs.sfc);
}
static ALWAYS_INLINE void dfc030c_put_long(uaecptr addr, uae_u32 val)
#if MMUDEBUG > 2
write_log(_T("dfc030_put_long: %08X = %08X FC = %i\n"), addr, val, fc);
#endif
- write_dcache030(addr, val, 2, regs.dfc);
+ write_dcache030_lput(addr, val, regs.dfc);
}
static ALWAYS_INLINE void dfc030c_put_word(uaecptr addr, uae_u16 val)
#if MMUDEBUG > 2
write_log(_T("dfc030_put_word: %08X = %04X FC = %i\n"), addr, val, fc);
#endif
- write_dcache030(addr, val, 1, regs.dfc);
+ write_dcache030_wput(addr, val, regs.dfc);
}
static ALWAYS_INLINE void dfc030c_put_byte(uaecptr addr, uae_u8 val)
#if MMUDEBUG > 2
write_log(_T("dfc030_put_byte: %08X = %02X FC = %i\n"), addr, val, fc);
#endif
- write_dcache030(addr, val, 0, regs.dfc);
+ write_dcache030_bput(addr, val, regs.dfc);
}
uae_u32 REGPARAM3 get_disp_ea_020_mmu030c (uae_u32 base, int idx) REGPARAM;
STATIC_INLINE void put_byte_mmu030c_state (uaecptr addr, uae_u32 v)
{
ACCESS_CHECK_PUT
- write_dcache030_mmu(addr, v, 0);
+ write_data_030_bput(addr, v);
ACCESS_EXIT_PUT
}
STATIC_INLINE void put_lrmw_byte_mmu030c_state (uaecptr addr, uae_u32 v)
STATIC_INLINE void put_word_mmu030c_state (uaecptr addr, uae_u32 v)
{
ACCESS_CHECK_PUT
- write_dcache030_mmu(addr, v, 1);
+ write_data_030_wput(addr, v);
ACCESS_EXIT_PUT
}
STATIC_INLINE void put_lrmw_word_mmu030c_state (uaecptr addr, uae_u32 v)
STATIC_INLINE void put_long_mmu030c_state (uaecptr addr, uae_u32 v)
{
ACCESS_CHECK_PUT
- write_dcache030_mmu(addr, v, 2);
+ write_data_030_lput(addr, v);
ACCESS_EXIT_PUT
}
STATIC_INLINE void put_lrmw_long_mmu030c_state (uaecptr addr, uae_u32 v)
{
uae_u32 v;
ACCESS_CHECK_GET
- v = read_dcache030_mmu(addr, 0);
+ v = read_data_030_bget(addr);
ACCESS_EXIT_GET
return v;
}
{
uae_u32 v;
ACCESS_CHECK_GET
- v = read_dcache030_mmu(addr, 1);
+ v = read_data_030_wget(addr);
ACCESS_EXIT_GET
return v;
}
{
uae_u32 v;
ACCESS_CHECK_GET
- v = read_dcache030_mmu(addr, 2);
+ v = read_data_030_lget(addr);
ACCESS_EXIT_GET
return v;
}
STATIC_INLINE uae_u32 get_word_mmu030c (uaecptr addr)
{
- return read_dcache030_mmu(addr, 1);
+ return read_data_030_wget(addr);
}
STATIC_INLINE uae_u32 get_long_mmu030c (uaecptr addr)
{
- return read_dcache030_mmu(addr, 2);
+ return read_data_030_lget(addr);
}
STATIC_INLINE void put_word_mmu030c (uaecptr addr, uae_u32 v)
{
- write_dcache030_mmu(addr, v, 1);
+ write_data_030_wput(addr, v);
}
STATIC_INLINE void put_long_mmu030c (uaecptr addr, uae_u32 v)
{
- write_dcache030_mmu(addr, v, 2);
+ write_data_030_lput(addr, v);
}
extern void m68k_do_rts_mmu030c(void);
extern uae_u8* baseaddr[];
+#define CACHE_ENABLE_DATA 0x01
+#define CACHE_ENABLE_DATA_BURST 0x02
+#define CACHE_ENABLE_COPYBACK 0x20
+#define CACHE_ENABLE_INS 0x80
+#define CACHE_ENABLE_INS_BURST 0x40
+#define CACHE_ENABLE_BOTH (CACHE_ENABLE_DATA | CACHE_ENABLE_INS)
+#define CACHE_ENABLE_ALL (CACHE_ENABLE_BOTH | CACHE_ENABLE_INS_BURST | CACHE_ENABLE_DATA_BURST)
+#define CACHE_DISABLE_ALLOCATE 0x08
+#define CACHE_DISABLE_MMU 0x10
+extern uae_u8 ce_banktype[65536], ce_cachable[65536];
+
+#define ABFLAG_CACHE_SHIFT 16
enum
{
ABFLAG_UNK = 0, ABFLAG_RAM = 1, ABFLAG_ROM = 2, ABFLAG_ROMIN = 4, ABFLAG_IO = 8,
ABFLAG_NONE = 16, ABFLAG_SAFE = 32, ABFLAG_INDIRECT = 64, ABFLAG_NOALLOC = 128,
ABFLAG_RTG = 256, ABFLAG_THREADSAFE = 512, ABFLAG_DIRECTMAP = 1024, ABFLAG_ALLOCINDIRECT = 2048,
ABFLAG_CHIPRAM = 4096, ABFLAG_CIA = 8192, ABFLAG_PPCIOSPACE = 16384,
+ ABFLAG_CACHE_ENABLE_DATA = CACHE_ENABLE_DATA << ABFLAG_CACHE_SHIFT,
+ ABFLAG_CACHE_ENABLE_DATA_BURST = CACHE_ENABLE_DATA_BURST << ABFLAG_CACHE_SHIFT,
+ ABFLAG_CACHE_ENABLE_INS = CACHE_ENABLE_INS << ABFLAG_CACHE_SHIFT,
+ ABFLAG_CACHE_ENABLE_INS_BURST = CACHE_ENABLE_INS_BURST << ABFLAG_CACHE_SHIFT,
};
+
+#define ABFLAG_CACHE_ENABLE_BOTH (ABFLAG_CACHE_ENABLE_DATA | ABFLAG_CACHE_ENABLE_INS)
+#define ABFLAG_CACHE_ENABLE_ALL (ABFLAG_CACHE_ENABLE_BOTH | ABFLAG_CACHE_ENABLE_INS_BURST | ABFLAG_CACHE_ENABLE_DATA_BURST)
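+// A bank's cacheability lives in the upper bits of addrbank.flags; map_banks
+// shifts them down by ABFLAG_CACHE_SHIFT into the per-64K ce_cachable[] table,
+// so the stored values line up with the CACHE_ENABLE_* bits used by the CPU
+// cache emulation.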
+
typedef struct {
/* These ones should be self-explanatory... */
mem_get_func lget, wget, bget;
#define CE_MEMBANK_CIA 3
#define CE_MEMBANK_FAST16 4
//#define CE_MEMBANK_FAST16_EXTRA_ACCURACY 5
-#define CACHE_ENABLE_DATA 0x01
-#define CACHE_ENABLE_DATA_BURST 0x02
-#define CACHE_ENABLE_INS 0x80
-#define CACHE_ENABLE_INS_BURST 0x40
-#define CACHE_ENABLE_BOTH (CACHE_ENABLE_DATA | CACHE_ENABLE_INS)
-#define CACHE_ENABLE_ALL (CACHE_ENABLE_BOTH | CACHE_ENABLE_INS_BURST | CACHE_ENABLE_DATA_BURST)
-#define CACHE_DISABLE_MMU 0x10
-extern uae_u8 ce_banktype[65536], ce_cachable[65536];
#define MEMORY_LGET(name) \
static uae_u32 REGPARAM3 name ## _lget (uaecptr) REGPARAM; \
{
uae_u32 data[CACHELINES040][4];
bool dirty[CACHELINES040][4];
+ bool gdirty[CACHELINES040];
bool valid[CACHELINES040];
uae_u32 tag[CACHELINES040];
};
uae_u32 mem_access_delay_longi_read_ce020 (uaecptr addr);
uae_u32 mem_access_delay_wordi_read_ce020 (uaecptr addr);
+void mem_access_delay_long_write_c040 (uaecptr addr, uae_u32 v);
+void mem_access_delay_word_write_c040 (uaecptr addr, uae_u32 v);
+void mem_access_delay_byte_write_c040 (uaecptr addr, uae_u32 v);
+uae_u32 mem_access_delay_byte_read_c040 (uaecptr addr);
+uae_u32 mem_access_delay_word_read_c040 (uaecptr addr);
+uae_u32 mem_access_delay_long_read_c040 (uaecptr addr);
+uae_u32 mem_access_delay_longi_read_c040 (uaecptr addr);
+
extern uae_u32(REGPARAM3 *x_cp_get_disp_ea_020)(uae_u32 base, int idx) REGPARAM;
/* direct (regs.pc_p) access */
}
}
-extern void write_dcache030(uaecptr, uae_u32, uae_u32, uae_u32);
-extern uae_u32 read_dcache030(uaecptr, uae_u32, uae_u32);
-
-extern void write_dcache030_mmu(uaecptr, uae_u32, uae_u32);
-extern uae_u32 read_dcache030_mmu(uaecptr, uae_u32);
+extern uae_u32(*read_data_030_bget)(uaecptr);
+extern uae_u32(*read_data_030_wget)(uaecptr);
+extern uae_u32(*read_data_030_lget)(uaecptr);
+extern void(*write_data_030_bput)(uaecptr,uae_u32);
+extern void(*write_data_030_wput)(uaecptr,uae_u32);
+extern void(*write_data_030_lput)(uaecptr,uae_u32);
+
+extern void write_dcache030_bput(uaecptr, uae_u32, uae_u32);
+extern void write_dcache030_wput(uaecptr, uae_u32, uae_u32);
+extern void write_dcache030_lput(uaecptr, uae_u32, uae_u32);
+extern uae_u32 read_dcache030_bget(uaecptr, uae_u32);
+extern uae_u32 read_dcache030_wget(uaecptr, uae_u32);
+extern uae_u32 read_dcache030_lget(uaecptr, uae_u32);
+
+extern void write_dcache030_mmu_bput(uaecptr, uae_u32);
+extern void write_dcache030_mmu_wput(uaecptr, uae_u32);
+extern void write_dcache030_mmu_lput(uaecptr, uae_u32);
+extern uae_u32 read_dcache030_mmu_bget(uaecptr);
+extern uae_u32 read_dcache030_mmu_wget(uaecptr);
+extern uae_u32 read_dcache030_mmu_lget(uaecptr);
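+// The former read_dcache030(addr, size, fc)/write_dcache030() entry points are
+// split into per-size variants, and the read_data_030_*/write_data_030_*
+// function pointers let the MMU, non-MMU and no-data-cache configurations each
+// install their own handlers.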
extern void write_dcache030_lrmw_mmu(uaecptr, uae_u32, uae_u32);
extern uae_u32 read_dcache030_lrmw_mmu(uaecptr, uae_u32);
extern void m68k_go (int);
extern void m68k_dumpstate (uaecptr *);
extern void m68k_dumpstate (uaecptr, uaecptr *);
-extern void m68k_dumpcache (void);
+extern void m68k_dumpcache (bool);
extern int getDivu68kCycles (uae_u32 dividend, uae_u16 divisor);
extern int getDivs68kCycles (uae_s32 dividend, uae_s16 divisor);
extern void divbyzero_special (bool issigned, uae_s32 dst);
#define flush_icache_hard(int) do {} while (0)
#endif
bool check_prefs_changed_comp (bool);
-extern void flush_dcache (uaecptr, int);
extern int movec_illg (int regno);
extern uae_u32 val_move2c (int regno);
bool immediate_blits;
int waiting_blits;
+ double blitter_speed_throttle;
unsigned int chipset_mask;
bool keyboard_connected;
bool ntscmode;
bool int_no_unimplemented;
bool fpu_no_unimplemented;
bool address_space_24;
+ bool cpu_data_cache;
bool picasso96_nocustom;
int picasso96_modeflags;
int cpu_model_fallback;
error_log(_T("Cycle-exact mode requires at least Disabled but emulated sound setting."));
}
+ if (p->cpu_data_cache && (!p->cpu_compatible || p->cachesize || p->cpu_model < 68030)) {
+ p->cpu_data_cache = false;
+ error_log(_T("Data cache emulation requires More compatible, is not JIT compatible, 68030+ only."));
+ }
+ if (p->cpu_data_cache && (currprefs.uaeboard != 3 && uae_boot_rom_type > 0)) {
+ p->cpu_data_cache = false;
+ error_log(_T("Data cache emulation requires Indirect UAE Boot ROM."));
+ }
+
#if 0
if (p->cachesize && p->cpuboard_type && !cpuboard_jitdirectompatible(p) && !p->comptrustbyte) {
error_log(_T("JIT direct is not compatible with emulated Blizzard accelerator boards."));
chipmem_lput, chipmem_wput, chipmem_bput,
chipmem_xlate, chipmem_check, NULL, _T("chip"), _T("Chip memory"),
chipmem_lget, chipmem_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CHIPRAM, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CHIPRAM | ABFLAG_CACHE_ENABLE_BOTH, 0, 0
};
addrbank chipmem_dummy_bank = {
chipmem_lput_ce2, chipmem_wput_ce2, chipmem_bput_ce2,
chipmem_xlate, chipmem_check, NULL, NULL, _T("Chip memory (68020 'ce')"),
chipmem_lget_ce2, chipmem_wget_ce2,
- ABFLAG_RAM | ABFLAG_CHIPRAM, S_READ, S_WRITE
+ ABFLAG_RAM | ABFLAG_CHIPRAM | ABFLAG_CACHE_ENABLE_BOTH, S_READ, S_WRITE
};
#endif
bogomem_lput, bogomem_wput, bogomem_bput,
bogomem_xlate, bogomem_check, NULL, _T("bogo"), _T("Slow memory"),
bogomem_lget, bogomem_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_BOTH, 0, 0
};
addrbank cardmem_bank = {
mem25bit_lput, mem25bit_wput, mem25bit_bput,
mem25bit_xlate, mem25bit_check, NULL, _T("25bitmem"), _T("25bit memory"),
mem25bit_lget, mem25bit_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
};
addrbank a3000lmem_bank = {
a3000lmem_lput, a3000lmem_wput, a3000lmem_bput,
a3000lmem_xlate, a3000lmem_check, NULL, _T("ramsey_low"), _T("RAMSEY memory (low)"),
a3000lmem_lget, a3000lmem_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
};
addrbank a3000hmem_bank = {
a3000hmem_lput, a3000hmem_wput, a3000hmem_bput,
a3000hmem_xlate, a3000hmem_check, NULL, _T("ramsey_high"), _T("RAMSEY memory (high)"),
a3000hmem_lget, a3000hmem_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
};
addrbank kickmem_bank = {
kickmem_lput, kickmem_wput, kickmem_bput,
kickmem_xlate, kickmem_check, NULL, _T("kick"), _T("Kickstart ROM"),
kickmem_lget, kickmem_wget,
- ABFLAG_ROM | ABFLAG_THREADSAFE, 0, S_WRITE
+ ABFLAG_ROM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, S_WRITE
};
addrbank kickram_bank = {
kickmem2_lput, kickmem2_wput, kickmem2_bput,
kickmem_xlate, kickmem_check, NULL, NULL, _T("Kickstart Shadow RAM"),
kickmem_lget, kickmem_wget,
- ABFLAG_UNK | ABFLAG_SAFE, 0, S_WRITE
+ ABFLAG_UNK | ABFLAG_SAFE | ABFLAG_CACHE_ENABLE_ALL, 0, S_WRITE
};
addrbank extendedkickmem_bank = {
extendedkickmem_lput, extendedkickmem_wput, extendedkickmem_bput,
extendedkickmem_xlate, extendedkickmem_check, NULL, NULL, _T("Extended Kickstart ROM"),
extendedkickmem_lget, extendedkickmem_wget,
- ABFLAG_ROM | ABFLAG_THREADSAFE, 0, S_WRITE
+ ABFLAG_ROM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, S_WRITE
};
addrbank extendedkickmem2_bank = {
extendedkickmem2_lget, extendedkickmem2_wget, extendedkickmem2_bget,
extendedkickmem2_lput, extendedkickmem2_wput, extendedkickmem2_bput,
extendedkickmem2_xlate, extendedkickmem2_check, NULL, _T("rom_a8"), _T("Extended 2nd Kickstart ROM"),
extendedkickmem2_lget, extendedkickmem2_wget,
- ABFLAG_ROM | ABFLAG_THREADSAFE, 0, S_WRITE
+ ABFLAG_ROM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, S_WRITE
};
addrbank fakeuaebootrom_bank = {
fakeuaebootrom_lget, fakeuaebootrom_wget, mem25bit_bget,
custmem1_lput, custmem1_wput, custmem1_bput,
custmem1_xlate, custmem1_check, NULL, _T("custmem1"), _T("Non-autoconfig RAM #1"),
custmem1_lget, custmem1_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
};
addrbank custmem2_bank = {
custmem2_lget, custmem2_wget, custmem2_bget,
custmem2_lput, custmem2_wput, custmem2_bput,
custmem2_xlate, custmem2_check, NULL, _T("custmem2"), _T("Non-autoconfig RAM #2"),
custmem2_lget, custmem2_wget,
- ABFLAG_RAM | ABFLAG_THREADSAFE, 0, 0
+ ABFLAG_RAM | ABFLAG_THREADSAFE | ABFLAG_CACHE_ENABLE_ALL, 0, 0
};
#define fkickmem_size ROM_SIZE_512
memset (ce_banktype, CE_MEMBANK_FAST32, sizeof ce_banktype);
}
- memset(ce_cachable, CACHE_ENABLE_INS, sizeof ce_cachable);
- memset(ce_cachable + (0x00f80000 >> 16), CACHE_ENABLE_BOTH, 524288 >> 16);
- memset(ce_cachable + (0x00c00000 >> 16), CACHE_ENABLE_BOTH, currprefs.bogomem_size >> 16);
- for (int i = 0; i < MAX_RAM_BOARDS; i++) {
- if (fastmem_bank[i].start != 0xffffffff)
- memset(ce_cachable + (fastmem_bank[i].start >> 16), CACHE_ENABLE_BOTH, currprefs.fastmem[i].size >> 16);
- if (z3fastmem_bank[i].start != 0xffffffff)
- memset(ce_cachable + (z3fastmem_bank[i].start >> 16), CACHE_ENABLE_ALL, currprefs.z3fastmem[i].size >> 16);
- }
- memset(ce_cachable + (a3000hmem_bank.start >> 16), CACHE_ENABLE_ALL, currprefs.mbresmem_high_size >> 16);
- memset(ce_cachable + (a3000lmem_bank.start >> 16), CACHE_ENABLE_ALL, currprefs.mbresmem_low_size >> 16);
- memset(ce_cachable + (mem25bit_bank.start >> 16), CACHE_ENABLE_ALL, currprefs.mem25bit_size >> 16);
-
- if (uae_boot_rom_type > 0) {
- for (int i = 0; i < sizeof(ce_cachable); i++) {
- ce_cachable[i] &= ~(CACHE_ENABLE_DATA | CACHE_ENABLE_DATA_BURST);
- }
- }
-
addrbank *ab = &get_mem_bank(0);
if (ab && (ab->flags & ABFLAG_CHIPRAM)) {
for (i = 0; i < (0x200000 >> 16); i++) {
b = &get_mem_bank (i << 16);
if (b && !(b->flags & ABFLAG_CIA)) {
ce_banktype[i] = CE_MEMBANK_FAST32;
- ce_cachable[i] = 1;
}
}
// CD32 ROM is 16-bit
if (mem_hardreset > 2)
memory_init ();
+ memset(ce_cachable, CACHE_ENABLE_INS, sizeof ce_cachable);
+
be_cnt = be_recursive = 0;
currprefs.chipmem_size = changed_prefs.chipmem_size;
currprefs.bogomem_size = changed_prefs.bogomem_size;
#endif
}
put_mem_bank (bnr << 16, bank, realstart << 16);
+ ce_cachable[bnr] = bank->flags >> ABFLAG_CACHE_SHIFT;
#ifdef WITH_THREADED_CPU
if (currprefs.cpu_thread) {
if (orig_bank)
#endif
}
put_mem_bank ((bnr + hioffs) << 16, bank, realstart << 16);
+ ce_cachable[bnr + hioffs] = bank->flags >> ABFLAG_CACHE_SHIFT;
#ifdef WITH_THREADED_CPU
if (currprefs.cpu_thread) {
if (orig_bank)
#define DEBUG_CD32CDTVIO 0
#define EXCEPTION3_DEBUGGER 0
#define CPUTRACE_DEBUG 0
+
#define VALIDATE_68030_DATACACHE 0
+#define VALIDATE_68040_DATACACHE 0
+#define DISABLE_68040_COPYBACK 0
#define MORE_ACCURATE_68020_PIPELINE 1
bool m68k_interrupt_delay;
static bool m68k_reset_delay;
-static int cachesets04060, cachesets04060minus1;
+static int cacheisets04060, cacheisets04060mask, cacheitag04060mask;
+static int cachedsets04060, cachedsets04060mask, cachedtag04060mask;
static int cpu_prefs_changed_flag;
static struct cache020 caches020[CACHELINES020];
static struct cache030 icaches030[CACHELINES030];
static struct cache030 dcaches030[CACHELINES030];
-static int icachelinecnt, dcachelinecnt;
+static int icachelinecnt, icachehalfline;
+static int dcachelinecnt;
static struct cache040 icaches040[CACHESETS060];
static struct cache040 dcaches040[CACHESETS060];
+static int cache_lastline;
static int fallback_cpu_model, fallback_mmu_model, fallback_fpu_model;
static bool fallback_cpu_compatible, fallback_cpu_address_space_24;
do_cycles_ce020 (cycles);
}
-static uae_u8 dcache030_check_nommu(uaecptr addr, bool write, uae_u32 size)
+static uae_u8 dcache_check_nommu(uaecptr addr, bool write, uae_u32 size)
{
return ce_cachable[addr >> 16];
}
static uae_u32 (*icache_fetch)(uaecptr);
-static uae_u32 (*dcache030_lget)(uaecptr);
-static uae_u32 (*dcache030_wget)(uaecptr);
-static uae_u32 (*dcache030_bget)(uaecptr);
-static uae_u8 (*dcache030_check)(uaecptr, bool, uae_u32);
-static void (*dcache030_lput)(uaecptr, uae_u32);
-static void (*dcache030_wput)(uaecptr, uae_u32);
-static void (*dcache030_bput)(uaecptr, uae_u32);
+static uae_u32 (*dcache_lget)(uaecptr);
+static uae_u32 (*dcache_wget)(uaecptr);
+static uae_u32 (*dcache_bget)(uaecptr);
+static uae_u8 (*dcache_check)(uaecptr, bool, uae_u32);
+static void (*dcache_lput)(uaecptr, uae_u32);
+static void (*dcache_wput)(uaecptr, uae_u32);
+static void (*dcache_bput)(uaecptr, uae_u32);
+
+uae_u32(*read_data_030_bget)(uaecptr);
+uae_u32(*read_data_030_wget)(uaecptr);
+uae_u32(*read_data_030_lget)(uaecptr);
+void(*write_data_030_bput)(uaecptr,uae_u32);
+void(*write_data_030_wput)(uaecptr,uae_u32);
+void(*write_data_030_lput)(uaecptr,uae_u32);
static void set_x_ifetches(void)
{
x_get_ibyte = NULL;
x_next_iword = next_iword_cache040;
x_next_ilong = next_ilong_cache040;
- x_put_long = put_long_cache_040;
- x_put_word = put_word_cache_040;
- x_put_byte = put_byte_cache_040;
- x_get_long = get_long_cache_040;
- x_get_word = get_word_cache_040;
- x_get_byte = get_byte_cache_040;
+ if (currprefs.cpu_data_cache) {
+ x_put_long = put_long_cache_040;
+ x_put_word = put_word_cache_040;
+ x_put_byte = put_byte_cache_040;
+ x_get_long = get_long_cache_040;
+ x_get_word = get_word_cache_040;
+ x_get_byte = get_byte_cache_040;
+ } else {
+ x_phys_get_byte = mem_access_delay_byte_read_c040;
+ x_phys_get_word = mem_access_delay_word_read_c040;
+ x_phys_get_long = mem_access_delay_long_read_c040;
+ x_phys_put_byte = mem_access_delay_byte_write_c040;
+ x_phys_put_word = mem_access_delay_word_write_c040;
+ x_phys_put_long = mem_access_delay_long_write_c040;
+ }
x_do_cycles = do_cycles;
x_do_cycles_pre = do_cycles;
x_do_cycles_post = do_cycles_post;
x_get_ibyte = NULL;
x_next_iword = next_iword_cache040;
x_next_ilong = next_ilong_cache040;
- x_put_long = put_long_cache_040;
- x_put_word = put_word_cache_040;
- x_put_byte = put_byte_cache_040;
- x_get_long = get_long_cache_040;
- x_get_word = get_word_cache_040;
- x_get_byte = get_byte_cache_040;
+ if (currprefs.cpu_data_cache) {
+ x_put_long = put_long_cache_040;
+ x_put_word = put_word_cache_040;
+ x_put_byte = put_byte_cache_040;
+ x_get_long = get_long_cache_040;
+ x_get_word = get_word_cache_040;
+ x_get_byte = get_byte_cache_040;
+ } else {
+ x_put_long = put_long;
+ x_put_word = put_word;
+ x_put_byte = put_byte;
+ x_get_long = get_long;
+ x_get_word = get_word;
+ x_get_byte = get_byte;
+ }
x_do_cycles = do_cycles;
x_do_cycles_pre = do_cycles;
x_do_cycles_post = do_cycles_post;
x_get_ibyte = NULL;
x_next_iword = next_iword_cache040;
x_next_ilong = next_ilong_cache040;
- x_put_long = put_long_cache_040;
- x_put_word = put_word_cache_040;
- x_put_byte = put_byte_cache_040;
- x_get_long = get_long_cache_040;
- x_get_word = get_word_cache_040;
- x_get_byte = get_byte_cache_040;
+ if (currprefs.cpu_data_cache) {
+ x_put_long = put_long_cache_040;
+ x_put_word = put_word_cache_040;
+ x_put_byte = put_byte_cache_040;
+ x_get_long = get_long_cache_040;
+ x_get_word = get_word_cache_040;
+ x_get_byte = get_byte_cache_040;
+ } else {
+ x_phys_get_byte = mem_access_delay_byte_read_c040;
+ x_phys_get_word = mem_access_delay_word_read_c040;
+ x_phys_get_long = mem_access_delay_long_read_c040;
+ x_phys_put_byte = mem_access_delay_byte_write_c040;
+ x_phys_put_word = mem_access_delay_word_write_c040;
+ x_phys_put_long = mem_access_delay_long_write_c040;
+ }
x_do_cycles = do_cycles_ce020;
x_do_cycles_pre = do_cycles_ce020;
x_do_cycles_post = do_cycles_ce020_post;
mmu_set_funcs();
mmu030_set_funcs();
+ dcache_lput = put_long;
+ dcache_wput = put_word;
+ dcache_bput = put_byte;
+ dcache_lget = get_long;
+ dcache_wget = get_word;
+ dcache_bget = get_byte;
+ dcache_check = dcache_check_nommu;
+
icache_fetch = get_longi;
- dcache030_lput = put_long;
- dcache030_wput = put_word;
- dcache030_bput = put_byte;
- dcache030_lget = get_long;
- dcache030_wget = get_word;
- dcache030_bget = get_byte;
- dcache030_check = dcache030_check_nommu;
if (currprefs.cpu_cycle_exact) {
icache_fetch = mem_access_delay_longi_read_ce020;
}
+ if (currprefs.cpu_model >= 68040 && currprefs.cpu_memory_cycle_exact) {
+ icache_fetch = mem_access_delay_longi_read_c040;
+ dcache_bget = mem_access_delay_byte_read_c040;
+ dcache_wget = mem_access_delay_word_read_c040;
+ dcache_lget = mem_access_delay_long_read_c040;
+ dcache_bput = mem_access_delay_byte_write_c040;
+ dcache_wput = mem_access_delay_word_write_c040;
+ dcache_lput = mem_access_delay_long_write_c040;
+ }
+
if (currprefs.cpu_model == 68030) {
+
+ if (currprefs.cpu_data_cache) {
+ read_data_030_bget = read_dcache030_mmu_bget;
+ read_data_030_wget = read_dcache030_mmu_wget;
+ read_data_030_lget = read_dcache030_mmu_lget;
+ write_data_030_bput = write_dcache030_mmu_bput;
+ write_data_030_wput = write_dcache030_mmu_wput;
+ write_data_030_lput = write_dcache030_mmu_lput;
+ } else {
+ read_data_030_bget = dcache_bget;
+ read_data_030_wget = dcache_wget;
+ read_data_030_lget = dcache_lget;
+ write_data_030_bput = dcache_bput;
+ write_data_030_wput = dcache_wput;
+ write_data_030_lput = dcache_lput;
+ }
if (currprefs.mmu_model) {
if (currprefs.cpu_compatible) {
icache_fetch = uae_mmu030_get_ilong_fc;
- dcache030_lput = uae_mmu030_put_long_fc;
- dcache030_wput = uae_mmu030_put_word_fc;
- dcache030_bput = uae_mmu030_put_byte_fc;
- dcache030_lget = uae_mmu030_get_long_fc;
- dcache030_wget = uae_mmu030_get_word_fc;
- dcache030_bget = uae_mmu030_get_byte_fc;
- dcache030_check = uae_mmu030_check_fc;
+ dcache_lput = uae_mmu030_put_long_fc;
+ dcache_wput = uae_mmu030_put_word_fc;
+ dcache_bput = uae_mmu030_put_byte_fc;
+ dcache_lget = uae_mmu030_get_long_fc;
+ dcache_wget = uae_mmu030_get_word_fc;
+ dcache_bget = uae_mmu030_get_byte_fc;
+ dcache_check = uae_mmu030_check_fc;
} else {
icache_fetch = uae_mmu030_get_ilong;
- dcache030_lput = uae_mmu030_put_long;
- dcache030_wput = uae_mmu030_put_word;
- dcache030_bput = uae_mmu030_put_byte;
- dcache030_lget = uae_mmu030_get_long;
- dcache030_wget = uae_mmu030_get_word;
- dcache030_bget = uae_mmu030_get_byte;
+ dcache_lput = uae_mmu030_put_long;
+ dcache_wput = uae_mmu030_put_word;
+ dcache_bput = uae_mmu030_put_byte;
+ dcache_lget = uae_mmu030_get_long;
+ dcache_wget = uae_mmu030_get_word;
+ dcache_bget = uae_mmu030_get_byte;
+ }
+ if (currprefs.cpu_data_cache) {
+ read_data_030_bget = read_dcache030_mmu_bget;
+ read_data_030_wget = read_dcache030_mmu_wget;
+ read_data_030_lget = read_dcache030_mmu_lget;
+ write_data_030_bput = write_dcache030_mmu_bput;
+ write_data_030_wput = write_dcache030_mmu_wput;
+ write_data_030_lput = write_dcache030_mmu_lput;
+ } else {
+ if (currprefs.cpu_compatible) {
+ read_data_030_bget = uae_mmu030_get_byte_fc;
+ read_data_030_wget = uae_mmu030_get_word_fc;
+ read_data_030_lget = uae_mmu030_get_long_fc;
+ write_data_030_bput = uae_mmu030_put_byte_fc;
+ write_data_030_wput = uae_mmu030_put_word_fc;
+ write_data_030_lput = uae_mmu030_put_long_fc;
+ } else {
+ read_data_030_bget = uae_mmu030_get_byte;
+ read_data_030_wget = uae_mmu030_get_word;
+ read_data_030_lget = uae_mmu030_get_long;
+ write_data_030_bput = uae_mmu030_put_byte;
+ write_data_030_wput = uae_mmu030_put_word;
+ write_data_030_lput = uae_mmu030_put_long;
+ }
}
} else if (currprefs.cpu_memory_cycle_exact) {
icache_fetch = mem_access_delay_longi_read_ce020;
- dcache030_lput = mem_access_delay_long_write_ce020;
- dcache030_wput = mem_access_delay_word_write_ce020;
- dcache030_bput = mem_access_delay_byte_write_ce020;
- dcache030_lget = mem_access_delay_long_read_ce020;
- dcache030_wget = mem_access_delay_word_read_ce020;
- dcache030_bget = mem_access_delay_byte_read_ce020;
+ dcache_lput = mem_access_delay_long_write_ce020;
+ dcache_wput = mem_access_delay_word_write_ce020;
+ dcache_bput = mem_access_delay_byte_write_ce020;
+ dcache_lget = mem_access_delay_long_read_ce020;
+ dcache_wget = mem_access_delay_word_read_ce020;
+ dcache_bget = mem_access_delay_byte_read_ce020;
}
}
}
return is_cpu_tracer ();
}
+static void invalidate_cpu_data_caches(void)
+{
+ if (currprefs.cpu_model == 68030) {
+ for (int i = 0; i < CACHELINES030; i++) {
+ dcaches030[i].valid[0] = 0;
+ dcaches030[i].valid[1] = 0;
+ dcaches030[i].valid[2] = 0;
+ dcaches030[i].valid[3] = 0;
+ }
+ } else if (currprefs.cpu_model >= 68040) {
+ dcachelinecnt = 0;
+ for (int i = 0; i < CACHESETS060; i++) {
+ for (int j = 0; j < CACHELINES040; j++) {
+ dcaches040[i].valid[j] = false;
+ }
+ }
+ }
+}
+
void flush_cpu_caches(bool force)
{
bool doflush = currprefs.cpu_compatible || currprefs.cpu_memory_cycle_exact;
} else if (currprefs.cpu_model >= 68040) {
mmu_flush_cache();
icachelinecnt = 0;
- dcachelinecnt = 0;
- if (doflush) {
+ icachehalfline = 0;
+ if (doflush || force) {
for (int i = 0; i < CACHESETS060; i++) {
- icaches040[i].valid[0] = 0;
- icaches040[i].valid[1] = 0;
- icaches040[i].valid[2] = 0;
- icaches040[i].valid[3] = 0;
+ for (int j = 0; j < CACHELINES040; j++) {
+ icaches040[i].valid[j] = false;
+ }
}
}
}
}
+#if VALIDATE_68040_DATACACHE > 1
+static void validate_dcache040(void)
+{
+ for (int i = 0; i < cachedsets04060; i++) {
+ struct cache040 *c = &dcaches040[i];
+ for (int j = 0; j < CACHELINES040; j++) {
+ if (c->valid[j]) {
+ uae_u32 addr = (c->tag[j] & cachedtag04060mask) | (i << 4);
+ if (addr < 0x200000 || (addr >= 0xd80000 && addr < 0xe00000) || (addr >= 0xe80000 && addr < 0xf00000) || (addr >= 0xa00000 && addr < 0xc00000)) {
+ write_log(_T("Chip RAM or IO address cached! %08x\n"), addr);
+ }
+ for (int k = 0; k < 4; k++) {
+ if (!c->dirty[j][k]) {
+ uae_u32 v = get_long(addr + k * 4);
+ if (v != c->data[j][k]) {
+ write_log(_T("Address %08x data cache mismatch %08x != %08x\n"), addr, v, c->data[j][k]);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+#endif
+
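+// Write back ("push") the dirty longwords of a data cache line through dcache_lput
+// and optionally invalidate the line afterwards. On the 68060 a copyback push of a
+// dirty line writes back all four longwords, not only the dirty ones.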
+static void dcache040_push_line(int index, int line, bool writethrough, bool invalidate)
+{
+ struct cache040 *c = &dcaches040[index];
+#if VALIDATE_68040_DATACACHE
+ if (!c->valid[line]) {
+ write_log("dcache040_push_line pushing invalid line!\n");
+ }
+#endif
+ if (c->gdirty[line]) {
+ uae_u32 addr = (c->tag[line] & cachedtag04060mask) | (index << 4);
+ for (int i = 0; i < 4; i++) {
+ if (c->dirty[line][i] || (!writethrough && currprefs.cpu_model == 68060)) {
+ dcache_lput(addr + i * 4, c->data[line][i]);
+ c->dirty[line][i] = false;
+ }
+ }
+ c->gdirty[line] = false;
+ }
+ if (invalidate)
+ c->valid[line] = false;
+
+#if VALIDATE_68040_DATACACHE > 1
+ validate_dcache040();
+#endif
+}
+
void flush_cpu_caches_040(uae_u16 opcode)
{
+ // CINV/CPUSH cache field: bit 0 (01) = data cache, bit 1 (10) = instruction cache
int cache = (opcode >> 6) & 3;
int scope = (opcode >> 3) & 3;
int areg = opcode & 7;
uaecptr addr = m68k_areg(regs, areg);
bool push = (opcode & 0x20) != 0;
+ bool pushinv = (regs.cacr & 0x10000000) == 0; // 68060 DPI (disable CPUSH invalidation, CACR bit 28)
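+ // scope field: 1 = line, 2 = page, 3 = all; push (CPUSH) writes dirty data back
+ // to memory first, plain CINV only invalidates.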
- regs.prefetch020addr = 0xffffffff;
+#if VALIDATE_68040_DATACACHE
+ write_log(_T("push %d %d %d %08x %d %d\n"), cache, scope, areg, addr, push, pushinv);
+#endif
+
+ if (cache & 2)
+ regs.prefetch020addr = 0xffffffff;
for (int k = 0; k < 2; k++) {
if (cache & (1 << k)) {
if (scope == 3) {
// all
- flush_cpu_caches(true);
+ if (!k) {
+ // data
+ for (int i = 0; i < cachedsets04060; i++) {
+ struct cache040 *c = &dcaches040[i];
+ for (int j = 0; j < CACHELINES040; j++) {
+ if (c->valid[j]) {
+ if (push) {
+ dcache040_push_line(i, j, false, pushinv);
+ } else {
+ c->valid[j] = false;
+ }
+ }
+ }
+ }
+ dcachelinecnt = 0;
+ } else {
+ // instruction
+ flush_cpu_caches(true);
+ }
} else {
uae_u32 pagesize;
if (scope == 2) {
}
addr &= ~(pagesize - 1);
for (int j = 0; j < pagesize; j += 16, addr += 16) {
- int index = (addr >> 4) & (cachesets04060minus1);
- uae_u32 tag = addr & ~((cachesets04060 << 4) - 1);
- struct cache040 *c = k ? &icaches040[index] : &dcaches040[index];
+ int index;
+ uae_u32 tag;
+ uae_u32 tagmask;
+ struct cache040 *c;
+ if (k) {
+ tagmask = cacheitag04060mask;
+ index = (addr >> 4) & cacheisets04060mask;
+ c = &icaches040[index];
+ } else {
+ tagmask = cachedtag04060mask;
+ index = (addr >> 4) & cachedsets04060mask;
+ c = &dcaches040[index];
+ }
+ tag = addr & tagmask;
for (int i = 0; i < CACHELINES040; i++) {
if (c->valid[i] && c->tag[i] == tag) {
- // data cache not yet in use
- for (int ii = 0; ii < 4; ii++) {
- if (c->dirty[i][ii]) {
- c->dirty[i][ii] = false;
- if (push)
- put_long(addr + ii * 4, c->data[i][ii]);
- }
+ if (push) {
+ dcache040_push_line(index, i, false, pushinv);
+ } else {
+ c->valid[i] = false;
}
- c->valid[i] = false;
}
}
}
}
}
}
+ mmu_flush_cache();
}
void set_cpu_caches (bool flush)
{
regs.prefetch020addr = 0xffffffff;
regs.cacheholdingaddr020 = 0xffffffff;
+ cache_default_data &= ~CACHE_DISABLE_ALLOCATE;
+
+ // 68060 FIC 1/2 instruction cache
+ cacheisets04060 = currprefs.cpu_model == 68060 && !(regs.cacr & 0x00002000) ? CACHESETS060 : CACHESETS040;
+ cacheisets04060mask = cacheisets04060 - 1;
+ cacheitag04060mask = ~((cacheisets04060 << 4) - 1);
+ // 68060 FOC 1/2 data cache
+ cachedsets04060 = currprefs.cpu_model == 68060 && !(regs.cacr & 0x08000000) ? CACHESETS060 : CACHESETS040;
+ cachedsets04060mask = cachedsets04060 - 1;
+ cachedtag04060mask = ~((cachedsets04060 << 4) - 1);
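+ // Worked example (assuming CACHESETS040 == 64, i.e. 16-byte lines, 64 sets per way):
+ // cachedsets04060mask = 0x3f, cachedtag04060mask = 0xfffffc00, so for address
+ // 0x00f81234: index = (0x00f81234 >> 4) & 0x3f = 0x23, tag = 0x00f81000.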
+ cache_lastline = 0;
#ifdef JIT
if (currprefs.cachesize) {
mmu_set_tc(tcr);
mmu_set_super(regs.s != 0);
mmu_tt_modified();
- mmu_dump_tables();
} else if (currprefs.mmu_model == 68030) {
mmu030_reset(-1);
mmu030_flush_atc_all();
if (currprefs.cpu_compatible != changed_prefs.cpu_compatible) {
currprefs.cpu_compatible = changed_prefs.cpu_compatible;
flush_cpu_caches(true);
+ invalidate_cpu_data_caches();
+ }
+ if (currprefs.cpu_data_cache != changed_prefs.cpu_data_cache) {
+ currprefs.cpu_data_cache = changed_prefs.cpu_data_cache;
+ invalidate_cpu_data_caches();
}
currprefs.address_space_24 = changed_prefs.address_space_24;
currprefs.cpu_cycle_exact = changed_prefs.cpu_cycle_exact;
|| currprefs.fpu_model != changed_prefs.fpu_model
|| currprefs.mmu_model != changed_prefs.mmu_model
|| currprefs.mmu_ec != changed_prefs.mmu_ec
+ || currprefs.cpu_data_cache != changed_prefs.cpu_data_cache
|| currprefs.int_no_unimplemented != changed_prefs.int_no_unimplemented
|| currprefs.fpu_no_unimplemented != changed_prefs.fpu_no_unimplemented
|| currprefs.cpu_compatible != changed_prefs.cpu_compatible
interrupt = nr >= 24 && nr < 24 + 8;
+ // exception vector fetches and exception stack frame accesses
+ // don't allocate new cache lines
+ cache_default_data |= CACHE_DISABLE_ALLOCATE;
+
exception_debug (nr);
MakeSR ();
exception3_read(regs.ir, newpc);
return;
}
+
+ cache_default_data &= ~CACHE_DISABLE_ALLOCATE;
+
m68k_setpci (newpc);
fill_prefetch ();
exception_check_trace (nr);
int interrupt;
int vector_nr = nr;
+ cache_default_data |= CACHE_DISABLE_ALLOCATE;
+
interrupt = nr >= 24 && nr < 24 + 8;
if (interrupt && currprefs.cpu_model <= 68010)
return;
}
m68k_setpc (newpc);
+ cache_default_data &= ~CACHE_DISABLE_ALLOCATE;
#ifdef JIT
set_special (SPCFLAG_END_COMPILE);
#endif
mmufixup[1].reg = -1;
mmu030_cache_state = CACHE_ENABLE_ALL;
mmu_cache_state = CACHE_ENABLE_ALL;
- cachesets04060 = currprefs.cpu_model == 68060 ? CACHESETS060 : CACHESETS040;
- cachesets04060minus1 = cachesets04060 - 1;
+ if (currprefs.cpu_model >= 68040) {
+ set_cpu_caches(false);
+ }
if (currprefs.mmu_model >= 68040) {
mmu_reset ();
mmu_set_tc (regs.tcr);
{
m68k_dumpstate (m68k_getpc (), nextpc);
}
-void m68k_dumpcache (void)
+void m68k_dumpcache (bool dc)
{
if (!currprefs.cpu_compatible)
return;
console_out_f (_T("\n"));
}
} else if (currprefs.cpu_model == 68030) {
- for (int j = 0; j < 2; j++) {
- console_out_f (_T("%s\n"), j == 0 ? _T("Instruction") : _T("Data"));
- for (int i = 0; i < CACHELINES030; i++) {
- struct cache030 *c = j ? &dcaches030[i] : &icaches030[i];
- int fc;
- uaecptr addr;
- if (j == 0) {
- fc = (c->tag & 1) ? 6 : 2;
- } else {
- fc = c->fc;
- }
- addr = c->tag & ~1;
- addr |= i << 4;
- console_out_f (_T("%08X %d: "), addr, fc);
- for (int j = 0; j < 4; j++) {
- console_out_f (_T("%08X%c "), c->data[j], c->valid[j] ? '*' : ' ');
+ for (int i = 0; i < CACHELINES030; i++) {
+ struct cache030 *c = dc ? &dcaches030[i] : &icaches030[i];
+ int fc;
+ uaecptr addr;
+ if (!dc) {
+ fc = (c->tag & 1) ? 6 : 2;
+ } else {
+ fc = c->fc;
+ }
+ addr = c->tag & ~1;
+ addr |= i << 4;
+ console_out_f (_T("%08X %d: "), addr, fc);
+ for (int j = 0; j < 4; j++) {
+ console_out_f (_T("%08X%c "), c->data[j], c->valid[j] ? '*' : ' ');
+ }
+ console_out_f (_T("\n"));
+ }
+ } else if (currprefs.cpu_model >= 68040) {
+ uae_u32 tagmask = dc ? cachedtag04060mask : cacheitag04060mask;
+ for (int i = 0; i < (dc ? cachedsets04060 : cacheisets04060); i++) {
+ struct cache040 *c = dc ? &dcaches040[i] : &icaches040[i];
+ for (int j = 0; j < CACHELINES040; j++) {
+ if (c->valid[j]) {
+ uae_u32 addr = (c->tag[j] & tagmask) | (i << 4);
+ console_out_f(_T("%02d:%d %08x = %08x%c %08x%c %08x%c %08x%c\n"),
+ i, j, addr,
+ c->data[j][0], c->dirty[j][0] ? '*' : ' ',
+ c->data[j][1], c->dirty[j][1] ? '*' : ' ',
+ c->data[j][2], c->dirty[j][2] ? '*' : ' ',
+ c->data[j][3], c->dirty[j][3] ? '*' : ' ');
}
- console_out_f (_T("\n"));
}
}
}
if (flags & 0x8000000) {
for (int i = 0; i < ((model == 68060 && (flags & 0x4000000)) ? CACHESETS060 : CACHESETS040); i++) {
for (int j = 0; j < CACHELINES040; j++) {
- icaches040[i].data[j][0] = restore_u32();
- icaches040[i].data[j][1] = restore_u32();
- icaches040[i].data[j][2] = restore_u32();
- icaches040[i].data[j][3] = restore_u32();
- icaches040[i].tag[j] = restore_u32();
- icaches040[i].valid[j] = restore_u16() & 1;
+ struct cache040 *c = &icaches040[i];
+ c->data[j][0] = restore_u32();
+ c->data[j][1] = restore_u32();
+ c->data[j][2] = restore_u32();
+ c->data[j][3] = restore_u32();
+ c->tag[j] = restore_u32();
+ c->valid[j] = restore_u16() & 1;
}
}
regs.prefetch020addr = restore_u32();
regs.cacheholdingdata020 = restore_u32();
for (int i = 0; i < CPU_PIPELINE_MAX; i++)
regs.prefetch040[i] = restore_u32();
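+ // 0x4000000 flag: savestate also contains data cache lines; each line is stored
+ // as four data longwords, the tag, and a flag word (bit 0 = valid, bits 4-7 =
+ // per-longword dirty flags).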
+ if (flags & 0x4000000) {
+ for (int i = 0; i < (model == 68060 ? CACHESETS060 : CACHESETS040); i++) {
+ for (int j = 0; j < CACHELINES040; j++) {
+ struct cache040 *c = &dcaches040[i];
+ c->data[j][0] = restore_u32();
+ c->data[j][1] = restore_u32();
+ c->data[j][2] = restore_u32();
+ c->data[j][3] = restore_u32();
+ c->tag[j] = restore_u32();
+ uae_u16 v = restore_u16();
+ c->valid[j] = (v & 1) != 0;
+ c->dirty[j][0] = (v & 0x10) != 0;
+ c->dirty[j][1] = (v & 0x20) != 0;
+ c->dirty[j][2] = (v & 0x40) != 0;
+ c->dirty[j][3] = (v & 0x80) != 0;
+ c->gdirty[j] = c->dirty[j][0] || c->dirty[j][1] || c->dirty[j][2] || c->dirty[j][3];
+ }
+ }
+ }
}
}
if (model >= 68020) {
} else if (model >= 68040) {
for (int i = 0; i < (model == 68060 ? CACHESETS060 : CACHESETS040); i++) {
for (int j = 0; j < CACHELINES040; j++) {
- save_u32(icaches040[i].data[j][0]);
- save_u32(icaches040[i].data[j][1]);
- save_u32(icaches040[i].data[j][2]);
- save_u32(icaches040[i].data[j][3]);
- save_u32(icaches040[i].tag[j]);
- save_u16(icaches040[i].valid[j] ? 1 : 0);
+ struct cache040 *c = &icaches040[i];
+ save_u32(c->data[j][0]);
+ save_u32(c->data[j][1]);
+ save_u32(c->data[j][2]);
+ save_u32(c->data[j][3]);
+ save_u32(c->tag[j]);
+ save_u16(c->valid[j] ? 1 : 0);
}
}
save_u32(regs.prefetch020addr);
save_u32(regs.cacheholdingaddr020);
save_u32(regs.cacheholdingdata020);
- for (int i = 0; i < CPU_PIPELINE_MAX; i++)
+ for (int i = 0; i < CPU_PIPELINE_MAX; i++) {
save_u32(regs.prefetch040[i]);
+ }
+ for (int i = 0; i < (model == 68060 ? CACHESETS060 : CACHESETS040); i++) {
+ for (int j = 0; j < CACHELINES040; j++) {
+ struct cache040 *c = &dcaches040[i];
+ save_u32(c->data[j][0]);
+ save_u32(c->data[j][1]);
+ save_u32(c->data[j][2]);
+ save_u32(c->data[j][3]);
+ save_u32(c->tag[j]);
+ uae_u16 v = c->valid[j] ? 1 : 0;
+ v |= c->dirty[j][0] ? 0x10 : 0;
+ v |= c->dirty[j][1] ? 0x20 : 0;
+ v |= c->dirty[j][2] ? 0x40 : 0;
+ v |= c->dirty[j][3] ? 0x80 : 0;
+ save_u16(v);
+ }
+ }
}
if (currprefs.cpu_model >= 68020) {
save_u32 (0); //save_u32 (regs.ce020memcycles);
}
}
-void write_dcache030(uaecptr addr, uae_u32 v, uae_u32 size, uae_u32 fc)
+void write_dcache030_bput(uaecptr addr, uae_u32 v,uae_u32 fc)
{
regs.fc030 = fc;
- if (size == 2)
- dcache030_lput(addr, v);
- else if (size == 1)
- dcache030_wput(addr, v);
- else
- dcache030_bput(addr, v);
- write_dcache030x(addr, v, size, fc);
+ dcache_bput(addr, v);
+ write_dcache030x(addr, v, 0, fc);
+}
+void write_dcache030_wput(uaecptr addr, uae_u32 v,uae_u32 fc)
+{
+ regs.fc030 = fc;
+ dcache_wput(addr, v);
+ write_dcache030x(addr, v, 1, fc);
+}
+void write_dcache030_lput(uaecptr addr, uae_u32 v,uae_u32 fc)
+{
+ regs.fc030 = fc;
+ dcache_lput(addr, v);
+ write_dcache030x(addr, v, 2, fc);
}
static void dcache030_maybe_burst(uaecptr addr, struct cache030 *c, int lws)
for (int j = 0; j < 3; j++) {
i++;
i &= 3;
- c->data[i] = dcache030_lget (baddr + i * 4);
+ c->data[i] = dcache_lget (baddr + i * 4);
c->valid[i] = true;
}
} CATCH (prb) {
for (int j = 0; j < 3; j++) {
i++;
i &= 3;
- c->data[i] = dcache030_lget (baddr + i * 4);
+ c->data[i] = dcache_lget (baddr + i * 4);
c->valid[i] = true;
}
if (currprefs.cpu_cycle_exact)
}
}
-#if 0
-static void uae_cache_check(uaecptr addr, uae_u32 *v, struct cache030 *c, int lws, int size)
-{
- if (uae_boot_rom_type <= 0)
- return;
- // this check and fix is needed for UAE filesystem handler because it runs in host side and in
- // separate thread. No way to access via cache without locking that would cause major slowdown
- // and unneeded complexity
- uae_u32 tv = get_long(addr);
- if (tv != *v) {
- write_log(_T("data cache mismatch %d %d %08x %08x != %08x %08x %d PC=%08x\n"),
- size, addr, tv, v, c->tag, lws, M68K_GETPC);
- *v = tv;
- }
-}
-#endif
-
uae_u32 read_dcache030 (uaecptr addr, uae_u32 size, uae_u32 fc)
{
uae_u32 addr_o = addr;
if (!c1->valid[lws1] || c1->tag != tag1 || c1->fc != fc) {
// MMU validate address, returns zero if valid but uncacheable
// throws bus error if invalid
- uae_u8 cs = dcache030_check(addr_o, false, size);
+ uae_u8 cs = dcache_check(addr_o, false, size);
if (!(cs & CACHE_ENABLE_DATA))
goto end;
- v1 = dcache030_lget(addr);
+ v1 = dcache_lget(addr);
update_dcache030 (c1, v1, tag1, fc, lws1);
if ((cs & CACHE_ENABLE_DATA_BURST) && (regs.cacr & 0x1100) == 0x1100)
dcache030_maybe_burst(addr, c1, lws1);
} else {
// Cache hit, inhibited caching do not prevent read hits.
v1 = c1->data[lws1];
-#if 0
- uae_cache_check(addr, &v1, c1, lws1, size);
-#endif
}
// only one long fetch needed?
addr += 4;
c2 = getdcache030 (dcaches030, addr, &tag2, &lws2);
if (!c2->valid[lws2] || c2->tag != tag2 || c2->fc != fc) {
- uae_u8 cs = dcache030_check(addr, false, 2);
+ uae_u8 cs = dcache_check(addr, false, 2);
if (!(cs & CACHE_ENABLE_DATA))
goto end;
- v2 = dcache030_lget(addr);
+ v2 = dcache_lget(addr);
update_dcache030 (c2, v2, tag2, fc, lws2);
if ((cs & CACHE_ENABLE_DATA_BURST) && (regs.cacr & 0x1100) == 0x1100)
dcache030_maybe_burst(addr, c2, lws2);
#endif
} else {
v2 = c2->data[lws2];
-#if 0
- uae_cache_check(addr, &v2, c2, lws2, size);
-#endif
}
uae_u64 v64 = ((uae_u64)v1 << 32) | v2;
end:
// read from memory, data cache is disabled or inhibited.
if (size == 2)
- return dcache030_lget (addr_o);
+ return dcache_lget (addr_o);
else if (size == 1)
- return dcache030_wget (addr_o);
+ return dcache_wget (addr_o);
else
- return dcache030_bget (addr_o);
+ return dcache_bget (addr_o);
+}
+uae_u32 read_dcache030_bget(uaecptr addr, uae_u32 fc)
+{
+ return read_dcache030(addr, 0, fc);
+}
+uae_u32 read_dcache030_wget(uaecptr addr, uae_u32 fc)
+{
+ return read_dcache030(addr, 1, fc);
+}
+uae_u32 read_dcache030_lget(uaecptr addr, uae_u32 fc)
+{
+ return read_dcache030(addr, 2, fc);
}
-uae_u32 read_dcache030_mmu(uaecptr addr, uae_u32 size)
+uae_u32 read_dcache030_mmu_bget(uaecptr addr)
+{
+ return read_dcache030_bget(addr, (regs.s ? 4 : 0) | 1);
+}
+uae_u32 read_dcache030_mmu_wget(uaecptr addr)
+{
+ return read_dcache030_wget(addr, (regs.s ? 4 : 0) | 1);
+}
+uae_u32 read_dcache030_mmu_lget(uaecptr addr)
+{
+ return read_dcache030_lget(addr, (regs.s ? 4 : 0) | 1);
+}
+void write_dcache030_mmu_bput(uaecptr addr, uae_u32 val)
+{
+ write_dcache030_bput(addr, val, (regs.s ? 4 : 0) | 1);
+}
+void write_dcache030_mmu_wput(uaecptr addr, uae_u32 val)
{
- return read_dcache030(addr, size, (regs.s ? 4 : 0) | 1);
+ write_dcache030_wput(addr, val, (regs.s ? 4 : 0) | 1);
}
-void write_dcache030_mmu(uaecptr addr, uae_u32 val, uae_u32 size)
+void write_dcache030_mmu_lput(uaecptr addr, uae_u32 val)
{
- write_dcache030(addr, val, size, (regs.s ? 4 : 0) | 1);
+ write_dcache030_lput(addr, val, (regs.s ? 4 : 0) | 1);
}
uae_u32 read_dcache030_lrmw_mmu(uaecptr addr, uae_u32 size)
{
mmu030_cache_state = CACHE_DISABLE_MMU;
- return read_dcache030(addr, size, (regs.s ? 4 : 0) | 1);
+ if (size == 0)
+ return read_dcache030_bget(addr, (regs.s ? 4 : 0) | 1);
+ if (size == 1)
+ return read_dcache030_wget(addr, (regs.s ? 4 : 0) | 1);
+ return read_dcache030_lget(addr, (regs.s ? 4 : 0) | 1);
}
void write_dcache030_lrmw_mmu(uaecptr addr, uae_u32 val, uae_u32 size)
{
mmu030_cache_state = CACHE_DISABLE_MMU;
- write_dcache030(addr, val, size, (regs.s ? 4 : 0) | 1);
+ if (size == 0)
+ write_dcache030_bput(addr, val, (regs.s ? 4 : 0) | 1);
+ else if (size == 1)
+ write_dcache030_wput(addr, val, (regs.s ? 4 : 0) | 1);
+ else
+ write_dcache030_lput(addr, val, (regs.s ? 4 : 0) | 1);
}
static void do_access_or_bus_error(uaecptr pc, uaecptr pcnow)
uae_u32 tag, addr2;
struct cache040 *c;
int line;
- uae_u8 cm = CACHE_ENABLE_ALL;
- static int lastline;
addr2 = addr & ~15;
lws = (addr >> 2) & 3;
- if (regs.prefetch020addr == addr2)
+ if (regs.prefetch020addr == addr2) {
return regs.prefetch040[lws];
+ }
if (regs.cacr & 0x8000) {
- index = (addr >> 4) & (cachesets04060minus1);
- tag = regs.s | (addr & ~((cachesets04060 << 4) - 1));
+ if (!(ce_cachable[addr >> 16] & CACHE_ENABLE_INS))
+ mmu_cache_state = CACHE_DISABLE_MMU;
+
+ index = (addr >> 4) & cacheisets04060mask;
+ tag = addr & cacheitag04060mask;
c = &icaches040[index];
for (int i = 0; i < CACHELINES040; i++) {
- if (c->valid[lastline] && c->tag[lastline] == tag) {
+ if (c->valid[cache_lastline] && c->tag[cache_lastline] == tag) {
// cache hit
- if (!(cm & CACHE_ENABLE_INS)) {
- c->valid[lastline] = false;
+ if (!(mmu_cache_state & CACHE_ENABLE_INS) || (mmu_cache_state & CACHE_DISABLE_MMU)) {
+ c->valid[cache_lastline] = false;
goto end;
}
- icachelinecnt++;
- x_do_cycles(1 * cpucycleunit);
- return c->data[lastline][lws];
+ if ((lws & 1) != icachehalfline) {
+ icachehalfline ^= 1;
+ icachelinecnt++;
+ }
+ return c->data[cache_lastline][lws];
}
- lastline++;
- lastline &= (CACHELINES040 - 1);
+ cache_lastline++;
+ cache_lastline &= (CACHELINES040 - 1);
}
// cache miss
- cm = mmu_cache_state;
regs.prefetch020addr = 0xffffffff;
regs.prefetch040[0] = icache_fetch(addr2 + 0);
regs.prefetch040[1] = icache_fetch(addr2 + 4);
regs.prefetch040[2] = icache_fetch(addr2 + 8);
regs.prefetch040[3] = icache_fetch(addr2 + 12);
regs.prefetch020addr = addr2;
- if (!(cm & CACHE_ENABLE_INS))
+ if (!(mmu_cache_state & CACHE_ENABLE_INS) || (mmu_cache_state & CACHE_DISABLE_MMU))
+ goto end;
+ if (regs.cacr & 0x00004000) // 68060 NAI
goto end;
-
if (c->valid[0] && c->valid[1] && c->valid[2] && c->valid[3]) {
- line = (icachelinecnt >> 1) & (CACHELINES040 - 1);
+ line = icachelinecnt & (CACHELINES040 - 1);
+ icachehalfline = (lws & 1) ? 0 : 1;
} else {
for (line = 0; line < CACHELINES040; line++) {
if (c->valid[line] == false)
c->data[line][1] = regs.prefetch040[1];
c->data[line][2] = regs.prefetch040[2];
c->data[line][3] = regs.prefetch040[3];
- if (!currprefs.cpu_memory_cycle_exact)
- x_do_cycles(4 * cpucycleunit);
+ if ((lws & 1) != icachehalfline) {
+ icachehalfline ^= 1;
+ icachelinecnt++;
+ }
return c->data[line][lws];
}
regs.prefetch040[1] = icache_fetch(addr2 + 4);
regs.prefetch040[2] = icache_fetch(addr2 + 8);
regs.prefetch040[3] = icache_fetch(addr2 + 12);
- if (!currprefs.cpu_memory_cycle_exact)
- x_do_cycles(4 * cpucycleunit);
return regs.prefetch040[lws];
}
-#if 0
-static bool is_dcache040(uae_u32 addr)
+STATIC_INLINE void do_cycles_c040_mem (int clocks, uae_u32 val)
{
- int index, i, lws;
- uae_u32 tag;
- struct cache040 *c;
+ x_do_cycles_post (clocks * cpucycleunit, val);
+}
- addr &= ~15;
- index = (addr >> 4) & (CACHESETS040 - 1);
- tag = regs.s | (addr & ~((CACHESETS040 << 4) - 1));
- lws = (addr >> 2) & 3;
- c = &dcaches040[index];
- for (i = 0; i < CACHELINES040; i++) {
- if (c->valid[i] && c->tag[i] == tag) {
- return true;
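+// 68040+ "memory cycle exact" bus access helpers: chip RAM accesses go through the
+// CE wait-state logic (wait_cpu_cycle_read/write_ce020), 16-bit fast RAM adds a small
+// cycle penalty, 32-bit fast RAM and everything else is accessed directly.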
+uae_u32 mem_access_delay_longi_read_c040 (uaecptr addr)
+{
+ uae_u32 v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ v = wait_cpu_cycle_read_ce020 (addr + 0, 1) << 16;
+ v |= wait_cpu_cycle_read_ce020 (addr + 2, 1) << 0;
+ break;
+ case CE_MEMBANK_CHIP32:
+ if ((addr & 3) != 0) {
+ v = wait_cpu_cycle_read_ce020 (addr + 0, 1) << 16;
+ v |= wait_cpu_cycle_read_ce020 (addr + 2, 1) << 0;
+ } else {
+ v = wait_cpu_cycle_read_ce020 (addr, -1);
+ }
+ break;
+ case CE_MEMBANK_FAST16:
+ v = get_longi (addr);
+ do_cycles_c040_mem(1, v);
+ break;
+ case CE_MEMBANK_FAST32:
+ v = get_longi (addr);
+ break;
+ default:
+ v = get_longi (addr);
+ break;
+ }
+ return v;
+}
+uae_u32 mem_access_delay_long_read_c040 (uaecptr addr)
+{
+ uae_u32 v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ v = wait_cpu_cycle_read_ce020 (addr + 0, 1) << 16;
+ v |= wait_cpu_cycle_read_ce020 (addr + 2, 1) << 0;
+ break;
+ case CE_MEMBANK_CHIP32:
+ if ((addr & 3) != 0) {
+ v = wait_cpu_cycle_read_ce020 (addr + 0, 1) << 16;
+ v |= wait_cpu_cycle_read_ce020 (addr + 2, 1) << 0;
+ } else {
+ v = wait_cpu_cycle_read_ce020 (addr, -1);
}
+ break;
+ case CE_MEMBANK_FAST16:
+ v = get_long (addr);
+ do_cycles_c040_mem(1, v);
+ break;
+ case CE_MEMBANK_FAST32:
+ v = get_long (addr);
+ break;
+ default:
+ v = get_long (addr);
+ break;
}
- return false;
+ return v;
}
-uae_u32 read_dcache040(uae_u32 addr)
+uae_u32 mem_access_delay_word_read_c040 (uaecptr addr)
{
- int index, i, lws;
- uae_u32 tag;
- struct cache040 *c;
- int line;
+ uae_u32 v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ if ((addr & 3) == 3) {
+ v = wait_cpu_cycle_read_ce020 (addr + 0, 0) << 8;
+ v |= wait_cpu_cycle_read_ce020 (addr + 1, 0) << 0;
+ } else {
+ v = wait_cpu_cycle_read_ce020 (addr, 1);
+ }
+ break;
+ case CE_MEMBANK_FAST16:
+ v = get_word (addr);
+ do_cycles_c040_mem (2, v);
+ break;
+ case CE_MEMBANK_FAST32:
+ v = get_word (addr);
+ break;
+ default:
+ v = get_word (addr);
+ break;
+ }
+ return v;
+}
- addr &= ~15;
- index = (addr >> 4) & (CACHESETS040 - 1);
- tag = regs.s | (addr & ~((CACHESETS040 << 4) - 1));
- lws = (addr >> 2) & 3;
- c = &dcaches040[index];
- for (i = 0; i < CACHELINES040; i++) {
- if (c->valid[i] && c->tag[i] == tag) {
- // cache hit
- dcachelinecnt++;
- return c->data[i][lws];
+uae_u32 mem_access_delay_byte_read_c040 (uaecptr addr)
+{
+ uae_u32 v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ v = wait_cpu_cycle_read_ce020 (addr, 0);
+ break;
+ case CE_MEMBANK_FAST16:
+ v = get_byte (addr);
+ do_cycles_c040_mem (1, v);
+ break;
+ case CE_MEMBANK_FAST32:
+ v = get_byte (addr);
+ break;
+ default:
+ v = get_byte (addr);
+ break;
+ }
+ return v;
+}
+
+void mem_access_delay_byte_write_c040 (uaecptr addr, uae_u32 v)
+{
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ wait_cpu_cycle_write_ce020 (addr, 0, v);
+ break;
+ case CE_MEMBANK_FAST16:
+ put_byte (addr, v);
+ do_cycles_c040_mem (1, v);
+ break;
+ case CE_MEMBANK_FAST32:
+ put_byte (addr, v);
+ break;
+ default:
+ put_byte (addr, v);
+ break;
+ }
+}
+
+void mem_access_delay_word_write_c040 (uaecptr addr, uae_u32 v)
+{
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ if ((addr & 3) == 3) {
+ wait_cpu_cycle_write_ce020 (addr + 0, 0, (v >> 8) & 0xff);
+ wait_cpu_cycle_write_ce020 (addr + 1, 0, (v >> 0) & 0xff);
+ } else {
+ wait_cpu_cycle_write_ce020 (addr + 0, 1, v);
+ }
+ break;
+ case CE_MEMBANK_FAST16:
+ put_word (addr, v);
+ if ((addr & 3) == 3)
+ do_cycles_c040_mem(2, v);
+ else
+ do_cycles_c040_mem(1, v);
+ break;
+ case CE_MEMBANK_FAST32:
+ put_word (addr, v);
+ break;
+ default:
+ put_word (addr, v);
+ break;
+ }
+}
+
+void mem_access_delay_long_write_c040 (uaecptr addr, uae_u32 v)
+{
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ wait_cpu_cycle_write_ce020 (addr + 0, 1, (v >> 16) & 0xffff);
+ wait_cpu_cycle_write_ce020 (addr + 2, 1, (v >> 0) & 0xffff);
+ break;
+ case CE_MEMBANK_CHIP32:
+ if ((addr & 3) == 3) {
+ wait_cpu_cycle_write_ce020 (addr + 0, 1, (v >> 16) & 0xffff);
+ wait_cpu_cycle_write_ce020 (addr + 2, 1, (v >> 0) & 0xffff);
+ } else {
+ wait_cpu_cycle_write_ce020 (addr + 0, -1, v);
}
+ break;
+ case CE_MEMBANK_FAST16:
+ put_long (addr, v);
+ do_cycles_ce020_mem (2 * CPU020_MEM_CYCLE, v);
+ break;
+ case CE_MEMBANK_FAST32:
+ put_long (addr, v);
+ break;
+ default:
+ put_long (addr, v);
+ break;
}
- // cache miss
- if (c->valid[0] && c->valid[1] && c->valid[2] && c->valid[3]) {
- line = (icachelinecnt >> 1) & (CACHELINES040 - 1);
- for (i = 0; i < 4; i++) {
- if (c->dirty[line][i]) {
- c->dirty[line][i] = false;
- mem_access_delay_long_write_ce020(addr + i * 4, c->data[line][i]);
- }
+}
+
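+// Extract a big-endian byte/word/long (size 0/1/2) from a 16-byte cache line.
+// Accesses that straddle a 32-bit slot boundary (e.g. a word at line offset 3)
+// are reassembled from two adjacent slots via a 64-bit temporary.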
+static uae_u32 dcache040_get_data(uaecptr addr, struct cache040 *c, int line, int size)
+{
+ static const uae_u32 mask[3] = { 0x000000ff, 0x0000ffff, 0xffffffff };
+ int offset = (addr & 15) * 8;
+ int offset32 = offset & 31;
+ int slot = offset / 32;
+ int width = 8 << size;
+ uae_u32 vv;
+
+ if (offset32 + width <= 32) {
+ uae_u32 v = c->data[line][slot];
+ v >>= 32 - (offset32 + width);
+ v &= mask[size];
+ vv = v;
+ } else {
+#if VALIDATE_68040_DATACACHE
+ if (slot >= 3) {
+ write_log(_T("invalid dcache040_get_data!\n"));
+ return 0;
}
+#endif
+ uae_u64 v = c->data[line][slot];
+ v <<= 32;
+ v |= c->data[line][slot + 1];
+ v >>= 64 - (offset32 + width);
+ vv = v & mask[size];
}
- else {
+ return vv;
+}
+
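+// Merge a byte/word/long store into a cache line and mark the touched 32-bit
+// slot(s) dirty; gdirty marks the line as needing a push in copyback mode.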
+static void dcache040_update(uaecptr addr, struct cache040 *c, int line, uae_u32 val, int size)
+{
+ static const uae_u64 mask64[3] = { 0xff, 0xffff, 0xffffffff };
+ static const uae_u32 mask32[3] = { 0xff, 0xffff, 0xffffffff };
+ int offset = (addr & 15) * 8;
+ int offset32 = offset & 31;
+ int slot = offset / 32;
+ int width = 8 << size;
+
+#if VALIDATE_68040_DATACACHE > 1
+ validate_dcache040();
+#endif
+
+ if (offset32 + width <= 32) {
+ int shift = 32 - (offset32 + width);
+ uae_u32 v = c->data[line][slot];
+ v &= ~(mask32[size] << shift);
+ v |= val << shift;
+ c->data[line][slot] = v;
+ c->dirty[line][slot] = true;
+ } else {
+#if VALIDATE_68040_DATACACHE
+ if (slot >= 3) {
+ write_log(_T("invalid dcache040_update!\n"));
+ return;
+ }
+#endif
+ int shift = 64 - (offset32 + width);
+ uae_u64 v = c->data[line][slot];
+ v <<= 32;
+ v |= c->data[line][slot + 1];
+ v &= ~(mask64[size] << shift);
+ v |= ((uae_u64)val) << shift;
+ c->data[line][slot] = v >> 32;
+ c->dirty[line][slot] = true;
+ c->data[line][slot + 1] = (uae_u32)v;
+ c->dirty[line][slot + 1] = true;
+ }
+ c->gdirty[line] = true;
+}
+
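+// Handle line allocation on a cache miss: if all four ways are valid, pick one
+// round-robin, push its dirty data and invalidate it, then fetch the whole
+// 16-byte line from memory. Returns the allocated way.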
+static int dcache040_fill_line(int index, uae_u32 tag, uaecptr addr)
+{
+ // cache miss
+ struct cache040 *c = &dcaches040[index];
+ int line;
+ if (c->valid[0] && c->valid[1] && c->valid[2] && c->valid[3]) {
+ // all lines allocated, choose one, push and invalidate.
+ line = dcachelinecnt & (CACHELINES040 - 1);
+ dcachelinecnt++;
+ dcache040_push_line(index, line, false, true);
+ } else {
+ // at least one invalid
for (line = 0; line < CACHELINES040; line++) {
if (c->valid[line] == false)
break;
}
}
c->tag[line] = tag;
+ c->dirty[line][0] = false;
+ c->dirty[line][1] = false;
+ c->dirty[line][2] = false;
+ c->dirty[line][3] = false;
+ c->gdirty[line] = false;
+ c->data[line][0] = dcache_lget(addr + 0);
+ c->data[line][1] = dcache_lget(addr + 4);
+ c->data[line][2] = dcache_lget(addr + 8);
+ c->data[line][3] = dcache_lget(addr + 12);
c->valid[line] = true;
- c->data[line][0] = mem_access_delay_long_read_ce020(addr + 0);
- c->data[line][1] = mem_access_delay_long_read_ce020(addr + 4);
- c->data[line][2] = mem_access_delay_long_read_ce020(addr + 8);
- c->data[line][3] = mem_access_delay_long_read_ce020(addr + 12);
- regs.cacheholdingaddr020 = addr;
+ return line;
}
-void write_dcache040(uae_u32 addr, uae_u32 val)
+static uae_u32 read_dcache040(uae_u32 addr, int size, uae_u32 (*fetch)(uaecptr))
{
- int index, i, lws;
+ int index;
uae_u32 tag;
struct cache040 *c;
int line;
+ uae_u32 addr_o = addr;
+
+ if (!(regs.cacr & 0x80000000))
+ goto nocache;
+
+#if VALIDATE_68040_DATACACHE > 1
+ validate_dcache040();
+#endif
+
+ // Simple check is enough because the 68040+ data cache is physically tagged (the 68030 caches logical addresses)
+ if (!(ce_cachable[addr >> 16] & CACHE_ENABLE_DATA))
+ mmu_cache_state = CACHE_DISABLE_MMU;
addr &= ~15;
- index = (addr >> 4) & (CACHESETS040 - 1);
- tag = regs.s | (addr & ~((CACHESETS040 << 4) - 1));
- lws = (addr >> 2) & 3;
+ index = (addr >> 4) & cachedsets04060mask;
+ tag = addr & cachedtag04060mask;
c = &dcaches040[index];
- for (i = 0; i < CACHELINES040; i++) {
- if (c->valid[i] && c->tag[i] == tag) {
+ for (line = 0; line < CACHELINES040; line++) {
+ if (c->valid[line] && c->tag[line] == tag) {
// cache hit
dcachelinecnt++;
- c->data[i][lws] = val;
- mem_access_delay_long_write_ce020(addr + i * 4, c->data[i][lws]);
- //c->dirty[i][lws] = true;
+ // Cache hit but page is marked non-cacheable by the MMU: don't cache, push and invalidate the existing line
+ if (mmu_cache_state & CACHE_DISABLE_MMU) {
+ dcache040_push_line(index, line, false, true);
+ goto nocache;
+ }
+ return dcache040_get_data(addr_o, c, line, size);
}
}
-#if 0
- // cache miss
- if (c->valid[0] && c->valid[1] && c->valid[2] && c->valid[3]) {
- line = (icachelinecnt >> 1) & (CACHELINES040 - 1);
- for (i = 0; i < 4; i++) {
- if (c->dirty[line][i]) {
- c->dirty[line][i] = false;
- mem_access_delay_long_write_ce020(addr + i * 4, c->data[line][i]);
+ // Cache miss
+ // 040+ always caches whole line
+ if ((mmu_cache_state & CACHE_DISABLE_MMU) || !(mmu_cache_state & CACHE_ENABLE_DATA) || (mmu_cache_state & CACHE_DISABLE_ALLOCATE) || (regs.cacr & 0x40000000)) { // 68060 NAD
+nocache:
+ return fetch(addr_o);
+ }
+ // Allocate new cache line, return requested data.
+ line = dcache040_fill_line(index, tag, addr);
+ return dcache040_get_data(addr_o, c, line, size);
+}
+
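+// Data cache write path: hits update the line and, in write-through mode, the
+// modified longwords are pushed to memory immediately; only copyback-mode write
+// misses allocate a new line (write-through misses go straight to memory).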
+static void write_dcache040(uae_u32 addr, uae_u32 val, int size, void (*store)(uaecptr, uae_u32))
+{
+ static const uae_u32 mask[3] = { 0x000000ff, 0x0000ffff, 0xffffffff };
+ int index;
+ uae_u32 tag;
+ struct cache040 *c;
+ int line;
+ uae_u32 addr_o = addr;
+
+ val &= mask[size];
+
+ if (!(regs.cacr & 0x80000000))
+ goto nocache;
+
+ if (!(ce_cachable[addr >> 16] & CACHE_ENABLE_DATA))
+ mmu_cache_state = CACHE_DISABLE_MMU;
+
+ addr &= ~15;
+ index = (addr >> 4) & cachedsets04060mask;
+ tag = addr & cachedtag04060mask;
+ c = &dcaches040[index];
+ for (line = 0; line < CACHELINES040; line++) {
+ if (c->valid[line] && c->tag[line] == tag) {
+ // cache hit
+ dcachelinecnt++;
+ // Cache hit but page is marked non-cacheable by the MMU: don't cache, push and invalidate the existing line
+ if (mmu_cache_state & CACHE_DISABLE_MMU) {
+ dcache040_push_line(index, line, false, true);
+ goto nocache;
+ }
+ dcache040_update(addr_o, c, line, val, size);
+ // If not copyback mode: push modifications immediately (write-through)
+ if (!(mmu_cache_state & CACHE_ENABLE_COPYBACK) || DISABLE_68040_COPYBACK) {
+ dcache040_push_line(index, line, true, false);
}
+ return;
}
}
- else {
- for (line = 0; line < CACHELINES040; line++) {
- if (c->valid[line] == false)
- break;
- }
+ // Cache miss
+ // 040+ always caches whole line
+ // Write misses in write-through mode don't allocate new cache lines
+ if (!(mmu_cache_state & CACHE_ENABLE_DATA) || (mmu_cache_state & CACHE_DISABLE_MMU) || (mmu_cache_state & CACHE_DISABLE_ALLOCATE) || !(mmu_cache_state & CACHE_ENABLE_COPYBACK) || (regs.cacr & 0x40000000)) { // 68060 NAD
+nocache:
+ store(addr_o, val);
+ return;
+ }
+ // Allocate new cache line and update it with new data.
+ line = dcache040_fill_line(index, tag, addr);
+ dcache040_update(addr_o, c, line, val, size);
+ if (DISABLE_68040_COPYBACK) {
+ dcache040_push_line(index, line, true, false);
}
- c->tag[line] = tag;
- c->valid[line] = true;
- c->data[line][0] = mem_access_delay_long_read_ce020(addr + 0);
- c->data[line][1] = mem_access_delay_long_read_ce020(addr + 4);
- c->data[line][2] = mem_access_delay_long_read_ce020(addr + 8);
- c->data[line][3] = mem_access_delay_long_read_ce020(addr + 12);
- c->data[line][lws] = val;
- c->dirty[line][lws] = true;
-#endif
}
-#endif
// really unoptimized
uae_u32 get_word_icache040(uaecptr addr)
return get_word_icache040(m68k_getpci() + o);
}
-STATIC_INLINE bool nocache040(uaecptr addr)
-{
- if (!currprefs.cpu_memory_cycle_exact)
- return false;
- if (!(regs.cacr & 0x80000000))
- return true;
- if (addr >= 0xd80000 && addr < 0xc00000)
- return true;
- if (addr >= 0xe80000 && addr < 0xf00000)
- return true;
- return false;
-}
-
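+// Unaligned accesses are split so that no single cache access crosses a 16-byte
+// line boundary, e.g. a long at line offset 13 becomes byte + word + byte
+// (the last byte falling into the next line).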
void put_long_cache_040(uaecptr addr, uae_u32 v)
{
-#if 1
- if (nocache040(addr))
- mem_access_delay_long_write_ce020(addr, v);
- else
- put_long(addr, v);
-#else
- if ((addr & 2) == 0) {
- if (is_dcache040(addr))
- write_dcache040(addr, v);
- else if (currprefs.cpu_memory_cycle_exact)
- mem_access_delay_long_write_ce020(addr, v);
- else
- put_long(addr, v);
- } else {
- uae_u32 vp;
- if (is_dcache040(addr)) {
- vp = read_dcache040(addr);
- vp &= 0xffff0000;
- vp |= v >> 16;
- write_dcache040(addr, vp);
- } else if (currprefs.cpu_memory_cycle_exact) {
- mem_access_delay_word_write_ce020(addr + 0, v >> 16);
- } else {
- put_word(addr + 0, v >> 16);
- }
- if (is_dcache040(addr + 4)) {
- vp = read_dcache040(addr + 4);
- vp &= 0x0000ffff;
- vp |= v << 16;
- write_dcache040(addr + 4, vp);
- } else if (currprefs.cpu_memory_cycle_exact) {
- mem_access_delay_word_write_ce020(addr + 2, v);
- } else {
- put_word(addr + 2, v);
- }
+ int offset = addr & 15;
+ // access must not cross cachelines
+ if (offset < 13) {
+ write_dcache040(addr, v, 2, dcache_lput);
+ } else if (offset == 13 || offset == 15) {
+ write_dcache040(addr + 0, v >> 24, 0, dcache_bput);
+ write_dcache040(addr + 1, v >> 8, 1, dcache_wput);
+ write_dcache040(addr + 3, v >> 0, 0, dcache_bput);
+ } else if (offset == 14) {
+ write_dcache040(addr + 0, v >> 16, 1, dcache_wput);
+ write_dcache040(addr + 2, v >> 0, 1, dcache_wput);
}
-#endif
}
void put_word_cache_040(uaecptr addr, uae_u32 v)
{
-#if 1
- if (nocache040(addr))
- mem_access_delay_word_write_ce020(addr, v);
- else
- put_word(addr, v);
-#else
- if (is_dcache040(addr)) {
- uae_u32 vp;
- vp = read_dcache040(addr);
- if (addr & 2) {
- vp &= 0xffff0000;
- vp |= v & 0xffff;
- } else {
- vp &= 0x0000ffff;
- vp |= v << 16;
- }
- write_dcache040(addr, vp);
- } else if (currprefs.cpu_memory_cycle_exact) {
- mem_access_delay_word_write_ce020(addr, v);
+ int offset = addr & 15;
+ if (offset < 15) {
+ write_dcache040(addr, v, 1, dcache_wput);
} else {
- put_word(addr, v);
+ write_dcache040(addr + 0, v >> 8, 0, dcache_bput);
+ write_dcache040(addr + 1, v >> 0, 0, dcache_bput);
}
-#endif
}
void put_byte_cache_040(uaecptr addr, uae_u32 v)
{
-#if 1
- if (nocache040(addr))
- mem_access_delay_byte_write_ce020(addr, v);
- else
- put_byte(addr, v);
-#else
- if (is_dcache040(addr)) {
- uae_u32 vp;
- uae_u32 mask = 0xff000000 >> (addr & 3);
- vp = read_dcache040(addr);
- vp &= ~mask;
- vp |= (v << (3 - (addr & 3))) & mask;
- write_dcache040(addr, vp);
- } else if (currprefs.cpu_memory_cycle_exact) {
- mem_access_delay_byte_write_ce020(addr, v);
- } else {
- put_byte(addr, v);
- }
-#endif
+ write_dcache040(addr, v, 0, dcache_bput);
}
uae_u32 get_long_cache_040(uaecptr addr)
{
-#if 1
- if (nocache040(addr))
- return mem_access_delay_long_read_ce020(addr);
- else
- return get_long(addr);
-#else
- uae_u32 v1, v2;
- v1 = read_dcache040(addr);
- if ((addr & 2) == 0)
- return v1;
- v2 = read_dcache040(addr + 4);
- return (v2 >> 16) | (v1 << 16);
-#endif
+ uae_u32 v;
+ int offset = addr & 15;
+ if (offset < 13) {
+ v = read_dcache040(addr, 2, dcache_lget);
+ } else if (offset == 13 || offset == 15) {
+ v = read_dcache040(addr + 0, 0, dcache_bget) << 24;
+ v |= read_dcache040(addr + 1, 1, dcache_wget) << 8;
+ v |= read_dcache040(addr + 3, 0, dcache_bget) << 0;
+ } else if (offset == 14) {
+ v = read_dcache040(addr + 0, 1, dcache_wget) << 16;
+ v |= read_dcache040(addr + 2, 1, dcache_wget) << 0;
+ }
+ return v;
}
uae_u32 get_word_cache_040(uaecptr addr)
{
-#if 1
- if (nocache040(addr))
- return mem_access_delay_word_read_ce020(addr);
- else
- return get_word(addr);
-#else
- uae_u32 v = read_dcache040(addr);
- return v >> ((addr & 2) ? 0 : 16);
-#endif
+ uae_u32 v;
+ int offset = addr & 15;
+ if (offset < 15) {
+ v = read_dcache040(addr, 1, dcache_wget);
+ } else {
+ v = read_dcache040(addr + 0, 0, dcache_bget) << 8;
+ v |= read_dcache040(addr + 1, 0, dcache_bget) << 0;
+ }
+ return v;
}
uae_u32 get_byte_cache_040(uaecptr addr)
{
-#if 1
- if (nocache040(addr))
- return mem_access_delay_byte_read_ce020(addr);
- else
- return get_byte(addr);
-#else
- uae_u32 v = read_dcache040(addr);
- return v >> (8 * (3 - (addr & 3)));
-#endif
+ return read_dcache040(addr, 0, dcache_bget);
}
uae_u32 next_iword_cache040(void)
{
return r;
}
-void flush_dcache (uaecptr addr, int size)
-{
- if (!currprefs.cpu_memory_cycle_exact && !currprefs.cpu_compatible)
- return;
- if (currprefs.cpu_model >= 68030) {
- for (int i = 0; i < CACHELINES030; i++) {
- dcaches030[i].valid[0] = 0;
- dcaches030[i].valid[1] = 0;
- dcaches030[i].valid[2] = 0;
- dcaches030[i].valid[3] = 0;
- }
- }
-}
-
void check_t0_trace(void)
{
if (regs.t0 && currprefs.cpu_model >= 68020) {