*tmpp++ = 0;
if (checkequals) {
tmpp2 = _tcschr(p, '=');
- if (!tmpp2)
- return NULL;
- *tmpp2++ = 0;
+ if (tmpp2)
+ *tmpp2++ = 0;
}
if (!strcasecmp(p, option)) {
- if (checkequals)
+ if (checkequals && tmpp2)
return tmpp2;
return p;
}
_tcscat(tmp, extras);
_tcscat(tmp3, extras);
}
+ if (ci->unit_special_flags) {
+ TCHAR tmpx[32];
+ _stprintf(tmpx, _T(",flags=0x%x"), ci->unit_special_flags);
+ _tcscat(tmp, tmpx);
+ _tcscat(tmp3, tmpx);
+ }
if (ci->type == UAEDEV_HDF)
cfgfile_write_str (f, _T("hardfile2"), tmp);
#if 0
}
cfgfile_write_bool (f, _T("cpu_cycle_exact"), p->cpu_cycle_exact);
+ // must be after cpu_cycle_exact
+ cfgfile_write_bool (f, _T("cpu_memory_cycle_exact"), p->cpu_memory_cycle_exact);
cfgfile_write_bool (f, _T("blitter_cycle_exact"), p->blitter_cycle_exact);
cfgfile_write_bool (f, _T("cycle_exact"), p->cpu_cycle_exact && p->blitter_cycle_exact ? 1 : 0);
cfgfile_dwrite_bool (f, _T("fpu_no_unimplemented"), p->fpu_no_unimplemented);
else if (cfgfile_option_find(tmpp2, _T("HD")))
uci.controller_media_type = 0;
+ TCHAR *pflags;
+ if ((pflags = cfgfile_option_get(tmpp2, _T("flags")))) {
+ getintval(&pflags, &uci.unit_special_flags, 0);
+ }
+
if (cfgfile_option_find(tmpp2, _T("SCSI2")))
uci.unit_feature_level = HD_LEVEL_SCSI_2;
else if (cfgfile_option_find(tmpp2, _T("SCSI1")))
bool tmpbool, dummybool;
TCHAR tmpbuf[CONFIG_BLEN];
- if (cfgfile_yesno (option, value, _T("cpu_cycle_exact"), &p->cpu_cycle_exact)
- || cfgfile_yesno (option, value, _T("blitter_cycle_exact"), &p->blitter_cycle_exact)) {
- if (p->cpu_model >= 68020 && p->cachesize > 0)
- p->cpu_cycle_exact = p->blitter_cycle_exact = 0;
- /* we don't want cycle-exact in 68020/40+JIT modes */
- return 1;
+ if (cfgfile_yesno (option, value, _T("cpu_cycle_exact"), &p->cpu_cycle_exact)) {
+ /* we don't want cycle-exact in 68020/40+JIT modes */
+ if (p->cpu_model >= 68020 && p->cachesize > 0)
+ p->cpu_cycle_exact = p->cpu_memory_cycle_exact = p->blitter_cycle_exact = 0;
+ p->cpu_memory_cycle_exact = p->cpu_cycle_exact;
+ return 1;
+ }
+ if (cfgfile_yesno (option, value, _T("blitter_cycle_exact"), &p->blitter_cycle_exact)) {
+ if (p->cpu_model >= 68020 && p->cachesize > 0)
+ p->cpu_cycle_exact = p->cpu_memory_cycle_exact = p->blitter_cycle_exact = 0;
+ return 1;
+ }
+ if (cfgfile_yesno (option, value, _T("cpu_memory_cycle_exact"), &p->cpu_memory_cycle_exact)) {
+ if (!p->cpu_memory_cycle_exact)
+ p->cpu_cycle_exact = false;
+ return 1;
}
if (cfgfile_yesno (option, value, _T("cycle_exact"), &tmpbool)) {
- p->cpu_cycle_exact = p->blitter_cycle_exact = tmpbool;
+ p->cpu_cycle_exact = p->cpu_memory_cycle_exact = p->blitter_cycle_exact = tmpbool;
if (p->cpu_model >= 68020 && p->cachesize > 0)
- p->cpu_cycle_exact = p->blitter_cycle_exact = false;
+ p->cpu_cycle_exact = p->cpu_memory_cycle_exact = p->blitter_cycle_exact = false;
// if old version and CE and fastest possible: set to approximate
if (p->cpu_cycle_exact && p->config_version < ((2 << 16) | (8 << 8) | (2 << 0)) && p->m68k_speed < 0)
p->m68k_speed = 0;
p->cpu_compatible = 1;
p->address_space_24 = 1;
p->cpu_cycle_exact = 0;
+ p->cpu_memory_cycle_exact = 0;
p->blitter_cycle_exact = 0;
p->chipset_mask = CSMASK_ECS_AGNUS;
p->genlock = 0;
p->cpu_compatible = 1;
p->address_space_24 = 1;
p->cpu_cycle_exact = 0;
+ p->cpu_memory_cycle_exact = 0;
p->blitter_cycle_exact = 0;
p->chipset_mask = CSMASK_ECS_AGNUS;
p->immediate_blits = 0;
p->m68k_speed = 0;
if (p->cpu_model == 68020 && p->cachesize == 0) {
p->cpu_cycle_exact = 1;
+ p->cpu_memory_cycle_exact = 1;
p->cpu_clock_multiplier = 4 << 8;
}
break;
switch (compa)
{
case 0:
- p->cpu_cycle_exact = p->blitter_cycle_exact = 1;
+ p->cpu_cycle_exact = p->cpu_memory_cycle_exact = p->blitter_cycle_exact = 1;
break;
case 1:
break;
v = bip_super (p, config, compa, romcheck);
break;
}
- if ((p->cpu_model >= 68020 || !p->cpu_cycle_exact) && !p->immediate_blits)
+ if ((p->cpu_model >= 68020 || !p->cpu_cycle_exact || !p->cpu_memory_cycle_exact) && !p->immediate_blits)
p->waiting_blits = 1;
if (p->sound_filter_type == FILTER_SOUND_TYPE_A500 && (p->chipset_mask & CSMASK_AGA))
p->sound_filter_type = FILTER_SOUND_TYPE_A1200;
x_do_cycles = do_cycles_ce;
x_do_cycles_pre = do_cycles_ce;
x_do_cycles_post = do_cycles_ce_post;
+ } else if (currprefs.cpu_memory_cycle_exact) {
+ // cpu_memory_cycle_exact + cpu_compatible
+ x_prefetch = get_word_prefetch;
+ x_get_ilong = NULL;
+ x_get_iword = get_iiword;
+ x_get_ibyte = get_iibyte;
+ x_next_iword = NULL;
+ x_next_ilong = NULL;
+ x_put_long = put_long_ce000;
+ x_put_word = put_word_ce000;
+ x_put_byte = put_byte_ce000;
+ x_get_long = get_long_ce000;
+ x_get_word = get_word_ce000;
+ x_get_byte = get_byte_ce000;
+ x_do_cycles = do_cycles;
+ x_do_cycles_pre = do_cycles;
+ x_do_cycles_post = do_cycles_post;
} else if (currprefs.cpu_compatible) {
+ // cpu_compatible only
x_prefetch = get_word_prefetch;
x_get_ilong = NULL;
x_get_iword = get_iiword;
}
} else if (!currprefs.cpu_cycle_exact) {
// 68020+ no ce
- if (currprefs.cpu_compatible) {
+ if (currprefs.cpu_memory_cycle_exact) {
+ // cpu_memory_cycle_exact + cpu_compatible
+ if (currprefs.cpu_model == 68020 && !currprefs.cachesize) {
+ x_prefetch = get_word_020_prefetch;
+ x_get_ilong = get_long_020_prefetch;
+ x_get_iword = get_word_020_prefetch;
+ x_get_ibyte = NULL;
+ x_next_iword = next_iword_020_prefetch;
+ x_next_ilong = next_ilong_020_prefetch;
+ x_put_long = put_long_ce020;
+ x_put_word = put_word_ce020;
+ x_put_byte = put_byte_ce020;
+ x_get_long = get_long_ce020;
+ x_get_word = get_word_ce020;
+ x_get_byte = get_byte_ce020;
+ x_do_cycles = do_cycles;
+ x_do_cycles_pre = do_cycles;
+ x_do_cycles_post = do_cycles_post;
+ } else if (currprefs.cpu_model == 68030 && !currprefs.cachesize) {
+ x_prefetch = get_word_030_prefetch;
+ x_get_ilong = get_long_030_prefetch;
+ x_get_iword = get_word_030_prefetch;
+ x_get_ibyte = NULL;
+ x_next_iword = next_iword_030_prefetch;
+ x_next_ilong = next_ilong_030_prefetch;
+ x_put_long = put_long_ce030;
+ x_put_word = put_word_ce030;
+ x_put_byte = put_byte_ce030;
+ x_get_long = get_long_ce030;
+ x_get_word = get_word_ce030;
+ x_get_byte = get_byte_ce030;
+ x_do_cycles = do_cycles;
+ x_do_cycles_pre = do_cycles;
+ x_do_cycles_post = do_cycles_post;
+ } else if (currprefs.cpu_model < 68040) {
+ // JIT or 68030+ does not have real prefetch only emulation
+ x_prefetch = NULL;
+ set_x_ifetches();
+ x_put_long = put_long;
+ x_put_word = put_word;
+ x_put_byte = put_byte;
+ x_get_long = get_long;
+ x_get_word = get_word;
+ x_get_byte = get_byte;
+ x_do_cycles = do_cycles;
+ x_do_cycles_pre = do_cycles;
+ x_do_cycles_post = do_cycles_post;
+ } else {
+ // 68040+ (same as below)
+ x_prefetch = NULL;
+ x_get_ilong = get_ilong_cache_040;
+ x_get_iword = get_iword_cache_040;
+ x_get_ibyte = NULL;
+ x_next_iword = next_iword_cache040;
+ x_next_ilong = next_ilong_cache040;
+ x_put_long = put_long_cache_040;
+ x_put_word = put_word_cache_040;
+ x_put_byte = put_byte_cache_040;
+ x_get_long = get_long_cache_040;
+ x_get_word = get_word_cache_040;
+ x_get_byte = get_byte_cache_040;
+ x_do_cycles = do_cycles;
+ x_do_cycles_pre = do_cycles;
+ x_do_cycles_post = do_cycles_post;
+ }
+ } else if (currprefs.cpu_compatible) {
+ // cpu_compatible only
if (currprefs.cpu_model == 68020 && !currprefs.cachesize) {
x_prefetch = get_word_prefetch;
x_get_ilong = get_long_020_prefetch;
bool can_cpu_tracer (void)
{
- return (currprefs.cpu_model == 68000 || currprefs.cpu_model == 68020) && currprefs.cpu_cycle_exact;
+ // Key the tracer off cpu_memory_cycle_exact instead of cpu_cycle_exact.
+ // cpu_memory_cycle_exact is also set whenever cpu_cycle_exact is enabled
+ // (see the cfgfile changes in this patch), so this only widens availability.
+ return (currprefs.cpu_model == 68000 || currprefs.cpu_model == 68020) && currprefs.cpu_memory_cycle_exact;
}
bool is_cpu_tracer (void)
void flush_cpu_caches(bool force)
{
- bool doflush = currprefs.cpu_compatible || currprefs.cpu_cycle_exact;
+ bool doflush = currprefs.cpu_compatible || currprefs.cpu_memory_cycle_exact;
if (currprefs.cpu_model == 68020) {
if (regs.cacr & 0x08) { // clear instr cache
}
write_log (_T("Building CPU, %d opcodes (%d %d %d)\n"),
opcnt, lvl,
- currprefs.cpu_cycle_exact ? -1 : currprefs.cpu_compatible ? 1 : 0, currprefs.address_space_24);
+ currprefs.cpu_cycle_exact ? -2 : currprefs.cpu_memory_cycle_exact ? -1 : currprefs.cpu_compatible ? 1 : 0, currprefs.address_space_24);
#ifdef JIT
write_log(_T("JIT: &countdown = %p\n"), &countdown);
write_log(_T("JIT: &build_comp = %p\n"), &build_comp);
write_log(_T(" prefetch and cycle-exact"));
else
write_log(_T(" ~cycle-exact"));
+ } else if (currprefs.cpu_memory_cycle_exact) {
+ write_log(_T(" ~memory-cycle-exact"));
} else if (currprefs.cpu_compatible) {
if (currprefs.cpu_model <= 68020) {
write_log(_T(" prefetch"));
currprefs.mmu_model = changed_prefs.mmu_model;
currprefs.cpu_compatible = changed_prefs.cpu_compatible;
currprefs.cpu_cycle_exact = changed_prefs.cpu_cycle_exact;
+ currprefs.cpu_memory_cycle_exact = changed_prefs.cpu_memory_cycle_exact;
currprefs.int_no_unimplemented = changed_prefs.int_no_unimplemented;
currprefs.fpu_no_unimplemented = changed_prefs.fpu_no_unimplemented;
currprefs.blitter_cycle_exact = changed_prefs.blitter_cycle_exact;
|| currprefs.int_no_unimplemented != changed_prefs.int_no_unimplemented
|| currprefs.fpu_no_unimplemented != changed_prefs.fpu_no_unimplemented
|| currprefs.cpu_compatible != changed_prefs.cpu_compatible
- || currprefs.cpu_cycle_exact != changed_prefs.cpu_cycle_exact) {
+ || currprefs.cpu_cycle_exact != changed_prefs.cpu_cycle_exact
+ || currprefs.cpu_memory_cycle_exact != changed_prefs.cpu_memory_cycle_exact) {
cpu_prefs_changed_flag |= 1;
}
if (changed
_T("FP5"),
_T("FP6"),
_T("FP7"),
- _T("FPCR"),
+ _T("FPIAR"),
_T("FPSR"),
- _T("FPIAR")
+ _T("FPCR")
};
static void addmovemreg (TCHAR *out, int *prevreg, int *lastreg, int *first, int reg, int fpmode)
currprefs.address_space_24 = changed_prefs.address_space_24;
currprefs.cpu_compatible = changed_prefs.cpu_compatible;
currprefs.cpu_cycle_exact = changed_prefs.cpu_cycle_exact;
+ currprefs.cpu_memory_cycle_exact = changed_prefs.cpu_memory_cycle_exact;
currprefs.blitter_cycle_exact = changed_prefs.blitter_cycle_exact;
currprefs.cpu_frequency = changed_prefs.cpu_frequency = 0;
currprefs.cpu_clock_multiplier = changed_prefs.cpu_clock_multiplier = 0;
uae_u32 flags = restore_u32 ();
currprefs.cpu_cycle_exact = changed_prefs.cpu_cycle_exact = (flags & 1) ? true : false;
+ currprefs.cpu_memory_cycle_exact = changed_prefs.cpu_memory_cycle_exact = currprefs.cpu_cycle_exact;
+ if ((flags & 32) && !(flags & 1))
+ currprefs.cpu_memory_cycle_exact = changed_prefs.cpu_memory_cycle_exact = true;
currprefs.blitter_cycle_exact = changed_prefs.blitter_cycle_exact = currprefs.cpu_cycle_exact;
currprefs.cpu_compatible = changed_prefs.cpu_compatible = (flags & 2) ? true : false;
currprefs.cpu_frequency = changed_prefs.cpu_frequency = restore_u32 ();
flags |= currprefs.m68k_speed < 0 ? 4 : 0;
flags |= currprefs.cachesize > 0 ? 8 : 0;
flags |= currprefs.m68k_speed > 0 ? 16 : 0;
+ flags |= currprefs.cpu_memory_cycle_exact ? 32 : 0;
if (currprefs.m68k_speed > 0)
flags |= (currprefs.m68k_speed / CYCLE_UNIT) << 24;
save_u32 (flags);
m68k_reset_delay = currprefs.reset_delay;
set_special(SPCFLAG_CHECK);
send_internalevent(INTERNALEVENT_CPURESET);
- if ((currprefs.cpu_compatible || currprefs.cpu_cycle_exact) && currprefs.cpu_model <= 68020) {
+ if ((currprefs.cpu_compatible || currprefs.cpu_memory_cycle_exact) && currprefs.cpu_model <= 68020) {
custom_reset (false, false);
return;
}
unset_special (SPCFLAG_STOP);
}
+
+// Cycle-exact data word read: fetch a 16-bit value from addr, charging the
+// memory-access delay appropriate for the bank the address falls in, and
+// latch the value into regs.db (the emulated data-bus register).
+uae_u32 mem_access_delay_word_read (uaecptr addr)
+{
+ uae_u32 v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ // Chip bus: read via wait_cpu_cycle_read (presumably synchronizes the
+ // CPU with chipset DMA bus arbitration — confirm against custom.c).
+ v = wait_cpu_cycle_read (addr, 1);
+ break;
+ case CE_MEMBANK_FAST16:
+ case CE_MEMBANK_FAST32:
+ // Fast RAM: immediate read plus a fixed 4-CPU-cycle bus charge.
+ v = get_word (addr);
+ x_do_cycles_post (4 * cpucycleunit, v);
+ break;
+ default:
+ // Other bank types: no access delay is modelled.
+ v = get_word (addr);
+ break;
+ }
+ regs.db = v;
+ return v;
+}
+// Cycle-exact instruction word read: identical timing model to
+// mem_access_delay_word_read, but fetches through get_wordi (presumably the
+// instruction-fetch accessor — verify against memory.h), and latches the
+// fetched value into regs.db.
+uae_u32 mem_access_delay_wordi_read (uaecptr addr)
+{
+ uae_u32 v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ // Chip bus: synchronized read (see wait_cpu_cycle_read).
+ v = wait_cpu_cycle_read (addr, 1);
+ break;
+ case CE_MEMBANK_FAST16:
+ case CE_MEMBANK_FAST32:
+ // Fast RAM: immediate fetch plus a fixed 4-CPU-cycle bus charge.
+ v = get_wordi (addr);
+ x_do_cycles_post (4 * cpucycleunit, v);
+ break;
+ default:
+ // Other bank types: no access delay is modelled.
+ v = get_wordi (addr);
+ break;
+ }
+ regs.db = v;
+ return v;
+}
+
+// Cycle-exact byte read: same per-bank timing as the word variant (note the
+// second argument to wait_cpu_cycle_read is 0 here, i.e. byte-sized access).
+// The byte is mirrored into both halves of regs.db, modelling the same value
+// appearing on both halves of the 16-bit data bus.
+uae_u32 mem_access_delay_byte_read (uaecptr addr)
+{
+ uae_u32 v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ // Chip bus: synchronized byte read.
+ v = wait_cpu_cycle_read (addr, 0);
+ break;
+ case CE_MEMBANK_FAST16:
+ case CE_MEMBANK_FAST32:
+ // Fast RAM: immediate read plus a fixed 4-CPU-cycle bus charge.
+ v = get_byte (addr);
+ x_do_cycles_post (4 * cpucycleunit, v);
+ break;
+ default:
+ // Other bank types: no access delay is modelled.
+ v = get_byte (addr);
+ break;
+ }
+ regs.db = (v << 8) | v;
+ return v;
+}
+// Cycle-exact byte write: latch the byte (mirrored into both halves) into
+// regs.db before the store, then write with the per-bank delay model.
+// Chip and fast cases return early; any other bank type falls through to a
+// plain put_byte with no modelled delay.
+void mem_access_delay_byte_write (uaecptr addr, uae_u32 v)
+{
+ regs.db = (v << 8) | v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ // Chip bus: synchronized byte write (size argument 0 = byte).
+ wait_cpu_cycle_write (addr, 0, v);
+ return;
+ case CE_MEMBANK_FAST16:
+ case CE_MEMBANK_FAST32:
+ // Fast RAM: immediate write plus a fixed 4-CPU-cycle bus charge.
+ put_byte (addr, v);
+ x_do_cycles_post (4 * cpucycleunit, v);
+ return;
+ }
+ put_byte (addr, v);
+}
+// Cycle-exact word write: latch the value into regs.db before the store,
+// then write with the per-bank delay model (size argument 1 = word for the
+// chip-bus path). Unhandled bank types fall through to a plain put_word
+// with no modelled delay.
+void mem_access_delay_word_write (uaecptr addr, uae_u32 v)
+{
+ regs.db = v;
+ switch (ce_banktype[addr >> 16])
+ {
+ case CE_MEMBANK_CHIP16:
+ case CE_MEMBANK_CHIP32:
+ // Chip bus: synchronized word write.
+ wait_cpu_cycle_write (addr, 1, v);
+ return;
+ case CE_MEMBANK_FAST16:
+ case CE_MEMBANK_FAST32:
+ // Fast RAM: immediate write plus a fixed 4-CPU-cycle bus charge.
+ put_word (addr, v);
+ x_do_cycles_post (4 * cpucycleunit, v);
+ return;
+ }
+ put_word (addr, v);
+}
+
+
// this one is really simple and easy
static void fill_icache020 (uae_u32 addr, uae_u32 (*fetch)(uaecptr))
{
#endif
regs.prefetch020[0] = regs.prefetch020[1];
if (!MORE_ACCURATE_68020_PIPELINE || regs.pipeline_stop >= 0) {
- fill_icache020 (pc + 2 + 4, get_longi);
+ fill_icache020 (pc + 2 + 4, currprefs.cpu_memory_cycle_exact ? mem_access_delay_longi_read_ce020 : get_longi);
regs.prefetch020[1] = regs.cacheholdingdata020;
}
regs.db = regs.prefetch020[0] >> 16;
// add as available "free" internal CPU time.
cycs = get_cycles () - cycs;
regs.ce020memcycles += cycs;
+ } else if (currprefs.cpu_memory_cycle_exact) {
+ data = mem_access_delay_longi_read_ce020 (addr);
} else {
data = get_longi (addr);
}
void write_dcache030(uaecptr addr, uae_u32 v, int size)
{
write_dcache030x(addr, v, size);
- if (currprefs.cpu_cycle_exact) {
+ if (currprefs.cpu_memory_cycle_exact) {
if (size == 2)
mem_access_delay_long_write_ce020(addr, v);
else if (size == 1)
uae_u32 v1, v2;
if (!(regs.cacr & 0x100) || !cancache030 (addr)) { // data cache disabled?
- if (currprefs.cpu_cycle_exact) {
+ if (currprefs.cpu_memory_cycle_exact) {
if (size == 2)
return mem_access_delay_long_read_ce020 (addr);
else if (size == 1)
c1 = getcache030 (dcaches030, addr, &tag1, &lws1);
addr &= ~3;
if (!c1->valid[lws1] || c1->tag != tag1) {
- v1 = currprefs.cpu_cycle_exact ? mem_access_delay_long_read_ce020 (addr) : get_long (addr);
+ v1 = currprefs.cpu_memory_cycle_exact ? mem_access_delay_long_read_ce020 (addr) : get_long (addr);
update_cache030 (c1, v1, tag1, lws1);
} else {
v1 = c1->data[lws1];
addr += 4;
c2 = getcache030 (dcaches030, addr, &tag2, &lws2);
if (!c2->valid[lws2] || c2->tag != tag2) {
- v2 = currprefs.cpu_cycle_exact ? mem_access_delay_long_read_ce020 (addr) : get_long (addr);
+ v2 = currprefs.cpu_memory_cycle_exact ? mem_access_delay_long_read_ce020 (addr) : get_long (addr);
update_cache030 (c2, v2, tag2, lws2);
} else {
v2 = c2->data[lws2];
if (regs.prefetch020addr == addr2)
return regs.prefetch020[lws];
regs.prefetch020addr = addr2;
- if (currprefs.cpu_cycle_exact) {
+ if (currprefs.cpu_memory_cycle_exact) {
regs.prefetch020[0] = mem_access_delay_longi_read_ce020(addr2 + 0);
regs.prefetch020[1] = mem_access_delay_longi_read_ce020(addr2 + 4);
regs.prefetch020[2] = mem_access_delay_longi_read_ce020(addr2 + 8);
}
c->tag[line] = tag;
c->valid[line] = true;
- if (currprefs.cpu_cycle_exact) {
+ if (currprefs.cpu_memory_cycle_exact) {
c->data[line][0] = mem_access_delay_longi_read_ce020(addr + 0);
c->data[line][1] = mem_access_delay_longi_read_ce020(addr + 4);
c->data[line][2] = mem_access_delay_longi_read_ce020(addr + 8);
STATIC_INLINE bool nocache040(uaecptr addr)
{
- if (!currprefs.cpu_cycle_exact)
+ if (!currprefs.cpu_memory_cycle_exact)
return false;
if (!(regs.cacr & 0x80000000))
return true;
if ((addr & 2) == 0) {
if (is_dcache040(addr))
write_dcache040(addr, v);
- else if (currprefs.cpu_cycle_exact)
+ else if (currprefs.cpu_memory_cycle_exact)
mem_access_delay_long_write_ce020(addr, v);
else
put_long(addr, v);
vp &= 0xffff0000;
vp |= v >> 16;
write_dcache040(addr, vp);
- } else if (currprefs.cpu_cycle_exact) {
+ } else if (currprefs.cpu_memory_cycle_exact) {
mem_access_delay_word_write_ce020(addr + 0, v >> 16);
} else {
put_word(addr + 0, v >> 16);
vp &= 0x0000ffff;
vp |= v << 16;
write_dcache040(addr + 4, vp);
- } else if (currprefs.cpu_cycle_exact) {
+ } else if (currprefs.cpu_memory_cycle_exact) {
mem_access_delay_word_write_ce020(addr + 2, v);
} else {
put_word(addr + 2, v);
vp |= v << 16;
}
write_dcache040(addr, vp);
- } else if (currprefs.cpu_cycle_exact) {
+ } else if (currprefs.cpu_memory_cycle_exact) {
mem_access_delay_word_write_ce020(addr, v);
} else {
put_word(addr, v);
vp &= ~mask;
vp |= (v << (3 - (addr & 3))) & mask;
write_dcache040(addr, vp);
- } else if (currprefs.cpu_cycle_exact) {
+ } else if (currprefs.cpu_memory_cycle_exact) {
mem_access_delay_byte_write_ce020(addr, v);
} else {
put_byte(addr, v);
void flush_dcache (uaecptr addr, int size)
{
- if (!currprefs.cpu_cycle_exact && !currprefs.cpu_compatible)
+ if (!currprefs.cpu_memory_cycle_exact && !currprefs.cpu_compatible)
return;
if (currprefs.cpu_model >= 68030) {
for (int i = 0; i < CACHELINES030; i++) {
uaecptr pc = m68k_getpc ();
uaecptr pc2 = pc;
pc &= ~3;
- uae_u32 (*fetch)(uaecptr) = currprefs.cpu_cycle_exact ? mem_access_delay_longi_read_ce020 : get_longi;
+ uae_u32 (*fetch)(uaecptr) = currprefs.cpu_memory_cycle_exact ? mem_access_delay_longi_read_ce020 : get_longi;
regs.pipeline_pos = 0;
regs.pipeline_stop = 0;
regs.pipeline_r8[0] = regs.pipeline_r8[1] = -1;
if (!currprefs.cpu_compatible)
return;
if (currprefs.cpu_model >= 68040) {
- if (currprefs.cpu_compatible || currprefs.cpu_cycle_exact) {
+ if (currprefs.cpu_compatible || currprefs.cpu_memory_cycle_exact) {
fill_icache040(m68k_getpc() + 16);
fill_icache040(m68k_getpc());
}