/* Events */
uae_u32 vsync_cycles;
-static int extra_cycle;
+static uae_u32 extra_cycle;
static int rpt_did_reset;
struct ev eventtab[ev_max];
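Context for the `static int extra_cycle` → `static uae_u32 extra_cycle` switch: the value only ever holds a sub-`2 * CYCLE_UNIT` remainder and the surrounding cycle counters are already unsigned, so signed storage just invites mixed-sign comparisons. A minimal standalone sketch of why one unsigned bound check is enough (stand-in typedef; `LIMIT` is a hypothetical placeholder for `2 * CYCLE_UNIT`):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t uae_u32;   /* stand-in for the emulator typedef */
#define LIMIT 1024u         /* hypothetical; plays the role of 2 * CYCLE_UNIT */

int main(void)
{
    int signed_extra = -1;                     /* a "corrupt" restored value */
    uae_u32 unsigned_extra = (uae_u32)signed_extra;

    /* Signed storage needs two checks... */
    printf("%d\n", signed_extra < 0 || signed_extra >= (int)LIMIT);   /* 1 */

    /* ...unsigned storage needs one: -1 wraps to 0xFFFFFFFF, which the
       upper-bound test already rejects. */
    printf("%d\n", unsigned_extra >= LIMIT);                          /* 1 */
    return 0;
}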
restore_u32 ();
start_cycles = restore_u64 ();
extra_cycle = restore_u32 ();
- if (extra_cycle < 0 || extra_cycle >= 2 * CYCLE_UNIT)
+ if (extra_cycle >= 2 * CYCLE_UNIT)
extra_cycle = 0;
write_log (_T("RESTORECYCLES %08lX\n"), start_cycles);
return src;
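Dropping the `extra_cycle < 0` arm is not a behavior change: for a `uae_u32` that test is always false (GCC's `-Wtype-limits` flags exactly this), and a negative value smuggled in through the savestate wraps to a large number that the remaining bound still resets. A compile-and-run sketch of the restore-side clamp, using a placeholder `CYCLE_UNIT` value rather than the core's definition:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t uae_u32;   /* stand-in for the emulator typedef */
#define CYCLE_UNIT 512u     /* placeholder value, not taken from the core headers */

/* Mirrors the patched validation in restore_cycles(): one unsigned
   bound check replaces the old signed pair. */
static uae_u32 clamp_extra_cycle(uae_u32 v)
{
    if (v >= 2 * CYCLE_UNIT)
        v = 0;              /* out-of-range savestate value: reset */
    return v;
}

int main(void)
{
    printf("%u\n", clamp_extra_cycle(100u));        /* 100: kept   */
    printf("%u\n", clamp_extra_cycle(0xFFFFFFFFu)); /* 0: rejected */
    return 0;
}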
void do_cycles_ce020(uae_u32 cycles)
{
uae_u32 c;
- int extra;
+ uae_u32 extra;
	if (!cycles) {
		return;
	}
	/* ... */
		do_cycles(1 * CYCLE_UNIT);
		c -= CYCLE_UNIT;
	}
- if (c > 0) {
+ if (c) {
do_cycles(c);
}
}
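On `if (c > 0)` → `if (c)`: for an unsigned `c` the two are identical, so this only removes a signed-looking comparison. The elided middle of the function burns whole `CYCLE_UNIT` steps and leaves a sub-unit remainder in `c`; here is a self-contained sketch of that tail shape, with a stubbed `do_cycles`, a placeholder `CYCLE_UNIT` value, and an assumed loop condition (the real loop header is not quoted above):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t uae_u32;
#define CYCLE_UNIT 512u     /* placeholder value */

static uae_u32 clock_now;   /* toy event clock */
static void do_cycles(uae_u32 n) { clock_now += n; }   /* stub */

static void burn(uae_u32 c)
{
    while (c >= CYCLE_UNIT) {     /* assumed loop condition */
        do_cycles(1 * CYCLE_UNIT);
        c -= CYCLE_UNIT;
    }
    if (c) {                      /* remainder: same as c > 0 for unsigned */
        do_cycles(c);
    }
}

int main(void)
{
    burn(3 * CYCLE_UNIT + 7);
    printf("%u\n", clock_now);    /* 1543 with the placeholder CYCLE_UNIT */
    return 0;
}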
return true;
if (!cputrace.readcounter && !cputrace.writecounter && !cputrace.cyclecounter) {
if (cpu_tracer != -2) {
- write_log (_T("CPU trace: dma_cycle() enabled. %08x %08x NOW=%08lx\n"),
+ write_log (_T("CPU trace: dma_cycle() enabled. %08x %08x NOW=%08x\n"),
cputrace.cyclecounter_pre, cputrace.cyclecounter_post, get_cycles ());
cpu_tracer = -2; // dma_cycle() allowed to work now
}
struct cputracememory *ctm = &cputrace.ctm[i];
if (ctm->addr == addr && ctm->mode == mode) {
ctm->mode = 0;
- write_log (_T("CPU trace: GET %d: PC=%08x %08x=%08x %d %d %08x/%08x/%08x %d/%d (%08lx)\n"),
+ write_log (_T("CPU trace: GET %d: PC=%08x %08x=%08x %d %d %08x/%08x/%08x %d/%d (%08x)\n"),
i, cputrace.pc, addr, ctm->data, accessmode, size,
cputrace.cyclecounter, cputrace.cyclecounter_pre, cputrace.cyclecounter_post,
cputrace.readcounter, cputrace.writecounter, get_cycles ());
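Both `%08lx` → `%08x` changes (the dma_cycle() message above and this GET line) are varargs-correctness fixes, assuming `get_cycles ()` yields a 32-bit value here: on an LP64 target `%lx` consumes a 64-bit `unsigned long`, so handing it a 32-bit argument is undefined behavior and can corrupt this and every later conversion. A minimal reproduction:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t uae_u32;

int main(void)
{
    uae_u32 now = 0x00C0FFEEu;   /* stands in for get_cycles() */

    printf("NOW=%08x\n", now);   /* well-defined: %x matches unsigned int */

    /* printf("NOW=%08lx\n", now);
       On LP64 this reads 8 bytes for a 4-byte argument: undefined
       behavior, and it can desynchronize every later conversion. */
    return 0;
}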