static int blitter_vcounter;
#endif
-static long blit_firstline_cycles;
-static long blit_first_cycle;
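+// blitter cycle stamps widened from 32-bit long to 64-bit evt_t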
+static evt_t blit_firstline_cycles;
+static evt_t blit_first_cycle;
static int blit_last_cycle, blit_dmacount, blit_cyclecount;
static int blit_linecycles, blit_extracycles;
static int blit_faulty;
if (log_blitter & 1) {
write_log(_T("cycles %d, missed %d, total %d\n"),
blit_totalcyclecounter, blit_misscyclecounter, blit_totalcyclecounter + blit_misscyclecounter);
+
}
blt_info.blitter_dangerous_bpl = 0;
}
goto end;
}
- if (hack == 1 && (int)get_cycles() - (int)blit_firstline_cycles < 0)
+ if (hack == 1 && get_cycles() < blit_firstline_cycles)
goto end;
blitter_handler(0);
}
if (blit_last_cycle >= blit_cyclecount && blit_dmacount == blit_cyclecount)
return 0;
- cycles = (get_cycles() - blit_first_cycle) / CYCLE_UNIT;
+ cycles = (int)((get_cycles() - blit_first_cycle) / CYCLE_UNIT);
ccnt = 0;
while (blit_last_cycle + blit_cyclecount < cycles) {
ccnt += blit_dmacount;
restore_u8();
restore_u8();
restore_u8();
+ restore_u8();
if (restore_u16() != 0x1234) {
write_log(_T("blitter state restore error\n"));
blitter_dump();
}
- save_u32(blit_first_cycle);
+ save_u32((uae_u32)blit_first_cycle);
save_u32(blit_last_cycle);
save_u32(blit_waitcyclecounter);
save_u32(0); //(blit_startcycles);
save_u32(blit_maxcyclecounter);
- save_u32(blit_firstline_cycles);
+ save_u32((uae_u32)blit_firstline_cycles);
save_u32(blit_cyclecounter);
save_u32(blit_slowdown);
save_u32(blit_misscyclecounter);
save_u8(blt_info.blit_finald);
save_u8(blit_ovf);
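+ // upper 32 bits of the 64-bit cycle values, appended after the existing fields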
+ save_u32((uae_u32)(blit_first_cycle >> 32));
+ save_u32((uae_u32)(blit_firstline_cycles >> 32));
+
*len = dst - dstbak;
return dstbak;
}
/* Events */
-uae_u32 vsync_cycles;
-static uae_u32 extra_cycle;
+evt_t vsync_cycles;
+static int extra_cycle;
static int rpt_did_reset;
struct ev eventtab[ev_max];
static bool bplcon0_interlace_seen;
static int scandoubled_line;
static bool vsync_rendered, frame_rendered, frame_shown;
-static int vsynctimeperline;
-static int frameskiptime;
+static frame_time_t vsynctimeperline;
+static frame_time_t frameskiptime;
static bool genlockhtoggle;
static bool genlockvtoggle;
static bool graphicsbuffer_retry;
static struct chipset_refresh *stored_chipset_refresh;
int doublescan;
int programmedmode;
-int syncbase;
+frame_time_t syncbase;
static int fmode_saved, fmode;
uae_u16 beamcon0, new_beamcon0;
uae_u16 bemcon0_hsync_mask, bemcon0_vsync_mask;
/*
* Statistics
*/
-unsigned long int frametime = 0, lastframetime = 0, timeframes = 0;
-unsigned long hsync_counter = 0, vsync_counter = 0;
-unsigned long int idletime;
+uae_u32 timeframes;
+evt_t frametime;
+frame_time_t lastframetime;
+uae_u32 hsync_counter, vsync_counter;
+frame_time_t idletime;
int bogusframe;
/* Recording of custom chip register changes. */
}
}
-static int irq_forced, irq_delay;
+static int irq_forced;
+static evt_t irq_delay;
void IRQ_forced(int lvl, int delay)
{
irq_forced = lvl;
- irq_delay = -1;
+ irq_delay = 0;
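+ // irq_delay is the absolute cycle time after which the forced IRQ is released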
if (delay > 0 && currprefs.cpu_compatible) {
- irq_delay = get_cycles() + irq_delay * CYCLE_UNIT;
+ irq_delay = get_cycles() + delay * CYCLE_UNIT;
}
doint();
}
{
if (irq_forced) {
int lvl = irq_forced;
- if (irq_delay == -1 || ((int)get_cycles()) - irq_delay > 0) {
+ if (irq_delay == -1 || get_cycles() > irq_delay) {
irq_forced = 0;
irq_delay = -1;
}
next_sprite_forced = 1;
}
-static int rpt_vsync(int adjust)
+static frame_time_t rpt_vsync(int adjust)
{
frame_time_t curr_time = read_processor_time();
- int v = curr_time - vsyncwaittime + adjust;
+ frame_time_t v = curr_time - vsyncwaittime + adjust;
if (v > syncbase || v < -syncbase) {
vsyncmintime = vsyncmaxtime = vsyncwaittime = curr_time;
v = 0;
#define MAVG_MAX_SIZE 128
struct mavg_data
{
- int values[MAVG_MAX_SIZE];
+ frame_time_t values[MAVG_MAX_SIZE];
int size;
int offset;
- int mavg;
+ frame_time_t mavg;
};
static void mavg_clear (struct mavg_data *md)
md->mavg = 0;
}
-static int mavg(struct mavg_data *md, int newval, int size)
+static frame_time_t mavg(struct mavg_data *md, frame_time_t newval, int size)
{
if (md->size < size) {
md->values[md->size++] = newval;
events_reset_syncline();
static struct mavg_data ma_frameskipt;
- int frameskipt_avg = mavg(&ma_frameskipt, frameskiptime, MAVG_VSYNC_SIZE);
+ frame_time_t frameskipt_avg = mavg(&ma_frameskipt, frameskiptime, MAVG_VSYNC_SIZE);
frameskiptime = 0;
static struct mavg_data ma_legacy;
static frame_time_t vsync_time;
- int t;
+ frame_time_t t;
curr_time = read_processor_time();
vsyncwaittime = vsyncmaxtime = curr_time + vsynctimebase;
maybe_process_pull_audio();
- int legacy_avg = mavg(&ma_legacy, t, MAVG_VSYNC_SIZE);
+ frame_time_t legacy_avg = mavg(&ma_legacy, t, MAVG_VSYNC_SIZE);
if (t > legacy_avg) {
legacy_avg = t;
}
status = 1;
int clockadjust = 0;
- int vstb = vsynctimebase;
+ frame_time_t vstb = vsynctimebase;
if (currprefs.m68k_speed < 0 && !cpu_sleepmode && !currprefs.cpu_memory_cycle_exact) {
// this delay can safely overshoot frame time by 1-2 ms, following code will compensate for it.
for (;;) {
curr_time = read_processor_time();
- if ((int)vsyncwaittime - (int)curr_time <= 0 || (int)vsyncwaittime - (int)curr_time > 2 * vsynctimebase) {
+ if (vsyncwaittime - curr_time <= 0 || vsyncwaittime - curr_time > 2 * vsynctimebase) {
break;
}
rtg_vsynccheck ();
}
int max;
- int adjust = 0;
- if ((int)curr_time - (int)vsyncwaittime > 0 && (int)curr_time - (int)vsyncwaittime < vstb / 2) {
+ frame_time_t adjust = 0;
+ if (curr_time - vsyncwaittime > 0 && curr_time - vsyncwaittime < vstb / 2) {
adjust += curr_time - vsyncwaittime;
}
adjust += clockadjust;
} else {
- int t = 0;
+ frame_time_t t = 0;
start = read_processor_time();
if (!frame_rendered && !ad->picasso_on) {
frame_time_t rpt;
for (;;) {
rpt = read_processor_time();
- if ((int)rpt - (int)(vsyncmintime - vsynctimebase * 2 / 3) >= 0 || (int)rpt - (int)vsyncmintime < -2 * vsynctimebase)
+ if (rpt - (vsyncmintime - vsynctimebase * 2 / 3) >= 0 || rpt - vsyncmintime < -2 * vsynctimebase)
break;
maybe_process_pull_audio();
if (currprefs.m68k_speed < 0 && !was_syncline) {
for (;;) {
rpt = read_processor_time();
- if ((int)rpt - (int)vsyncmintime >= 0 || (int)rpt - (int)vsyncmintime < -2 * vsynctimebase)
+ if (rpt - vsyncmintime >= 0 || rpt - vsyncmintime < -2 * vsynctimebase)
break;
maybe_process_pull_audio();
if (currprefs.m68k_speed < 0 && !was_syncline) {
target_spin(0);
}
- if ((int)rpt - (int)vsyncmintime < vsynctimebase && (int)rpt - (int)vsyncmintime > -vsynctimebase) {
+ if (rpt - vsyncmintime < vsynctimebase && rpt - vsyncmintime > -vsynctimebase) {
vsyncmintime += vsynctimebase;
} else {
vsyncmintime = rpt + vsynctimebase;
if (regs.stopped && currprefs.cpu_idle) {
// CPU in STOP state: sleep if enough time left.
frame_time_t rpt = read_processor_time();
- while (vsync_isdone(NULL) <= 0 && (int)vsyncmintime - (int)(rpt + vsynctimebase / 10) > 0 && (int)vsyncmintime - (int)rpt < vsynctimebase) {
+ while (vsync_isdone(NULL) <= 0 && vsyncmintime - (rpt + vsynctimebase / 10) > 0 && vsyncmintime - rpt < vsynctimebase) {
maybe_process_pull_audio();
// if (!execute_other_cpu(rpt + vsynctimebase / 10)) {
if (cpu_sleep_millis(1) < 0)
linecounter++;
events_reset_syncline();
if (vsync_isdone(NULL) <= 0 && !currprefs.turbo_emulation) {
- if ((int)vsyncmaxtime - (int)vsyncmintime > 0) {
- if ((int)vsyncwaittime - (int)vsyncmintime > 0) {
+ if (vsyncmaxtime - vsyncmintime > 0) {
+ if (vsyncwaittime - vsyncmintime > 0) {
frame_time_t rpt = read_processor_time();
/* Extra time left? Do some extra CPU emulation */
- if ((int)vsyncmintime - (int)rpt > 0) {
+ if (vsyncmintime - rpt > 0) {
if (regs.stopped && currprefs.cpu_idle && sleeps_remaining > 0) {
// STOP STATE: sleep.
cpu_sleep_millis(1);
if (audio_is_pull() > 0 && !currprefs.turbo_emulation) {
maybe_process_pull_audio();
frame_time_t rpt = read_processor_time();
- while (audio_pull_buffer() > 1 && (!isvsync() || (vsync_isdone(NULL) <= 0 && (int)vsyncmintime - (int)(rpt + vsynctimebase / 10) > 0 && (int)vsyncmintime - (int)rpt < vsynctimebase))) {
+ while (audio_pull_buffer() > 1 && (!isvsync() || (vsync_isdone(NULL) <= 0 && vsyncmintime - (rpt + vsynctimebase / 10) > 0 && vsyncmintime - rpt < vsynctimebase))) {
cpu_sleep_millis(1);
maybe_process_pull_audio();
rpt = read_processor_time();
if (vsync_isdone(NULL) <= 0 && !currprefs.turbo_emulation) {
frame_time_t rpt = read_processor_time();
// sleep if more than 2ms "free" time
- while (vsync_isdone(NULL) <= 0 && (int)vsyncmintime - (int)(rpt + vsynctimebase / 10) > 0 && (int)vsyncmintime - (int)rpt < vsynctimebase) {
+ while (vsync_isdone(NULL) <= 0 && vsyncmintime - (rpt + vsynctimebase / 10) > 0 && vsyncmintime - rpt < vsynctimebase) {
maybe_process_pull_audio();
// if (!execute_other_cpu(rpt + vsynctimebase / 10)) {
if (cpu_sleep_millis(1) < 0)
int cnt = restore_u8();
for (int i = 0; i < cnt; i++) {
uae_u8 type = restore_u8();
- evt e = restore_u64();
+ evt_t e = restore_u64();
uae_u32 data = restore_u32();
if (type == 1)
event2_newevent_xx(-1, e, data, send_interrupt_do);
save_u32(CYCLE_UNIT);
save_u64(get_cycles());
save_u32(extra_cycle);
- write_log(_T("SAVECYCLES %08lX\n"), get_cycles());
+ write_log(_T("SAVECYCLES %08llX\n"), get_cycles());
*len = dst - dstbak;
return dstbak;
}
static void sync_cycles(void)
{
- uae_u32 c;
+ evt_t c;
uae_u32 extra;
c = get_cycles();
}
-void do_cycles_ce(uae_u32 cycles)
+void do_cycles_ce(int cycles)
{
cycles += extra_cycle;
while (cycles >= CYCLE_UNIT) {
extra_cycle = cycles;
}
-void do_cycles_ce020(uae_u32 cycles)
+void do_cycles_ce020(int cycles)
{
- uae_u32 c;
- uae_u32 extra;
+ int c;
+ evt_t cc;
+ int extra;
if (!cycles) {
return;
}
- c = get_cycles();
- extra = c & (CYCLE_UNIT - 1);
+ cc = get_cycles();
+ extra = (int)(cc & (CYCLE_UNIT - 1));
if (extra) {
extra = CYCLE_UNIT - extra;
if (extra >= cycles) {
static const int pissoff_nojit_value = 256 * CYCLE_UNIT;
-uae_u32 event_cycles, nextevent, currcycle;
-int is_syncline, is_syncline_end;
+evt_t event_cycles, nextevent, currcycle;
+int is_syncline;
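+// is_syncline_end holds host time from read_processor_time(), hence frame_time_t rather than evt_t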
+frame_time_t is_syncline_end;
int cycles_to_next_event;
int max_cycles_to_next_event;
int cycles_to_hsync_event;
-uae_u32 start_cycles;
+evt_t start_cycles;
bool event_wait;
frame_time_t vsyncmintime, vsyncmintimepre;
frame_time_t vsyncmaxtime, vsyncwaittime;
-int vsynctimebase;
+frame_time_t vsynctimebase;
int event2_count;
static void events_fast(void)
{
int i;
- uae_u32 mintime = ~0L;
+ evt_t mintime = EVT_MAX;
for (i = 0; i < ev_max; i++) {
if (eventtab[i].active) {
- uae_u32 eventtime = eventtab[i].evtime - currcycle;
+ evt_t eventtime = eventtab[i].evtime - currcycle;
if (eventtime < mintime)
mintime = eventtime;
}
}
- nextevent = currcycle + mintime;
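+ // nothing active: park nextevent at EVT_MAX so no event can fire until rescheduled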
+ if (mintime < EVT_MAX) {
+ nextevent = currcycle + mintime;
+ } else {
+ nextevent = EVT_MAX;
+ }
}
extern void vsync_event_done(void);
// wait is_syncline_end
if (event_wait) {
- int rpt = read_processor_time();
- int v = rpt - is_syncline_end;
+ frame_time_t rpt = read_processor_time();
+ frame_time_t v = rpt - is_syncline_end;
if (v < 0) {
#ifdef WITH_PPC
if (ppc_state) {
// wait is_syncline_end/vsyncmintime
if (event_wait) {
- int rpt = read_processor_time();
- int v = rpt - vsyncmintime;
- int v2 = rpt - is_syncline_end;
+ frame_time_t rpt = read_processor_time();
+ frame_time_t v = rpt - vsyncmintime;
+ frame_time_t v2 = rpt - is_syncline_end;
if (v > vsynctimebase || v < -vsynctimebase) {
v = 0;
}
return false;
}
-void do_cycles_slow (uae_u32 cycles_to_add)
+void do_cycles_slow (int cycles_to_add)
{
#ifdef WITH_X86
#if 0
return;
}
- cycles_to_add -= nextevent - currcycle;
+ cycles_to_add -= (int)(nextevent - currcycle);
currcycle = nextevent;
for (int i = 0; i < ev_max; i++) {
static bool dorecheck;
bool recheck;
int i;
- evt mintime;
- evt ct = get_cycles ();
+ evt_t mintime;
+ evt_t ct = get_cycles();
static int recursive;
if (recursive) {
recheck = true;
while (recheck) {
recheck = false;
- mintime = ~0L;
+ mintime = EVT_MAX;
for (i = 0; i < ev2_max; i++) {
if (eventtab2[i].active) {
if (eventtab2[i].evtime == ct) {
dorecheck = false;
}
} else {
- evt eventtime = eventtab2[i].evtime - ct;
+ evt_t eventtime = eventtab2[i].evtime - ct;
if (eventtime < mintime)
mintime = eventtime;
}
}
}
}
- if (mintime != ~0UL) {
+ if (mintime < EVT_MAX) {
eventtab[ev_misc].active = true;
eventtab[ev_misc].oldcycles = ct;
eventtab[ev_misc].evtime = ct + mintime;
}
-void event2_newevent_xx (int no, evt t, uae_u32 data, evfunc2 func)
+void event2_newevent_xx (int no, evt_t t, uae_u32 data, evfunc2 func)
{
- evt et;
+ evt_t et;
static int next = ev2_misc;
et = t + get_cycles ();
if (no == next) {
write_log (_T("out of event2's!\n"));
// execute most recent event immediately
- evt mintime = ~0L;
+ evt_t mintime = EVT_MAX;
int minevent = -1;
- evt ct = get_cycles();
+ evt_t ct = get_cycles();
for (int i = 0; i < ev2_max; i++) {
if (eventtab2[i].active) {
- evt eventtime = eventtab2[i].evtime - ct;
+ evt_t eventtime = eventtab2[i].evtime - ct;
if (eventtime < mintime) {
mintime = eventtime;
minevent = i;
MISC_handler ();
}
-void event2_newevent_x_replace(evt t, uae_u32 data, evfunc2 func)
+void event2_newevent_x_replace(evt_t t, uae_u32 data, evfunc2 func)
{
for (int i = 0; i < ev2_max; i++) {
if (eventtab2[i].active && eventtab2[i].handler == func) {
/* Set to 1 to leave out the current frame in average frame time calculation.
* Useful if the debugger was active. */
extern int bogusframe;
-extern unsigned long int hsync_counter, vsync_counter;
+extern uae_u32 hsync_counter, vsync_counter;
extern uae_u16 dmacon;
extern uae_u16 intena, intreq, intreqr;
#define CYCLE_MASK 0x0f
-extern unsigned long frametime, timeframes;
+extern uae_u32 timeframes;
+extern evt_t frametime;
extern uae_u16 htotal, vtotal, beamcon0, new_beamcon0;
extern uae_u16 bemcon0_hsync_mask, bemcon0_vsync_mask;
}
extern void fpscounter_reset(void);
-extern unsigned long idletime;
+extern frame_time_t idletime;
extern int lightpen_x[2], lightpen_y[2];
extern int lightpen_cx[2], lightpen_cy[2], lightpen_active, lightpen_enabled, lightpen_enabled2;
#include "machdep/rpt.h"
+#define EVT_MAX 0x7fffffffffffffff
+
extern frame_time_t vsyncmintime, vsyncmintimepre;
extern frame_time_t vsyncmaxtime, vsyncwaittime;
-extern int vsynctimebase, syncbase;
-extern void reset_frame_rate_hack (void);
-extern uae_u32 vsync_cycles;
-extern uae_u32 start_cycles;
+extern frame_time_t vsynctimebase, syncbase;
+extern void reset_frame_rate_hack(void);
+extern evt_t vsync_cycles;
+extern evt_t start_cycles;
extern int event2_count;
extern bool event_wait;
-extern void compute_vsynctime (void);
-extern void init_eventtab (void);
-extern void do_cycles_ce (uae_u32 cycles);
-extern void do_cycles_ce020 (uae_u32 cycles);
-extern void events_schedule (void);
-extern void do_cycles_slow (uae_u32 cycles_to_add);
+extern void compute_vsynctime(void);
+extern void init_eventtab(void);
+extern void do_cycles_ce(int cycles);
+extern void do_cycles_ce020(int cycles);
+extern void events_schedule(void);
+extern void do_cycles_slow(int cycles_to_add);
extern void events_reset_syncline(void);
extern bool is_cycle_ce(uaecptr);
-extern uae_u32 currcycle, nextevent;
-extern int is_syncline, is_syncline_end;
+extern evt_t currcycle, nextevent;
+extern int is_syncline;
+extern frame_time_t is_syncline_end;
typedef void (*evfunc)(void);
typedef void (*evfunc2)(uae_u32);
-typedef unsigned int evt;
-
struct ev
{
bool active;
- evt evtime, oldcycles;
+ evt_t evtime, oldcycles;
evfunc handler;
};
struct ev2
{
bool active;
- evt evtime;
+ evt_t evtime;
uae_u32 data;
evfunc2 handler;
};
};
extern int pissoff_value;
-extern uae_s32 pissoff;
+extern int pissoff;
#define countdown pissoff
#define do_cycles do_cycles_slow
}
}
-STATIC_INLINE void do_extra_cycles (uae_u32 cycles_to_add)
+STATIC_INLINE void do_extra_cycles(int cycles_to_add)
{
pissoff -= cycles_to_add;
}
-STATIC_INLINE uae_u32 get_cycles (void)
+STATIC_INLINE evt_t get_cycles(void)
{
return currcycle;
}
-STATIC_INLINE void set_cycles (uae_u32 x)
+STATIC_INLINE void set_cycles (evt_t x)
{
currcycle = x;
eventtab[ev_hsync].oldcycles = x;
#endif
}
-STATIC_INLINE int current_hpos_safe (void)
+STATIC_INLINE int current_hpos_safe(void)
{
- int hp = (get_cycles () - eventtab[ev_hsync].oldcycles) / CYCLE_UNIT;
+ int hp = (int)((get_cycles() - eventtab[ev_hsync].oldcycles) / CYCLE_UNIT);
return hp;
}
extern int current_hpos(void);
-STATIC_INLINE bool cycles_in_range (uae_u32 endcycles)
+STATIC_INLINE bool cycles_in_range(evt_t endcycles)
{
- uae_s32 c = get_cycles ();
- return (uae_s32)endcycles - c > 0;
+ evt_t c = get_cycles();
+ return endcycles > c;
}
-extern void MISC_handler (void);
-extern void event2_newevent_xx (int no, evt t, uae_u32 data, evfunc2 func);
-extern void event2_newevent_x_replace(evt t, uae_u32 data, evfunc2 func);
+extern void MISC_handler(void);
+extern void event2_newevent_xx(int no, evt_t t, uae_u32 data, evfunc2 func);
+extern void event2_newevent_x_replace(evt_t t, uae_u32 data, evfunc2 func);
-STATIC_INLINE void event2_newevent_x (int no, evt t, uae_u32 data, evfunc2 func)
+STATIC_INLINE void event2_newevent_x(int no, evt_t t, uae_u32 data, evfunc2 func)
{
- if (((int)t) <= 0) {
- func (data);
+ if (t <= 0) {
+ func(data);
return;
}
- event2_newevent_xx (no, t * CYCLE_UNIT, data, func);
+ event2_newevent_xx(no, t * CYCLE_UNIT, data, func);
}
-STATIC_INLINE void event2_newevent (int no, evt t, uae_u32 data)
+STATIC_INLINE void event2_newevent(int no, evt_t t, uae_u32 data)
{
- event2_newevent_x (no, t, data, eventtab2[no].handler);
+ event2_newevent_x(no, t, data, eventtab2[no].handler);
}
-STATIC_INLINE void event2_newevent2 (evt t, uae_u32 data, evfunc2 func)
+STATIC_INLINE void event2_newevent2(evt_t t, uae_u32 data, evfunc2 func)
{
- event2_newevent_x (-1, t, data, func);
+ event2_newevent_x(-1, t, data, func);
}
-STATIC_INLINE void event2_remevent (int no)
+STATIC_INLINE void event2_remevent(int no)
{
eventtab2[no].active = 0;
}
uae_u32 prefetch040[CPU_PIPELINE_MAX];
- int ce020endcycle;
- int ce020startcycle;
- int ce020prefetchendcycle;
+ evt_t ce020endcycle;
+ evt_t ce020startcycle;
+ evt_t ce020prefetchendcycle;
- int ce020extracycles;
+ evt_t ce020extracycles;
bool ce020memcycle_data;
int ce020_tail;
- frame_time_t ce020_tail_cycles;
+ evt_t ce020_tail_cycles;
int memory_waitstate_cycles;
};
int pipeline_stop;
uae_u16 read_buffer, write_buffer;
- uae_u32 startcycles;
+ evt_t startcycles;
int needendcycles;
int memoryoffset;
int cyclecounter, cyclecounter_pre, cyclecounter_post;
STATIC_INLINE void set_special (uae_u32 x)
{
atomic_or(®s.spcflags, x);
- cycles_do_special ();
+ cycles_do_special();
}
STATIC_INLINE void unset_special (uae_u32 x)
extern void dfc_nommu_put_word(uaecptr, uae_u32);
extern void dfc_nommu_put_long(uaecptr, uae_u32);
-extern void (*x_do_cycles)(uae_u32);
-extern void (*x_do_cycles_pre)(uae_u32);
-extern void (*x_do_cycles_post)(uae_u32, uae_u32);
+extern void (*x_do_cycles)(int);
+extern void (*x_do_cycles_pre)(int);
+extern void (*x_do_cycles_post)(int, uae_u32);
extern uae_u32 REGPARAM3 x_get_disp_ea_020 (uae_u32 base, int idx) REGPARAM;
extern uae_u32 REGPARAM3 x_get_disp_ea_ce020 (uae_u32 base, int idx) REGPARAM;
extern uae_u32 REGPARAM3 x_get_bitfield (uae_u32 src, uae_u32 bdata[2], uae_s32 offset, int width) REGPARAM;
extern void REGPARAM3 x_put_bitfield (uae_u32 dst, uae_u32 bdata[2], uae_u32 val, uae_s32 offset, int width) REGPARAM;
-extern void m68k_setstopped (void);
-extern void m68k_resumestopped (void);
+extern void m68k_setstopped(void);
+extern void m68k_resumestopped(void);
extern void m68k_cancel_idle(void);
extern uae_u32 REGPARAM3 get_disp_ea_020 (uae_u32 base, int idx) REGPARAM;
#include "uae/types.h"
-/* frame_time_t is often cast to int in the code so we use int for now... */
-typedef uae_u32 uae_time_t;
+/* uae_time_t / frame_time_t are now 64-bit; values are compared directly instead of via wrapping 32-bit casts. */
+typedef uae_s64 uae_time_t;
void uae_time_init(void);
void uae_time_calibrate(void);
uae_time_t uae_time(void);
-extern int syncbase;
-
#ifdef _WIN32
void uae_time_use_rdtsc(bool enable);
-uae_u32 read_system_time(void);
+uae_s64 read_system_time(void);
#endif
typedef uae_time_t frame_time_t;
return uae_time();
}
+extern frame_time_t syncbase;
+
#endif /* UAE_TIME_H */
#define TRUE (!FALSE)
#endif
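+// 64-bit emulated cycle counter type (values scaled by CYCLE_UNIT)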
+typedef uae_s64 evt_t;
+
#endif /* UAE_TYPES_H */
static struct teststore testmode_wait[TESTMODE_MAX];
static int bouncy;
-static signed long bouncy_cycles;
+static frame_time_t bouncy_cycles;
static int autopause;
#define HANDLE_IE_FLAG_CANSTOPPLAYBACK 1
static void (*x2_put_long)(uaecptr,uae_u32);
static void (*x2_put_word)(uaecptr,uae_u32);
static void (*x2_put_byte)(uaecptr,uae_u32);
-static void (*x2_do_cycles)(uae_u32);
-static void (*x2_do_cycles_pre)(uae_u32);
-static void (*x2_do_cycles_post)(uae_u32, uae_u32);
+static void (*x2_do_cycles)(int);
+static void (*x2_do_cycles_pre)(int);
+static void (*x2_do_cycles_post)(int, uae_u32);
uae_u32 (*x_prefetch)(int);
uae_u32 (*x_next_iword)(void);
void (*x_cp_put_byte)(uaecptr,uae_u32);
uae_u32 (REGPARAM3 *x_cp_get_disp_ea_020)(uae_u32 base, int idx) REGPARAM;
-void (*x_do_cycles)(uae_u32);
-void (*x_do_cycles_pre)(uae_u32);
-void (*x_do_cycles_post)(uae_u32, uae_u32);
+void (*x_do_cycles)(int);
+void (*x_do_cycles_pre)(int);
+void (*x_do_cycles_post)(int, uae_u32);
uae_u32(*x_phys_get_iword)(uaecptr);
uae_u32(*x_phys_get_ilong)(uaecptr);
x_do_cycles_pre = x2_do_cycles_pre;
x_do_cycles_post = x2_do_cycles_post;
set_x_cp_funcs();
- write_log(_T("CPU tracer playback complete. STARTCYCLES=%08x NOWCYCLES=%08x\n"), cputrace.startcycles, get_cycles());
+ write_log(_T("CPU tracer playback complete. STARTCYCLES=%016llx NOWCYCLES=%016llx\n"), cputrace.startcycles, get_cycles());
cputrace.needendcycles = 1;
cpu_tracer = 0;
return true;
write_log (_T("cputracefunc2_x_put_byte %d <> %d\n"), v, val);
}
-static void cputracefunc_x_do_cycles (uae_u32 cycles)
+static void cputracefunc_x_do_cycles(int cycles)
{
while (cycles >= CYCLE_UNIT) {
cputrace.cyclecounter += CYCLE_UNIT;
}
}
-static void cputracefunc2_x_do_cycles (uae_u32 cycles)
+static void cputracefunc2_x_do_cycles(int cycles)
{
if (cputrace.cyclecounter > cycles) {
cputrace.cyclecounter -= cycles;
x_do_cycles (cycles);
}
-static void cputracefunc_x_do_cycles_pre (uae_u32 cycles)
+static void cputracefunc_x_do_cycles_pre(int cycles)
{
cputrace.cyclecounter_post = 0;
cputrace.cyclecounter_pre = 0;
}
// cyclecounter_pre = how many cycles we need to SWALLOW
// -1 = rerun whole access
-static void cputracefunc2_x_do_cycles_pre (uae_u32 cycles)
+static void cputracefunc2_x_do_cycles_pre (int cycles)
{
if (cputrace.cyclecounter_pre == -1) {
cputrace.cyclecounter_pre = 0;
x_do_cycles (cycles);
}
-static void cputracefunc_x_do_cycles_post (uae_u32 cycles, uae_u32 v)
+static void cputracefunc_x_do_cycles_post (int cycles, uae_u32 v)
{
if (cputrace.memoryoffset < 1) {
#if CPUTRACE_DEBUG
cputrace.cyclecounter_post = 0;
}
// cyclecounter_post = how many cycles we need to WAIT
-static void cputracefunc2_x_do_cycles_post (uae_u32 cycles, uae_u32 v)
+static void cputracefunc2_x_do_cycles_post (int cycles, uae_u32 v)
{
- uae_u32 c;
+ int c;
if (cputrace.cyclecounter_post) {
c = cputrace.cyclecounter_post;
cputrace.cyclecounter_post = 0;
x_do_cycles (c);
}
-static void do_cycles_post (uae_u32 cycles, uae_u32 v)
+static void do_cycles_post (int cycles, uae_u32 v)
{
do_cycles (cycles);
}
-static void do_cycles_ce_post (uae_u32 cycles, uae_u32 v)
+static void do_cycles_ce_post (int cycles, uae_u32 v)
{
do_cycles_ce (cycles);
}
-static void do_cycles_ce020_post (uae_u32 cycles, uae_u32 v)
+static void do_cycles_ce020_post (int cycles, uae_u32 v)
{
do_cycles_ce020 (cycles);
}
x_get_byte = get_byte_dc030;
}
}
- x_do_cycles = do_cycles;
- x_do_cycles_pre = do_cycles;
- x_do_cycles_post = do_cycles_post;
-
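+ // when running cycle-exact, route cycle accounting through the ce020 handlers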
+ if (currprefs.cpu_cycle_exact) {
+ x_do_cycles = do_cycles_ce020;
+ x_do_cycles_pre = do_cycles_ce020;
+ x_do_cycles_post = do_cycles_ce020_post;
+ } else {
+ x_do_cycles = do_cycles;
+ x_do_cycles_pre = do_cycles;
+ x_do_cycles_post = do_cycles_post;
+ }
} else if (currprefs.cpu_model < 68020) {
// 68000/010
if (currprefs.cpu_cycle_exact) {
if (currprefs.m68k_speed == 0) { // approximate
cycles_mult = CYCLES_DIV;
if (currprefs.cpu_model >= 68040) {
- cycles_mult = CYCLES_DIV / 12;
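+ // approximate mode: use a lower cycle multiplier when MMU emulation is active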
+ if (currprefs.mmu_model) {
+ cycles_mult = CYCLES_DIV / 20;
+ } else {
+ cycles_mult = CYCLES_DIV / 12;
+ }
} else if (currprefs.cpu_model >= 68020) {
- cycles_mult = CYCLES_DIV / 6;
+ if (currprefs.mmu_model) {
+ cycles_mult = CYCLES_DIV / 10;
+ } else {
+ cycles_mult = CYCLES_DIV / 6;
+ }
}
+
if (!currprefs.cpu_cycle_exact) {
if (currprefs.m68k_speed_throttle < 0) {
- cycles_mult = (cycles_mult * 1000) / (1000 + currprefs.m68k_speed_throttle);
+ cycles_mult = (uae_u32)((cycles_mult * 1000) / (1000 + currprefs.m68k_speed_throttle));
} else if (currprefs.m68k_speed_throttle > 0) {
- cycles_mult = (cycles_mult * 1000) / (1000 + currprefs.m68k_speed_throttle);
+ cycles_mult = (uae_u32)((cycles_mult * 1000) / (1000 + currprefs.m68k_speed_throttle));
}
}
} else {
// 1-9 = wait, levels
// 10 = max wait
-static bool haltloop_do(int vsynctimeline, int rpt_end, int lines)
+static bool haltloop_do(int vsynctimeline, frame_time_t rpt_end, int lines)
{
int ovpos = vpos;
while (lines-- >= 0) {
#endif
if (event_wait)
break;
- int d = read_processor_time() - rpt_end;
+ frame_time_t d = read_processor_time() - rpt_end;
if (d < -2 * vsynctimeline || d >= 0)
break;
}
int ovpos = vpos;
while (regs.halted) {
- int vsynctimeline = vsynctimebase / (maxvpos_display + 1);
+ int vsynctimeline = (int)(vsynctimebase / (maxvpos_display + 1));
int lines;
- int rpt_scanline = read_processor_time();
- int rpt_end = rpt_scanline + vsynctimeline;
+ frame_time_t rpt_scanline = read_processor_time();
+ frame_time_t rpt_end = rpt_scanline + vsynctimeline;
// See expansion handling.
// Dialog must be opened from main thread.
check_uae_int_request();
uae_ppc_execute_check();
- lines = (read_processor_time() - rpt_scanline) / vsynctimeline + 1;
+ lines = (int)(read_processor_time() - rpt_scanline) / vsynctimeline + 1;
} else {
cont:
if (cputrace.needendcycles) {
cputrace.needendcycles = 0;
- write_log(_T("STARTCYCLES=%08x ENDCYCLES=%08x\n"), cputrace.startcycles, get_cycles());
+ write_log(_T("STARTCYCLES=%016llx ENDCYCLES=%016llx\n"), cputrace.startcycles, get_cycles());
log_dma_record ();
}
else
dstbak = dst = xmalloc (uae_u8, 10000);
- save_u32 (2 | 4 | 16 | 32 | 64);
+ save_u32 (2 | 4 | 16 | 32 | 64 | 128);
save_u16 (cputrace.opcode);
for (int i = 0; i < 16; i++)
save_u32 (cputrace.regs[i]);
save_u32 (cputrace.readcounter);
save_u32 (cputrace.writecounter);
save_u32 (cputrace.memoryoffset);
- write_log (_T("CPUT SAVE: PC=%08x C=%08X %08x %08x %08x %d %d %d\n"),
+ write_log (_T("CPUT SAVE: PC=%08x C=%016llX %08x %08x %08x %d %d %d\n"),
cputrace.pc, cputrace.startcycles,
cputrace.cyclecounter, cputrace.cyclecounter_pre, cputrace.cyclecounter_post,
cputrace.readcounter, cputrace.writecounter, cputrace.memoryoffset);
save_u32 (cputrace.ctm[i].mode);
write_log (_T("CPUT%d: %08x %08x %08x\n"), i, cputrace.ctm[i].addr, cputrace.ctm[i].data, cputrace.ctm[i].mode);
}
- save_u32 (cputrace.startcycles);
+ save_u32 ((uae_u32)cputrace.startcycles);
if (currprefs.cpu_model == 68020) {
for (int i = 0; i < CACHELINES020; i++) {
save_u16(cputrace.read_buffer);
save_u16(cputrace.writecounter);
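+ // upper 32 bits of startcycles; presence indicated by flag bit 128 saved above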
+ save_u32((uae_u32)(cputrace.startcycles >> 32));
+
*len = dst - dstbak;
cputrace.needendcycles = 1;
return dstbak;
cputrace.ctm[i].data = restore_u32 ();
cputrace.ctm[i].mode = restore_u32 ();
}
- cputrace.startcycles = restore_u32 ();
+ cputrace.startcycles = restore_u32();
if (v & 4) {
if (currprefs.cpu_model == 68020) {
cputrace.write_buffer = restore_u16();
}
}
+
+ if (v & 128) {
+ cputrace.startcycles |= ((uae_u64)restore_u32()) << 32;
+ }
}
cputrace.needendcycles = 1;
if (nanos < 0)
return 800;
LARGE_INTEGER interval;
- int start = read_processor_time();
+ frame_time_t start = read_processor_time();
nanos *= 10;
if (nanos < ActualTimerResolution)
nanos = ActualTimerResolution;
static int sleep_millis2 (int ms, bool main)
{
UINT TimerEvent;
- int start = 0;
+ frame_time_t start = 0;
int cnt;
HANDLE sound_event = get_sound_event();
bool wasneg = ms < 0;
static frame_time_t read_processor_time_rdtsc(void)
{
- frame_time_t foo = 0;
+ uae_u32 foo = 0;
+ frame_time_t out = 0;
#if defined(X86_MSVC_ASSEMBLY)
- frame_time_t bar;
+ uae_u32 bar;
__asm
{
rdtsc
mov foo, eax
mov bar, edx
}
- /* very high speed CPU's RDTSC might overflow without this.. */
- foo >>= 6;
- foo |= bar << 26;
- if (!foo)
- foo++;
+ /* combine EDX:EAX into one 64-bit value; the >>6 scaling of the old 32-bit code is kept */
+ out = (((uae_u64)bar) << 32) | foo;
+ out >>= 6;
#endif
- return foo;
+ return out;
}
return t;
}
-uae_u32 read_system_time(void)
+uae_s64 read_system_time(void)
{
- return GetTickCount();
+ return GetTickCount64();
}
static volatile int dummythread_die;
while (!dummythread_die);
}
-static uae_u64 win32_read_processor_time(void)
+static uae_s64 win32_read_processor_time(void)
{
#if defined(X86_MSVC_ASSEMBLY)
uae_u32 foo, bar;
mov foo, eax
mov bar, edx
}
- return (((uae_u64)bar) << 32) | foo;
+ return (((uae_s64)bar) << 32) | foo;
#else
return 0;
#endif
static void figure_processor_speed_rdtsc(void)
{
static int freqset;
- uae_u64 clockrate;
+ frame_time_t clockrate;
int oldpri;
HANDLE th;
write_log(_T("CLOCKFREQ: QPF %.2fMHz (%.2fMHz, DIV=%d)\n"),
freq.QuadPart / 1000000.0,
qpfrate / 1000000.0, 1 << qpcdivisor);
- syncbase = (int) qpfrate;
+ syncbase = qpfrate;
}
void uae_time_calibrate(void)