int maxvpos = MAXVPOS_PAL;
int maxvpos_nom = MAXVPOS_PAL; // nominal value (same as maxvpos but "faked" maxvpos in fake 60hz modes)
int maxvpos_display = MAXVPOS_PAL; // value used for display size
+static int maxhpos_temp; // line temporarily longer than maxhpos due to VHPOSW tricks
static int maxhpos_display = AMIGA_WIDTH_MAX;
int maxvpos_display_vsync; // extra lines from top visible in bottom
static int vblank_extraline;
static int sprite_sprctlmask;
int sprite_buffer_res;
-uae_u8 cycle_line_slot[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST];
-uae_u16 cycle_line_pipe[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST];
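+// Include MAX_CHIPSETSLOTS_EXTRA so the slot arrays stay valid when VHPOSW extends a line past maxhpos.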
+uae_u8 cycle_line_slot[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST + MAX_CHIPSETSLOTS_EXTRA];
+uae_u16 cycle_line_pipe[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST + MAX_CHIPSETSLOTS_EXTRA];
static uae_u8 cycle_line_slot_last;
static uae_s16 bpl1mod, bpl2mod, bpl1mod_prev, bpl2mod_prev;
static bool shdelay_disabled;
static int delay_cycles, delay_cycles2;
static int delay_lastcycle[2], delay_hsynccycle;
-static int hack_delay_shift;
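+// Offsets compensating recorded positions and sprite comparisons after VHPOSW changes the horizontal counter.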
+static int vhposr_delay_offset, vhposr_sprite_offset;
static bool bplcon1_written;
static bool bplcon0_planes_changed;
static bool sprites_enabled_this_line;
#define ESTIMATED_FETCH_MODE 1
#define OPTIMIZED_ESTIMATE 1
-static uae_s8 estimated_cycles_buf0[256];
-static uae_s8 estimated_cycles_buf1[256];
-static uae_s8 estimated_cycles_empty[256];
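+// Sized with MAX_CHIPSETSLOTS_EXTRA so cycle estimates also cover VHPOSW-extended lines.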
+static uae_s8 estimated_cycles_buf0[MAX_CHIPSETSLOTS + MAX_CHIPSETSLOTS_EXTRA];
+static uae_s8 estimated_cycles_buf1[MAX_CHIPSETSLOTS + MAX_CHIPSETSLOTS_EXTRA];
+static uae_s8 estimated_cycles_empty[MAX_CHIPSETSLOTS + MAX_CHIPSETSLOTS_EXTRA];
static int estimate_cycles_empty_index = -1;
static uae_u16 estimated_bplcon0, estimated_fm, estimated_plfstrt, estimated_plfstop;
static uae_s8 *estimated_cycles = estimated_cycles_empty;
remember_ctable();
}
- hpos += hack_delay_shift;
+ hpos += vhposr_delay_offset;
record_color_change2(hpos, regno, value);
}
count = 0;
for (int i = 0; i < MAX_SPRITES; i++) {
struct sprite *s = &spr[i];
- int xpos = (spr[i].xpos + hdiw_counter_sconflict) & hdiw_counter_sconflict_mask;
+ int xpos = (spr[i].xpos + hdiw_counter_sconflict + vhposr_sprite_offset) & hdiw_counter_sconflict_mask;
int sprxp = (fmode & 0x8000) ? (xpos & ~sscanmask) : xpos;
int hw_xp = sprxp >> sprite_buffer_res;
int pointx = usepointx && (s->ctl & sprite_sprctlmask) ? 0 : 1;
last_diw_hpos = 0;
last_diw_hpos2 = 0;
blt_info.finishhpos = -1;
- hack_delay_shift = 0;
/* Default to no bitplane DMA overriding sprite DMA */
plfstrt_sprite = 0x100;
last_recorded_diw_hpos = 0;
collision_hpos = 0;
+ vhposr_delay_offset = 0;
+ vhposr_sprite_offset = 0;
+
compute_toscr_delay(bplcon1);
last_diwlastword = -1;
static uae_u16 VPOSR(void)
{
- unsigned int csbit = 0;
+ uae_u16 csbit = 0;
uae_u16 vp = GETVPOS();
uae_u16 hp = GETHPOS();
- int lof = lof_store;
+ int lofr = lof_store;
+ int lolr = lol;
- if (vp + 1 == maxvpos + lof_store && (hp == maxhpos - 1 || hp == maxhpos - 2)) {
- // lof toggles 2 cycles before maxhpos, so do fake toggle here.
- if ((bplcon0 & 4) && CPU_ACCURATE) {
- lof = lof ? 0 : 1;
+ if (hp == 0) {
+ // LOF and LOL toggle when HPOS=1
+ // Return pre-toggled value if VPOSR is read when HPOS=0
+ if (vp == 0) {
+ if ((bplcon0 & 4) && CPU_ACCURATE) {
+ lofr = lofr ? 0 : 1;
+ }
+ }
+ if (islinetoggle()) {
+ lolr = lolr ? 0 : 1;
}
}
vhpos_adj(&hp, &vp);
if (!ecs_agnus) {
vp &= 1;
+ } else {
+ vp |= lolr ? 0x80 : 0;
}
- vp |= (lof ? 0x8000 : 0) | csbit;
- if (ecs_agnus) {
- vp |= lol ? 0x80 : 0;
- }
+ vp |= (lofr ? 0x8000 : 0) | csbit;
hsyncdelay();
#if 0
if (1 || (M68K_GETPC < 0x00f00000 || M68K_GETPC >= 0x10000000))
vb_check();
}
-static void VHPOSW(uae_u16 v)
+static void VHPOSW_delayed(uae_u32 v)
{
int oldvpos = vpos;
int newvpos = vpos;
write_log (_T("VHPOSW %04X PC=%08x\n"), v, M68K_GETPC);
#endif
- if (currprefs.cpu_memory_cycle_exact && currprefs.cpu_model == 68000) {
- /* Special hack for Smooth Copper in CoolFridge / Upfront demo */
- int chp = current_hpos_safe() - 4;
- int hp = v & 0xff;
- if (chp >= 0x21 && chp <= 0x29 && hp == 0x2d) {
- hack_delay_shift = 4;
- record_color_change(chp, 0, COLOR_CHANGE_HSYNC_HACK | 6);
- thisline_changed = 1;
- }
+ int hpos_org = current_hpos();
+ int hpos = hpos_org;
+ int hnew = (v & 0xff);
+ int hnew_org = hnew;
+ bool newinc = false;
+ if (hpos == 0 || hpos == 1) {
+ hpos += maxhpos;
+ }
+ if (hnew == 0 || hnew == 1) {
+ hnew += maxhpos;
+ newinc = true;
}
-
- int hpos = current_hpos();
- int hnew = v & 0xff;
int hdiff = hnew - hpos;
+ //write_log("%02x %02x %d\n", hpos_org, hnew_org, hdiff);
if (copper_access && (hdiff & 1)) {
write_log("VHPOSW write %04X. New horizontal value is odd. Copper confusion possible.\n", v);
}
- modify_eventcounter(-(hdiff - 2));
+
+ int hpos2 = 0;
+
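+ // Compensate display delay, recorded color change positions and sprite position comparisons for the shifted counter.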
+ delay_cycles += ((-hdiff * 8 - 2) & 7) << LORES_TO_SHRES_SHIFT;
+ if (hdiff & 1) {
+ vhposr_delay_offset = 1;
+ }
+ vhposr_sprite_offset += (hdiff * 4 - 2) << sprite_buffer_res;
+
+ if (newinc && hnew == maxhpos + 1) {
+ // 0000 -> 0001 (0 and 1 are part of previous line, vpos increases when hpos=1). No need to do anything
+ } else if (hnew >= maxhpos) {
+ // new value is at or past maxhpos: the end-of-line check never matches, counter keeps counting until it wraps around 0xFF->0x00
+ int hdiff2 = (0x100 - hnew) - (maxhpos - hpos);
+ hdiff2 *= CYCLE_UNIT;
+ hdiff *= CYCLE_UNIT;
+ eventtab[ev_hsync].evtime += hdiff2;
+ eventtab[ev_hsync].oldcycles = get_cycles() - hnew * CYCLE_UNIT;
+ eventtab[ev_hsynch].evtime += hdiff2;
+ eventtab[ev_hsynch].oldcycles += hdiff2;
+ maxhpos_temp = 0x100;
+ hpos2 = current_hpos_safe();
+ } else {
+ hdiff = -hdiff;
+ hdiff *= CYCLE_UNIT;
+ for (;;) {
+ eventtab[ev_hsync].evtime += hdiff;
+ eventtab[ev_hsync].oldcycles += hdiff;
+ eventtab[ev_hsynch].evtime += hdiff;
+ eventtab[ev_hsynch].oldcycles += hdiff;
+ hpos2 = current_hpos_safe();
+ if (hpos2 >= 0 && hpos2 < 256) {
+ // don't allow line crossing, restore original value
+ break;
+ }
+ hdiff = -hdiff;
+ }
+ events_schedule();
+ }
+
+#ifdef DEBUGGER
+ if (newvpos == oldvpos && hdiff) {
+ record_dma_reoffset(vpos, hpos, hnew);
+ }
+#endif
+
+ if (hdiff) {
+ int hold = hpos;
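+ // Shift pending cycle allocations and RGA pipeline entries from the old horizontal position to the new one, clearing the spill-over area first.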
+ memset(cycle_line_slot + MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST, 0, sizeof(uae_u8) * MAX_CHIPSETSLOTS_EXTRA);
+ memset(cycle_line_pipe + MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST, 0, sizeof(uae_u16) * MAX_CHIPSETSLOTS_EXTRA);
+ int total = (MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST + MAX_CHIPSETSLOTS_EXTRA) - (hnew > hold ? hnew : hold);
+ if (total > 0) {
+ memmove(cycle_line_slot + hnew, cycle_line_slot + hold, total * sizeof(uae_u8));
+ memmove(cycle_line_pipe + hnew, cycle_line_pipe + hold, total * sizeof(uae_u16));
+ }
+ }
v >>= 8;
newvpos &= 0xff00;
cia_adjust_eclock_phase((newvpos - oldvpos) * maxhpos);
vposw_change++;
#ifdef DEBUGGER
- record_dma_hsync(hpos + 2);
+ record_dma_hsync(hpos_org);
if (debug_dma) {
int vp = vpos;
vpos = newvpos;
}
vpos = newvpos;
vb_check();
+
#if 0
if (vpos < oldvpos)
vposback (oldvpos);
#endif
+}
+
+static void VHPOSW(uae_u16 v)
+{
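+ // Apply the new value with a 2 CCK delay.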
+ event2_newevent_xx(-1, 2 * CYCLE_UNIT, v, VHPOSW_delayed);
}
static uae_u16 VHPOSR(void)
if (oldres == RES_HIRES && res == RES_LORES) {
toscr_special_skip_ptr = toscr_spc_ecs_hires_to_lores;
}
- } else if (0) {
+ } else if (1) {
if (oldres == RES_LORES && res == RES_HIRES) {
toscr_special_skip_ptr = toscr_spc_ocs_lores_to_hires;
}
events_schedule();
}
-static void set_hpos(void)
+static void set_hpos()
{
line_start_cycles = (get_cycles() + CYCLE_UNIT - 1) & ~(CYCLE_UNIT - 1);
maxhposeven_prev = maxhposeven;
maxhposeven = (maxhpos & 1) == 0;
eventtab[ev_hsync].evtime = line_start_cycles + HSYNCTIME;
eventtab[ev_hsync].oldcycles = line_start_cycles;
-#ifdef DEBUGGER
- if (debug_dma) {
- record_dma_hsync(maxhpos);
- }
-#endif
}
// this finishes current line
hsync_counter++;
+ int currentmaxhp = current_hpos();
+#ifdef DEBUGGER
+ if (debug_dma) {
+ record_dma_hsync(currentmaxhp);
+ }
+#endif
+ // just to be sure
+ if (currentmaxhp > 0) {
+ cycle_line_slot_last = cycle_line_slot[currentmaxhp - 1];
+ } else {
+ cycle_line_slot_last = 0;
+ }
+ set_hpos();
+
vpos_prev = vpos;
vpos++;
vpos_count++;
else
lol = 0;
- cycle_line_slot_last = cycle_line_slot[maxhpos - 1];
- set_hpos();
-
// to record decisions correctly between end of scanline and start of hsync
if (!eventtab[ev_hsynch].active) {
eventtab[ev_hsynch].evtime = get_cycles() + hsyncstartpos_start_cycles * CYCLE_UNIT;
// this prepares for new line
static void hsync_handler_post(bool onvsync)
{
- memset(cycle_line_slot, 0, maxhpos + 1);
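+ // also clear the extra slots if VHPOSW extended this line past maxhpos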
+ memset(cycle_line_slot, 0, maxhpos_temp > maxhpos ? maxhpos_temp + 1 : maxhpos + 1);
+ maxhpos_temp = 0;
// genlock active:
// vertical: interlaced = toggles every other field, non-interlaced = both fields (normal)
audio_evhandler();
}
-void init_eventtab (void)
+void init_eventtab(void)
{
if (!savestate_state) {
clear_events();
eventtab2[ev2_blitter].handler = blitter_handler;
- events_schedule ();
+ events_schedule();
}
-void custom_prepare (void)
+void custom_prepare(void)
{
- set_hpos ();
- hsync_handler_post (true);
+ set_hpos();
+ hsync_handler_post(true);
}
void custom_cpuchange(void)
static struct cop_rec *cop_record[2];
static int nr_cop_records[2], curr_cop_set, selected_cop_set;
-#define NR_DMA_REC_HPOS 256
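+// wide enough to hold records shifted by dma_record_hoffset after VHPOSW writes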
+#define NR_DMA_REC_HPOS 288
#define NR_DMA_REC_VPOS 1000
static struct dma_rec *dma_record[2];
static int dma_record_toggle, dma_record_frame[2];
struct dma_rec *dr2 = &dr[v * NR_DMA_REC_HPOS + h];
memset(dr2, 0, sizeof(struct dma_rec));
dr2->reg = 0xffff;
+ dr2->hpos = -1;
dr2->cf_reg = 0xffff;
dr2->addr = 0xffffffff;
}
static void dma_record_init(void)
{
if (!dma_record[0]) {
- dma_record[0] = xmalloc(struct dma_rec, NR_DMA_REC_HPOS * NR_DMA_REC_VPOS);
- dma_record[1] = xmalloc(struct dma_rec, NR_DMA_REC_HPOS * NR_DMA_REC_VPOS);
+ dma_record[0] = xcalloc(struct dma_rec, NR_DMA_REC_HPOS * NR_DMA_REC_VPOS + 2);
+ dma_record[1] = xcalloc(struct dma_rec, NR_DMA_REC_HPOS * NR_DMA_REC_VPOS + 2);
+ dma_record[0]->vpos = -1;
+ dma_record[1]->vpos = -1;
+ dma_record[0]->end = 1;
+ dma_record[1]->end = 1;
+ dma_record[0]++;
+ dma_record[1]++;
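+ // record at index -1 acts as a guard with end set, so backward scans in get_record_dma_info() always terminate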
record_dma_reset(0);
dma_record_toggle = 0;
dma_record_frame[0] = -1;
static int cycles_toggle;
static int record_dma_maxhpos, record_dma_maxvpos;
+static int dma_record_hoffset;
static void debug_draw_cycles(uae_u8 *buf, int bpp, int line, int width, int height, uae_u32 *xredcolors, uae_u32 *xgreencolors, uae_u32 *xbluescolors)
{
dr->end = true;
record_dma_maxvpos = vp;
-
+ dma_record_hoffset = 0;
cycles_toggle = cycles_toggle ? 0 : 1;
}
+void record_dma_reoffset(int vp, int oldhpos, int newhpos)
+{
+ if (!dma_record[0])
+ return;
+
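+ // adjust the record column offset so entries made after the VHPOSW write continue from where the previous ones left off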
+ int hp = newhpos + dma_record_hoffset;
+ struct dma_rec *dr = &dma_record[dma_record_toggle][vp * NR_DMA_REC_HPOS + hp];
+ dma_record_hoffset -= newhpos - oldhpos;
+#if 0
+ dr->vpos = vp;
+ dr->hpos = oldhpos;
+#endif
+ if (dma_record_hoffset >= NR_DMA_REC_HPOS) {
+ dma_record_hoffset = NR_DMA_REC_HPOS - 1;
+ }
+ if (dma_record_hoffset <= -NR_DMA_REC_HPOS) {
+ dma_record_hoffset = -NR_DMA_REC_HPOS + 1;
+ }
+}
+
void record_dma_hsync(int lasthpos)
{
struct dma_rec *dr;
if (!dma_record[0])
return;
- if (lasthpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
- return;
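+ // end-of-line marker goes to the offset-adjusted column; the offset is then reset for the next line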
+ lasthpos += dma_record_hoffset;
+ if (lasthpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS) {
+ dma_record_hoffset = 0;
+ return;
+ }
dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + lasthpos];
dr->end = true;
+ lasthpos -= dma_record_hoffset;
+ dma_record_hoffset = 0;
if (vpos == 0) {
record_dma_maxhpos = lasthpos;
void record_dma_ipl(int hpos, int vpos)
{
struct dma_rec *dr;
+ int hp = hpos;
if (!dma_record[0])
return;
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + hpos];
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->intlev = regs.intmask;
dr->ipl = regs.ipl_pin;
dr->evt2 |= DMA_EVENT2_IPL;
void record_dma_ipl_sample(int hpos, int vpos)
{
struct dma_rec *dr;
+ int hp = hpos;
if (!dma_record[0])
return;
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + hpos];
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->intlev = regs.intmask;
dr->ipl2 = regs.ipl_pin;
dr->evt2 |= DMA_EVENT2_IPLSAMPLE;
void record_dma_event(uae_u32 evt, int hpos, int vpos)
{
struct dma_rec *dr;
+ int hp = hpos;
if (!dma_record[0])
return;
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + hpos];
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->evt |= evt;
dr->ipl = regs.ipl_pin;
}
void record_dma_event2(uae_u32 evt2, int hpos, int vpos)
{
struct dma_rec *dr;
+ int hp = hpos;
if (!dma_record[0])
return;
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + hpos];
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->evt2 |= evt2;
dr->ipl = regs.ipl_pin;
}
void record_dma_event_data(uae_u32 evt, int hpos, int vpos, uae_u32 data)
{
struct dma_rec *dr;
+ int hp = hpos;
if (!dma_record[0])
return;
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + hpos];
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->evt |= evt;
dr->evtdata = data;
dr->evtdataset = true;
struct dma_rec *dr;
if (!dma_record[0])
return;
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + hpos];
void record_dma_write(uae_u16 reg, uae_u32 dat, uae_u32 addr, int hpos, int vpos, int type, int extra)
{
struct dma_rec *dr;
+ int hp = hpos;
if (!dma_record[0]) {
dma_record_init();
}
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
dma_conflict(vpos, hpos, dr, reg, false);
return;
}
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->reg = reg;
dr->dat = dat;
dr->addr = addr;
struct dma_rec *last_dma_rec;
void record_dma_read_value_pos(uae_u32 v, int hpos, int vpos)
{
+ hpos += dma_record_hoffset;
struct dma_rec *dr = &dma_record[dma_record_toggle][vpos * NR_DMA_REC_HPOS + hpos];
last_dma_rec = dr;
record_dma_read_value(v);
if (!dma_record[0]) {
return false;
}
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS) {
return false;
}
if (!dma_record[0]) {
return;
}
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS) {
return;
}
void record_cia_access(int r, int mask, uae_u16 value, bool rw, int hpos, int vpos, int phase)
{
struct dma_rec *dr;
+ int hp = hpos;
dma_record_init();
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
if (dr->ciaphase < 0) {
return;
}
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->ciamask = mask;
dr->ciareg = r;
dr->ciavalue = value;
void record_dma_read(uae_u16 reg, uae_u32 addr, int hpos, int vpos, int type, int extra)
{
struct dma_rec *dr;
+ int hp = hpos;
dma_record_init();
+ hpos += dma_record_hoffset;
if (hpos >= NR_DMA_REC_HPOS || vpos >= NR_DMA_REC_VPOS)
return;
}
return;
}
+ dr->hpos = hp;
+ dr->vpos = vpos;
dr->reg = reg;
dr->dat = 0;
dr->addr = addr;
debug_mark_refreshed(dr->addr);
}
-static bool get_record_dma_info(struct dma_rec *dr, int hpos, int vpos, TCHAR *l1, TCHAR *l2, TCHAR *l3, TCHAR *l4, TCHAR *l5, TCHAR *l6, uae_u32 *split, int *iplp)
+static bool get_record_dma_info(struct dma_rec *drs, struct dma_rec *dr, TCHAR *l1, TCHAR *l2, TCHAR *l3, TCHAR *l4, TCHAR *l5, TCHAR *l6, uae_u32 *split, int *iplp)
{
int longsize = dr->size;
bool got = false;
if (l6)
l6[0] = 0;
+ int hpos = dr->hpos;
+ if (hpos < 0) {
+ struct dma_rec *dr2 = dr;
+ int cnt = 0;
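+ // hpos was not stored for this record: walk back to the nearest record with a known hpos (or the line start) and derive it from the distance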
+ while (!dr2->end) {
+ if (dr2 == drs) {
+ hpos = dr - drs;
+ break;
+ }
+ if (dr2->hpos >= 0) {
+ hpos = dr2->hpos + cnt;
+ break;
+ }
+ cnt++;
+ dr2--;
+ }
+ }
+ if (hpos < 0) {
+ hpos = 0;
+ }
+
if (split) {
if ((dr->evt & DMA_EVENT_CPUINS) && dr->evtdataset) {
*split = dr->evtdata;
TCHAR l1l[16], l2l[16], l3l[16], l4l[16], l5l[16], l6l[16];
uae_u32 split = 0xffffffff;
- get_record_dma_info(dr, h, vpos, l1l, l2l, l3l, l4l, l5l, l6l, &split, &ipl);
+ get_record_dma_info(dr_start, dr, l1l, l2l, l3l, l4l, l5l, l6l, &split, &ipl);
TCHAR *p = l1 + _tcslen(l1);
_stprintf(p, _T("%11s "), l1l);
} else {
for (;;) {
bool err;
- ignore_ws (c);
- if (!more_params (c))
+ ignore_ws(c);
+ if (!more_params(c))
break;
val = readhex(c, &len, &err);
if (err) {
if (!dma_record[0] || frames < 0 || vp < 0 || hp < 0)
return;
for (;;) {
- struct dma_rec *dr = NULL;
- if (dma_record_frame[0] == frames)
+ struct dma_rec *dr = NULL, *drs = NULL;
+ if (dma_record_frame[0] == frames) {
+ drs = &dma_record[0][vp * NR_DMA_REC_HPOS];
dr = &dma_record[0][vp * NR_DMA_REC_HPOS + hp];
- else if (dma_record_frame[1] == frames)
+ } else if (dma_record_frame[1] == frames) {
+ drs = &dma_record[1][vp * NR_DMA_REC_HPOS];
dr = &dma_record[1][vp * NR_DMA_REC_HPOS + hp];
+ }
if (!dr)
return;
TCHAR l1[16], l2[16], l3[16], l4[16];
- if (get_record_dma_info(dr, hp, vp, l1, l2, l3, l4, NULL, NULL, NULL, NULL)) {
+ if (get_record_dma_info(drs, dr, l1, l2, l3, l4, NULL, NULL, NULL, NULL)) {
TCHAR tmp[256];
- _stprintf(tmp, _T(" - %02d %02X %s"), dr->ipl, hp, l2);
+ _stprintf(tmp, _T(" - %02d %02X %s"), dr->ipl, dr->hpos, l2);
while (_tcslen(tmp) < 18) {
_tcscat(tmp, _T(" "));
}
colors_for_drawing.extra &= ~(1 << (CE_SHRES_DELAY_SHIFT + 1));
colors_for_drawing.extra |= (value & 3) << CE_SHRES_DELAY_SHIFT;
pfield_expand_dp_bplcon();
- } else if (value & COLOR_CHANGE_HSYNC_HACK) {
- hsync_shift_hack = (uae_s8)value;
}
}
}
return hp;
}
-// emulate VPOSHW writes changing cycle counter
-void modify_eventcounter(int diff)
-{
- if (diff == 0) {
- return;
- }
-
- int hpos = current_hpos();
-
- // fake >HTOTAL change delays
- if (hpos + diff >= maxhpos) {
- int dd = maxhpos - (hpos + diff);
- diff += dd;
- } else if (hpos + diff < 0) {
- diff = (maxhpos - (0x100 - hpos + diff));
- }
-
- int cdiff = diff * CYCLE_UNIT;
- if (cdiff < 0) {
- if (currcycle >= cdiff) {
- currcycle -= cdiff;
- } else {
- cdiff = -(int)currcycle;
- currcycle = 0;
- }
- } else {
- currcycle += cdiff;
- }
-
- cia_adjust_eclock_phase(diff);
-
- // adjust all existing timers
- for (int i = 0; i < ev_max; i++) {
- if (i != ev_hsync && i != ev_hsynch) {
- eventtab[i].evtime += cdiff;
- eventtab[i].oldcycles += cdiff;
- }
- }
-
- for (int i = 0; i < ev2_max; i++) {
- eventtab2[i].evtime += cdiff;
- }
-
- events_schedule();
-}
-
void clear_events(void)
{
nextevent = EVT_MAX;
#define RGA_PIPELINE_ADJUST 4
#define MAX_CHIPSETSLOTS 256
-extern uae_u8 cycle_line_slot[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST];
-extern uae_u16 cycle_line_pipe[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST];
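+// headroom for lines temporarily running past maxhpos after VHPOSW writes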
+#define MAX_CHIPSETSLOTS_EXTRA 12
+extern uae_u8 cycle_line_slot[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST + MAX_CHIPSETSLOTS_EXTRA];
+extern uae_u16 cycle_line_pipe[MAX_CHIPSETSLOTS + RGA_PIPELINE_ADJUST + MAX_CHIPSETSLOTS_EXTRA];
#define CYCLE_PIPE_CPUSTEAL 0x8000
#define CYCLE_PIPE_NONE 0x4000
struct dma_rec
{
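+ // original (unshifted) position where the record was made; -1 = not set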
+ int hpos, vpos;
uae_u16 reg;
uae_u64 dat;
uae_u16 size;
extern bool record_dma_check(int hpos, int vpos);
extern void record_dma_hsync(int);
extern void record_dma_vsync(int);
+extern void record_dma_reoffset(int, int, int);
extern void record_cia_access(int r, int mask, uae_u16 value, bool rw, int hpos, int vpos, int phase);
extern void record_dma_ipl(int hpos, int vpos);
extern void record_dma_ipl_sample(int hpos, int vpos);
#define COLOR_CHANGE_BRDBLANK 0x80000000
#define COLOR_CHANGE_SHRES_DELAY 0x40000000
-#define COLOR_CHANGE_HSYNC_HACK 0x20000000
-#define COLOR_CHANGE_BLANK 0x10000000
+#define COLOR_CHANGE_BLANK 0x20000000
#define COLOR_CHANGE_ACTBORDER (COLOR_CHANGE_BLANK | COLOR_CHANGE_BRDBLANK)
#define COLOR_CHANGE_MASK 0xf0000000
struct color_change {
extern void events_schedule(void);
extern void do_cycles_slow(int cycles_to_add);
extern void events_reset_syncline(void);
-extern void modify_eventcounter(int diff);
extern void clear_events(void);
extern bool is_cycle_ce(uaecptr);