out("opcode |= 0x80000;\n");
} else if (g_instr->mnemo == i_CLR) {
if (g_instr->smode < Ad16) {
- out("regflags.cznv = oldflags;\n");
+ out("regflags.cznv = oldflags.cznv;\n");
}
// (an)+ and -(an) is done later
if (g_instr->smode == Aipi || g_instr->smode == Apdi) {
out("regs.irc = dsta >> 16;\n");
}
if (reset_ccr) {
- out("regflags.cznv = oldflags;\n");
+ out("regflags.cznv = oldflags.cznv;\n");
}
if (set_ccr) {
out("ccr_68000_word_move_ae_normal((uae_s16)(src));\n");
genastore_rev("0", curi->smode, "srcreg", curi->size, "src");
}
} else if (cpu_level == 1) {
- out("uae_u16 oldflags = regflags.cznv;\n");
+ out("struct flag_struct oldflags;\n");
+ out("oldflags.cznv = regflags.cznv;\n");
genamode(curi, curi->smode, "srcreg", curi->size, "src", 3, 0, GF_CLR68010);
if (isreg(curi->smode) && curi->size == sz_long) {
addcycles000(2);
if (curi->mnemo == i_MOVE) {
if (cpu_level == 1 && (isreg(curi->smode) || curi->smode == imm)) {
- out("uae_u16 oldflags = regflags.cznv;\n");
+ out("struct flag_struct oldflags;\n");
+ out("oldflags.cznv = regflags.cznv;\n");
}
if (curi->size == sz_long && (using_prefetch || using_ce) && curi->dmode >= Aind) {
// to support bus error exception correct flags, flags needs to be set
#define UAE
#endif
-#if defined(__x86_64__) || defined(_M_AMD64)
+#if defined(__arm__) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
+#define CPU_arm 1
+#define ARM_ASSEMBLY 1
+#elif defined(__x86_64__) || defined(_M_AMD64)
#define CPU_x86_64 1
#define CPU_64_BIT 1
#define X86_64_ASSEMBLY 1
#define CPU_i386 1
#define X86_ASSEMBLY 1
#define SAHF_SETO_PROFITABLE
-#elif defined(__arm__) || defined(_M_ARM)
-#define CPU_arm 1
#elif defined(__powerpc__) || defined(_M_PPC)
#define CPU_powerpc 1
#else
#ifdef _WIN32
void uae_time_use_rdtsc(bool enable);
uae_s64 read_system_time(void);
+uae_s64 read_processor_time_rdtsc(void);
#endif
typedef uae_time_t frame_time_t;
--- /dev/null
+/*
+ * compiler/codegen_arm.cpp - ARM code generator
+ *
+ * Copyright (c) 2013 Jens Heitmann of ARAnyM dev team (see AUTHORS)
+ *
+ * Inspired by Christian Bauer's Basilisk II
+ *
+ * This file is part of the ARAnyM project which builds a new and powerful
+ * TOS/FreeMiNT compatible virtual machine running on almost any hardware.
+ *
+ * JIT compiler m68k -> ARM
+ *
+ * Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
+ * Adaptation for Basilisk II and improvements, copyright 2000-2004 Gwenole Beauchesne
+ * Portions related to CPU detection come from linux/arch/i386/kernel/setup.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Current state:
+ * - Experimental
+ * - Still optimizable
+ * - Not clock cycle optimized
+ * - as a first step this compiler emulates x86 instruction to be compatible
+ * with gencomp. Better would be a specialized version of gencomp compiling
+ * 68k instructions to ARM compatible instructions. This is a step for the
+ * future
+ *
+ */
+
+#include "flags_arm.h"
+
+// Declare the built-in __clear_cache function.
+extern void __clear_cache (char*, char*);
+
+/*************************************************************************
+ * Some basic information about the target CPU                           *
+ *************************************************************************/
+
+/* Symbolic indices for the 16 ARM core registers (r0-r15). */
+#define R0_INDEX 0
+#define R1_INDEX 1
+#define R2_INDEX 2
+#define R3_INDEX 3
+#define R4_INDEX 4
+#define R5_INDEX 5
+#define R6_INDEX 6
+#define R7_INDEX 7
+#define R8_INDEX 8
+#define R9_INDEX 9
+#define R10_INDEX 10
+#define R11_INDEX 11
+#define R12_INDEX 12
+#define R13_INDEX 13
+#define R14_INDEX 14
+#define R15_INDEX 15
+
+/* ARM special registers: stack pointer, link register, program counter. */
+#define RSP_INDEX 13
+#define RLR_INDEX 14
+#define RPC_INDEX 15
+
+/* The register in which subroutines return an integer return value */
+#define REG_RESULT R0_INDEX
+
+/* The registers subroutines take their first and second argument in */
+#define REG_PAR1 R0_INDEX
+#define REG_PAR2 R1_INDEX
+
+/* Scratch registers used internally by the emitters below (hence always_used). */
+#define REG_WORK1 R2_INDEX
+#define REG_WORK2 R3_INDEX
+
+//#define REG_DATAPTR R10_INDEX
+
+#define REG_PC_PRE R0_INDEX /* The register we use for preloading regs.pc_p */
+#define REG_PC_TMP R1_INDEX /* Another register that is not the above */
+
+#define SHIFTCOUNT_NREG R1_INDEX /* Register that can be used for shiftcount.
+ -1 if any reg will do. Normally this can be set to -1 but compemu_support is tied to 1 */
+#define MUL_NREG1 R0_INDEX /* %r4 will hold the low 32 bits after a 32x32 mul */
+#define MUL_NREG2 R1_INDEX /* %r5 will hold the high 32 bits */
+
+#define STACK_ALIGN 4
+#define STACK_OFFSET sizeof(void *)
+#define STACK_SHADOW_SPACE 0
+
+/* Register-allocator tables: r2/r3 are reserved scratch; the others may
+   hold byte/word operands. Each list is terminated by -1. */
+uae_s8 always_used[]={2,3,-1};
+uae_s8 can_byte[]={0,1,4,5,6,7,8,9,10,11,12,-1};
+uae_s8 can_word[]={0,1,4,5,6,7,8,9,10,11,12,-1};
+
+/* Per-register flag: non-zero if the register is callee-saved by the ABI. */
+uae_u8 call_saved[]={0,0,0,0,1,1,1,1,1,1,1,1,0,1,1,1};
+
+/* This *should* be the same as call_saved. But:
+ - We might not really know which registers are saved, and which aren't,
+ so we need to preserve some, but don't want to rely on everyone else
+ also saving those registers
+ - Special registers (such like the stack pointer) should not be "preserved"
+ by pushing, even though they are "saved" across function calls
+*/
+static const uae_u8 need_to_preserve[]={0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0};
+static const uae_u32 PRESERVE_MASK = ((1<<R4_INDEX)|(1<<R5_INDEX)|(1<<R6_INDEX)|(1<<R7_INDEX)|(1<<R8_INDEX)|(1<<R9_INDEX)
+ |(1<<R10_INDEX)|(1<<R11_INDEX)|(1<<R12_INDEX));
+
+/* Whether classes of instructions do or don't clobber the native flags.
+   Empty definitions mean the emitter preserves the live flag state;
+   the others force the compiler core to spill flags first. */
+#define CLOBBER_MOV
+#define CLOBBER_LEA
+#define CLOBBER_CMOV
+#define CLOBBER_POP
+#define CLOBBER_PUSH
+#define CLOBBER_SUB clobber_flags()
+#define CLOBBER_SBB clobber_flags()
+#define CLOBBER_CMP clobber_flags()
+#define CLOBBER_ADD clobber_flags()
+#define CLOBBER_ADC clobber_flags()
+#define CLOBBER_AND clobber_flags()
+#define CLOBBER_OR clobber_flags()
+#define CLOBBER_XOR clobber_flags()
+
+#define CLOBBER_ROL clobber_flags()
+#define CLOBBER_ROR clobber_flags()
+#define CLOBBER_SHLL clobber_flags()
+#define CLOBBER_SHRL clobber_flags()
+#define CLOBBER_SHRA clobber_flags()
+#define CLOBBER_TEST clobber_flags()
+#define CLOBBER_CL16
+#define CLOBBER_CL8
+#define CLOBBER_SE32
+#define CLOBBER_SE16
+#define CLOBBER_SE8
+#define CLOBBER_ZE32
+#define CLOBBER_ZE16
+#define CLOBBER_ZE8
+#define CLOBBER_SW16
+#define CLOBBER_SW32
+#define CLOBBER_SETCC
+#define CLOBBER_MUL clobber_flags()
+#define CLOBBER_BT clobber_flags()
+#define CLOBBER_BSF clobber_flags()
+
+#include "codegen_arm.h"
+
+/* Thin aliases so generated code can use arm_* names for the generic emitters. */
+#define arm_emit_byte(B) emit_byte(B)
+#define arm_emit_word(W) emit_word(W)
+#define arm_emit_long(L) emit_long(L)
+#define arm_emit_quad(Q) emit_quad(Q)
+#define arm_get_target() get_target()
+#define arm_emit_failure(MSG) jit_fail(MSG, __FILE__, __LINE__, __FUNCTION__)
+
+const bool optimize_imm8 = true;
+
+/*
+ * Helper functions for immediate optimization
+ */
+/* True if x fits in a signed 8-bit immediate. */
+static inline int isbyte(uae_s32 x)
+{
+ return (x>=-128 && x<=127);
+}
+
+/* True if |x| fits in an 8-bit magnitude (ARM load/store offset range). */
+static inline int is8bit(uae_s32 x)
+{
+ return (x>=-255 && x<=255);
+}
+
+/* True if x fits in a signed 16-bit immediate. */
+static inline int isword(uae_s32 x)
+{
+ return (x>=-32768 && x<=32767);
+}
+
+/* Abort with a diagnostic when an unimplemented emitter is reached. */
+#define jit_unimplemented(fmt, ...) do{ panicbug("**** Unimplemented ****"); panicbug(fmt, ## __VA_ARGS__); abort(); }while (0)
+
+#if 0 /* currently unused */
+static void jit_fail(const char *msg, const char *file, int line, const char *function)
+{
+ panicbug("JIT failure in function %s from file %s at line %d: %s",
+ function, file, line, msg);
+ abort();
+}
+#endif
+
+/* Push/pop a single register on the native stack. */
+LOWFUNC(NONE,WRITE,1,raw_push_l_r,(RR4 r))
+{
+ PUSH(r);
+}
+
+LOWFUNC(NONE,READ,1,raw_pop_l_r,(RR4 r))
+{
+ POP(r);
+}
+
+/* d.b = d.b + s.b + C, x86 ADC style. Both operands are shifted into the
+   top byte (the low bits of the shifted d are filled with ones via the
+   mvn/orr pair so carry-in propagates) so ADCS sets N/Z/C/V for the
+   8-bit result. The upper 24 bits of d are preserved. */
+LOWFUNC(RMW,NONE,2,raw_adc_b,(RW1 d, RR1 s))
+{
+ MVN_ri(REG_WORK1, 0); // mvn r2,#0
+ LSL_rri(REG_WORK2, d, 24); // lsl r3, %[d], #24
+ ORR_rrrLSRi(REG_WORK2, REG_WORK2, REG_WORK1, 8); // orr r3, r3, r2, lsr #8
+ LSL_rri(REG_WORK1, s, 24); // lsl r2, %[s], #24
+
+ ADCS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // adcs r3, r3, r2
+
+ BIC_rri(d, d, 0xFF); // bic %[d],%[d],#0xFF
+ ORR_rrrLSRi(d, d, REG_WORK2, 24); // orr %[d],%[d], R3 LSR #24
+}
+
+/* d.w = d.w + s.w + C; same top-aligned trick for 16-bit operands. */
+LOWFUNC(RMW,NONE,2,raw_adc_w,(RW2 d, RR2 s))
+{
+ MVN_ri(REG_WORK1, 0); // mvn r2,#0
+ LSL_rri(REG_WORK2, d, 16); // lsl r3, %[d], #16
+ ORR_rrrLSRi(REG_WORK2, REG_WORK2, REG_WORK1, 16); // orr r3, r3, r2, lsr #16
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, %[s], #16
+
+ ADCS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // adcs r3, r3, r2
+#ifdef ARMV6_ASSEMBLY
+ PKHTB_rrrASRi(d,d,REG_WORK2,16);
+#else
+ BIC_rri(d, d, 0xff); // bic %[d],%[d],#0xff
+ BIC_rri(d, d, 0xff00); // bic %[d],%[d],#0xff00
+ ORR_rrrLSRi(d, d, REG_WORK2, 16); // orr %[d], %[d], r3, lsr #16
+#endif
+}
+
+/* d.l = d.l + s.l + C. */
+LOWFUNC(RMW,NONE,2,raw_adc_l,(RW4 d, RR4 s))
+{
+ ADCS_rrr(d, d, s); // adcs %[d],%[d],%[s]
+}
+
+/* d.b += s.b; operands are shifted into the top byte so ADDS sets the
+   native flags for the 8-bit result. Upper 24 bits of d preserved. */
+LOWFUNC(WRITE,NONE,2,raw_add_b,(RW1 d, RR1 s))
+{
+ LSL_rri(REG_WORK1, s, 24); // lsl r2, %[s], #24
+ LSL_rri(REG_WORK2, d, 24); // lsl r3, %[d], #24
+
+ ADDS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // adds r3, r3, r2
+
+ BIC_rri(d, d, 0xFF); // bic %[d],%[d],#0xFF
+ ORR_rrrLSRi(d, d, REG_WORK2, 24); // orr %[d],%[d], r3 LSR #24
+}
+
+/* d.w += s.w; same top-aligned scheme for 16-bit operands. */
+LOWFUNC(WRITE,NONE,2,raw_add_w,(RW2 d, RR2 s))
+{
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, %[s], #16
+ LSL_rri(REG_WORK2, d, 16); // lsl r3, %[d], #16
+
+ ADDS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // adds r3, r3, r2
+
+#ifdef ARMV6_ASSEMBLY
+ PKHTB_rrrASRi(d,d,REG_WORK2,16);
+#else
+ BIC_rri(d, d, 0xff); // bic %[d],%[d],#0xff
+ BIC_rri(d, d, 0xff00); // bic %[d],%[d],#0xff00
+ ORR_rrrLSRi(d, d, REG_WORK2, 16); // orr r7, r7, r3, LSR #16
+#endif
+}
+
+/* d.l += s.l with flags. */
+LOWFUNC(WRITE,NONE,2,raw_add_l,(RW4 d, RR4 s))
+{
+ ADDS_rrr(d, d, s); // adds %[d], %[d], %[s]
+}
+
+/* d.w += imm, setting native flags for the 16-bit result. The immediate
+   is loaded PC-relative from an inline literal (or the data buffer).
+   PC-relative loads read from pc+8, so the literal offset must be
+   (insns between the load and the literal - 1) * 4: the ARMv6 path emits
+   5 insns before the literal (lsl, lsl, adds, pkhtb, b) => #16, the
+   generic path 7 (lsl, lsl, adds, bic, bic, orr, b) => #24. The two
+   offsets were previously swapped, making each branch read the wrong
+   word as its immediate. */
+LOWFUNC(WRITE,NONE,2,raw_add_w_ri,(RW2 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_word_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldrh r2, [pc, #offs]
+#else
+# ifdef ARMV6_ASSEMBLY
+ LDRH_rRI(REG_WORK1, RPC_INDEX, 16); // ldrh r2, [pc, #16] ; <value>
+# else
+ LDRH_rRI(REG_WORK1, RPC_INDEX, 24); // ldrh r2, [pc, #24] ; <value>
+# endif
+#endif
+ LSL_rri(REG_WORK2, d, 16); // lsl r3, %[d], #16
+ LSL_rri(REG_WORK1, REG_WORK1, 16); // lsl r2, r2, #16
+
+ ADDS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // adds r3, r3, r2
+
+#ifdef ARMV6_ASSEMBLY
+ PKHTB_rrrASRi(d,d,REG_WORK2,16);
+#else
+ BIC_rri(d, d, 0xff); // bic %[d],%[d],#0xff
+ BIC_rri(d, d, 0xff00); // bic %[d],%[d],#0xff00
+ ORR_rrrLSRi(d, d, REG_WORK2, 16); // orr %[d],%[d], r3, LSR #16
+#endif
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_word(i);
+ skip_word(0);
+ //<jp>:
+#endif
+}
+
+/* d.b += imm; the 8-bit immediate is encodable directly (rotated into
+   the top byte), so no literal pool is needed. */
+LOWFUNC(WRITE,NONE,2,raw_add_b_ri,(RW1 d, IMM i))
+{
+ LSL_rri(REG_WORK2, d, 24); // lsl r3, %[d], #24
+
+ ADDS_rri(REG_WORK2, REG_WORK2, i << 24); // adds r3, r3, #0x12000000
+
+ BIC_rri(d, d, 0xFF); // bic %[d],%[d], #0xFF
+ ORR_rrrLSRi(d, d, REG_WORK2, 24); // orr %[d],%[d], r3, lsr #24
+}
+
+/* d.l += imm, immediate loaded from an inline literal. */
+LOWFUNC(WRITE,NONE,2,raw_add_l_ri,(RW4 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+ ADDS_rrr(d, d, REG_WORK1); // adds %[d], %[d], r2
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ ADDS_rrr(d, d, REG_WORK1); // adds %[d], %[d], r2
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(i);
+ //<jp>:
+#endif
+}
+
+/* d.b &= s.b. The mvn/mvn pair builds (s | 0xFFFFFF00) so the AND leaves
+   the upper bits of d intact; the lsls sets N/Z for the byte result and
+   the mrs/bic/msr sequence clears C and V to match x86 AND semantics. */
+LOWFUNC(WRITE,NONE,2,raw_and_b,(RW1 d, RR1 s))
+{
+ MVN_rrLSLi(REG_WORK1, s, 24); // mvn r2, %[s], lsl #24
+ MVN_rrLSRi(REG_WORK1, REG_WORK1, 24); // mvn r2, r2, lsr #24
+ AND_rrr(d, d, REG_WORK1); // and %[d], %[d], r2
+
+ LSLS_rri(REG_WORK1, d, 24); // lsls r2, %[d], #24
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+/* d.w &= s.w; word variant of the above. */
+LOWFUNC(WRITE,NONE,2,raw_and_w,(RW2 d, RR2 s))
+{
+ MVN_rrLSLi(REG_WORK1, s, 16); // mvn r2, %[s], lsl #16
+ MVN_rrLSRi(REG_WORK1, REG_WORK1, 16); // mvn r2, r2, lsr #16
+ AND_rrr(d, d, REG_WORK1); // and %[d], %[d], r2
+
+ LSLS_rri(REG_WORK1, d, 16); // lsls r2, %[d], #16
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+/* d.l &= s.l; C and V cleared afterwards (x86 AND clears CF/OF). */
+LOWFUNC(WRITE,NONE,2,raw_and_l,(RW4 d, RR4 s))
+{
+ ANDS_rrr(d, d, s); // ands r7, r7, r6
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+/* d.l &= imm, immediate loaded from an inline literal. */
+LOWFUNC(WRITE,NONE,2,raw_and_l_ri,(RW4 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 16); // ldr r2, [pc, #16] ; <value>
+#endif
+ ANDS_rrr(d, d, REG_WORK1); // ands %[d], %[d], r2
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(i);
+ //<jp>:
+#endif
+}
+
+/* x86 BSF emulation: find the lowest set bit of s. Isolates it with
+   (s & -s), converts CLZ to a 1-based index (32 - clz), then decrements
+   to the 0-based x86 result when non-zero. Z is set iff s was 0, as
+   x86 BSF requires; other flags come from the teq/sub sequence. */
+LOWFUNC(WRITE,NONE,2,raw_bsf_l_rr,(W4 d, RR4 s))
+{
+ MOV_rr(REG_WORK1, s); // mov r2,%[s]
+ RSB_rri(REG_WORK2, REG_WORK1, 0); // rsb r3,r2,#0
+ AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2); // and r2,r2,r3
+ CLZ_rr(REG_WORK2, REG_WORK1); // clz r3,r2
+ MOV_ri(d, 32); // mov %[d],#32
+ SUB_rrr(d, d, REG_WORK2); // sub %[d],%[d],r3
+
+ MRS_CPSR(REG_WORK2); // mrs r3,cpsr
+ TEQ_ri(d, 0); // teq %[d],#0
+ CC_SUBS_rri(NATIVE_CC_NE, d,d,1); // sub %[d],%[d],#1
+ CC_BIC_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_Z_FLAG); // bic r3,r3,#0x40000000
+ CC_ORR_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_Z_FLAG); // orr r3,r3,#0x40000000
+ MSR_CPSR_r(REG_WORK2); // msr cpsr,r3
+}
+
+/* Byte-swap the low 16 bits of r; the upper halfword is preserved. */
+LOWFUNC(WRITE,NONE,1,raw_bswap_16,(RW2 r))
+{
+#if defined(ARMV6_ASSEMBLY)
+ REVSH_rr(REG_WORK1,r); // revsh r2,%[r]
+ UXTH_rr(REG_WORK1, REG_WORK1); // uxth r2,r2
+ LSR_rri(r, r, 16);
+ ORR_rrrLSLi(r, REG_WORK1, r, 16); // orr %[r], r2, %[r], lsl #16
+#else
+ MOV_rr(REG_WORK1, r); // mov r2, r6
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff0000); // bic r2, r2, #0xff0000
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff000000); // bic r2, r2, #0xff000000
+
+ EOR_rrr(r, r, REG_WORK1); // eor r6, r6, r2
+
+ ORR_rrrLSRi(r, r, REG_WORK1, 8); // orr r6, r6, r2, lsr #8
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff00); // bic r2, r2, #0xff00
+ ORR_rrrLSLi(r,r,REG_WORK1, 8); // orr r6, r6, r2, lsl #8
+#endif
+}
+
+/* Byte-swap all 32 bits of r (classic eor/ror trick pre-ARMv6). */
+LOWFUNC(NONE,NONE,1,raw_bswap_32,(RW4 r))
+{
+#if defined(ARMV6_ASSEMBLY)
+ REV_rr(r,r); // rev %[r],%[r]
+#else
+ EOR_rrrRORi(REG_WORK1, r, r, 16); // eor r2, r6, r6, ror #16
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff0000); // bic r2, r2, #0xff0000
+ ROR_rri(r, r, 8); // ror r6, r6, #8
+ EOR_rrrLSRi(r, r, REG_WORK1, 8); // eor r6, r6, r2, lsr #8
+#endif
+}
+
+/* x86 BT with immediate bit number: copy bit (i mod 32) of r into C,
+   leaving the other flags untouched. */
+LOWFUNC(WRITE,NONE,2,raw_bt_l_ri,(RR4 r, IMM i))
+{
+ int imm = (1 << (i & 0x1f));
+
+ MRS_CPSR(REG_WORK2); // mrs r3, CPSR
+ TST_ri(r, imm); // tst r6, #0x1000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3, r3, #0x20000000
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3, r3, #0x20000000
+ MSR_CPSR_r(REG_WORK2); // msr CPSR_fc, r3
+}
+
+/* x86 BT with register bit number: C := bit (b mod 32) of r. */
+LOWFUNC(WRITE,NONE,2,raw_bt_l_rr,(RR4 r, RR4 b))
+{
+ AND_rri(REG_WORK2, b, 0x1f); // and r3, r7, #0x1f
+ LSR_rrr(REG_WORK1, r, REG_WORK2); // lsr r2, r6, r3
+
+ MRS_CPSR(REG_WORK2); // mrs r3, CPSR
+ TST_ri(REG_WORK1, 1); // tst r2, #1
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3, r3, #0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3, r3, #0x20000000
+ MSR_CPSR_r(REG_WORK2); // msr CPSR_fc, r3
+}
+
+/* x86 BTC: C := bit of r, then complement that bit in r. */
+LOWFUNC(WRITE,NONE,2,raw_btc_l_rr,(RW4 r, RR4 b))
+{
+ MOV_ri(REG_WORK1, 1); // mov r2, #1
+ AND_rri(REG_WORK2, b, 0x1f); // and r3, r7, #0x1f
+ LSL_rrr(REG_WORK1, REG_WORK1, REG_WORK2); // lsl r2, r2, r3
+
+ MRS_CPSR(REG_WORK2); // mrs r3, CPSR
+ TST_rr(r, REG_WORK1); // tst r6, r2
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3, r3, #0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3, r3, #0x20000000
+ EOR_rrr(r, r, REG_WORK1); // eor r6, r6, r2
+ MSR_CPSR_r(REG_WORK2); // msr CPSR_fc, r3
+}
+
+/* x86 BTR: C := bit of r, then clear that bit in r. */
+LOWFUNC(WRITE,NONE,2,raw_btr_l_rr,(RW4 r, RR4 b))
+{
+ MOV_ri(REG_WORK1, 1); // mov r2, #1
+ AND_rri(REG_WORK2, b, 0x1f); // and r3, r7, #0x1f
+ LSL_rrr(REG_WORK1, REG_WORK1, REG_WORK2); // lsl r2, r2, r3
+
+ MRS_CPSR(REG_WORK2); // mrs r3, CPSR
+ TST_rr(r, REG_WORK1); // tst r6, r2
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3, r3, #0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3, r3, #0x20000000
+ BIC_rrr(r, r, REG_WORK1); // bic r6, r6, r2
+ MSR_CPSR_r(REG_WORK2); // msr CPSR_fc, r3
+}
+
+/* x86 BTS: C := bit of r, then set that bit in r. */
+LOWFUNC(WRITE,NONE,2,raw_bts_l_rr,(RW4 r, RR4 b))
+{
+ MOV_ri(REG_WORK1, 1); // mov r2, #1
+ AND_rri(REG_WORK2, b, 0x1f); // and r3, r7, #0x1f
+ LSL_rrr(REG_WORK1, REG_WORK1, REG_WORK2); // lsl r2, r2, r3
+
+ MRS_CPSR(REG_WORK2); // mrs r3, CPSR
+ TST_rr(r, REG_WORK1); // tst r6, r2
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3, r3, #0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3, r3, #0x20000000
+ ORR_rrr(r, r, REG_WORK1); // orr r6, r6, r2
+ MSR_CPSR_r(REG_WORK2); // msr CPSR_fc, r3
+}
+
+/* Conditional move d := s if condition cc holds. LS (C clear or Z set)
+   and HI (C set and Z clear) have no single ARM condition in this
+   encoding scheme and are synthesized with short branches; B*_i(n)
+   skips n+1 instructions. All other cc map directly to a MOVcc. */
+LOWFUNC(READ,NONE,3,raw_cmov_l_rr,(RW4 d, RR4 s, IMM cc))
+{
+ switch (cc) {
+ case 9: // LS
+ BEQ_i(0); // beq <set> Z != 0
+ BCC_i(0); // bcc <continue> C == 0
+
+ //<set>:
+ MOV_rr(d, s); // mov r7,r6
+ break;
+
+ case 8: // HI
+ BEQ_i(1); // beq <continue> Z != 0
+ BCS_i(0); // bcs <continue> C != 0
+ MOV_rr(d, s); // mov r7,r6
+ break;
+
+ default:
+ CC_MOV_rr(cc, d, s); // movcc r7,r6
+ break;
+ }
+ //<continue>:
+}
+
+/* Compare d.b with s.b. After the compare, C is inverted with EOR
+   because ARM uses C=1 for "no borrow" while the emulated x86 flag
+   convention expects C=1 for borrow. */
+LOWFUNC(WRITE,NONE,2,raw_cmp_b,(RR1 d, RR1 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ SXTB_rr(REG_WORK1, d); // sxtb r2,%[d]
+ SXTB_rr(REG_WORK2, s); // sxtb r3,%[s]
+#else
+ LSL_rri(REG_WORK1, d, 24); // lsl r2,r6,#24
+ LSL_rri(REG_WORK2, s, 24); // lsl r3,r7,#24
+#endif
+ CMP_rr(REG_WORK1, REG_WORK2); // cmp r2, r3
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+/* Compare d.w with s.w; carry inverted as above. */
+LOWFUNC(WRITE,NONE,2,raw_cmp_w,(RR2 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ SXTH_rr(REG_WORK1, d); // sxth r2,%[d]
+ SXTH_rr(REG_WORK2, s); // sxth r3,%[s]
+#else
+ LSL_rri(REG_WORK1, d, 16); // lsl r2, %[d], #16
+ LSL_rri(REG_WORK2, s, 16); // lsl r3, %[s], #16
+#endif
+
+ CMP_rr(REG_WORK1, REG_WORK2); // cmp r2, r3
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+/* Compare d.l with s.l; carry inverted as above. */
+LOWFUNC(WRITE,NONE,2,raw_cmp_l,(RR4 d, RR4 s))
+{
+ CMP_rr(d, s); // cmp r7, r6
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+/* Signed 32x32 multiply keeping only the low 32 bits in d. */
+LOWFUNC(NONE,NONE,2,raw_imul_32_32,(RW4 d, RR4 s))
+{
+ SMULL_rrrr(REG_WORK1, REG_WORK2, d, s); // smull r2,r3,r7,r6
+ MOV_rr(d, REG_WORK1); // mov r7,r2
+}
+
+/* Signed 32x32 -> 64 multiply; low half to MUL_NREG1, high to MUL_NREG2. */
+LOWFUNC(NONE,NONE,2,raw_imul_64_32,(RW4 d, RW4 s))
+{
+ SMULL_rrrr(REG_WORK1, REG_WORK2, d, s); // smull r2,r3,r7,r6
+ MOV_rr(MUL_NREG1, REG_WORK1); // mov r0,r2
+ MOV_rr(MUL_NREG2, REG_WORK2); // mov r1,r3
+}
+
+/* d := s + offset (32-bit), offset loaded from an inline literal. */
+LOWFUNC(NONE,NONE,3,raw_lea_l_brr,(W4 d, RR4 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+ ADD_rrr(d, s, REG_WORK1); // add r7, r6, r2
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ ADD_rrr(d, s, REG_WORK1); // add r7, r6, r2
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(offset);
+ //<jp>:
+#endif
+}
+
+/* d := s + index*factor + offset; factor must be 1/2/4/8 (shift amount). */
+LOWFUNC(NONE,NONE,5,raw_lea_l_brr_indexed,(W4 d, RR4 s, RR4 index, IMM factor, IMM offset))
+{
+ int shft;
+ switch(factor) {
+ case 1: shft=0; break;
+ case 2: shft=1; break;
+ case 4: shft=2; break;
+ case 8: shft=3; break;
+ default: abort();
+ }
+
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // LDR R2,[PC, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 8); // LDR R2,[PC, #8]
+#endif
+ ADD_rrr(REG_WORK1, s, REG_WORK1); // ADD R7,R6,R2
+ ADD_rrrLSLi(d, REG_WORK1, index, shft); // ADD R7,R7,R5,LSL #2
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // B jp
+
+ emit_long(offset);
+ //<jp>;
+#endif
+}
+
+/* d := s + index*factor; factor must be 1/2/4/8. */
+LOWFUNC(NONE,NONE,4,raw_lea_l_rr_indexed,(W4 d, RR4 s, RR4 index, IMM factor))
+{
+ int shft;
+ switch(factor) {
+ case 1: shft=0; break;
+ case 2: shft=1; break;
+ case 4: shft=2; break;
+ case 8: shft=3; break;
+ default: abort();
+ }
+
+ ADD_rrrLSLi(d, s, index, shft); // ADD R7,R6,R5,LSL #2
+}
+
+/* d.b := byte at [offset + s]; upper 24 bits of d preserved.
+   offset is a 32-bit base address loaded from an inline literal. */
+LOWFUNC(NONE,READ,3,raw_mov_b_brR,(W1 d, RR4 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 12); // ldr r2, [pc, #12] ; <value>
+#endif
+ LDRB_rRR(REG_WORK1, REG_WORK1, s); // ldrb r2, [r2, r6]
+
+ BIC_rri(d, d, 0xff); // bic r7, r7, #0xff
+ ORR_rrr(d, d, REG_WORK1); // orr r7, r7, r2
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(offset);
+ //<jp>:
+#endif
+}
+
+/* Store s.b to [offset + d]. */
+LOWFUNC(NONE,WRITE,3,raw_mov_b_bRr,(RR4 d, RR1 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2,[pc, #offs]
+ STRB_rRR(s, d, REG_WORK1); // strb r6,[r7, r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2,[pc,#4]
+ STRB_rRR(s, d, REG_WORK1); // strb r6,[r7, r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(offset);
+ //<jp>:
+#endif
+}
+
+/* Store immediate byte s to absolute address d. */
+LOWFUNC(NONE,WRITE,2,raw_mov_b_mi,(MEMW d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <d>
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 8); // ldr r2, [pc, #8] ; <d>
+#endif
+ MOV_ri(REG_WORK2, s & 0xFF); // mov r3, #0x34
+ STRB_rR(REG_WORK2, REG_WORK1); // strb r3, [r2]
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //d:
+ emit_long(d);
+
+ //<jp>:
+#endif
+}
+
+/* Store s.b to absolute address d. */
+LOWFUNC(NONE,WRITE,2,raw_mov_b_mr,(IMM d, RR1 s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+ STRB_rR(s, REG_WORK1); // strb r6, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ STRB_rR(s, REG_WORK1); // strb r6, [r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+/* d.b := immediate; upper 24 bits preserved. */
+LOWFUNC(NONE,NONE,2,raw_mov_b_ri,(W1 d, IMM s))
+{
+ BIC_rri(d, d, 0xff); // bic %[d], %[d], #0xff
+ ORR_rri(d, d, (s & 0xff)); // orr %[d], %[d], #%[s]
+}
+
+/* d.b := byte at absolute address s; upper 24 bits preserved. */
+LOWFUNC(NONE,READ,2,raw_mov_b_rm,(W1 d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(s);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 12); // ldr r2, [pc, #12] ; <value>
+#endif
+ LDRB_rR(REG_WORK2, REG_WORK1); // ldrb r3, [r2]
+ BIC_rri(d, d, 0xff); // bic r7, r7, #0xff
+ ORR_rrr(d, REG_WORK2, d); // orr r7, r3, r7
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(s);
+ //<jp>:
+#endif
+}
+
+/* d.b := s.b; upper 24 bits of d preserved. */
+LOWFUNC(NONE,NONE,2,raw_mov_b_rr,(W1 d, RR1 s))
+{
+ AND_rri(REG_WORK1, s, 0xff); // and r2, %[s], #0xff
+ BIC_rri(d, d, 0x0ff); // bic %[d], %[d], #0xff
+ ORR_rrr(d, d, REG_WORK1); // orr %[d], %[d], r2
+}
+
+/* d := long at [offset + s]; base address from an inline literal. */
+LOWFUNC(NONE,READ,3,raw_mov_l_brR,(W4 d, RR4 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+ LDR_rRR(d, REG_WORK1, s); // ldr r7, [r2, r6]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ LDR_rRR(d, REG_WORK1, s); // ldr r7, [r2, r6]
+
+ B_i(0); // b <jp>
+
+ emit_long(offset); //<value>:
+ //<jp>:
+#endif
+}
+
+/* Store s (32 bits) to [offset + d]. */
+LOWFUNC(NONE,WRITE,3,raw_mov_l_bRr,(RR4 d, RR4 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2,[pc, #offs]
+ STR_rRR(s, d, REG_WORK1); // str R6,[R7, r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2,[pc,#4] ; <value>
+ STR_rRR(s, d, REG_WORK1); // str R6,[R7, r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(offset);
+ //<jp>:
+#endif
+}
+
+/* Store immediate long s to absolute address d; both values come from
+   inline literals (B_i(1) skips the two emitted words). */
+LOWFUNC(NONE,WRITE,2,raw_mov_l_mi,(MEMW d, IMM s))
+{
+ // TODO: optimize imm
+
+#if defined(USE_DATA_BUFFER)
+ data_check_end(8, 12);
+ long offs = data_long_offs(d);
+
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; d
+
+ offs = data_long_offs(s);
+ LDR_rRI(REG_WORK2, RPC_INDEX, offs); // ldr r3, [pc, #offs] ; s
+
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 8); // ldr r2, [pc, #8] ; <value>
+ LDR_rRI(REG_WORK2, RPC_INDEX, 8); // ldr r3, [pc, #8] ; <value2>
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+ B_i(1); // b <jp>
+
+ emit_long(d); //<value>:
+ emit_long(s); //<value2>:
+
+ //<jp>:
+#endif
+}
+
+/* d.w := halfword at [offset + s]; upper halfword of d preserved. */
+LOWFUNC(NONE,READ,3,raw_mov_w_brR,(W2 d, RR4 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+# ifdef ARMV6_ASSEMBLY
+ LDR_rRI(REG_WORK1, RPC_INDEX, 8); // ldr r2, [pc, #8] ; <value>
+# else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 16); // ldr r2, [pc, #16] ; <value>
+# endif
+#endif
+ LDRH_rRR(REG_WORK1, REG_WORK1, s); // ldrh r2, [r2, r6]
+
+#ifdef ARMV6_ASSEMBLY
+ PKHBT_rrr(d,REG_WORK1,d);
+#else
+ BIC_rri(d, d, 0xff); // bic r7, r7, #0xff
+ BIC_rri(d, d, 0xff00); // bic r7, r7, #0xff00
+ ORR_rrr(d, d, REG_WORK1); // orr r7, r7, r2
+#endif
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ emit_long(offset); //<value>:
+ //<jp>:
+#endif
+}
+
+/* Store s.w to [offset + d]. */
+LOWFUNC(NONE,WRITE,3,raw_mov_w_bRr,(RR4 d, RR2 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2,[pc, #offs]
+ STRH_rRR(s, d, REG_WORK1); // strh r6,[r7, r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2,[pc,#4]
+ STRH_rRR(s, d, REG_WORK1); // strh r6,[r7, r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(offset);
+ //<jp>:
+#endif
+}
+
+/* Store s.w to absolute address d. */
+LOWFUNC(NONE,WRITE,2,raw_mov_w_mr,(IMM d, RR2 s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc,#offs]
+ STRH_rR(s, REG_WORK1); // strh r6, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ STRH_rR(s, REG_WORK1); // strh r6, [r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+/* d.w := immediate; upper halfword of d preserved. The immediate is read
+   PC-relative from an inline literal. PC-relative loads see pc+8, so the
+   offset must be (insns between load and literal - 1) * 4: the ARMv6
+   path emits 2 insns before the literal (pkhbt, b) => #4, the generic
+   path 4 (bic, bic, orr, b) => #12. The two offsets were previously
+   swapped, making each branch read the wrong word as its immediate. */
+LOWFUNC(NONE,NONE,2,raw_mov_w_ri,(W2 d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_word_offs(s);
+ LDR_rRI(REG_WORK2, RPC_INDEX, offs); // ldr r3, [pc, #offs]
+#else
+# ifdef ARMV6_ASSEMBLY
+ LDRH_rRI(REG_WORK2, RPC_INDEX, 4); // ldrh r3, [pc, #4] ; <value>
+# else
+ LDRH_rRI(REG_WORK2, RPC_INDEX, 12); // ldrh r3, [pc, #12] ; <value>
+# endif
+#endif
+
+#ifdef ARMV6_ASSEMBLY
+ PKHBT_rrr(d,REG_WORK2,d);
+#else
+ BIC_rri(REG_WORK1, d, 0xff); // bic r2, r7, #0xff
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff00); // bic r2, r2, #0xff00
+ ORR_rrr(d, REG_WORK2, REG_WORK1); // orr r7, r3, r2
+#endif
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_word(s);
+ skip_word(0);
+ //<jp>:
+#endif
+}
+
+/* Store immediate halfword s to absolute address d; address and value
+   come from inline literals (B_i(1) skips both emitted words). */
+LOWFUNC(NONE,WRITE,2,raw_mov_w_mi,(MEMW d, IMM s))
+{
+ // TODO: optimize imm
+
+#if defined(USE_DATA_BUFFER)
+ data_check_end(8, 12);
+ long offs = data_long_offs(d);
+
+ LDR_rRI(REG_WORK2, RPC_INDEX, offs); // ldr r3, [pc, #offs] ; <mem>
+
+ offs = data_word_offs(s);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <imm>
+
+ STRH_rR(REG_WORK1, REG_WORK2); // strh r2, [r3]
+#else
+ LDR_rRI(REG_WORK2, RPC_INDEX, 8); // ldr r3, [pc, #8] ; <mem>
+ LDRH_rRI(REG_WORK1, RPC_INDEX, 8); // ldrh r2, [pc, #8] ; <imm>
+ STRH_rR(REG_WORK1, REG_WORK2); // strh r2, [r3]
+ B_i(1); // b <jp>
+
+ //mem:
+ emit_long(d);
+ //imm:
+ emit_word(s);
+ skip_word(0); // Alignment
+
+ //<jp>:
+#endif
+}
+
+/* Store s (32 bits) to absolute address d. */
+LOWFUNC(NONE,WRITE,2,raw_mov_l_mr,(IMM d, RR4 s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+ STR_rR(s, REG_WORK1); // str r6, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ STR_rR(s, REG_WORK1); // str r6, [r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+/* Store immediate halfword i to [d + offset]; offset must fit a signed
+   byte (checked), negative offsets use the subtracting form. */
+LOWFUNC(NONE,WRITE,3,raw_mov_w_Ri,(RR4 d, IMM i, IMM offset))
+{
+ Dif(!isbyte(offset)) abort();
+
+#if defined(USE_DATA_BUFFER)
+ long offs = data_word_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDRH_rRI(REG_WORK1, RPC_INDEX, 4); // ldrh r2, [pc, #4] ; <value>
+#endif
+ if (offset >= 0)
+ STRH_rRI(REG_WORK1, d, offset); // strh r2, [r7, #0x54]
+ else
+ STRH_rRi(REG_WORK1, d, -offset);// strh r2, [r7, #-0x54]
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_word(i);
+ skip_word(0);
+ //<jp>:
+#endif
+}
+
+/* d.w := halfword at absolute address s; upper halfword of d preserved. */
+LOWFUNC(NONE,READ,2,raw_mov_w_rm,(W2 d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(s);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 12); // ldr r2, [pc, #12] ; <value>
+#endif
+ LDRH_rR(REG_WORK1, REG_WORK1); // ldrh r2, [r2]
+ LSR_rri(d, d, 16); // lsr r7, r7, #16
+ ORR_rrrLSLi(d, REG_WORK1, d, 16); // orr r7, r2, r7, lsl #16
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(s);
+ //<jp>:
+#endif
+}
+
+/* d.w := s.w; upper halfword of d preserved (shift/or/ror shuffle). */
+LOWFUNC(NONE,NONE,2,raw_mov_w_rr,(W2 d, RR2 s))
+{
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, r6, #16
+ ORR_rrrLSRi(d, REG_WORK1, d, 16); // orr r7, r2, r7, lsr #16
+ ROR_rri(d, d, 16); // ror r7, r7, #16
+}
+
+/* d.w := halfword at [s + offset]; offset must fit a signed byte. */
+LOWFUNC(NONE,READ,3,raw_mov_w_rR,(W2 d, RR4 s, IMM offset))
+{
+ Dif(!isbyte(offset)) abort();
+
+ if (offset >= 0)
+ LDRH_rRI(REG_WORK1, s, offset); // ldrh r2, [r6, #12]
+ else
+ LDRH_rRi(REG_WORK1, s, -offset); // ldrh r2, [r6, #-12]
+
+#ifdef ARMV6_ASSEMBLY
+ PKHBT_rrr(d,REG_WORK1,d);
+#else
+ BIC_rri(d, d, 0xff); // bic r7, r7, #0xff
+ BIC_rri(d, d, 0xff00); // bic r7, r7, #0xff00
+ ORR_rrr(d, d, REG_WORK1); // orr r7, r7, r2
+#endif
+}
+
+/* Store s.w to [d + offset]; offset must fit a signed byte. */
+LOWFUNC(NONE,WRITE,3,raw_mov_w_Rr,(RR4 d, RR2 s, IMM offset))
+{
+ Dif(!isbyte(offset)) abort();
+
+ if (offset >= 0)
+ STRH_rRI(s, d, offset); // strh r6, [r7, #0x7f]
+ else
+ STRH_rRi(s, d, -offset);// strh r6, [r7, #-0x7f]
+}
+
+/* d := long at absolute address s (address from an inline literal). */
+LOWFUNC(NONE,READ,2,raw_mov_l_rm,(W4 d, MEMR s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(s);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+ LDR_rR(d, REG_WORK1); // ldr r7, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ LDR_rR(d, REG_WORK1); // ldr r7, [r2]
+ B_i(0); // b <jp>
+
+ emit_long(s); //<value>:
+
+ //<jp>:
+#endif
+}
+
+/* d := long at [base + index*factor]; factor must be 1/2/4/8. */
+LOWFUNC(NONE,READ,4,raw_mov_l_rm_indexed,(W4 d, MEMR base, RR4 index, IMM factor))
+{
+ int shft;
+ switch(factor) {
+ case 1: shft=0; break;
+ case 2: shft=1; break;
+ case 4: shft=2; break;
+ case 8: shft=3; break;
+ default: abort();
+ }
+
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(base);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+ LDR_rRR_LSLi(d, REG_WORK1, index, shft); // ldr %[d], [r2, %[index], lsl #[shift]]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ LDR_rRR_LSLi(d, REG_WORK1, index, shft); // ldr %[d], [r2, %[index], lsl #[shift]]
+
+ B_i(0); // b <jp>
+ emit_long(base); //<value>:
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,WRITE,3,raw_mov_l_Ri,(RR4 d, IMM i, IMM offset8))
+{
+ Dif(!isbyte(offset8)) abort();
+
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+#endif
+ if (offset8 >= 0)
+ STR_rRI(REG_WORK1, d, offset8); // str r2, [r7, #0x54]
+ else
+ STR_rRi(REG_WORK1, d, -offset8); // str r2, [r7, #-0x54]
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(i);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,READ,3,raw_mov_l_rR,(W4 d, RR4 s, IMM offset))
+{
+ Dif(!isbyte(offset)) abort();
+
+ if (offset >= 0) {
+ LDR_rRI(d, s, offset); // ldr r2, [r1, #12]
+ } else
+ LDR_rRi(d, s, -offset); // ldr r2, [r1, #-12]
+}
+
+LOWFUNC(NONE,NONE,2,raw_mov_l_rr,(W4 d, RR4 s))
+{
+ MOV_rr(d, s); // mov %[d], %[s]
+}
+
+LOWFUNC(NONE,WRITE,3,raw_mov_l_Rr,(RR4 d, RR4 s, IMM offset))
+{
+ Dif(!isbyte(offset)) abort();
+
+ if (offset >= 0)
+ STR_rRI(s, d, offset); // str r6, [r7, #12]
+ else
+ STR_rRi(s, d, -offset); // str r6, [r7, #-12]
+}
+
+LOWFUNC(NONE,NONE,2,raw_mul_64_32,(RW4 d, RW4 s))
+{
+ UMULL_rrrr(REG_WORK1, REG_WORK2, d, s); // umull r2,r3,r7,r6 (64-bit product: lo in r2, hi in r3)
+ MOV_rr(MUL_NREG1, REG_WORK1); // low 32 bits -> MUL_NREG1
+ MOV_rr(MUL_NREG2, REG_WORK2); // high 32 bits -> MUL_NREG2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_or_b,(RW1 d, RR1 s))
+{
+ AND_rri(REG_WORK1, s, 0xFF); // and r2, %[s], 0xFF
+ ORR_rrr(d, d, REG_WORK1); // orr %[d], %[d], r2
+ LSLS_rri(REG_WORK1, d, 24); // lsls r2, %[d], #24 (set N/Z from the result byte)
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (logical op clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_or_w,(RW2 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, s); // UXTH r2, %[s]
+#else
+ BIC_rri(REG_WORK1, s, 0xff000000); // bic r2, %[s], #0xff000000
+ BIC_rri(REG_WORK1, REG_WORK1, 0x00ff0000); // bic r2, r2, #0x00ff0000
+#endif
+ ORR_rrr(d, d, REG_WORK1); // orr %[d], %[d], r2
+ LSLS_rri(REG_WORK1, d, 16); // lsls r2, %[d], #16 (set N/Z from the result word)
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (logical op clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_or_l,(RW4 d, RR4 s))
+{
+ ORRS_rrr(d, d, s); // orrs r7, r7, r6
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (logical op clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_or_l_ri,(RW4 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // LDR r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 16); // LDR r2, [pc,#16] ; <value>
+#endif
+ ORRS_rrr(d, d, REG_WORK1); // ORRS r7,r7,r2
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (logical op clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ // value:
+ emit_long(i);
+ //jp:
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_rol_b_ri,(RW1 r, IMM i))
+{
+ // TODO: Check if the Bittest is necessary. compemu.c seems to do it itself, but meanwhile make sure, that carry is set correctly
+ int imm = 32 - (i & 0x1f); // rotate-left by n == rotate-right by 32-n
+
+ MOV_rrLSLi(REG_WORK1, r, 24); // mov r2,r7,lsl #24 (replicate byte...
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 8); // orr r2,r2,r2,lsr #8 ...into all four byte lanes)
+
+ RORS_rri(REG_WORK1, REG_WORK1, imm); // rors r2,r2,#(32 - (i & 0x1f))
+
+ MRS_CPSR(REG_WORK2); // mrs r3,cpsr
+ TST_ri(REG_WORK1, 1); // tst r2,#1 (carry = bit rotated into bit 0)
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3,r3,#0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3,r3,#0x20000000
+ MSR_CPSR_r(REG_WORK2);
+
+ AND_rri(REG_WORK1, REG_WORK1, 0xff); // and r2,r2,#0xff
+ BIC_rri(r, r, 0xff); // bic r7,r7,#0xff
+ ORR_rrr(r, r, REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_rol_b_rr,(RW1 d, RR1 r))
+{
+ // TODO: Check if the Bittest is necessary. compemu.c seems to do it itself, but meanwhile make sure, that carry is set correctly
+
+ MOV_ri(REG_WORK2, 32); // mov r3,#32
+ AND_rri(REG_WORK1, r, 0x1f); // and r2,r6,#0x1f
+ SUB_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // sub r3,r3,r2 (rol n == ror 32-n)
+
+ MOV_rrLSLi(REG_WORK1, d, 24); // mov r2,r7,lsl #24 (replicate byte...
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 8); // orr r2,r2,r2,lsr #8 ...into all four byte lanes)
+
+ RORS_rrr(REG_WORK1, REG_WORK1, REG_WORK2); // rors r2,r2,r3
+
+ MRS_CPSR(REG_WORK2); // mrs r3,cpsr
+ TST_ri(REG_WORK1, 1); // tst r2,#1 (carry = bit rotated into bit 0)
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3,r3,#0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3,r3,#0x20000000
+ MSR_CPSR_r(REG_WORK2);
+
+ AND_rri(REG_WORK1, REG_WORK1, 0xff); // and r2,r2,#0xff
+ BIC_rri(d, d, 0xff); // bic r7,r7,#0xff
+
+ ORR_rrr(d, d, REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_rol_w_ri,(RW2 r, IMM i))
+{
+ // TODO: Check if the Bittest is necessary. compemu.c seems to do it itself, but meanwhile make sure, that carry is set correctly
+ int imm = 32 - (i & 0x1f); // rol n == ror 32-n
+
+ MOV_rrLSLi(REG_WORK1, r, 16); // mov r2,r7,lsl #16 (replicate word into both halves)
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+
+ RORS_rri(REG_WORK1, REG_WORK1, imm); // rors r2,r2,#(32 - (i & 0x1f))
+
+ MRS_CPSR(REG_WORK2); // mrs r3,cpsr
+ TST_ri(REG_WORK1, 1); // tst r2,#1
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3,r3,#0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3,r3,#0x20000000
+ MSR_CPSR_r(REG_WORK2);
+
+ BIC_rri(r, r, 0xff00); // bic r7,r7,#0xff00
+ BIC_rri(r, r, 0xff); // bic r7,r7,#0xff
+
+ ORR_rrrLSRi(r, r, REG_WORK1, 16); // orr r7,r7,r2,lsr #16
+}
+
+LOWFUNC(WRITE,NONE,2,raw_rol_w_rr,(RW2 d, RR1 r))
+{
+ // TODO: Check if the Bittest is necessary. compemu.c seems to do it itself, but meanwhile make sure, that carry is set correctly
+
+ MOV_ri(REG_WORK2, 32); // mov r3,#32
+ AND_rri(REG_WORK1, r, 0x1f); // and r2,r6,#0x1f
+ SUB_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // sub r3,r3,r2 (rol n == ror 32-n)
+
+ MOV_rrLSLi(REG_WORK1, d, 16); // mov r2,r7,lsl #16 (replicate word into both halves)
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+
+ RORS_rrr(REG_WORK1, REG_WORK1, REG_WORK2); // rors r2,r2,r3
+
+ MRS_CPSR(REG_WORK2); // mrs r3,cpsr
+ TST_ri(REG_WORK1, 1); // tst r2,#1
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3,r3,#0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3,r3,#0x20000000
+ MSR_CPSR_r(REG_WORK2);
+
+ BIC_rri(d, d, 0xff00); // bic r7,r7,#0xff00
+ BIC_rri(d, d, 0xff); // bic r7,r7,#0xff
+
+ ORR_rrrLSRi(d, d, REG_WORK1, 16); // orr r7,r7,r2,lsr #16
+}
+
+LOWFUNC(WRITE,NONE,2,raw_rol_l_ri,(RW4 r, IMM i))
+{
+ // TODO: Check if the Bittest is necessary. compemu.c seems to do it itself, but meanwhile make sure, that carry is set correctly
+ int imm = 32 - (i & 0x1f); // rol n == ror 32-n; NOTE(review): i==0 yields imm==32, which is not encodable as a ROR immediate — confirm callers never emit 0
+
+ RORS_rri(r, r, imm); // rors r7,r7,#(32 - (i & 0x1f))
+
+ MRS_CPSR(REG_WORK2); // mrs r3,cpsr
+ TST_ri(r, 1); // tst r7,#1
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3,r3,#0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3,r3,#0x20000000
+ MSR_CPSR_r(REG_WORK2);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ror_l_ri,(RW4 r, IMM i))
+{
+ RORS_rri(r, r, i & 0x1F); // rors r7,r7,#(i & 0x1f)
+}
+
+LOWFUNC(WRITE,NONE,2,raw_rol_l_rr,(RW4 d, RR1 r))
+{
+ // TODO: Check if the Bittest is necessary. compemu.c seems to do it itself, but meanwhile make sure, that carry is set correctly
+
+ MOV_ri(REG_WORK1, 32); // mov r2,#32
+ AND_rri(REG_WORK2, r, 0x1f); // and r3,r6,#0x1f
+ SUB_rrr(REG_WORK1, REG_WORK1, REG_WORK2); // sub r2,r2,r3 (rol n == ror 32-n)
+
+ RORS_rrr(d, d, REG_WORK1); // rors r7,r7,r2
+
+ MRS_CPSR(REG_WORK2); // mrs r3,cpsr
+ TST_ri(d, 1); // tst r7,#1
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3,r3,#0x20000000
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3,r3,#0x20000000
+ MSR_CPSR_r(REG_WORK2);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ror_l_rr,(RW4 d, RR1 r))
+{
+ RORS_rrr(d, d, r); // RORS r7,r7,r6
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ror_b_ri,(RW1 r, IMM i))
+{
+ MOV_rrLSLi(REG_WORK1, r, 24); // mov r2,r7,lsl #24 (replicate byte...
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 8); // orr r2,r2,r2,lsr #8 ...into all four byte lanes)
+
+ RORS_rri(REG_WORK1, REG_WORK1, i & 0x1f); // rors r2,r2,#(i & 0x1f)
+
+ AND_rri(REG_WORK1, REG_WORK1, 0xff); // and r2,r2,#0xff
+ BIC_rri(r, r, 0xff); // bic r7,r7,#0xff
+ ORR_rrr(r, r, REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ror_b_rr,(RW1 d, RR1 r))
+{
+ MOV_rrLSLi(REG_WORK1, d, 24); // mov r2,r7,lsl #24 (replicate byte...
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 8); // orr r2,r2,r2,lsr #8 ...into all four byte lanes)
+
+ RORS_rrr(REG_WORK1, REG_WORK1, r); // rors r2,r2,r6
+
+ AND_rri(REG_WORK1, REG_WORK1, 0xff); // and r2,r2,#0xff
+ BIC_rri(d, d, 0xff); // bic r7,r7,#0xff
+ ORR_rrr(d, d, REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ror_w_ri,(RW2 r, IMM i))
+{
+ MOV_rrLSLi(REG_WORK1, r, 16); // mov r2,r7,lsl #16 (replicate word into both halves)
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+
+ RORS_rri(REG_WORK1, REG_WORK1, i & 0x1f); // rors r2,r2,#(i & 0x1f)
+
+ BIC_rri(r, r, 0xff00); // bic r7,r7,#0xff00
+ BIC_rri(r, r, 0xff); // bic r7,r7,#0xff
+
+ ORR_rrrLSRi(r, r, REG_WORK1, 16); // orr r7,r7,r2,lsr #16
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ror_w_rr,(RW2 d, RR1 r))
+{
+ MOV_rrLSLi(REG_WORK1, d, 16); // mov r2,r7,lsl #16 (replicate word into both halves)
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, REG_WORK1, 16); // orr r2,r2,r2,lsr #16
+
+ RORS_rrr(REG_WORK1, REG_WORK1, r); // RORS r2,r2,r6
+
+ BIC_rri(d, d, 0xff00); // bic r7,r7,#0xff00
+ BIC_rri(d, d, 0xff); // bic r7,r7,#0xff
+
+ ORR_rrrLSRi(d, d, REG_WORK1, 16); // orr r7,r7,r2,lsr #16
+}
+
+LOWFUNC(RMW,NONE,2,raw_sbb_b,(RW1 d, RR1 s))
+{
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (68k borrow -> ARM carry)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+ LSL_rri(REG_WORK2, d, 24); // lsl r3, %[d], #24
+ LSL_rri(REG_WORK1, s, 24); // lsl r2, r6, #24
+
+ SBCS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // sbcs r3, r3, r2
+ BIC_rri(d, d, 0xFF);
+ ORR_rrrLSRi(d, d, REG_WORK2, 24); // orr r7, r7, r3, lsr #24
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(RMW,NONE,2,raw_sbb_l,(RW4 d, RR4 s))
+{
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (68k borrow -> ARM carry)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+ SBCS_rrr(d, d, s); // sbcs r7, r7, r6
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(RMW,NONE,2,raw_sbb_w,(RW2 d, RR2 s))
+{
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (68k borrow -> ARM carry)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+ LSL_rri(REG_WORK2, d, 16); // lsl r3, %[d], #16
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, r6, #16
+
+ SBCS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // sbcs r3, r3, r2
+ BIC_rri(d,d, 0xff);
+ BIC_rri(d,d, 0xff00);
+ ORR_rrrLSRi(d, d, REG_WORK2, 16); // orr r7, r7, r3, lsr #16
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(READ,NONE,2,raw_setcc,(W1 d, IMM cc))
+{
+ switch (cc) {
+ case 9: // LS — special-cased because carry is stored inverted: set if Z set or C set
+ BEQ_i(0); // beq <set> (Z set)
+ BCC_i(1); // bcc <unset> (C clear and Z clear); C set falls through to <set>
+
+ MOV_ri(d, 1); // <set>: mov r7,#1
+ B_i(0); // b <continue>
+
+ //<unset>:
+ MOV_ri(d, 0); // mov r7,#0
+ break;
+
+ case 8: // HI — special-cased because carry is stored inverted: set if Z clear and C clear
+ BEQ_i(2); // beq <unset> (Z set)
+ BCS_i(1); // bcs <unset> (C set); otherwise fall through to <set>
+
+ //<set>:
+ MOV_ri(d, 1); // mov r7,#1
+ B_i(0); // b <continue>
+
+ //<unset>:
+ MOV_ri(d, 0); // mov r7,#0
+ break;
+
+ default:
+ CC_MOV_ri(cc, d, 1); // MOVcc R7,#1
+ CC_MOV_ri(cc^1, d, 0); // MOVcc^1 R7,#0 (cc^1 is the inverse ARM condition)
+ break;
+ }
+ //<continue>:
+}
+
+LOWFUNC(READ,WRITE,2,raw_setcc_m,(MEMW d, IMM cc))
+{
+ switch (cc) {
+ case 9: // LS — special-cased because carry is stored inverted: set if Z set or C set
+ BEQ_i(0); // beq <set> (Z set)
+ BCC_i(1); // bcc <unset> (C clear and Z clear); C set falls through to <set>
+
+ MOV_ri(REG_WORK1, 1); // <set>: mov r2,#1
+ B_i(0); // b <continue>
+
+ //<unset>:
+ MOV_ri(REG_WORK1, 0); // mov r2,#0
+ break;
+
+ case 8: // HI — special-cased because carry is stored inverted: set if Z clear and C clear
+ BEQ_i(2); // beq <unset> (Z set)
+ BCS_i(1); // bcs <unset> (C set); otherwise fall through to <set>
+
+ MOV_ri(REG_WORK1, 1); // <set>: mov r2,#1
+ B_i(0); // b <continue>
+
+ //<unset>:
+ MOV_ri(REG_WORK1, 0); // mov r2,#0
+ break;
+
+ default:
+ CC_MOV_ri(cc, REG_WORK1, 1); // MOVcc R2,#1
+ CC_MOV_ri(cc^1, REG_WORK1, 0); // MOVcc^1 R2,#0 (cc^1 is the inverse ARM condition)
+ break;
+ }
+ //<continue>:
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK2, RPC_INDEX, offs); // LDR R3,[PC, #offs]
+#else
+ LDR_rRI(REG_WORK2, RPC_INDEX, 4); // LDR R3,[PC, #4]
+#endif
+ STRB_rR(REG_WORK1, REG_WORK2); // STRB R2,[R3]
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // B <jp>
+
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shll_b_ri,(RW1 r, IMM i))
+{
+ LSL_rri(REG_WORK1, r, 24); // LSL r2,r7,#24 (byte into top lane so LSLS sets N/Z/C for the byte)
+
+ LSLS_rri(REG_WORK1, REG_WORK1, i & 0x1f); // LSLS r2,r2,#(i & 0x1f)
+
+ BIC_rri(r, r, 0xff); // BIC r7,r7,0xff
+ ORR_rrrLSRi(r, r, REG_WORK1, 24); // ORR r7,r7,r2,lsr #24
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shll_b_rr,(RW1 d, RR1 r))
+{
+ LSL_rri(REG_WORK1, d, 24); // LSL r2,r7,#24
+ LSLS_rrr(REG_WORK1, REG_WORK1, r); // LSLS r2,r2,r6
+ BIC_rri(d, d, 0xff); // BIC r7,r7,#0xff
+ ORR_rrrLSRi(d, d, REG_WORK1, 24); // ORR r7,r7,r2,lsr #24
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shll_l_ri,(RW4 r, IMM i))
+{
+ LSLS_rri(r,r, i & 0x1f); // lsls r7,r7,#(i & 0x1f)
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shll_l_rr,(RW4 d, RR1 r))
+{
+ LSLS_rrr(d, d, r);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shll_w_ri,(RW2 r, IMM i))
+{
+ LSL_rri(REG_WORK1, r, 16); // LSL r2,r7,#16 (word into top half so LSLS sets N/Z/C for the word)
+ LSLS_rri(REG_WORK1, REG_WORK1, i&0x1f); // LSLS r2,r2,#(i & 0x1f)
+
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, r, 16); // ORR r2,r2,r7,lsr #16 (re-attach high half of r)
+
+ ROR_rri(r, REG_WORK1, 16); // ROR r7,r2,#16
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shll_w_rr,(RW2 d, RR1 r))
+{
+ LSL_rri(REG_WORK1, d, 16); // LSL r2,r7,#16
+ LSLS_rrr(REG_WORK1, REG_WORK1, r); // LSLS r2,r2,r6
+ ORR_rrrLSRi(REG_WORK1, REG_WORK1, d, 16); // ORR r2,r2,r7,lsr #16 (re-attach high half of d)
+ ROR_rri(d, REG_WORK1, 16); // ROR r7,r2,#16
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shra_b_ri,(RW1 r, IMM i))
+{
+ LSL_rri(REG_WORK1, r, 24); // lsl r2,r7,#24 (sign-extend the byte...
+ ASR_rri(REG_WORK1, REG_WORK1, 24); // asr r2,r2,#24 ...to 32 bits)
+
+ ASRS_rri(REG_WORK1, REG_WORK1, i & 0x1f); // asrs r2,r2,#(i & 0x1f)
+
+ AND_rri(REG_WORK1, REG_WORK1, 0xff); // and r2,r2,#0xff
+ BIC_rri(r,r, 0xff); // bic r7,r7,#0xff
+ ORR_rrr(r,r,REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shra_b_rr,(RW1 d, RR1 r))
+{
+ LSL_rri(REG_WORK1, d, 24); // lsl r2,r7,#24 (sign-extend the byte...
+ ASR_rri(REG_WORK1, REG_WORK1, 24); // asr r2,r2,#24 ...to 32 bits)
+
+ ASRS_rrr(REG_WORK1, REG_WORK1, r); // asrs r2,r2,r6
+
+ AND_rri(REG_WORK1, REG_WORK1, 0xff); // and r2,r2,#0xff
+ BIC_rri(d,d, 0xff); // bic r7,r7,#0xff
+
+ ORR_rrr(d,d,REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shra_w_ri,(RW2 r, IMM i))
+{
+ LSL_rri(REG_WORK1, r, 16); // lsl r2,r7,#16 (sign-extend the word...
+ ASR_rri(REG_WORK1, REG_WORK1, 16); // asr r2,r2,#16 ...to 32 bits)
+
+ ASRS_rri(REG_WORK1, REG_WORK1, i & 0x1f); // asrs r2,r2,#(i & 0x1f)
+
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, REG_WORK1);
+#else
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff000000);
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff0000);
+#endif
+
+ BIC_rri(r,r,0xff00); // bic r7,r7,#0xff00
+ BIC_rri(r,r,0xff); // bic r7,r7,#0xff
+
+ ORR_rrr(r,r,REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shra_w_rr,(RW2 d, RR1 r))
+{
+ LSL_rri(REG_WORK1, d, 16); // lsl r2,r7,#16 (sign-extend the word...
+ ASR_rri(REG_WORK1, REG_WORK1, 16); // asr r2,r2,#16 ...to 32 bits)
+
+ ASRS_rrr(REG_WORK1, REG_WORK1, r); // asrs r2,r2,r6
+
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, REG_WORK1);
+#else
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff000000); // bic r2,r2,#0xff000000
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff0000); // bic r2,r2,#0xff0000
+#endif
+
+ BIC_rri(d,d, 0xff00); // bic r7,r7,#0xff00
+ BIC_rri(d,d, 0xff); // bic r7,r7,#0xff
+
+ ORR_rrr(d,d,REG_WORK1); // orr r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shra_l_ri,(RW4 r, IMM i))
+{
+ ASRS_rri(r, r, i & 0x1f); // ASRS r7,r7,#(i & 0x1f)
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shra_l_rr,(RW4 d, RR1 r))
+{
+ ASRS_rrr(d, d, r); // ASRS r7,r7,r6
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shrl_b_ri,(RW1 r, IMM i))
+{
+ AND_rri(REG_WORK1, r, 0xff); // AND r2,r7,#0xFF (zero-extend the byte)
+
+ LSRS_rri(REG_WORK1, REG_WORK1, i & 0x1f); // LSRS r2,r2,#(i & 0x1f)
+
+ BIC_rri(r, r, 0xFF); // BIC r7,r7,#0xff
+ ORR_rrr(r, r, REG_WORK1); // ORR r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shrl_b_rr,(RW1 d, RR1 r))
+{
+ AND_rri(REG_WORK1, d, 0xff); // AND r2,r7,#0xFF (zero-extend the byte)
+
+ LSRS_rrr(REG_WORK1, REG_WORK1, r); // LSRS r2,r2,r6
+
+ BIC_rri(d, d, 0xFF); // BIC r7,r7,#0xff
+ ORR_rrr(d, d, REG_WORK1); // ORR r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shrl_l_ri,(RW4 r, IMM i))
+{
+ LSRS_rri(r, r, i & 0x1f); // LSRS r7,r7,#(i & 0x1f)
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shrl_w_ri,(RW2 r, IMM i))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, r);
+#else
+ BIC_rri(REG_WORK1, r, 0xff0000); // BIC r2,r7,#0xff0000
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff000000); // BIC r2,r2,#0xff000000
+#endif
+
+ LSRS_rri(REG_WORK1, REG_WORK1, i & 0x1f); // LSRS r2,r2,#(i & 0x1f)
+
+ BIC_rri(r, r, 0xFF); // BIC r7,r7,#0xff
+ BIC_rri(r, r, 0xFF00); // BIC r7,r7,#0xff00
+ ORR_rrr(r, r, REG_WORK1); // ORR r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shrl_w_rr,(RW2 d, RR1 r))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, d);
+#else
+ BIC_rri(REG_WORK1, d, 0xff0000); // BIC r2,r7,#0xff0000
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff000000); // BIC r2,r2,#0xff000000
+#endif
+
+ LSRS_rrr(REG_WORK1, REG_WORK1, r); // LSRS r2,r2,r6
+
+ BIC_rri(d, d, 0xFF); // BIC r7,r7,#0xff
+ BIC_rri(d, d, 0xFF00); // BIC r7,r7,#0xff00
+ ORR_rrr(d, d, REG_WORK1); // ORR r7,r7,r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_shrl_l_rr,(RW4 d, RR1 r))
+{
+ LSRS_rrr(d, d, r);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_sub_b,(RW1 d, RR1 s))
+{
+ LSL_rri(REG_WORK1, s, 24); // lsl r2, r6, #24
+ LSL_rri(REG_WORK2, d, 24); // lsl r3, r7, #24
+
+ SUBS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // subs r3, r3, r2
+ BIC_rri(d, d, 0xFF);
+ ORR_rrrLSRi(d, d, REG_WORK2, 24); // orr r7, r7, r3, lsr #24
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_sub_b_ri,(RW1 d, IMM i))
+{
+ LSL_rri(REG_WORK2, d, 24); // lsl r3, r7, #24
+
+ SUBS_rri(REG_WORK2, REG_WORK2, i << 24); // subs r3, r3, #(i << 24)
+ BIC_rri(d, d, 0xFF); // bic r7, r7, #0xFF
+ ORR_rrrLSRi(d, d, REG_WORK2, 24); // orr r7, r7, r3, lsr #24
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_sub_l,(RW4 d, RR4 s))
+{
+ SUBS_rrr(d, d, s); // subs r7, r7, r6
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_sub_l_ri,(RW4 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 16); // ldr r2, [pc, #16] ; <value>
+#endif
+ SUBS_rrr(d, d, REG_WORK1); // subs r7, r7, r2
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(i);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_sub_w,(RW2 d, RR2 s))
+{
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, r6, #16
+ LSL_rri(REG_WORK2, d, 16); // lsl r3, r7, #16
+
+ SUBS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // subs r3, r3, r2
+ BIC_rri(d, d, 0xff);
+ BIC_rri(d, d, 0xff00);
+ ORR_rrrLSRi(d, d, REG_WORK2, 16); // orr r7, r7, r3, lsr #16
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_sub_w_ri,(RW2 d, IMM i))
+{
+ // TODO: optimize_imm
+
+#if defined(USE_DATA_BUFFER)
+ long offs = data_word_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+#else
+ LDRH_rRI(REG_WORK1, RPC_INDEX, 36); // ldrh r2, [pc, #36] ; <value>
+#endif
+ LSL_rri(REG_WORK1, REG_WORK1, 16); // lsl r2, r2, #16
+ LSL_rri(REG_WORK2, d, 16); // lsl r3, r7, #16
+
+ SUBS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // subs r3, r3, r2
+ BIC_rri(d, d, 0xff);
+ BIC_rri(d, d, 0xff00);
+ ORR_rrrLSRi(d, d, REG_WORK2, 16); // orr r7, r7, r3, lsr #16
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG); // eor r2, r2, #0x20000000 (ARM carry -> 68k borrow)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ emit_word(i);
+ skip_word(0); //<value>: (pad literal to a full word)
+
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_test_b_rr,(RR1 d, RR1 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ SXTB_rr(REG_WORK1, s);
+ SXTB_rr(REG_WORK2, d);
+#else
+ LSL_rri(REG_WORK1, s, 24); // lsl r2, r6, #24
+ LSL_rri(REG_WORK2, d, 24); // lsl r3, r7, #24
+#endif
+
+ TST_rr(REG_WORK2, REG_WORK1); // tst r3, r2 (sets N/Z for the byte)
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (test clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_test_l_ri,(RR4 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 16); // ldr r2, [pc, #16] ; <value>
+#endif
+ TST_rr(d, REG_WORK1); // tst r7, r2
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (test clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(i);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_test_l_rr,(RR4 d, RR4 s))
+{
+ TST_rr(d, s); // tst r7, r6
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (test clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_test_w_rr,(RR2 d, RR2 s))
+{
+#ifdef ARMV6_ASSEMBLY
+ SXTH_rr(REG_WORK1, s);
+ SXTH_rr(REG_WORK2, d);
+#else
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, r6, #16
+ LSL_rri(REG_WORK2, d, 16); // lsl r3, r7, #16
+#endif
+
+ TST_rr(REG_WORK2, REG_WORK1); // tst r3, r2 (sets N/Z for the word)
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (test clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_xor_b,(RW1 d, RR1 s))
+{
+ AND_rri(REG_WORK1, s, 0xFF); // and r2, %[s], 0xFF
+ EOR_rrr(d, d, REG_WORK1); // eor %[d], %[d], r2
+ LSLS_rri(REG_WORK1, d, 24); // lsls r2, %[d], #24 (set N/Z from the result byte)
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (logical op clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_xor_w,(RW2 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, s); // UXTH r2, %[s]
+#else
+ BIC_rri(REG_WORK1, s, 0xff000000); // bic r2, %[s], #0xff000000
+ BIC_rri(REG_WORK1, REG_WORK1, 0x00ff0000); // bic r2, r2, #0x00ff0000
+#endif
+ EOR_rrr(d, d, REG_WORK1); // eor %[d], %[d], r2
+ LSLS_rri(REG_WORK1, d, 16); // lsls r2, %[d], #16 (set N/Z from the result word)
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (logical op clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_xor_l,(RW4 d, RR4 s))
+{
+ EORS_rrr(d, d, s); // eors r7, r7, r6
+
+ MRS_CPSR(REG_WORK1); // mrs r2, CPSR
+ BIC_rri(REG_WORK1, REG_WORK1, ARM_CV_FLAGS); // bic r2, r2, #0x30000000 (logical op clears C and V)
+ MSR_CPSR_r(REG_WORK1); // msr CPSR_fc, r2
+}
+
+LOWFUNC(NONE,NONE,2,raw_sign_extend_16_rr,(W4 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ SXTH_rr(d, s); // sxth %[d],%[s]
+#else
+ LSL_rri(d, s, 16); // lsl r6, r7, #16
+ ASR_rri(d, d, 16); // asr r6, r6, #16
+#endif
+}
+
+LOWFUNC(NONE,NONE,2,raw_sign_extend_8_rr,(W4 d, RR1 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ SXTB_rr(d, s); // SXTB %[d],%[s]
+#else
+ ROR_rri(d, s, 8); // ror r6, r7, #8 (byte into top lane without clobbering s)
+ ASR_rri(d, d, 24); // asr r6, r6, #24
+#endif
+}
+
+LOWFUNC(NONE,NONE,2,raw_zero_extend_8_rr,(W4 d, RR1 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTB_rr(d, s); // UXTB %[d], %[s]
+#else
+ ROR_rri(d, s, 8); // ror r2, r1, #8 (byte into top lane without clobbering s)
+ LSR_rri(d, d, 24); // lsr r2, r2, #24
+#endif
+}
+
+LOWFUNC(NONE,NONE,2,raw_zero_extend_16_rr,(W4 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(d, s); // UXTH %[d], %[s]
+#else
+ BIC_rri(d, s, 0xff000000); // bic %[d], %[s], #0xff000000
+ BIC_rri(d, d, 0x00ff0000); // bic %[d], %[d], #0x00ff0000
+#endif
+}
+
+static inline void raw_dec_sp(int off)
+{
+ if (off) {
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ SUB_rrr(RSP_INDEX, RSP_INDEX, REG_WORK1); // sub r7, r7, r2
+ B_i(0); // b <jp> (skip inline literal)
+ //<value>:
+ emit_long(off);
+ }
+}
+
+static inline void raw_inc_sp(int off)
+{
+ if (off) {
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ ADD_rrr(RSP_INDEX, RSP_INDEX, REG_WORK1); // add r7, r7, r2
+ B_i(0); // b <jp> (skip inline literal)
+ //<value>:
+ emit_long(off);
+ }
+}
+
+static inline void raw_push_regs_to_preserve(void) {
+ PUSH_REGS(PRESERVE_MASK); // save callee-context registers around generated-code calls
+}
+
+static inline void raw_pop_preserved_regs(void) {
+ POP_REGS(PRESERVE_MASK);
+}
+
+// Verify!!!
+/* FLAGX is byte sized, and we *do* write it at that size */
+static inline void raw_load_flagx(uae_u32 t)
+{
+ raw_mov_l_rm(t,(uintptr)live.state[FLAGX].mem);
+}
+
+static inline void raw_flags_evicted(int r)
+{
+ //live.state[FLAGTMP].status=CLEAN;
+ live.state[FLAGTMP].status=INMEM;
+ live.state[FLAGTMP].realreg=-1;
+ /* We just "evicted" FLAGTMP. */
+ if (live.nat[r].nholds!=1) {
+ /* Huh? FLAGTMP's register should hold nothing else at this point */
+ abort();
+ }
+ live.nat[r].nholds=0;
+}
+
+static inline void raw_flags_init(void) {
+}
+
+static __inline__ void raw_flags_set_zero(int s, int tmp)
+{
+ // force Z in the saved flags (s) to the Z of the current CPSR; clobbers tmp
+ raw_mov_l_rr(tmp,s);
+ MRS_CPSR(s);
+ BIC_rri(s,s,ARM_Z_FLAG);
+ AND_rri(tmp,tmp,ARM_Z_FLAG);
+ EOR_rri(tmp,tmp,ARM_Z_FLAG);
+ ORR_rrr(s,s,tmp);
+ MSR_CPSR_r(s);
+}
+
+static inline void raw_flags_to_reg(int r)
+{
+ MRS_CPSR(r);
+ raw_mov_l_mr((uintptr)live.state[FLAGTMP].mem,r);
+ raw_flags_evicted(r);
+}
+
+static inline void raw_reg_to_flags(int r)
+{
+ MSR_CPSR_r(r); // msr CPSR_fc, %r
+}
+
+/* Apparently, there are enough instructions between flag store and
+ flag reload to avoid the partial memory stall */
+static inline void raw_load_flagreg(uae_u32 t)
+{
+ raw_mov_l_rm(t,(uintptr)live.state[FLAGTMP].mem);
+}
+
+/* Inherited from the x86 code generator (where %eax is clobbered without
+ fucomi); on ARM the FP path below is unimplemented anyway */
+#define FFLAG_NREG_CLOBBER_CONDITION !have_cmov
+#define FFLAG_NREG R0_INDEX
+#define FLAG_NREG2 -1
+#define FLAG_NREG1 -1
+#define FLAG_NREG3 -1
+
+static inline void raw_fflags_into_flags(int r)
+{
+ jit_unimplemented("raw_fflags_into_flags %x", r);
+}
+
+static inline void raw_fp_init(void)
+{
+ int i;
+
+ for (i=0;i<N_FREGS;i++)
+ live.spos[i]=-2;
+ live.tos=-1; /* Stack is empty */
+}
+
+// Verify
+static inline void raw_fp_cleanup_drop(void)
+{
+D(panicbug("raw_fp_cleanup_drop"));
+
+ while (live.tos>=1) {
+// emit_byte(0xde); (x87 fcompp left over from the x86 backend)
+// emit_byte(0xd9);
+ live.tos-=2;
+ }
+ while (live.tos>=0) {
+// emit_byte(0xdd); (x87 fstp left over from the x86 backend)
+// emit_byte(0xd8);
+ live.tos--;
+ }
+ raw_fp_init();
+}
+
+LOWFUNC(NONE,WRITE,2,raw_fmov_mr_drop,(MEMPTRW m, FR r))
+{
+ jit_unimplemented("raw_fmov_mr_drop %x %x", m, r);
+}
+
+LOWFUNC(NONE,WRITE,2,raw_fmov_mr,(MEMPTRW m, FR r))
+{
+ jit_unimplemented("raw_fmov_mr %x %x", m, r);
+}
+
+LOWFUNC(NONE,READ,2,raw_fmov_rm,(FW r, MEMPTRR m))
+{
+ jit_unimplemented("raw_fmov_rm %x %x", r, m);
+}
+
+LOWFUNC(NONE,NONE,2,raw_fmov_rr,(FW d, FR s))
+{
+ jit_unimplemented("raw_fmov_rr %x %x", d, s);
+}
+
+static inline void raw_emit_nop_filler(int nbytes)
+{
+ nbytes >>= 2; // ARM instructions are 4 bytes each
+ while(nbytes--) { NOP(); }
+}
+
+static inline void raw_emit_nop(void)
+{
+ NOP();
+}
+
+#ifdef UAE
+static
+#endif
+void compiler_status() {
+ jit_log("compiled code starts at %p, current at %p (size 0x%x)", compiled_code, current_compile_p, (unsigned int)(current_compile_p - compiled_code));
+}
+
+//
+// ARM doesn't have bsf, but clz is a good alternative instruction for it
+//
+static bool target_check_bsf(void)
+{
+ return false;
+}
+
+static void raw_init_cpu(void)
+{
+ /* Have CMOV support, because ARM supports conditions on all instructions */
+ have_cmov = true;
+
+ align_loops = 0;
+ align_jumps = 0;
+
+ raw_flags_init();
+}
+
+//
+// Arm instructions
+//
+LOWFUNC(WRITE,NONE,2,raw_ADD_l_rr,(RW4 d, RR4 s))
+{
+ ADD_rrr(d, d, s);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ADD_l_rri,(RW4 d, RR4 s, IMM i))
+{
+ ADD_rri(d, s, i);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_SUB_l_rri,(RW4 d, RR4 s, IMM i))
+{
+ SUB_rri(d, s, i);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_AND_b_rr,(RW1 d, RR1 s))
+{
+ MVN_rrLSLi(REG_WORK1, s, 24); // mvn r2, %[s], lsl #24
+ MVN_rrLSRi(REG_WORK1, REG_WORK1, 24); // mvn r2, r2, lsr #24 (= s with bits 8-31 forced to 1)
+ AND_rrr(d, d, REG_WORK1); // and %[d], %[d], r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_AND_l_rr,(RW4 d, RR4 s))
+{
+ AND_rrr(d, d, s);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_AND_l_ri,(RW4 d, IMM i))
+{
+ AND_rri(d, d, i);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_AND_w_rr,(RW2 d, RR2 s))
+{
+ MVN_rrLSLi(REG_WORK1, s, 16); // mvn r2, %[s], lsl #16
+ MVN_rrLSRi(REG_WORK1, REG_WORK1, 16); // mvn r2, r2, lsr #16 (= s with bits 16-31 forced to 1)
+ AND_rrr(d, d, REG_WORK1); // and %[d], %[d], r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_EOR_b_rr,(RW1 d, RR1 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTB_rr(REG_WORK1, s); // uxtb r2, %[s]
+#else
+ AND_rri(REG_WORK1, s, 0xFF); // and r2, %[s], 0xFF
+#endif
+ EOR_rrr(d, d, REG_WORK1); // eor %[d], %[d], r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_EOR_l_rr,(RW4 d, RR4 s))
+{
+ EOR_rrr(d, d, s); // eor r7, r7, r6
+}
+
+LOWFUNC(WRITE,NONE,2,raw_EOR_w_rr,(RW2 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, s); // UXTH r2, %[s]
+ EOR_rrr(d, d, REG_WORK1); // eor %[d], %[d], r2
+#else
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, %[s], #16
+ EOR_rrrLSRi(d, d, REG_WORK1, 16); // eor %[d], %[d], r2, lsr #16
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_LDR_l_ri,(RW4 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(d, RPC_INDEX, offs); // ldr r2, [pc, #offs]
+#else
+ LDR_rR(d, RPC_INDEX); // ldr %[d], [pc] ; <value>
+ B_i(0); // b <jp> (skip inline literal)
+ emit_long(i);
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_MOV_l_ri8,(RW4 d, IMM i))
+{
+ MOV_ri(d, i);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ORR_b_rr,(RW1 d, RR1 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTB_rr(REG_WORK1, s); // uxtb r2, %[s]
+#else
+ AND_rri(REG_WORK1, s, 0xFF); // and r2, %[s], 0xFF
+#endif
+ ORR_rrr(d, d, REG_WORK1); // orr %[d], %[d], r2
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ORR_l_rr,(RW4 d, RR4 s))
+{
+ ORR_rrr(d, d, s);
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ORR_w_rr,(RW2 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK1, s); // UXTH r2, %[s]
+ ORR_rrr(d, d, REG_WORK1); // orr %[d], %[d], r2
+#else
+ LSL_rri(REG_WORK1, s, 16); // lsl r2, %[s], #16
+ ORR_rrrLSRi(d, d, REG_WORK1, 16); // orr %[d], %[d], r2, lsr #16
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,raw_ROR_l_ri,(RW4 r, IMM i))
+{
+ ROR_rri(r, r, i);
+}
+
+//
+// compuemu_support used raw calls
+//
+LOWFUNC(WRITE,RMW,2,compemu_raw_add_l_mi,(IMM d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ data_check_end(8, 24);
+ long target = data_long(d, 24);
+ long offs = get_data_offset(target);
+
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; d
+ LDR_rR(REG_WORK2, REG_WORK1); // ldr r3, [r2]
+
+ offs = data_long_offs(s);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; s
+
+ ADD_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // add r3, r3, r2
+
+ offs = get_data_offset(target);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; d
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 20); // ldr r2, [pc, #20] ; <value>
+ LDR_rR(REG_WORK2, REG_WORK1); // ldr r3, [r2]
+
+ LDR_rRI(REG_WORK1, RPC_INDEX, 16); // ldr r2, [pc, #16] ; <value2>
+
+ ADD_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // add r3, r3, r2
+
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+
+ B_i(1); // b <jp> (skip the two inline literals)
+
+ //<value>:
+ emit_long(d);
+ //<value2>:
+ emit_long(s);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,compemu_raw_and_l_ri,(RW4 d, IMM i))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(i);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+ AND_rrr(d, d, REG_WORK1); // and %[d], %[d], r2
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ AND_rrr(d, d, REG_WORK1); // and %[d], %[d], r2
+ B_i(0); // b <jp> (skip inline literal)
+ emit_long(i);
+#endif
+}
+
+LOWFUNC(NONE,NONE,1,compemu_raw_bswap_32,(RW4 r))
+{
+#if defined(ARMV6_ASSEMBLY)
+ REV_rr(r,r); // rev %[r],%[r]
+#else
+ EOR_rrrRORi(REG_WORK1, r, r, 16); // eor r2, r6, r6, ror #16
+ BIC_rri(REG_WORK1, REG_WORK1, 0xff0000); // bic r2, r2, #0xff0000
+ ROR_rri(r, r, 8); // ror r6, r6, #8
+ EOR_rrrLSRi(r, r, REG_WORK1, 8); // eor r6, r6, r2, lsr #8 (classic pre-ARMv6 byte swap)
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,compemu_raw_bt_l_ri,(RR4 r, IMM i))
+{
+ int imm = (1 << (i & 0x1f));
+
+ MRS_CPSR(REG_WORK2); // mrs r3, CPSR
+ TST_ri(r, imm); // tst r6, #(1 << (i & 0x1f))
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG); // bic r3, r3, #0x20000000
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG); // orr r3, r3, #0x20000000
+ MSR_CPSR_r(REG_WORK2); // msr CPSR_fc, r3
+}
+
+LOWFUNC(NONE,READ,5,compemu_raw_cmov_l_rm_indexed,(W4 d, IMM base, RR4 index, IMM factor, IMM cond))
+{
+ int shft;
+ switch(factor) {
+ case 1: shft=0; break;
+ case 2: shft=1; break;
+ case 4: shft=2; break;
+ case 8: shft=3; break;
+ default: abort();
+ }
+
+ switch (cond) {
+ case 9: // LS
+ jit_unimplemented("cmov LS not implemented");
+ abort();
+ case 8: // HI
+ jit_unimplemented("cmov HI not implemented");
+ abort();
+ default:
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(base);
+ CC_LDR_rRI(cond, REG_WORK1, RPC_INDEX, offs); // ldrcc r2, [pc, #offs] ; <value>
+ CC_LDR_rRR_LSLi(cond, d, REG_WORK1, index, shft); // ldrcc %[d], [r2, %[index], lsl #[shift]]
+#else
+ CC_LDR_rRI(cond, REG_WORK1, RPC_INDEX, 4); // ldrcc r2, [pc, #4] ; <value>
+ CC_LDR_rRR_LSLi(cond, d, REG_WORK1, index, shft); // ldrcc %[d], [r2, %[index], lsl #[shift]]
+ B_i(0); // b <jp>
+#endif
+ break;
+ }
+#if !defined(USE_DATA_BUFFER)
+ emit_long(base); // <value>:
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,READ,2,compemu_raw_cmp_l_mi,(MEMR d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ data_check_end(8, 16);
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; d
+ LDR_rR(REG_WORK1, REG_WORK1); // ldr r2, [r2]
+
+ offs = data_long_offs(s);
+ LDR_rRI(REG_WORK2, RPC_INDEX, offs); // ldr r3, [pc, #offs] ; s
+
+ CMP_rr(REG_WORK1, REG_WORK2); // cmp r2, r3
+
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 12); // ldr r2, [pc, #24] ; <value>
+ LDR_rR(REG_WORK1, REG_WORK1); // ldr r2, [r2]
+
+ LDR_rRI(REG_WORK2, RPC_INDEX, 8); // ldr r3, [pc, #20] ; <value2>
+
+ CMP_rr(REG_WORK1, REG_WORK2); // cmp r2, r3
+
+ B_i(1); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<value2>:
+ emit_long(s);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,READ,2,compemu_raw_cmp_l_mi8,(MEMR d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 8); // ldr r2, [pc, #8] ; <value>
+#endif
+ LDR_rR(REG_WORK1, REG_WORK1); // ldr r2, [r2]
+
+ CMP_ri(REG_WORK1, s); // cmp r2, r3
+
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,NONE,3,compemu_raw_lea_l_brr,(W4 d, RR4 s, IMM offset))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(offset);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+ ADD_rrr(d, s, REG_WORK1); // add r7, r6, r2
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ ADD_rrr(d, s, REG_WORK1); // add r7, r6, r2
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(offset);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,NONE,4,compemu_raw_lea_l_rr_indexed,(W4 d, RR4 s, RR4 index, IMM factor))
+{
+ int shft;
+ switch(factor) {
+ case 1: shft=0; break;
+ case 2: shft=1; break;
+ case 4: shft=2; break;
+ case 8: shft=3; break;
+ default: abort();
+ }
+
+ ADD_rrrLSLi(d, s, index, shft); // ADD R7,R6,R5,LSL #2
+}
+
+LOWFUNC(NONE,WRITE,2,compemu_raw_mov_b_mr,(IMM d, RR1 s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+ STRB_rR(s, REG_WORK1); // strb r6, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ STRB_rR(s, REG_WORK1); // strb r6, [r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,WRITE,2,compemu_raw_mov_l_mi,(MEMW d, IMM s))
+{
+ // TODO: optimize imm
+
+#if defined(USE_DATA_BUFFER)
+ data_check_end(8, 12);
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; d
+ offs = data_long_offs(s);
+ LDR_rRI(REG_WORK2, RPC_INDEX, offs); // ldr r3, [pc, #offs] ; s
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 8); // ldr r2, [pc, #8] ; <value>
+ LDR_rRI(REG_WORK2, RPC_INDEX, 8); // ldr r3, [pc, #8] ; <value2>
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+ B_i(1); // b <jp>
+
+ emit_long(d); //<value>:
+ emit_long(s); //<value2>:
+
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,WRITE,2,compemu_raw_mov_l_mr,(IMM d, RR4 s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+ STR_rR(s, REG_WORK1); // str r3, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ STR_rR(s, REG_WORK1); // str r3, [r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,NONE,2,compemu_raw_mov_l_ri,(W4 d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(s);
+ LDR_rRI(d, RPC_INDEX, offs); // ldr %[d], [pc, #offs] ; <value>
+#else
+ LDR_rR(d, RPC_INDEX); // ldr %[d], [pc] ; <value>
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(s);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,READ,2,compemu_raw_mov_l_rm,(W4 d, MEMR s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(s);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+ LDR_rR(d, REG_WORK1); // ldr r7, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ LDR_rR(d, REG_WORK1); // ldr r7, [r2]
+ B_i(0); // b <jp>
+
+ emit_long(s); //<value>:
+ //<jp>:
+#endif
+}
+
+LOWFUNC(NONE,NONE,2,compemu_raw_mov_l_rr,(W4 d, RR4 s))
+{
+ MOV_rr(d, s); // mov %[d], %[s]
+}
+
+LOWFUNC(NONE,WRITE,2,compemu_raw_mov_w_mr,(IMM d, RR2 s))
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(d);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+ STRH_rR(s, REG_WORK1); // strh r3, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #4] ; <value>
+ STRH_rR(s, REG_WORK1); // strh r3, [r2]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,RMW,2,compemu_raw_sub_l_mi,(MEMRW d, IMM s))
+{
+#if defined(USE_DATA_BUFFER)
+ data_check_end(8, 24);
+ long target = data_long(d, 24);
+ long offs = get_data_offset(target);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; d
+ LDR_rR(REG_WORK2, REG_WORK1); // ldr r3, [r2]
+
+ offs = data_long_offs(s);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; s
+
+ SUBS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // subs r3, r3, r2
+
+ offs = get_data_offset(target);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; d
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 20); // ldr r2, [pc, #32] ; <value>
+ LDR_rR(REG_WORK2, REG_WORK1); // ldr r3, [r2]
+
+ LDR_rRI(REG_WORK1, RPC_INDEX, 16); // ldr r2, [pc, #28] ; <value2>
+
+ SUBS_rrr(REG_WORK2, REG_WORK2, REG_WORK1); // subs r3, r3, r2
+
+ LDR_rRI(REG_WORK1, RPC_INDEX, 4); // ldr r2, [pc, #16] ; <value>
+ STR_rR(REG_WORK2, REG_WORK1); // str r3, [r2]
+
+ B_i(1); // b <jp>
+
+ //<value>:
+ emit_long(d);
+ //<value2>:
+ emit_long(s);
+ //<jp>:
+#endif
+}
+
+LOWFUNC(WRITE,NONE,2,compemu_raw_test_l_rr,(RR4 d, RR4 s))
+{
+ TST_rr(d, s); // tst r7, r6
+}
+
+LOWFUNC(NONE,NONE,2,compemu_raw_zero_extend_16_rr,(W4 d, RR2 s))
+{
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(d, s); // UXTH %[d], %[s]
+#else
+ BIC_rri(d, s, 0xff000000); // bic %[d], %[s], #0xff000000
+ BIC_rri(d, d, 0x00ff0000); // bic %[d], %[d], #0x00ff0000
+#endif
+}
+
+static inline void compemu_raw_call(uae_u32 t)
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(t);
+ LDR_rRI(REG_WORK1, RPC_INDEX, offs); // ldr r2, [pc, #offs] ; <value>
+#else
+ LDR_rRI(REG_WORK1, RPC_INDEX, 12); // ldr r2, [pc, #12] ; <value>
+#endif
+ PUSH(RLR_INDEX); // push {lr}
+ BLX_r(REG_WORK1); // blx r2
+ POP(RLR_INDEX); // pop {lr}
+#if !defined(USE_DATA_BUFFER)
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(t);
+ //<jp>:
+#endif
+}
+
+#if defined(UAE)
+static inline void compemu_raw_call_r(RR4 r)
+{
+ PUSH(RLR_INDEX); // push {lr}
+ BLX_r(r); // blx r0
+ POP(RLR_INDEX); // pop {lr}
+}
+#endif
+
+static inline void compemu_raw_jcc_l_oponly(int cc)
+{
+ switch (cc) {
+ case 9: // LS
+ BEQ_i(0); // beq <dojmp>
+ BCC_i(2); // bcc <jp>
+
+ //<dojmp>:
+ LDR_rR(REG_WORK1, RPC_INDEX); // ldr r2, [pc] ; <value>
+ BX_r(REG_WORK1); // bx r2
+ break;
+
+ case 8: // HI
+ BEQ_i(3); // beq <jp>
+ BCS_i(2); // bcs <jp>
+
+ //<dojmp>:
+ LDR_rR(REG_WORK1, RPC_INDEX); // ldr r2, [pc] ; <value>
+ BX_r(REG_WORK1); // bx r2
+ break;
+
+ default:
+ CC_LDR_rRI(cc, REG_WORK1, RPC_INDEX, 4); // ldrlt r2, [pc, #4] ; <value>
+ CC_BX_r(cc, REG_WORK1); // bxlt r2
+ B_i(0); // b <jp>
+ break;
+ }
+ // emit of target will be done by caller
+}
+
+static inline void compemu_raw_jl(uae_u32 t)
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(t);
+ CC_LDR_rRI(NATIVE_CC_LT, RPC_INDEX, RPC_INDEX, offs); // ldrlt pc, [pc, offs]
+#else
+ CC_LDR_rR(NATIVE_CC_LT, RPC_INDEX, RPC_INDEX); // ldrlt pc, [pc]
+ B_i(0); // b <jp>
+
+ //<value>:
+ emit_long(t);
+ //<jp>:
+#endif
+}
+
+static inline void compemu_raw_jmp(uae_u32 t)
+{
+ LDR_rR(REG_WORK1, RPC_INDEX); // ldr r2, [pc]
+ BX_r(REG_WORK1); // bx r2
+ emit_long(t);
+}
+
+static inline void compemu_raw_jmp_m_indexed(uae_u32 base, uae_u32 r, uae_u32 m)
+{
+ int shft;
+ switch(m) {
+ case 1: shft=0; break;
+ case 2: shft=1; break;
+ case 4: shft=2; break;
+ case 8: shft=3; break;
+ default: abort();
+ }
+
+ LDR_rR(REG_WORK1, RPC_INDEX); // ldr r2, [pc] ; <value>
+ LDR_rRR_LSLi(RPC_INDEX, REG_WORK1, r, shft); // ldr pc, [r2, r6, lsl #3]
+ emit_long(base);
+}
+
+static inline void compemu_raw_jmp_r(RR4 r)
+{
+ BX_r(r);
+}
+
+static inline void compemu_raw_jnz(uae_u32 t)
+{
+#if defined(USE_DATA_BUFFER)
+ long offs = data_long_offs(t);
+ CC_LDR_rRI(NATIVE_CC_NE, RPC_INDEX, RPC_INDEX, offs); // ldrne pc, [pc, offs]
+#else
+ CC_LDR_rR(NATIVE_CC_NE, RPC_INDEX, RPC_INDEX); // ldrne pc, [pc]
+ B_i(0); // b <jp>
+
+ emit_long(t);
+ //<jp>:
+#endif
+}
+
+static inline void compemu_raw_jz_b_oponly(void)
+{
+ BNE_i(2); // bne jp
+ LDRSB_rRI(REG_WORK1, RPC_INDEX, 3); // ldrsb r2,[pc,#3]
+ ADD_rrr(RPC_INDEX, RPC_INDEX, REG_WORK1); // add pc,pc,r2
+
+ skip_n_bytes(3); /* additionally 1 byte skipped by generic code */
+
+ // <jp:>
+}
+
+static inline void compemu_raw_jnz_b_oponly(void)
+{
+ BEQ_i(2); // beq jp
+ LDRSB_rRI(REG_WORK1, RPC_INDEX, 3); // ldrsb r2,[pc,#3]
+ ADD_rrr(RPC_INDEX, RPC_INDEX, REG_WORK1); // add pc,pc,r2
+
+ skip_n_bytes(3); /* additionally 1 byte skipped by generic code */
+
+ // <jp:>
+}
+
+static inline void compemu_raw_branch(IMM d)
+{
+ B_i((d >> 2) - 1);
+}
--- /dev/null
+/*
+ * compiler/codegen_arm.h - ARM code generator
+ *
+ * Copyright (c) 2013 Jens Heitmann of ARAnyM dev team (see AUTHORS)
+ *
+ * Inspired by Christian Bauer's Basilisk II
+ *
+ * This file is part of the ARAnyM project which builds a new and powerful
+ * TOS/FreeMiNT compatible virtual machine running on almost any hardware.
+ *
+ * JIT compiler m68k -> ARM
+ *
+ * Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
+ * This file is derived from CCG, copyright 1999-2003 Ian Piumarta
+ * Adaptation for Basilisk II and improvements, copyright 2000-2004 Gwenole Beauchesne
+ * Portions related to CPU detection come from linux/arch/i386/kernel/setup.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef ARM_RTASM_H
+#define ARM_RTASM_H
+
+/* NOTES
+ *
+ */
+
+/* --- Configuration ------------------------------------------------------- */
+
+/* CPSR flags */
+
+#define ARM_N_FLAG 0x80000000
+#define ARM_Z_FLAG 0x40000000
+#define ARM_C_FLAG 0x20000000
+#define ARM_V_FLAG 0x10000000
+#define ARM_Q_FLAG 0x08000000
+#define ARM_CV_FLAGS (ARM_C_FLAG|ARM_V_FLAG)
+
+#define ARM_GE3 0x00080000
+#define ARM_GE2 0x00040000
+#define ARM_GE1 0x00020000
+#define ARM_GE0 0x00010000
+
+/* --- Macros -------------------------------------------------------------- */
+
+/* ========================================================================= */
+/* --- UTILITY ------------------------------------------------------------- */
+/* ========================================================================= */
+
+#define _W(c) emit_long(c)
+#define _LS2_ADDR(a) (((a) & 0x01f0000f) | (((a) & 0xf0) << 4))
+
+/* ========================================================================= */
+/* --- ENCODINGS ----------------------------------------------------------- */
+/* ========================================================================= */
+
+#define IMM32(c) (((c) & 0xffffff00) == 0 ? (c) : \
+ ((c) & 0x3fffffc0) == 0 ? (0x100 | (((c) >> 30) & 0x3) | ((((c) & 0x0000003f) << 2))) : \
+ ((c) & 0x0ffffff0) == 0 ? (0x200 | (((c) >> 28) & 0xf) | ((((c) & 0x0000000f) << 4))) : \
+ ((c) & 0x03fffffc) == 0 ? (0x300 | (((c) >> 26) & 0x3f) | ((((c) & 0x00000003) << 6)) ) : \
+ ((c) & 0x00ffffff) == 0 ? (0x400 | (((c) >> 24) & 0xff)) : \
+ ((c) & 0xc03fffff) == 0 ? (0x500 | ((c) >> 22)) : \
+ ((c) & 0xf00fffff) == 0 ? (0x600 | ((c) >> 20)) : \
+ ((c) & 0xfc03ffff) == 0 ? (0x700 | ((c) >> 18)) : \
+ ((c) & 0xff00ffff) == 0 ? (0x800 | ((c) >> 16)) : \
+ ((c) & 0xffc03fff) == 0 ? (0x900 | ((c) >> 14)) : \
+ ((c) & 0xfff00fff) == 0 ? (0xa00 | ((c) >> 12)) : \
+ ((c) & 0xfffc03ff) == 0 ? (0xb00 | ((c) >> 10)) : \
+ ((c) & 0xffff00ff) == 0 ? (0xc00 | ((c) >> 8)) : \
+ ((c) & 0xffffc03f) == 0 ? (0xd00 | ((c) >> 6)) : \
+ ((c) & 0xfffff00f) == 0 ? (0xe00 | ((c) >> 4)) : \
+ ((c) & 0xfffffc03) == 0 ? (0xf00 | ((c) >> 2)) : \
+ 0\
+ )
+
+#define SHIFT_IMM(c) (0x02000000 | (IMM32((c))))
+
+#define UNSHIFTED_IMM8(c) (0x02000000 | (c))
+#define SHIFT_IMM8_ROR(c,r) (0x02000000 | (c) | ((r >> 1) << 8))
+
+#define SHIFT_REG(Rm) (Rm)
+#define SHIFT_LSL_i(Rm,s) ((Rm) | ((s) << 7))
+#define SHIFT_LSL_r(Rm,Rs) ((Rm) | ((Rs) << 8) | 0x10)
+#define SHIFT_LSR_i(Rm,s) ((Rm) | ((s) << 7) | 0x20)
+#define SHIFT_LSR_r(Rm,Rs) ((Rm) | ((Rs) << 8) | 0x30)
+#define SHIFT_ASR_i(Rm,s) ((Rm) | ((s) << 7) | 0x40)
+#define SHIFT_ASR_r(Rm,Rs) ((Rm) | ((Rs) << 8) | 0x50)
+#define SHIFT_ROR_i(Rm,s) ((Rm) | ((s) << 7) | 0x60)
+#define SHIFT_ROR_r(Rm,Rs) ((Rm) | ((Rs) << 8) | 0x70)
+#define SHIFT_RRX(Rm) ((Rm) | 0x60)
+#define SHIFT_PK(Rm,s) ((Rm) | ((s) << 7))
+
+/* Load/Store addressings */
+#define ADR_ADD(v) ((1 << 23) | (v))
+#define ADR_SUB(v) (v)
+
+#define ADR_IMM(v) ((v) | (1 << 24))
+#define ADR_IMMPOST(v) (v)
+#define ADR_REG(Rm) ((1 << 25) | (1 << 24) | (Rm))
+#define ADR_REGPOST(Rm) ((1 << 25) | (Rm))
+
+#define ADD_IMM(i) ADR_ADD(ADR_IMM(i))
+#define SUB_IMM(i) ADR_SUB(ADR_IMM(i))
+
+#define ADD_REG(Rm) ADR_ADD(ADR_REG(Rm))
+#define SUB_REG(Rm) ADR_SUB(ADR_REG(Rm))
+
+#define ADD_LSL(Rm,i) ADR_ADD(ADR_REG(Rm) | ((i) << 7))
+#define SUB_LSL(Rm,i) ADR_SUB(ADR_REG(Rm) | ((i) << 7))
+
+#define ADD_LSR(Rm,i) ADR_ADD(ADR_REG(Rm) | (((i) & 0x1f) << 7) | (1 << 5))
+#define SUB_LSR(Rm,i) ADR_SUB(ADR_REG(Rm) | (((i) & 0x1f) << 7) | (1 << 5))
+
+#define ADD_ASR(Rm,i) ADR_ADD(ADR_REG(Rm) | (((i) & 0x1f) << 7) | (2 << 5))
+#define SUB_ASR(Rm,i) ADR_SUB(ADR_REG(Rm) | (((i) & 0x1f) << 7) | (2 << 5))
+
+#define ADD_ROR(Rm,i) ADR_ADD(ADR_REG(Rm) | (((i) & 0x1f) << 7) | (3 << 5))
+#define SUB_ROR(Rm,i) ADR_SUB(ADR_REG(Rm) | (((i) & 0x1f) << 7) | (3 << 5))
+
+#define ADD_RRX(Rm) ADR_ADD(ADR_REG(Rm) | (3 << 5))
+#define SUB_RRX(Rm) ADR_SUB(ADR_REG(Rm) | (3 << 5))
+
+#define ADD2_IMM(i) ADR_ADD(i | (1 << 22))
+#define SUB2_IMM(i) ADR_SUB(i | (1 << 22))
+
+#define ADD2_REG(Rm) ADR_ADD(Rm)
+#define SUB2_REG(Rm) ADR_SUB(Rm)
+
+/* MOV, MVN */
+#define _OP1(cc,op,s,Rd,shift) _W(((cc) << 28) | ((op) << 21) | ((s) << 20) | ((Rd) << 12) | (shift))
+
+/* CMP, CMN, TST, TEQ */
+#define _OP2(cc,op,Rn,shift) _W(((cc) << 28) | ((op) << 21) | (1 << 20) | ((Rn) << 16) | (shift))
+
+/* ADD, SUB, RSB, ADC, SBC, RSC, AND, BIC, EOR, ORR */
+#define _OP3(cc,op,s,Rd,Rn,shift) _W(((cc) << 28) | ((op) << 21) | ((s) << 20) | ((Rn) << 16) | ((Rd) << 12) | (shift))
+
+/* LDR, STR */
+#define _LS1(cc,l,b,Rd,Rn,a) _W(((cc) << 28) | (0x01 << 26) | ((l) << 20) | ((b) << 22) | ((Rn) << 16) | ((Rd) << 12) | (a))
+#define _LS2(cc,p,l,s,h,Rd,Rn,a) _W(((cc) << 28) | ((p) << 24) | ((l) << 20) | ((Rn) << 16) | ((Rd) << 12) | ((s) << 6) | ((h) << 5) | 0x90 | _LS2_ADDR((a)))
+
+/* ========================================================================= */
+/* --- OPCODES ------------------------------------------------------------- */
+/* ========================================================================= */
+
+/* Branch instructions */
+#ifndef __ANDROID__
+enum {
+ _B, _BL, _BLX, _BX, _BXJ
+};
+#endif
+
+/* Data processing instructions */
+enum {
+ _AND = 0,
+ _EOR,
+ _SUB,
+ _RSB,
+ _ADD,
+ _ADC,
+ _SBC,
+ _RSC,
+ _TST,
+ _TEQ,
+ _CMP,
+ _CMN,
+ _ORR,
+ _MOV,
+ _BIC,
+ _MVN
+};
+
+/* Single instruction Multiple Data (SIMD) instructions */
+
+/* Multiply instructions */
+
+/* Parallel instructions */
+
+/* Extend instructions */
+
+/* Miscellaneous arithmetic instructions */
+
+/* Status register transfer instructions */
+
+/* Load and Store instructions */
+
+/* Coprocessor instructions */
+
+/* Exception generation instructions */
+
+/* ========================================================================= */
+/* --- ASSEMBLER ----------------------------------------------------------- */
+/* ========================================================================= */
+
+#define NOP() _W(0xe1a00000)
+#define SETEND_BE() _W(0xf1010200)
+#define SETEND_LE() _W(0xf1010000)
+
+/* Data processing instructions */
+
+/* Opcodes Type 1 */
+/* MOVcc rd,#i */
+#define CC_MOV_ri8(cc,Rd,i) _OP1(cc,_MOV,0,Rd,UNSHIFTED_IMM8(i))
+/* MOVcc Rd,#i ROR #s */
+#define CC_MOV_ri8RORi(cc,Rd,i,s) _OP1(cc,_MOV,0,Rd,SHIFT_IMM8_ROR(i,s))
+#define CC_MOV_ri(cc,Rd,i) _OP1(cc,_MOV,0,Rd,SHIFT_IMM(i))
+#define CC_MOV_rr(cc,Rd,Rm) _OP1(cc,_MOV,0,Rd,SHIFT_REG(Rm))
+#define CC_MOV_rrLSLi(cc,Rd,Rm,i) _OP1(cc,_MOV,0,Rd,SHIFT_LSL_i(Rm,i))
+#define CC_MOV_rrLSLr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,0,Rd,SHIFT_LSL_r(Rm,Rs))
+#define CC_MOV_rrLSRi(cc,Rd,Rm,i) _OP1(cc,_MOV,0,Rd,SHIFT_LSR_i(Rm,i))
+#define CC_MOV_rrLSRr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,0,Rd,SHIFT_LSR_r(Rm,Rs))
+#define CC_MOV_rrASRi(cc,Rd,Rm,i) _OP1(cc,_MOV,0,Rd,SHIFT_ASR_i(Rm,i))
+#define CC_MOV_rrASRr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,0,Rd,SHIFT_ASR_r(Rm,Rs))
+#define CC_MOV_rrRORi(cc,Rd,Rm,i) _OP1(cc,_MOV,0,Rd,SHIFT_ROR_i(Rm,i))
+#define CC_MOV_rrRORr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,0,Rd,SHIFT_ROR_r(Rm,Rs))
+#define CC_MOV_rrRRX(cc,Rd,Rm) _OP1(cc,_MOV,0,Rd,SHIFT_RRX(Rm))
+
+/* MOV rd,#i */
+#define MOV_ri8(Rd,i) CC_MOV_ri8(NATIVE_CC_AL,Rd,i)
+/* MOV Rd,#i ROR #s */
+#define MOV_ri8RORi(Rd,i,s) CC_MOV_ri8RORi(NATIVE_CC_AL,Rd,i,s)
+#define MOV_ri(Rd,i) CC_MOV_ri(NATIVE_CC_AL,Rd,i)
+#define MOV_rr(Rd,Rm) CC_MOV_rr(NATIVE_CC_AL,Rd,Rm)
+#define MOV_rrLSLi(Rd,Rm,i) CC_MOV_rrLSLi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOV_rrLSLr(Rd,Rm,Rs) CC_MOV_rrLSLr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOV_rrLSRi(Rd,Rm,i) CC_MOV_rrLSRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOV_rrLSRr(Rd,Rm,Rs) CC_MOV_rrLSRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOV_rrASRi(Rd,Rm,i) CC_MOV_rrASRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOV_rrASRr(Rd,Rm,Rs) CC_MOV_rrASRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOV_rrRORi(Rd,Rm,i) CC_MOV_rrRORi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOV_rrRORr(Rd,Rm,Rs) CC_MOV_rrRORr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOV_rrRRX(Rd,Rm) CC_MOV_rrRRX(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_MOVS_ri(cc,Rd,i) _OP1(cc,_MOV,1,Rd,SHIFT_IMM(i))
+#define CC_MOVS_rr(cc,Rd,Rm) _OP1(cc,_MOV,1,Rd,SHIFT_REG(Rm))
+#define CC_MOVS_rrLSLi(cc,Rd,Rm,i) _OP1(cc,_MOV,1,Rd,SHIFT_LSL_i(Rm,i))
+#define CC_MOVS_rrLSLr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,1,Rd,SHIFT_LSL_r(Rm,Rs))
+#define CC_MOVS_rrLSRi(cc,Rd,Rm,i) _OP1(cc,_MOV,1,Rd,SHIFT_LSR_i(Rm,i))
+#define CC_MOVS_rrLSRr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,1,Rd,SHIFT_LSR_r(Rm,Rs))
+#define CC_MOVS_rrASRi(cc,Rd,Rm,i) _OP1(cc,_MOV,1,Rd,SHIFT_ASR_i(Rm,i))
+#define CC_MOVS_rrASRr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,1,Rd,SHIFT_ASR_r(Rm,Rs))
+#define CC_MOVS_rrRORi(cc,Rd,Rm,i) _OP1(cc,_MOV,1,Rd,SHIFT_ROR_i(Rm,i))
+#define CC_MOVS_rrRORr(cc,Rd,Rm,Rs) _OP1(cc,_MOV,1,Rd,SHIFT_ROR_r(Rm,Rs))
+#define CC_MOVS_rrRRX(cc,Rd,Rm) _OP1(cc,_MOV,1,Rd,SHIFT_RRX(Rm))
+
+#define MOVS_ri(Rd,i) CC_MOVS_ri(NATIVE_CC_AL,Rd,i)
+#define MOVS_rr(Rd,Rm) CC_MOVS_rr(NATIVE_CC_AL,Rd,Rm)
+#define MOVS_rrLSLi(Rd,Rm,i) CC_MOVS_rrLSLi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOVS_rrLSLr(Rd,Rm,Rs) CC_MOVS_rrLSLr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOVS_rrLSRi(Rd,Rm,i) CC_MOVS_rrLSRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOVS_rrLSRr(Rd,Rm,Rs) CC_MOVS_rrLSRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOVS_rrASRi(Rd,Rm,i) CC_MOVS_rrASRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOVS_rrASRr(Rd,Rm,Rs) CC_MOVS_rrASRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOVS_rrRORi(Rd,Rm,i) CC_MOVS_rrRORi(NATIVE_CC_AL,Rd,Rm,i)
+#define MOVS_rrRORr(Rd,Rm,Rs) CC_MOVS_rrRORr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MOVS_rrRRX(Rd,Rm) CC_MOVS_rrRRX(NATIVE_CC_AL,Rd,Rm)
+
+/* MVNcc rd,#i */
+#define CC_MVN_ri8(cc,Rd,i) _OP1(cc,_MVN,0,Rd,UNSHIFTED_IMM8(i))
+/* MVNcc Rd,#i ROR #s */
+#define CC_MVN_ri8RORi(cc,Rd,i,s) _OP1(cc,_MVN,0,Rd,SHIFT_IMM8_ROR(i,s))
+#define CC_MVN_ri(cc,Rd,i) _OP1(cc,_MVN,0,Rd,SHIFT_IMM(i))
+#define CC_MVN_rr(cc,Rd,Rm) _OP1(cc,_MVN,0,Rd,SHIFT_REG(Rm))
+#define CC_MVN_rrLSLi(cc,Rd,Rm,i) _OP1(cc,_MVN,0,Rd,SHIFT_LSL_i(Rm,i))
+#define CC_MVN_rrLSLr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,0,Rd,SHIFT_LSL_r(Rm,Rs))
+#define CC_MVN_rrLSRi(cc,Rd,Rm,i) _OP1(cc,_MVN,0,Rd,SHIFT_LSR_i(Rm,i))
+#define CC_MVN_rrLSRr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,0,Rd,SHIFT_LSR_r(Rm,Rs))
+#define CC_MVN_rrASRi(cc,Rd,Rm,i) _OP1(cc,_MVN,0,Rd,SHIFT_ASR_i(Rm,i))
+#define CC_MVN_rrASRr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,0,Rd,SHIFT_ASR_r(Rm,Rs))
+#define CC_MVN_rrRORi(cc,Rd,Rm,i) _OP1(cc,_MVN,0,Rd,SHIFT_ROR_i(Rm,i))
+#define CC_MVN_rrRORr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,0,Rd,SHIFT_ROR_r(Rm,Rs))
+#define CC_MVN_rrRRX(cc,Rd,Rm) _OP1(cc,_MVN,0,Rd,SHIFT_RRX(Rm))
+
+/* MVN rd,#i */
+#define MVN_ri8(Rd,i) CC_MVN_ri8(NATIVE_CC_AL,Rd,i)
+/* MVN Rd,#i ROR #s */
+#define MVN_ri8RORi(Rd,i,s) CC_MVN_ri8RORi(NATIVE_CC_AL,Rd,i,s)
+#define MVN_ri(Rd,i) CC_MVN_ri(NATIVE_CC_AL,Rd,i)
+#define MVN_rr(Rd,Rm) CC_MVN_rr(NATIVE_CC_AL,Rd,Rm)
+#define MVN_rrLSLi(Rd,Rm,i) CC_MVN_rrLSLi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVN_rrLSLr(Rd,Rm,Rs) CC_MVN_rrLSLr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVN_rrLSRi(Rd,Rm,i) CC_MVN_rrLSRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVN_rrLSRr(Rd,Rm,Rs) CC_MVN_rrLSRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVN_rrASRi(Rd,Rm,i) CC_MVN_rrASRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVN_rrASRr(Rd,Rm,Rs) CC_MVN_rrASRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVN_rrRORi(Rd,Rm,i) CC_MVN_rrRORi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVN_rrRORr(Rd,Rm,Rs) CC_MVN_rrRORr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVN_rrRRX(Rd,Rm) CC_MVN_rrRRX(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_MVNS_ri(cc,Rd,i) _OP1(cc,_MVN,1,Rd,SHIFT_IMM(i))
+#define CC_MVNS_rr(cc,Rd,Rm) _OP1(cc,_MVN,1,Rd,SHIFT_REG(Rm))
+#define CC_MVNS_rrLSLi(cc,Rd,Rm,i) _OP1(cc,_MVN,1,Rd,SHIFT_LSL_i(Rm,i))
+#define CC_MVNS_rrLSLr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,1,Rd,SHIFT_LSL_r(Rm,Rs))
+#define CC_MVNS_rrLSRi(cc,Rd,Rm,i) _OP1(cc,_MVN,1,Rd,SHIFT_LSR_i(Rm,i))
+#define CC_MVNS_rrLSRr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,1,Rd,SHIFT_LSR_r(Rm,Rs))
+#define CC_MVNS_rrASRi(cc,Rd,Rm,i) _OP1(cc,_MVN,1,Rd,SHIFT_ASR_i(Rm,i))
+#define CC_MVNS_rrASRr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,1,Rd,SHIFT_ASR_r(Rm,Rs))
+#define CC_MVNS_rrRORi(cc,Rd,Rm,i) _OP1(cc,_MVN,1,Rd,SHIFT_ROR_i(Rm,i))
+#define CC_MVNS_rrRORr(cc,Rd,Rm,Rs) _OP1(cc,_MVN,1,Rd,SHIFT_ROR_r(Rm,Rs))
+#define CC_MVNS_rrRRX(cc,Rd,Rm) _OP1(cc,_MVN,1,Rd,SHIFT_RRX(Rm))
+
+#define MVNS_ri(Rd,i) CC_MVNS_ri(NATIVE_CC_AL,Rd,i)
+#define MVNS_rr(Rd,Rm) CC_MVNS_rr(NATIVE_CC_AL,Rd,Rm)
+#define MVNS_rrLSLi(Rd,Rm,i) CC_MVNS_rrLSLi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVNS_rrLSLr(Rd,Rm,Rs) CC_MVNS_rrLSLr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVNS_rrLSRi(Rd,Rm,i) CC_MVNS_rrLSRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVNS_rrLSRr(Rd,Rm,Rs) CC_MVNS_rrLSRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVNS_rrASRi(Rd,Rm,i) CC_MVNS_rrASRi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVNS_rrASRr(Rd,Rm,Rs) CC_MVNS_rrASRr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVNS_rrRORi(Rd,Rm,i) CC_MVNS_rrRORi(NATIVE_CC_AL,Rd,Rm,i)
+#define MVNS_rrRORr(Rd,Rm,Rs) CC_MVNS_rrRORr(NATIVE_CC_AL,Rd,Rm,Rs)
+#define MVNS_rrRRX(Rd,Rm) CC_MVNS_rrRRX(NATIVE_CC_AL,Rd,Rm)
+
+/* Opcodes Type 2 */
+#define CC_CMP_ri(cc,Rn,i) _OP2(cc,_CMP,Rn,SHIFT_IMM(i))
+#define CC_CMP_rr(cc,Rn,Rm) _OP2(cc,_CMP,Rn,SHIFT_REG(Rm))
+#define CC_CMP_rrLSLi(cc,Rn,Rm,i) _OP2(cc,_CMP,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_CMP_rrLSLr(cc,Rn,Rm,Rs) _OP2(cc,_CMP,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_CMP_rrLSRi(cc,Rn,Rm,i) _OP2(cc,_CMP,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_CMP_rrLSRr(cc,Rn,Rm,Rs) _OP2(cc,_CMP,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_CMP_rrASRi(cc,Rn,Rm,i) _OP2(cc,_CMP,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_CMP_rrASRr(cc,Rn,Rm,Rs) _OP2(cc,_CMP,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_CMP_rrRORi(cc,Rn,Rm,i) _OP2(cc,_CMP,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_CMP_rrRORr(cc,Rn,Rm,Rs) _OP2(cc,_CMP,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_CMP_rrRRX(cc,Rn,Rm) _OP2(cc,_CMP,Rn,SHIFT_RRX(Rm))
+
+#define CMP_ri(Rn,i) CC_CMP_ri(NATIVE_CC_AL,Rn,i)
+#define CMP_rr(Rn,Rm) CC_CMP_rr(NATIVE_CC_AL,Rn,Rm)
+#define CMP_rrLSLi(Rn,Rm,i) CC_CMP_rrLSLi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMP_rrLSLr(Rn,Rm,Rs) CC_CMP_rrLSLr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMP_rrLSRi(Rn,Rm,i) CC_CMP_rrLSRi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMP_rrLSRr(Rn,Rm,Rs) CC_CMP_rrLSRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMP_rrASRi(Rn,Rm,i) CC_CMP_rrASRi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMP_rrASRr(Rn,Rm,Rs) CC_CMP_rrASRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMP_rrRORi(Rn,Rm,i) CC_CMP_rrRORi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMP_rrRORr(Rn,Rm,Rs) CC_CMP_rrRORr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMP_rrRRX(Rn,Rm) CC_CMP_rrRRX(NATIVE_CC_AL,Rn,Rm)
+
+#define CC_CMN_ri(cc,Rn,i) _OP2(cc,_CMN,Rn,SHIFT_IMM(i))
+#define CC_CMN_rr(cc,Rn,r) _OP2(cc,_CMN,Rn,SHIFT_REG(r))
+#define CC_CMN_rrLSLi(cc,Rn,Rm,i) _OP2(cc,_CMN,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_CMN_rrLSLr(cc,Rn,Rm,Rs) _OP2(cc,_CMN,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_CMN_rrLSRi(cc,Rn,Rm,i) _OP2(cc,_CMN,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_CMN_rrLSRr(cc,Rn,Rm,Rs) _OP2(cc,_CMN,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_CMN_rrASRi(cc,Rn,Rm,i) _OP2(cc,_CMN,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_CMN_rrASRr(cc,Rn,Rm,Rs) _OP2(cc,_CMN,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_CMN_rrRORi(cc,Rn,Rm,i) _OP2(cc,_CMN,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_CMN_rrRORr(cc,Rn,Rm,Rs) _OP2(cc,_CMN,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_CMN_rrRRX(cc,Rn,Rm) _OP2(cc,_CMN,Rn,SHIFT_RRX(Rm))
+
+#define CMN_ri(Rn,i) CC_CMN_ri(NATIVE_CC_AL,Rn,i)
+#define CMN_rr(Rn,r) CC_CMN_rr(NATIVE_CC_AL,Rn,r)
+#define CMN_rrLSLi(Rn,Rm,i) CC_CMN_rrLSLi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMN_rrLSLr(Rn,Rm,Rs) CC_CMN_rrLSLr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMN_rrLSRi(Rn,Rm,i) CC_CMN_rrLSRi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMN_rrLSRr(Rn,Rm,Rs) CC_CMN_rrLSRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMN_rrASRi(Rn,Rm,i) CC_CMN_rrASRi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMN_rrASRr(Rn,Rm,Rs) CC_CMN_rrASRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMN_rrRORi(Rn,Rm,i) CC_CMN_rrRORi(NATIVE_CC_AL,Rn,Rm,i)
+#define CMN_rrRORr(Rn,Rm,Rs) CC_CMN_rrRORr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define CMN_rrRRX(Rn,Rm) CC_CMN_rrRRX(NATIVE_CC_AL,Rn,Rm)
+
+#define CC_TST_ri(cc,Rn,i) _OP2(cc,_TST,Rn,SHIFT_IMM(i))
+#define CC_TST_rr(cc,Rn,r) _OP2(cc,_TST,Rn,SHIFT_REG(r))
+#define CC_TST_rrLSLi(cc,Rn,Rm,i) _OP2(cc,_TST,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_TST_rrLSLr(cc,Rn,Rm,Rs) _OP2(cc,_TST,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_TST_rrLSRi(cc,Rn,Rm,i) _OP2(cc,_TST,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_TST_rrLSRr(cc,Rn,Rm,Rs) _OP2(cc,_TST,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_TST_rrASRi(cc,Rn,Rm,i) _OP2(cc,_TST,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_TST_rrASRr(cc,Rn,Rm,Rs) _OP2(cc,_TST,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_TST_rrRORi(cc,Rn,Rm,i) _OP2(cc,_TST,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_TST_rrRORr(cc,Rn,Rm,Rs) _OP2(cc,_TST,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_TST_rrRRX(cc,Rn,Rm) _OP2(cc,_TST,Rn,SHIFT_RRX(Rm))
+
+#define TST_ri(Rn,i) CC_TST_ri(NATIVE_CC_AL,Rn,i)
+#define TST_rr(Rn,r) CC_TST_rr(NATIVE_CC_AL,Rn,r)
+#define TST_rrLSLi(Rn,Rm,i) CC_TST_rrLSLi(NATIVE_CC_AL,Rn,Rm,i)
+#define TST_rrLSLr(Rn,Rm,Rs) CC_TST_rrLSLr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TST_rrLSRi(Rn,Rm,i) CC_TST_rrLSRi(NATIVE_CC_AL,Rn,Rm,i)
+#define TST_rrLSRr(Rn,Rm,Rs) CC_TST_rrLSRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TST_rrASRi(Rn,Rm,i) CC_TST_rrASRi(NATIVE_CC_AL,Rn,Rm,i)
+#define TST_rrASRr(Rn,Rm,Rs) CC_TST_rrASRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TST_rrRORi(Rn,Rm,i) CC_TST_rrRORi(NATIVE_CC_AL,Rn,Rm,i)
+#define TST_rrRORr(Rn,Rm,Rs) CC_TST_rrRORr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TST_rrRRX(Rn,Rm) CC_TST_rrRRX(NATIVE_CC_AL,Rn,Rm)
+
+#define CC_TEQ_ri(cc,Rn,i) _OP2(cc,_TEQ,Rn,SHIFT_IMM(i))
+#define CC_TEQ_rr(cc,Rn,r) _OP2(cc,_TEQ,Rn,SHIFT_REG(r))
+#define CC_TEQ_rrLSLi(cc,Rn,Rm,i) _OP2(cc,_TEQ,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_TEQ_rrLSLr(cc,Rn,Rm,Rs) _OP2(cc,_TEQ,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_TEQ_rrLSRi(cc,Rn,Rm,i) _OP2(cc,_TEQ,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_TEQ_rrLSRr(cc,Rn,Rm,Rs) _OP2(cc,_TEQ,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_TEQ_rrASRi(cc,Rn,Rm,i) _OP2(cc,_TEQ,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_TEQ_rrASRr(cc,Rn,Rm,Rs) _OP2(cc,_TEQ,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_TEQ_rrRORi(cc,Rn,Rm,i) _OP2(cc,_TEQ,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_TEQ_rrRORr(cc,Rn,Rm,Rs) _OP2(cc,_TEQ,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_TEQ_rrRRX(cc,Rn,Rm) _OP2(cc,_TEQ,Rn,SHIFT_RRX(Rm))
+
+#define TEQ_ri(Rn,i) CC_TEQ_ri(NATIVE_CC_AL,Rn,i)
+#define TEQ_rr(Rn,r) CC_TEQ_rr(NATIVE_CC_AL,Rn,r)
+#define TEQ_rrLSLi(Rn,Rm,i) CC_TEQ_rrLSLi(NATIVE_CC_AL,Rn,Rm,i)
+#define TEQ_rrLSLr(Rn,Rm,Rs) CC_TEQ_rrLSLr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TEQ_rrLSRi(Rn,Rm,i) CC_TEQ_rrLSRi(NATIVE_CC_AL,Rn,Rm,i)
+#define TEQ_rrLSRr(Rn,Rm,Rs) CC_TEQ_rrLSRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TEQ_rrASRi(Rn,Rm,i) CC_TEQ_rrASRi(NATIVE_CC_AL,Rn,Rm,i)
+#define TEQ_rrASRr(Rn,Rm,Rs) CC_TEQ_rrASRr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TEQ_rrRORi(Rn,Rm,i) CC_TEQ_rrRORi(NATIVE_CC_AL,Rn,Rm,i)
+#define TEQ_rrRORr(Rn,Rm,Rs) CC_TEQ_rrRORr(NATIVE_CC_AL,Rn,Rm,Rs)
+#define TEQ_rrRRX(Rn,Rm) CC_TEQ_rrRRX(NATIVE_CC_AL,Rn,Rm)
+
+/* Opcodes Type 3 */
+#define CC_AND_rri(cc,Rd,Rn,i) _OP3(cc,_AND,0,Rd,Rn,SHIFT_IMM(i)) /* ANDcc Rd,Rn,<op2> — S bit clear (flags untouched) */
+#define CC_AND_rrr(cc,Rd,Rn,Rm) _OP3(cc,_AND,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_AND_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_AND_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_AND_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_AND_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_AND_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_AND_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_AND_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_AND_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_AND_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_AND,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define AND_rri(Rd,Rn,i) CC_AND_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) AND aliases */
+#define AND_rrr(Rd,Rn,Rm) CC_AND_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define AND_rrrLSLi(Rd,Rn,Rm,i) CC_AND_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define AND_rrrLSLr(Rd,Rn,Rm,Rs) CC_AND_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define AND_rrrLSRi(Rd,Rn,Rm,i) CC_AND_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define AND_rrrLSRr(Rd,Rn,Rm,Rs) CC_AND_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define AND_rrrASRi(Rd,Rn,Rm,i) CC_AND_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define AND_rrrASRr(Rd,Rn,Rm,Rs) CC_AND_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define AND_rrrRORi(Rd,Rn,Rm,i) CC_AND_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define AND_rrrRORr(Rd,Rn,Rm,Rs) CC_AND_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define AND_rrrRRX(Rd,Rn,Rm) CC_AND_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_ANDS_rri(cc,Rd,Rn,i) _OP3(cc,_AND,1,Rd,Rn,SHIFT_IMM(i)) /* ANDScc — S bit set (updates CPSR flags) */
+#define CC_ANDS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_AND,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_ANDS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_ANDS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_ANDS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_ANDS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_ANDS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_ANDS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_ANDS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_AND,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_ANDS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_AND,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_ANDS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_AND,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define ANDS_rri(Rd,Rn,i) CC_ANDS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) ANDS aliases */
+#define ANDS_rrr(Rd,Rn,Rm) CC_ANDS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define ANDS_rrrLSLi(Rd,Rn,Rm,i) CC_ANDS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ANDS_rrrLSLr(Rd,Rn,Rm,Rs) CC_ANDS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ANDS_rrrLSRi(Rd,Rn,Rm,i) CC_ANDS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ANDS_rrrLSRr(Rd,Rn,Rm,Rs) CC_ANDS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ANDS_rrrASRi(Rd,Rn,Rm,i) CC_ANDS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ANDS_rrrASRr(Rd,Rn,Rm,Rs) CC_ANDS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ANDS_rrrRORi(Rd,Rn,Rm,i) CC_ANDS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ANDS_rrrRORr(Rd,Rn,Rm,Rs) CC_ANDS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ANDS_rrrRRX(Rd,Rn,Rm) CC_ANDS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_EOR_rri(cc,Rd,Rn,i) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_IMM(i)) /* EORcc Rd,Rn,<op2> — S bit clear */
+#define CC_EOR_rrr(cc,Rd,Rn,Rm) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_EOR_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_EOR_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_EOR_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_EOR_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_EOR_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_EOR_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_EOR_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_EOR_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_EOR_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_EOR,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define EOR_rri(Rd,Rn,i) CC_EOR_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) EOR aliases */
+#define EOR_rrr(Rd,Rn,Rm) CC_EOR_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define EOR_rrrLSLi(Rd,Rn,Rm,i) CC_EOR_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EOR_rrrLSLr(Rd,Rn,Rm,Rs) CC_EOR_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EOR_rrrLSRi(Rd,Rn,Rm,i) CC_EOR_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EOR_rrrLSRr(Rd,Rn,Rm,Rs) CC_EOR_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EOR_rrrASRi(Rd,Rn,Rm,i) CC_EOR_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EOR_rrrASRr(Rd,Rn,Rm,Rs) CC_EOR_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EOR_rrrRORi(Rd,Rn,Rm,i) CC_EOR_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EOR_rrrRORr(Rd,Rn,Rm,Rs) CC_EOR_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EOR_rrrRRX(Rd,Rn,Rm) CC_EOR_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_EORS_rri(cc,Rd,Rn,i) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_IMM(i)) /* EORScc — S bit set */
+#define CC_EORS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_EORS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_EORS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_EORS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_EORS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_EORS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_EORS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_EORS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_EORS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_EORS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_EOR,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define EORS_rri(Rd,Rn,i) CC_EORS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) EORS aliases */
+#define EORS_rrr(Rd,Rn,Rm) CC_EORS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define EORS_rrrLSLi(Rd,Rn,Rm,i) CC_EORS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EORS_rrrLSLr(Rd,Rn,Rm,Rs) CC_EORS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EORS_rrrLSRi(Rd,Rn,Rm,i) CC_EORS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EORS_rrrLSRr(Rd,Rn,Rm,Rs) CC_EORS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EORS_rrrASRi(Rd,Rn,Rm,i) CC_EORS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EORS_rrrASRr(Rd,Rn,Rm,Rs) CC_EORS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EORS_rrrRORi(Rd,Rn,Rm,i) CC_EORS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define EORS_rrrRORr(Rd,Rn,Rm,Rs) CC_EORS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define EORS_rrrRRX(Rd,Rn,Rm) CC_EORS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_SUB_rri(cc,Rd,Rn,i) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_IMM(i)) /* SUBcc Rd,Rn,<op2> — S bit clear */
+#define CC_SUB_rrr(cc,Rd,Rn,Rm) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_SUB_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_SUB_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_SUB_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_SUB_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_SUB_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_SUB_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_SUB_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_SUB_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_SUB_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_SUB,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define SUB_rri(Rd,Rn,i) CC_SUB_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) SUB aliases */
+#define SUB_rrr(Rd,Rn,Rm) CC_SUB_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define SUB_rrrLSLi(Rd,Rn,Rm,i) CC_SUB_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUB_rrrLSLr(Rd,Rn,Rm,Rs) CC_SUB_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUB_rrrLSRi(Rd,Rn,Rm,i) CC_SUB_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUB_rrrLSRr(Rd,Rn,Rm,Rs) CC_SUB_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUB_rrrASRi(Rd,Rn,Rm,i) CC_SUB_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUB_rrrASRr(Rd,Rn,Rm,Rs) CC_SUB_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUB_rrrRORi(Rd,Rn,Rm,i) CC_SUB_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUB_rrrRORr(Rd,Rn,Rm,Rs) CC_SUB_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUB_rrrRRX(Rd,Rn,Rm) CC_SUB_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_SUBS_rri(cc,Rd,Rn,i) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_IMM(i)) /* SUBScc — S bit set */
+#define CC_SUBS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_SUBS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_SUBS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_SUBS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_SUBS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_SUBS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_SUBS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_SUBS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_SUBS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_SUBS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_SUB,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define SUBS_rri(Rd,Rn,i) CC_SUBS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) SUBS aliases */
+#define SUBS_rrr(Rd,Rn,Rm) CC_SUBS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define SUBS_rrrLSLi(Rd,Rn,Rm,i) CC_SUBS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUBS_rrrLSLr(Rd,Rn,Rm,Rs) CC_SUBS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUBS_rrrLSRi(Rd,Rn,Rm,i) CC_SUBS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUBS_rrrLSRr(Rd,Rn,Rm,Rs) CC_SUBS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUBS_rrrASRi(Rd,Rn,Rm,i) CC_SUBS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUBS_rrrASRr(Rd,Rn,Rm,Rs) CC_SUBS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUBS_rrrRORi(Rd,Rn,Rm,i) CC_SUBS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SUBS_rrrRORr(Rd,Rn,Rm,Rs) CC_SUBS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SUBS_rrrRRX(Rd,Rn,Rm) CC_SUBS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_RSB_rri(cc,Rd,Rn,i) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_IMM(i)) /* RSBcc Rd,Rn,<op2> (reverse subtract) — S bit clear */
+#define CC_RSB_rrr(cc,Rd,Rn,Rm) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_RSB_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_RSB_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_RSB_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_RSB_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_RSB_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_RSB_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_RSB_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_RSB_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_RSB_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_RSB,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define RSB_rri(Rd,Rn,i) CC_RSB_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) RSB aliases */
+#define RSB_rrr(Rd,Rn,Rm) CC_RSB_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define RSB_rrrLSLi(Rd,Rn,Rm,i) CC_RSB_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSB_rrrLSLr(Rd,Rn,Rm,Rs) CC_RSB_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSB_rrrLSRi(Rd,Rn,Rm,i) CC_RSB_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSB_rrrLSRr(Rd,Rn,Rm,Rs) CC_RSB_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSB_rrrASRi(Rd,Rn,Rm,i) CC_RSB_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSB_rrrASRr(Rd,Rn,Rm,Rs) CC_RSB_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSB_rrrRORi(Rd,Rn,Rm,i) CC_RSB_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSB_rrrRORr(Rd,Rn,Rm,Rs) CC_RSB_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSB_rrrRRX(Rd,Rn,Rm) CC_RSB_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_RSBS_rri(cc,Rd,Rn,i) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_IMM(i)) /* RSBScc — S bit set */
+#define CC_RSBS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_RSBS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_RSBS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_RSBS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_RSBS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_RSBS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_RSBS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_RSBS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_RSBS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_RSBS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_RSB,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define RSBS_rri(Rd,Rn,i) CC_RSBS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) RSBS aliases */
+#define RSBS_rrr(Rd,Rn,Rm) CC_RSBS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define RSBS_rrrLSLi(Rd,Rn,Rm,i) CC_RSBS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSBS_rrrLSLr(Rd,Rn,Rm,Rs) CC_RSBS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSBS_rrrLSRi(Rd,Rn,Rm,i) CC_RSBS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSBS_rrrLSRr(Rd,Rn,Rm,Rs) CC_RSBS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSBS_rrrASRi(Rd,Rn,Rm,i) CC_RSBS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSBS_rrrASRr(Rd,Rn,Rm,Rs) CC_RSBS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSBS_rrrRORi(Rd,Rn,Rm,i) CC_RSBS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSBS_rrrRORr(Rd,Rn,Rm,Rs) CC_RSBS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSBS_rrrRRX(Rd,Rn,Rm) CC_RSBS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_ADD_rri8(cc,Rd,Rn,i) _OP3(cc,_ADD,0,Rd,Rn,UNSHIFTED_IMM8(i)) /* ADDcc Rd,Rn,#i8 — fixed UNSHIFT_IMM8 typo; helper is UNSHIFTED_IMM8 (cf. CC_ORR_rri8) */
+#define CC_ADD_rri8RORi(cc,Rd,Rn,i,s) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_IMM8_ROR(i,s)) /* ADDcc Rd,Rn,#i8 ROR #s — params renamed i,s: operand is an immediate, not a register (cf. CC_ORR_rri8RORi) */
+
+#define CC_ADD_rri(cc,Rd,Rn,i) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_IMM(i)) /* ADDcc Rd,Rn,<op2> — S bit clear */
+#define CC_ADD_rrr(cc,Rd,Rn,Rm) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_ADD_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_ADD_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_ADD_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_ADD_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_ADD_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_ADD_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_ADD_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_ADD_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_ADD_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_ADD,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define ADD_rri8(Rd,Rn,i) CC_ADD_rri8(NATIVE_CC_AL,Rd,Rn,i) /* dropped stray unused 'cc' parameter — AL aliases take no condition (cf. ORR_rri8) */
+#define ADD_rri8RORi(Rd,Rn,i,s) CC_ADD_rri8RORi(NATIVE_CC_AL,Rd,Rn,i,s) /* same copy/paste fix as ADD_rri8 */
+
+#define ADD_rri(Rd,Rn,i) CC_ADD_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) ADD aliases */
+#define ADD_rrr(Rd,Rn,Rm) CC_ADD_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define ADD_rrrLSLi(Rd,Rn,Rm,i) CC_ADD_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADD_rrrLSLr(Rd,Rn,Rm,Rs) CC_ADD_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADD_rrrLSRi(Rd,Rn,Rm,i) CC_ADD_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADD_rrrLSRr(Rd,Rn,Rm,Rs) CC_ADD_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADD_rrrASRi(Rd,Rn,Rm,i) CC_ADD_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADD_rrrASRr(Rd,Rn,Rm,Rs) CC_ADD_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADD_rrrRORi(Rd,Rn,Rm,i) CC_ADD_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADD_rrrRORr(Rd,Rn,Rm,Rs) CC_ADD_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADD_rrrRRX(Rd,Rn,Rm) CC_ADD_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_ADDS_rri(cc,Rd,Rn,i) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_IMM(i)) /* ADDScc — S bit set (updates CPSR flags) */
+#define CC_ADDS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_ADDS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_ADDS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_ADDS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_ADDS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_ADDS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_ADDS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_ADDS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_ADDS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_ADDS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_ADD,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define ADDS_rri(Rd,Rn,i) CC_ADDS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) ADDS aliases */
+#define ADDS_rrr(Rd,Rn,Rm) CC_ADDS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define ADDS_rrrLSLi(Rd,Rn,Rm,i) CC_ADDS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADDS_rrrLSLr(Rd,Rn,Rm,Rs) CC_ADDS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADDS_rrrLSRi(Rd,Rn,Rm,i) CC_ADDS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADDS_rrrLSRr(Rd,Rn,Rm,Rs) CC_ADDS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADDS_rrrASRi(Rd,Rn,Rm,i) CC_ADDS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADDS_rrrASRr(Rd,Rn,Rm,Rs) CC_ADDS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADDS_rrrRORi(Rd,Rn,Rm,i) CC_ADDS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADDS_rrrRORr(Rd,Rn,Rm,Rs) CC_ADDS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADDS_rrrRRX(Rd,Rn,Rm) CC_ADDS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_ADC_rri(cc,Rd,Rn,i) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_IMM(i)) /* ADCcc Rd,Rn,<op2> (add with carry) — S bit clear */
+#define CC_ADC_rrr(cc,Rd,Rn,Rm) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_ADC_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_ADC_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_ADC_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_ADC_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_ADC_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_ADC_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_ADC_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_ADC_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_ADC_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_ADC,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define ADC_rri(Rd,Rn,i) CC_ADC_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) ADC aliases */
+#define ADC_rrr(Rd,Rn,Rm) CC_ADC_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define ADC_rrrLSLi(Rd,Rn,Rm,i) CC_ADC_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADC_rrrLSLr(Rd,Rn,Rm,Rs) CC_ADC_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADC_rrrLSRi(Rd,Rn,Rm,i) CC_ADC_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADC_rrrLSRr(Rd,Rn,Rm,Rs) CC_ADC_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADC_rrrASRi(Rd,Rn,Rm,i) CC_ADC_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADC_rrrASRr(Rd,Rn,Rm,Rs) CC_ADC_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADC_rrrRORi(Rd,Rn,Rm,i) CC_ADC_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADC_rrrRORr(Rd,Rn,Rm,Rs) CC_ADC_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADC_rrrRRX(Rd,Rn,Rm) CC_ADC_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_ADCS_rri(cc,Rd,Rn,i) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_IMM(i)) /* ADCScc — S bit set */
+#define CC_ADCS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_ADCS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_ADCS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_ADCS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_ADCS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_ADCS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_ADCS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_ADCS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_ADCS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_ADCS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_ADC,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define ADCS_rri(Rd,Rn,i) CC_ADCS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) ADCS aliases */
+#define ADCS_rrr(Rd,Rn,Rm) CC_ADCS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define ADCS_rrrLSLi(Rd,Rn,Rm,i) CC_ADCS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADCS_rrrLSLr(Rd,Rn,Rm,Rs) CC_ADCS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADCS_rrrLSRi(Rd,Rn,Rm,i) CC_ADCS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADCS_rrrLSRr(Rd,Rn,Rm,Rs) CC_ADCS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADCS_rrrASRi(Rd,Rn,Rm,i) CC_ADCS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADCS_rrrASRr(Rd,Rn,Rm,Rs) CC_ADCS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADCS_rrrRORi(Rd,Rn,Rm,i) CC_ADCS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ADCS_rrrRORr(Rd,Rn,Rm,Rs) CC_ADCS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ADCS_rrrRRX(Rd,Rn,Rm) CC_ADCS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_SBC_rri(cc,Rd,Rn,i) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_IMM(i)) /* SBCcc Rd,Rn,<op2> (subtract with carry) — S bit clear */
+#define CC_SBC_rrr(cc,Rd,Rn,Rm) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_SBC_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_SBC_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_SBC_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_SBC_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_SBC_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_SBC_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_SBC_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_SBC_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_SBC_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_SBC,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define SBC_rri(Rd,Rn,i) CC_SBC_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) SBC aliases */
+#define SBC_rrr(Rd,Rn,Rm) CC_SBC_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define SBC_rrrLSLi(Rd,Rn,Rm,i) CC_SBC_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBC_rrrLSLr(Rd,Rn,Rm,Rs) CC_SBC_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBC_rrrLSRi(Rd,Rn,Rm,i) CC_SBC_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBC_rrrLSRr(Rd,Rn,Rm,Rs) CC_SBC_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBC_rrrASRi(Rd,Rn,Rm,i) CC_SBC_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBC_rrrASRr(Rd,Rn,Rm,Rs) CC_SBC_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBC_rrrRORi(Rd,Rn,Rm,i) CC_SBC_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBC_rrrRORr(Rd,Rn,Rm,Rs) CC_SBC_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBC_rrrRRX(Rd,Rn,Rm) CC_SBC_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_SBCS_rri(cc,Rd,Rn,i) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_IMM(i)) /* SBCScc — S bit set */
+#define CC_SBCS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_SBCS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_SBCS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_SBCS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_SBCS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_SBCS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_SBCS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_SBCS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_SBCS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_SBCS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_SBC,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define SBCS_rri(Rd,Rn,i) CC_SBCS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) SBCS aliases */
+#define SBCS_rrr(Rd,Rn,Rm) CC_SBCS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define SBCS_rrrLSLi(Rd,Rn,Rm,i) CC_SBCS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBCS_rrrLSLr(Rd,Rn,Rm,Rs) CC_SBCS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBCS_rrrLSRi(Rd,Rn,Rm,i) CC_SBCS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBCS_rrrLSRr(Rd,Rn,Rm,Rs) CC_SBCS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBCS_rrrASRi(Rd,Rn,Rm,i) CC_SBCS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBCS_rrrASRr(Rd,Rn,Rm,Rs) CC_SBCS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBCS_rrrRORi(Rd,Rn,Rm,i) CC_SBCS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define SBCS_rrrRORr(Rd,Rn,Rm,Rs) CC_SBCS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define SBCS_rrrRRX(Rd,Rn,Rm) CC_SBCS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_RSC_rri(cc,Rd,Rn,i) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_IMM(i)) /* RSCcc Rd,Rn,<op2> (reverse subtract with carry) — S bit clear */
+#define CC_RSC_rrr(cc,Rd,Rn,Rm) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_RSC_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_RSC_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_RSC_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_RSC_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_RSC_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_RSC_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_RSC_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_RSC_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_RSC_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_RSC,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define RSC_rri(Rd,Rn,i) CC_RSC_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) RSC aliases */
+#define RSC_rrr(Rd,Rn,Rm) CC_RSC_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define RSC_rrrLSLi(Rd,Rn,Rm,i) CC_RSC_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSC_rrrLSLr(Rd,Rn,Rm,Rs) CC_RSC_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSC_rrrLSRi(Rd,Rn,Rm,i) CC_RSC_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSC_rrrLSRr(Rd,Rn,Rm,Rs) CC_RSC_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSC_rrrASRi(Rd,Rn,Rm,i) CC_RSC_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSC_rrrASRr(Rd,Rn,Rm,Rs) CC_RSC_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSC_rrrRORi(Rd,Rn,Rm,i) CC_RSC_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSC_rrrRORr(Rd,Rn,Rm,Rs) CC_RSC_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSC_rrrRRX(Rd,Rn,Rm) CC_RSC_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_RSCS_rri(cc,Rd,Rn,i) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_IMM(i)) /* RSCScc — S bit set */
+#define CC_RSCS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_RSCS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_RSCS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_RSCS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_RSCS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_RSCS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_RSCS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_RSCS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_RSCS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_RSCS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_RSC,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define RSCS_rri(Rd,Rn,i) CC_RSCS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) RSCS aliases */
+#define RSCS_rrr(Rd,Rn,Rm) CC_RSCS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define RSCS_rrrLSLi(Rd,Rn,Rm,i) CC_RSCS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSCS_rrrLSLr(Rd,Rn,Rm,Rs) CC_RSCS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSCS_rrrLSRi(Rd,Rn,Rm,i) CC_RSCS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSCS_rrrLSRr(Rd,Rn,Rm,Rs) CC_RSCS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSCS_rrrASRi(Rd,Rn,Rm,i) CC_RSCS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSCS_rrrASRr(Rd,Rn,Rm,Rs) CC_RSCS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSCS_rrrRORi(Rd,Rn,Rm,i) CC_RSCS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define RSCS_rrrRORr(Rd,Rn,Rm,Rs) CC_RSCS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define RSCS_rrrRRX(Rd,Rn,Rm) CC_RSCS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+/* ORRcc Rd,Rn,#i */
+#define CC_ORR_rri8(cc,Rd,Rn,i) _OP3(cc,_ORR,0,Rd,Rn,UNSHIFTED_IMM8(i))
+/* ORRcc Rd,Rn,#i ROR #s */
+#define CC_ORR_rri8RORi(cc,Rd,Rn,i,s) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_IMM8_ROR(i,s))
+
+#define CC_ORR_rri(cc,Rd,Rn,i) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_IMM(i)) /* ORRcc Rd,Rn,<op2> — S bit clear */
+#define CC_ORR_rrr(cc,Rd,Rn,Rm) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_ORR_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_ORR_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_ORR_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_ORR_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_ORR_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_ORR_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_ORR_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_ORR_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_ORR_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_ORR,0,Rd,Rn,SHIFT_RRX(Rm))
+
+/* ORR Rd,Rn,#i */
+#define ORR_rri8(Rd,Rn,i) CC_ORR_rri8(NATIVE_CC_AL,Rd,Rn,i)
+/* ORR Rd,Rn,#i ROR #s */
+#define ORR_rri8RORi(Rd,Rn,i,s) CC_ORR_rri8RORi(NATIVE_CC_AL,Rd,Rn,i,s)
+
+#define ORR_rri(Rd,Rn,i) CC_ORR_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) ORR aliases */
+#define ORR_rrr(Rd,Rn,Rm) CC_ORR_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define ORR_rrrLSLi(Rd,Rn,Rm,i) CC_ORR_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORR_rrrLSLr(Rd,Rn,Rm,Rs) CC_ORR_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORR_rrrLSRi(Rd,Rn,Rm,i) CC_ORR_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORR_rrrLSRr(Rd,Rn,Rm,Rs) CC_ORR_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORR_rrrASRi(Rd,Rn,Rm,i) CC_ORR_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORR_rrrASRr(Rd,Rn,Rm,Rs) CC_ORR_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORR_rrrRORi(Rd,Rn,Rm,i) CC_ORR_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORR_rrrRORr(Rd,Rn,Rm,Rs) CC_ORR_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORR_rrrRRX(Rd,Rn,Rm) CC_ORR_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_ORRS_rri(cc,Rd,Rn,i) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_IMM(i)) /* ORRScc — S bit set */
+#define CC_ORRS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_ORRS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_ORRS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_ORRS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_ORRS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_ORRS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_ORRS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_ORRS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_ORRS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_ORRS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_ORR,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define ORRS_rri(Rd,Rn,i) CC_ORRS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) ORRS aliases */
+#define ORRS_rrr(Rd,Rn,Rm) CC_ORRS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define ORRS_rrrLSLi(Rd,Rn,Rm,i) CC_ORRS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORRS_rrrLSLr(Rd,Rn,Rm,Rs) CC_ORRS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORRS_rrrLSRi(Rd,Rn,Rm,i) CC_ORRS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORRS_rrrLSRr(Rd,Rn,Rm,Rs) CC_ORRS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORRS_rrrASRi(Rd,Rn,Rm,i) CC_ORRS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORRS_rrrASRr(Rd,Rn,Rm,Rs) CC_ORRS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORRS_rrrRORi(Rd,Rn,Rm,i) CC_ORRS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define ORRS_rrrRORr(Rd,Rn,Rm,Rs) CC_ORRS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define ORRS_rrrRRX(Rd,Rn,Rm) CC_ORRS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_BIC_rri(cc,Rd,Rn,i) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_IMM(i)) /* BICcc Rd,Rn,<op2> (bit clear: Rd = Rn AND NOT op2) — S bit clear */
+#define CC_BIC_rrr(cc,Rd,Rn,Rm) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_REG(Rm))
+#define CC_BIC_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_BIC_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_BIC_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_BIC_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_BIC_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_BIC_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_BIC_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_BIC_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_BIC_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_BIC,0,Rd,Rn,SHIFT_RRX(Rm))
+
+#define BIC_rri(Rd,Rn,i) CC_BIC_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) BIC aliases */
+#define BIC_rrr(Rd,Rn,Rm) CC_BIC_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define BIC_rrrLSLi(Rd,Rn,Rm,i) CC_BIC_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BIC_rrrLSLr(Rd,Rn,Rm,Rs) CC_BIC_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BIC_rrrLSRi(Rd,Rn,Rm,i) CC_BIC_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BIC_rrrLSRr(Rd,Rn,Rm,Rs) CC_BIC_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BIC_rrrASRi(Rd,Rn,Rm,i) CC_BIC_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BIC_rrrASRr(Rd,Rn,Rm,Rs) CC_BIC_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BIC_rrrRORi(Rd,Rn,Rm,i) CC_BIC_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BIC_rrrRORr(Rd,Rn,Rm,Rs) CC_BIC_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BIC_rrrRRX(Rd,Rn,Rm) CC_BIC_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_BICS_rri(cc,Rd,Rn,i) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_IMM(i)) /* BICScc — S bit set */
+#define CC_BICS_rrr(cc,Rd,Rn,Rm) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_REG(Rm))
+#define CC_BICS_rrrLSLi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_LSL_i(Rm,i))
+#define CC_BICS_rrrLSLr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_LSL_r(Rm,Rs))
+#define CC_BICS_rrrLSRi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_LSR_i(Rm,i))
+#define CC_BICS_rrrLSRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_LSR_r(Rm,Rs))
+#define CC_BICS_rrrASRi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_ASR_i(Rm,i))
+#define CC_BICS_rrrASRr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_ASR_r(Rm,Rs))
+#define CC_BICS_rrrRORi(cc,Rd,Rn,Rm,i) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_ROR_i(Rm,i))
+#define CC_BICS_rrrRORr(cc,Rd,Rn,Rm,Rs) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_ROR_r(Rm,Rs))
+#define CC_BICS_rrrRRX(cc,Rd,Rn,Rm) _OP3(cc,_BIC,1,Rd,Rn,SHIFT_RRX(Rm))
+
+#define BICS_rri(Rd,Rn,i) CC_BICS_rri(NATIVE_CC_AL,Rd,Rn,i) /* unconditional (AL) BICS aliases */
+#define BICS_rrr(Rd,Rn,Rm) CC_BICS_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define BICS_rrrLSLi(Rd,Rn,Rm,i) CC_BICS_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BICS_rrrLSLr(Rd,Rn,Rm,Rs) CC_BICS_rrrLSLr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BICS_rrrLSRi(Rd,Rn,Rm,i) CC_BICS_rrrLSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BICS_rrrLSRr(Rd,Rn,Rm,Rs) CC_BICS_rrrLSRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BICS_rrrASRi(Rd,Rn,Rm,i) CC_BICS_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BICS_rrrASRr(Rd,Rn,Rm,Rs) CC_BICS_rrrASRr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BICS_rrrRORi(Rd,Rn,Rm,i) CC_BICS_rrrRORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define BICS_rrrRORr(Rd,Rn,Rm,Rs) CC_BICS_rrrRORr(NATIVE_CC_AL,Rd,Rn,Rm,Rs)
+#define BICS_rrrRRX(Rd,Rn,Rm) CC_BICS_rrrRRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+/* Branch instructions */
+/* B/BL take a signed 24-bit word offset in bits [23:0]; the caller must
+ * pre-compute it relative to PC+8 (ARM pipeline). */
+#define CC_B_i(cc,i) _W(((cc) << 28) | (10 << 24) | (i))
+#define CC_BL_i(cc,i) _W(((cc) << 28) | (11 << 24) | (i))
+#define CC_BLX_r(cc,r) _W(((cc) << 28) | (0x12 << 20) | (3 << 4) | (0xfff << 8) | (r))
+#define CC_BX_r(cc,r) _W(((cc) << 28) | (0x12 << 20) | (1 << 4) | (0xfff << 8) | (r))
+#define CC_BXJ_r(cc,r) _W(((cc) << 28) | (0x12 << 20) | (2 << 4) | (0xfff << 8) | (r))
+
+#define BEQ_i(i) CC_B_i(NATIVE_CC_EQ,i)
+#define BNE_i(i) CC_B_i(NATIVE_CC_NE,i)
+#define BCS_i(i) CC_B_i(NATIVE_CC_CS,i)
+#define BCC_i(i) CC_B_i(NATIVE_CC_CC,i)
+#define BMI_i(i) CC_B_i(NATIVE_CC_MI,i)
+#define BPL_i(i) CC_B_i(NATIVE_CC_PL,i)
+#define BVS_i(i) CC_B_i(NATIVE_CC_VS,i)
+#define BVC_i(i) CC_B_i(NATIVE_CC_VC,i)
+#define BHI_i(i) CC_B_i(NATIVE_CC_HI,i)
+#define BLS_i(i) CC_B_i(NATIVE_CC_LS,i)
+#define BGE_i(i) CC_B_i(NATIVE_CC_GE,i)
+#define BLT_i(i) CC_B_i(NATIVE_CC_LT,i)
+#define BGT_i(i) CC_B_i(NATIVE_CC_GT,i)
+#define BLE_i(i) CC_B_i(NATIVE_CC_LE,i)
+#define B_i(i) CC_B_i(NATIVE_CC_AL,i)
+
+#define BL_i(i) CC_BL_i(NATIVE_CC_AL,i)
+/* NOTE(review): BLX_i emits cond=AL with opcode 0b1010, i.e. a plain B --
+ * byte-identical to B_i, no link is performed.  The architectural
+ * BLX(immediate) is the unconditional 0xFA000000|imm24 form (and switches
+ * to Thumb).  Confirm intended semantics before any caller relies on it. */
+#define BLX_i(i) _W((NATIVE_CC_AL << 28) | (10 << 24) | (i))
+#define BLX_r(r) CC_BLX_r(NATIVE_CC_AL,r)
+#define BX_r(r) CC_BX_r(NATIVE_CC_AL,r)
+#define BXJ_r(r) CC_BXJ_r(NATIVE_CC_AL,r)
+/* Status register instructions */
+/* MRS reads CPSR/SPSR into Rd; MSR writes it.  The 4-bit field mask sits
+ * in bits [19:16]: 0x9 = flags+control, 0x8 = flags only (CPSRf),
+ * 0x1 = control only (CPSRc). */
+#define CC_MRS_CPSR(cc,Rd) _W(((cc) << 28) | (0x10 << 20) | ((Rd) << 12) | (0xf << 16))
+#define MRS_CPSR(Rd) CC_MRS_CPSR(NATIVE_CC_AL,Rd)
+#define CC_MRS_SPSR(cc,Rd) _W(((cc) << 28) | (0x14 << 20) | ((Rd) << 12) | (0xf << 16))
+#define MRS_SPSR(Rd) CC_MRS_SPSR(NATIVE_CC_AL,Rd)
+
+#define CC_MSR_CPSR_i(cc,i) _W(((cc) << 28) | (0x32 << 20) | (0x9 << 16) | (0xf << 12) | SHIFT_IMM(i))
+#define CC_MSR_CPSR_r(cc,Rm) _W(((cc) << 28) | (0x12 << 20) | (0x9 << 16) | (0xf << 12) | (Rm))
+
+#define MSR_CPSR_i(i) CC_MSR_CPSR_i(NATIVE_CC_AL,(i))
+#define MSR_CPSR_r(Rm) CC_MSR_CPSR_r(NATIVE_CC_AL,(Rm))
+
+/* Flags-field-only writes; used by the JIT to restore host NZCV. */
+#define CC_MSR_CPSRf_i(cc,i) _W(((cc) << 28) | (0x32 << 20) | (0x8 << 16) | (0xf << 12) | SHIFT_IMM(i))
+#define CC_MSR_CPSRf_r(cc,Rm) _W(((cc) << 28) | (0x12 << 20) | (0x8 << 16) | (0xf << 12) | (Rm))
+
+#define MSR_CPSRf_i(i) CC_MSR_CPSRf_i(NATIVE_CC_AL,(i))
+#define MSR_CPSRf_r(Rm) CC_MSR_CPSRf_r(NATIVE_CC_AL,(Rm))
+
+#define CC_MSR_CPSRc_i(cc,i) _W(((cc) << 28) | (0x32 << 20) | (0x1 << 16) | (0xf << 12) | SHIFT_IMM(i))
+#define CC_MSR_CPSRc_r(cc,Rm) _W(((cc) << 28) | (0x12 << 20) | (0x1 << 16) | (0xf << 12) | (Rm))
+
+#define MSR_CPSRc_i(i) CC_MSR_CPSRc_i(NATIVE_CC_AL,(i))
+#define MSR_CPSRc_r(Rm) CC_MSR_CPSRc_r(NATIVE_CC_AL,(Rm))
+/* Load Store instructions */
+
+/* PUSH/POP: STMDB sp! (0x92d) / LDMIA sp! (0x8bd) with a 16-bit register
+ * list in the low half-word.  Single-register forms build the list as
+ * 1 << r; the *_REGS forms take a raw register mask. */
+#define CC_PUSH(cc,r) _W(((cc) << 28) | (0x92d << 16) | (1 << (r)))
+#define PUSH(r) CC_PUSH(NATIVE_CC_AL, r)
+
+#define CC_PUSH_REGS(cc,r) _W(((cc) << 28) | (0x92d << 16) | (r))
+#define PUSH_REGS(r) CC_PUSH_REGS(NATIVE_CC_AL, r)
+
+#define CC_POP(cc,r) _W(((cc) << 28) | (0x8bd << 16) | (1 << (r)))
+#define POP(r) CC_POP(NATIVE_CC_AL, r)
+
+#define CC_POP_REGS(cc,r) _W(((cc) << 28) | (0x8bd << 16) | (r))
+#define POP_REGS(r) CC_POP_REGS(NATIVE_CC_AL, r)
+
+/* LDR (load word) via _LS1 with L=1, B=0.  Suffix convention: I = signed
+ * immediate offset (rRI picks the U bit by sign), i = subtracted
+ * immediate, R/r = added/subtracted register, optionally scaled. */
+#define CC_LDR_rR(cc,Rd,Rn) _LS1(cc,1,0,Rd,Rn,ADD_IMM(0))
+#define CC_LDR_rRI(cc,Rd,Rn,i) _LS1(cc,1,0,Rd,Rn,(i) >= 0 ? ADD_IMM(i) : SUB_IMM(-(i)))
+#define CC_LDR_rRi(cc,Rd,Rn,i) _LS1(cc,1,0,Rd,Rn,SUB_IMM(i))
+#define CC_LDR_rRR(cc,Rd,Rn,Rm) _LS1(cc,1,0,Rd,Rn,ADD_REG(Rm))
+#define CC_LDR_rRr(cc,Rd,Rn,Rm) _LS1(cc,1,0,Rd,Rn,SUB_REG(Rm))
+#define CC_LDR_rRR_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,ADD_LSL(Rm,i))
+#define CC_LDR_rRr_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,SUB_LSL(Rm,i))
+#define CC_LDR_rRR_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,ADD_LSR(Rm,i))
+#define CC_LDR_rRr_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,SUB_LSR(Rm,i))
+#define CC_LDR_rRR_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,ADD_ASR(Rm,i))
+#define CC_LDR_rRr_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,SUB_ASR(Rm,i))
+#define CC_LDR_rRR_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,ADD_ROR(Rm,i))
+#define CC_LDR_rRr_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,1,0,Rd,Rn,SUB_ROR(Rm,i))
+#define CC_LDR_rRR_RRX(cc,Rd,Rn,Rm) _LS1(cc,1,0,Rd,Rn,ADD_RRX(Rm))
+#define CC_LDR_rRr_RRX(cc,Rd,Rn,Rm) _LS1(cc,1,0,Rd,Rn,SUB_RRX(Rm))
+
+#define LDR_rR(Rd,Rn) CC_LDR_rR(NATIVE_CC_AL,Rd,Rn)
+#define LDR_rRI(Rd,Rn,i) CC_LDR_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define LDR_rRi(Rd,Rn,i) CC_LDR_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define LDR_rRR(Rd,Rn,Rm) CC_LDR_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDR_rRr(Rd,Rn,Rm) CC_LDR_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDR_rRR_LSLi(Rd,Rn,Rm,i) CC_LDR_rRR_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRr_LSLi(Rd,Rn,Rm,i) CC_LDR_rRr_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRR_LSRi(Rd,Rn,Rm,i) CC_LDR_rRR_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRr_LSRi(Rd,Rn,Rm,i) CC_LDR_rRr_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRR_ASRi(Rd,Rn,Rm,i) CC_LDR_rRR_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRr_ASRi(Rd,Rn,Rm,i) CC_LDR_rRr_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRR_RORi(Rd,Rn,Rm,i) CC_LDR_rRR_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRr_RORi(Rd,Rn,Rm,i) CC_LDR_rRr_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDR_rRR_RRX(Rd,Rn,Rm) CC_LDR_rRR_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDR_rRr_RRX(Rd,Rn,Rm) CC_LDR_rRr_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+/* STR (store word) via _LS1 with L=0, B=0; same suffix convention as LDR.
+ * Fix: CC_STR_rRI now treats its immediate as signed and selects the
+ * U-bit direction accordingly, mirroring CC_LDR_rRI and CC_LDRH_rRI.
+ * Previously a negative offset was handed straight to ADD_IMM, producing
+ * a wrong (out-of-range positive) offset encoding. */
+#define CC_STR_rR(cc,Rd,Rn) _LS1(cc,0,0,Rd,Rn,ADD_IMM(0))
+#define CC_STR_rRI(cc,Rd,Rn,i) _LS1(cc,0,0,Rd,Rn,(i) >= 0 ? ADD_IMM(i) : SUB_IMM(-(i)))
+#define CC_STR_rRi(cc,Rd,Rn,i) _LS1(cc,0,0,Rd,Rn,SUB_IMM(i))
+#define CC_STR_rRR(cc,Rd,Rn,Rm) _LS1(cc,0,0,Rd,Rn,ADD_REG(Rm))
+#define CC_STR_rRr(cc,Rd,Rn,Rm) _LS1(cc,0,0,Rd,Rn,SUB_REG(Rm))
+#define CC_STR_rRR_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,ADD_LSL(Rm,i))
+#define CC_STR_rRr_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,SUB_LSL(Rm,i))
+#define CC_STR_rRR_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,ADD_LSR(Rm,i))
+#define CC_STR_rRr_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,SUB_LSR(Rm,i))
+#define CC_STR_rRR_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,ADD_ASR(Rm,i))
+#define CC_STR_rRr_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,SUB_ASR(Rm,i))
+#define CC_STR_rRR_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,ADD_ROR(Rm,i))
+#define CC_STR_rRr_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,0,0,Rd,Rn,SUB_ROR(Rm,i))
+#define CC_STR_rRR_RRX(cc,Rd,Rn,Rm) _LS1(cc,0,0,Rd,Rn,ADD_RRX(Rm))
+#define CC_STR_rRr_RRX(cc,Rd,Rn,Rm) _LS1(cc,0,0,Rd,Rn,SUB_RRX(Rm))
+
+#define STR_rR(Rd,Rn) CC_STR_rR(NATIVE_CC_AL,Rd,Rn)
+#define STR_rRI(Rd,Rn,i) CC_STR_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define STR_rRi(Rd,Rn,i) CC_STR_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define STR_rRR(Rd,Rn,Rm) CC_STR_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STR_rRr(Rd,Rn,Rm) CC_STR_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STR_rRR_LSLi(Rd,Rn,Rm,i) CC_STR_rRR_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRr_LSLi(Rd,Rn,Rm,i) CC_STR_rRr_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRR_LSRi(Rd,Rn,Rm,i) CC_STR_rRR_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRr_LSRi(Rd,Rn,Rm,i) CC_STR_rRr_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRR_ASRi(Rd,Rn,Rm,i) CC_STR_rRR_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRr_ASRi(Rd,Rn,Rm,i) CC_STR_rRr_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRR_RORi(Rd,Rn,Rm,i) CC_STR_rRR_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRr_RORi(Rd,Rn,Rm,i) CC_STR_rRr_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STR_rRR_RRX(Rd,Rn,Rm) CC_STR_rRR_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STR_rRr_RRX(Rd,Rn,Rm) CC_STR_rRr_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+/* LDRB/STRB (byte load/store) via _LS1 with B=1.
+ * NOTE(review): the *_rRI forms here pass the immediate straight to
+ * ADD_IMM -- unlike CC_LDR_rRI they do not handle negative offsets.
+ * Presumably callers only pass i >= 0; confirm. */
+#define CC_LDRB_rR(cc,Rd,Rn) _LS1(cc,1,1,Rd,Rn,ADD_IMM(0))
+#define CC_LDRB_rRI(cc,Rd,Rn,i) _LS1(cc,1,1,Rd,Rn,ADD_IMM(i))
+#define CC_LDRB_rRi(cc,Rd,Rn,i) _LS1(cc,1,1,Rd,Rn,SUB_IMM(i))
+#define CC_LDRB_rRR(cc,Rd,Rn,Rm) _LS1(cc,1,1,Rd,Rn,ADD_REG(Rm))
+#define CC_LDRB_rRr(cc,Rd,Rn,Rm) _LS1(cc,1,1,Rd,Rn,SUB_REG(Rm))
+#define CC_LDRB_rRR_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,ADD_LSL(Rm,i))
+#define CC_LDRB_rRr_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,SUB_LSL(Rm,i))
+#define CC_LDRB_rRR_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,ADD_LSR(Rm,i))
+#define CC_LDRB_rRr_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,SUB_LSR(Rm,i))
+#define CC_LDRB_rRR_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,ADD_ASR(Rm,i))
+#define CC_LDRB_rRr_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,SUB_ASR(Rm,i))
+#define CC_LDRB_rRR_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,ADD_ROR(Rm,i))
+#define CC_LDRB_rRr_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,1,1,Rd,Rn,SUB_ROR(Rm,i))
+#define CC_LDRB_rRR_RRX(cc,Rd,Rn,Rm) _LS1(cc,1,1,Rd,Rn,ADD_RRX(Rm))
+#define CC_LDRB_rRr_RRX(cc,Rd,Rn,Rm) _LS1(cc,1,1,Rd,Rn,SUB_RRX(Rm))
+
+#define LDRB_rR(Rd,Rn) CC_LDRB_rR(NATIVE_CC_AL,Rd,Rn)
+#define LDRB_rRI(Rd,Rn,i) CC_LDRB_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRB_rRi(Rd,Rn,i) CC_LDRB_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRB_rRR(Rd,Rn,Rm) CC_LDRB_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDRB_rRr(Rd,Rn,Rm) CC_LDRB_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDRB_rRR_LSLi(Rd,Rn,Rm,i) CC_LDRB_rRR_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRr_LSLi(Rd,Rn,Rm,i) CC_LDRB_rRr_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRR_LSRi(Rd,Rn,Rm,i) CC_LDRB_rRR_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRr_LSRi(Rd,Rn,Rm,i) CC_LDRB_rRr_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRR_ASRi(Rd,Rn,Rm,i) CC_LDRB_rRR_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRr_ASRi(Rd,Rn,Rm,i) CC_LDRB_rRr_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRR_RORi(Rd,Rn,Rm,i) CC_LDRB_rRR_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRr_RORi(Rd,Rn,Rm,i) CC_LDRB_rRr_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define LDRB_rRR_RRX(Rd,Rn,Rm) CC_LDRB_rRR_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDRB_rRr_RRX(Rd,Rn,Rm) CC_LDRB_rRr_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+/* STRB: byte store (L=0, B=1). */
+#define CC_STRB_rR(cc,Rd,Rn) _LS1(cc,0,1,Rd,Rn,ADD_IMM(0))
+#define CC_STRB_rRI(cc,Rd,Rn,i) _LS1(cc,0,1,Rd,Rn,ADD_IMM(i))
+#define CC_STRB_rRi(cc,Rd,Rn,i) _LS1(cc,0,1,Rd,Rn,SUB_IMM(i))
+#define CC_STRB_rRR(cc,Rd,Rn,Rm) _LS1(cc,0,1,Rd,Rn,ADD_REG(Rm))
+#define CC_STRB_rRr(cc,Rd,Rn,Rm) _LS1(cc,0,1,Rd,Rn,SUB_REG(Rm))
+#define CC_STRB_rRR_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,ADD_LSL(Rm,i))
+#define CC_STRB_rRr_LSLi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,SUB_LSL(Rm,i))
+#define CC_STRB_rRR_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,ADD_LSR(Rm,i))
+#define CC_STRB_rRr_LSRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,SUB_LSR(Rm,i))
+#define CC_STRB_rRR_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,ADD_ASR(Rm,i))
+#define CC_STRB_rRr_ASRi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,SUB_ASR(Rm,i))
+#define CC_STRB_rRR_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,ADD_ROR(Rm,i))
+#define CC_STRB_rRr_RORi(cc,Rd,Rn,Rm,i) _LS1(cc,0,1,Rd,Rn,SUB_ROR(Rm,i))
+#define CC_STRB_rRR_RRX(cc,Rd,Rn,Rm) _LS1(cc,0,1,Rd,Rn,ADD_RRX(Rm))
+#define CC_STRB_rRr_RRX(cc,Rd,Rn,Rm) _LS1(cc,0,1,Rd,Rn,SUB_RRX(Rm))
+
+#define STRB_rR(Rd,Rn) CC_STRB_rR(NATIVE_CC_AL,Rd,Rn)
+#define STRB_rRI(Rd,Rn,i) CC_STRB_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define STRB_rRi(Rd,Rn,i) CC_STRB_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define STRB_rRR(Rd,Rn,Rm) CC_STRB_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STRB_rRr(Rd,Rn,Rm) CC_STRB_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STRB_rRR_LSLi(Rd,Rn,Rm,i) CC_STRB_rRR_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRr_LSLi(Rd,Rn,Rm,i) CC_STRB_rRr_LSLi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRR_LSRi(Rd,Rn,Rm,i) CC_STRB_rRR_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRr_LSRi(Rd,Rn,Rm,i) CC_STRB_rRr_LSRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRR_ASRi(Rd,Rn,Rm,i) CC_STRB_rRR_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRr_ASRi(Rd,Rn,Rm,i) CC_STRB_rRr_ASRi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRR_RORi(Rd,Rn,Rm,i) CC_STRB_rRR_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRr_RORi(Rd,Rn,Rm,i) CC_STRB_rRr_RORi(NATIVE_CC_AL,Rd,Rn,Rm,i)
+#define STRB_rRR_RRX(Rd,Rn,Rm) CC_STRB_rRR_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STRB_rRr_RRX(Rd,Rn,Rm) CC_STRB_rRr_RRX(NATIVE_CC_AL,Rd,Rn,Rm)
+
+/* Extended (miscellaneous) load/store via _LS2; the three flag arguments
+ * after the first select L, S (signed) and H (halfword):
+ *   LDRSH 1,1,1   LDRH 1,0,1   LDRSB 1,1,0
+ *   STRH  0,0,1   STRD 0,1,1   LDRD  0,1,0  (STRD/LDRD encode L=0).
+ * NOTE(review): only CC_LDRH_rRI handles a negative immediate; the other
+ * *_rRI forms assume i >= 0 -- confirm callers. */
+#define CC_LDRSH_rR(cc,Rd,Rn) _LS2(cc,1,1,1,1,Rd,Rn,ADD2_IMM(0))
+#define CC_LDRSH_rRI(cc,Rd,Rn,i) _LS2(cc,1,1,1,1,Rd,Rn,ADD2_IMM(i))
+#define CC_LDRSH_rRi(cc,Rd,Rn,i) _LS2(cc,1,1,1,1,Rd,Rn,SUB2_IMM(i))
+#define CC_LDRSH_rRR(cc,Rd,Rn,Rm) _LS2(cc,1,1,1,1,Rd,Rn,ADD2_REG(Rm))
+#define CC_LDRSH_rRr(cc,Rd,Rn,Rm) _LS2(cc,1,1,1,1,Rd,Rn,SUB2_REG(Rm))
+
+#define LDRSH_rR(Rd,Rn) CC_LDRSH_rR(NATIVE_CC_AL,Rd,Rn)
+#define LDRSH_rRI(Rd,Rn,i) CC_LDRSH_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRSH_rRi(Rd,Rn,i) CC_LDRSH_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRSH_rRR(Rd,Rn,Rm) CC_LDRSH_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDRSH_rRr(Rd,Rn,Rm) CC_LDRSH_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_LDRH_rR(cc,Rd,Rn) _LS2(cc,1,1,0,1,Rd,Rn,ADD2_IMM(0))
+#define CC_LDRH_rRI(cc,Rd,Rn,i) _LS2(cc,1,1,0,1,Rd,Rn,(i) >= 0 ? ADD2_IMM(i) : SUB2_IMM(-(i)))
+#define CC_LDRH_rRi(cc,Rd,Rn,i) _LS2(cc,1,1,0,1,Rd,Rn,SUB2_IMM(i))
+#define CC_LDRH_rRR(cc,Rd,Rn,Rm) _LS2(cc,1,1,0,1,Rd,Rn,ADD2_REG(Rm))
+#define CC_LDRH_rRr(cc,Rd,Rn,Rm) _LS2(cc,1,1,0,1,Rd,Rn,SUB2_REG(Rm))
+
+#define LDRH_rR(Rd,Rn) CC_LDRH_rR(NATIVE_CC_AL,Rd,Rn)
+#define LDRH_rRI(Rd,Rn,i) CC_LDRH_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRH_rRi(Rd,Rn,i) CC_LDRH_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRH_rRR(Rd,Rn,Rm) CC_LDRH_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDRH_rRr(Rd,Rn,Rm) CC_LDRH_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_STRD_rR(cc,Rd,Rn) _LS2(cc,1,0,1,1,Rd,Rn,ADD2_IMM(0))
+#define CC_STRD_rRI(cc,Rd,Rn,i) _LS2(cc,1,0,1,1,Rd,Rn,ADD2_IMM(i))
+#define CC_STRD_rRi(cc,Rd,Rn,i) _LS2(cc,1,0,1,1,Rd,Rn,SUB2_IMM(i))
+#define CC_STRD_rRR(cc,Rd,Rn,Rm) _LS2(cc,1,0,1,1,Rd,Rn,ADD2_REG(Rm))
+#define CC_STRD_rRr(cc,Rd,Rn,Rm) _LS2(cc,1,0,1,1,Rd,Rn,SUB2_REG(Rm))
+
+#define STRD_rR(Rd,Rn) CC_STRD_rR(NATIVE_CC_AL,Rd,Rn)
+#define STRD_rRI(Rd,Rn,i) CC_STRD_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define STRD_rRi(Rd,Rn,i) CC_STRD_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define STRD_rRR(Rd,Rn,Rm) CC_STRD_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STRD_rRr(Rd,Rn,Rm) CC_STRD_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_STRH_rR(cc,Rd,Rn) _LS2(cc,1,0,0,1,Rd,Rn,ADD2_IMM(0))
+#define CC_STRH_rRI(cc,Rd,Rn,i) _LS2(cc,1,0,0,1,Rd,Rn,ADD2_IMM(i))
+#define CC_STRH_rRi(cc,Rd,Rn,i) _LS2(cc,1,0,0,1,Rd,Rn,SUB2_IMM(i))
+#define CC_STRH_rRR(cc,Rd,Rn,Rm) _LS2(cc,1,0,0,1,Rd,Rn,ADD2_REG(Rm))
+#define CC_STRH_rRr(cc,Rd,Rn,Rm) _LS2(cc,1,0,0,1,Rd,Rn,SUB2_REG(Rm))
+
+#define STRH_rR(Rd,Rn) CC_STRH_rR(NATIVE_CC_AL,Rd,Rn)
+#define STRH_rRI(Rd,Rn,i) CC_STRH_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define STRH_rRi(Rd,Rn,i) CC_STRH_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define STRH_rRR(Rd,Rn,Rm) CC_STRH_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define STRH_rRr(Rd,Rn,Rm) CC_STRH_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_LDRSB_rR(cc,Rd,Rn) _LS2(cc,1,1,1,0,Rd,Rn,ADD2_IMM(0))
+#define CC_LDRSB_rRI(cc,Rd,Rn,i) _LS2(cc,1,1,1,0,Rd,Rn,ADD2_IMM(i))
+#define CC_LDRSB_rRi(cc,Rd,Rn,i) _LS2(cc,1,1,1,0,Rd,Rn,SUB2_IMM(i))
+#define CC_LDRSB_rRR(cc,Rd,Rn,Rm) _LS2(cc,1,1,1,0,Rd,Rn,ADD2_REG(Rm))
+#define CC_LDRSB_rRr(cc,Rd,Rn,Rm) _LS2(cc,1,1,1,0,Rd,Rn,SUB2_REG(Rm))
+
+#define LDRSB_rR(Rd,Rn) CC_LDRSB_rR(NATIVE_CC_AL,Rd,Rn)
+#define LDRSB_rRI(Rd,Rn,i) CC_LDRSB_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRSB_rRi(Rd,Rn,i) CC_LDRSB_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRSB_rRR(Rd,Rn,Rm) CC_LDRSB_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDRSB_rRr(Rd,Rn,Rm) CC_LDRSB_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+
+#define CC_LDRD_rR(cc,Rd,Rn) _LS2(cc,1,0,1,0,Rd,Rn,ADD2_IMM(0))
+#define CC_LDRD_rRI(cc,Rd,Rn,i) _LS2(cc,1,0,1,0,Rd,Rn,ADD2_IMM(i))
+#define CC_LDRD_rRi(cc,Rd,Rn,i) _LS2(cc,1,0,1,0,Rd,Rn,SUB2_IMM(i))
+#define CC_LDRD_rRR(cc,Rd,Rn,Rm) _LS2(cc,1,0,1,0,Rd,Rn,ADD2_REG(Rm))
+#define CC_LDRD_rRr(cc,Rd,Rn,Rm) _LS2(cc,1,0,1,0,Rd,Rn,SUB2_REG(Rm))
+
+#define LDRD_rR(Rd,Rn) CC_LDRD_rR(NATIVE_CC_AL,Rd,Rn)
+#define LDRD_rRI(Rd,Rn,i) CC_LDRD_rRI(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRD_rRi(Rd,Rn,i) CC_LDRD_rRi(NATIVE_CC_AL,Rd,Rn,i)
+#define LDRD_rRR(Rd,Rn,Rm) CC_LDRD_rRR(NATIVE_CC_AL,Rd,Rn,Rm)
+#define LDRD_rRr(Rd,Rn,Rm) CC_LDRD_rRr(NATIVE_CC_AL,Rd,Rn,Rm)
+
+/* Multiply */
+/* Long multiplies: bits [23:21] select SMULL/UMULL, bit 20 is S.
+ * Operand layout: RdHi<<16 | RdLo<<12 | Rs<<8 | 1001b<<4 | Rm. */
+#define CC_SMULL_rrrr(cc, RdLo, RdHi, Rm, Rs) _W(((cc) << 28) | (0x0C << 20) | ((RdHi) << 16) | ((RdLo) << 12) | ((Rs) << 8) | (0x9 << 4) | (Rm))
+#define SMULL_rrrr(RdLo,RdHi,Rm,Rs) CC_SMULL_rrrr(NATIVE_CC_AL,RdLo,RdHi,Rm,Rs)
+#define CC_SMULLS_rrrr(cc, RdLo, RdHi, Rm, Rs) _W(((cc) << 28) | (0x0D << 20) | ((RdHi) << 16) | ((RdLo) << 12) | ((Rs) << 8) | (0x9 << 4) | (Rm))
+#define SMULLS_rrrr(RdLo,RdHi,Rm,Rs) CC_SMULLS_rrrr(NATIVE_CC_AL,RdLo,RdHi,Rm,Rs)
+#define CC_MUL_rrr(cc, Rd, Rm, Rs) _W(((cc) << 28) | (0x00 << 20) | ((Rd) << 16) | ((Rs) << 8) | (0x9 << 4) | (Rm))
+#define MUL_rrr(Rd, Rm, Rs) CC_MUL_rrr(NATIVE_CC_AL, Rd, Rm, Rs)
+#define CC_MULS_rrr(cc, Rd, Rm, Rs) _W(((cc) << 28) | (0x01 << 20) | ((Rd) << 16) | ((Rs) << 8) | (0x9 << 4) | (Rm))
+#define MULS_rrr(Rd, Rm, Rs) CC_MULS_rrr(NATIVE_CC_AL, Rd, Rm, Rs)
+
+#define CC_UMULL_rrrr(cc, RdLo, RdHi, Rm, Rs) _W(((cc) << 28) | (0x08 << 20) | ((RdHi) << 16) | ((RdLo) << 12) | ((Rs) << 8) | (0x9 << 4) | (Rm))
+#define UMULL_rrrr(RdLo,RdHi,Rm,Rs) CC_UMULL_rrrr(NATIVE_CC_AL,RdLo,RdHi,Rm,Rs)
+#define CC_UMULLS_rrrr(cc, RdLo, RdHi, Rm, Rs) _W(((cc) << 28) | (0x09 << 20) | ((RdHi) << 16) | ((RdLo) << 12) | ((Rs) << 8) | (0x9 << 4) | (Rm))
+#define UMULLS_rrrr(RdLo,RdHi,Rm,Rs) CC_UMULLS_rrrr(NATIVE_CC_AL,RdLo,RdHi,Rm,Rs)
+
+/* Others */
+/* CLZ (ARMv5): count leading zeros of Rm into Rd. */
+#define CC_CLZ_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x16 << 20) | (0xf << 16) | ((Rd) << 12) | (0xf << 8) | (0x1 << 4) | SHIFT_REG(Rm))
+#define CLZ_rr(Rd,Rm) CC_CLZ_rr(NATIVE_CC_AL,Rd,Rm)
+
+/* Alias */
+/* ARM has no dedicated shift opcodes; these map LSL/LSR/ASR/ROR/RRX onto
+ * MOV (or MOVS for the flag-setting forms) with a shifted operand. */
+#define LSL_rri(Rd,Rm,i) MOV_rrLSLi(Rd,Rm,i)
+#define LSL_rrr(Rd,Rm,Rs) MOV_rrLSLr(Rd,Rm,Rs)
+#define LSR_rri(Rd,Rm,i) MOV_rrLSRi(Rd,Rm,i)
+#define LSR_rrr(Rd,Rm,Rs) MOV_rrLSRr(Rd,Rm,Rs)
+#define ASR_rri(Rd,Rm,i) MOV_rrASRi(Rd,Rm,i)
+#define ASR_rrr(Rd,Rm,Rs) MOV_rrASRr(Rd,Rm,Rs)
+#define ROR_rri(Rd,Rm,i) MOV_rrRORi(Rd,Rm,i)
+#define ROR_rrr(Rd,Rm,Rs) MOV_rrRORr(Rd,Rm,Rs)
+#define RRX_rr(Rd,Rm) MOV_rrRRX(Rd,Rm)
+#define LSLS_rri(Rd,Rm,i) MOVS_rrLSLi(Rd,Rm,i)
+#define LSLS_rrr(Rd,Rm,Rs) MOVS_rrLSLr(Rd,Rm,Rs)
+#define LSRS_rri(Rd,Rm,i) MOVS_rrLSRi(Rd,Rm,i)
+#define LSRS_rrr(Rd,Rm,Rs) MOVS_rrLSRr(Rd,Rm,Rs)
+#define ASRS_rri(Rd,Rm,i) MOVS_rrASRi(Rd,Rm,i)
+#define ASRS_rrr(Rd,Rm,Rs) MOVS_rrASRr(Rd,Rm,Rs)
+#define RORS_rri(Rd,Rm,i) MOVS_rrRORi(Rd,Rm,i)
+#define RORS_rrr(Rd,Rm,Rs) MOVS_rrRORr(Rd,Rm,Rs)
+#define RRXS_rr(Rd,Rm) MOVS_rrRRX(Rd,Rm)
+
+/* ARMV6 ops */
+/* Sign/zero extend (SXTB/SXTH/UXTB/UXTH): the _ROR8/16/24 variants place
+ * the rotate amount in bits [11:10] (1/2/3 <<10), extracting a byte or
+ * halfword other than the lowest one. */
+#define CC_SXTB_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x6a << 20) | (0xf << 16) | ((Rd) << 12) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTB_rr(Rd,Rm) CC_SXTB_rr(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_SXTB_rr_ROR8(cc,Rd,Rm) _W(((cc) << 28) | (0x6a << 20) | (0xf << 16) | ((Rd) << 12) | (1 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTB_rr_ROR8(Rd,Rm) CC_SXTB_rr_ROR8(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_SXTB_rr_ROR16(cc,Rd,Rm) _W(((cc) << 28) | (0x6a << 20) | (0xf << 16) | ((Rd) << 12) | (2 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTB_rr_ROR16(Rd,Rm) CC_SXTB_rr_ROR16(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_SXTB_rr_ROR24(cc,Rd,Rm) _W(((cc) << 28) | (0x6a << 20) | (0xf << 16) | ((Rd) << 12) | (3 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTB_rr_ROR24(Rd,Rm) CC_SXTB_rr_ROR24(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_SXTH_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x6b << 20) | (0xf << 16) | ((Rd) << 12) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTH_rr(Rd,Rm) CC_SXTH_rr(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_SXTH_rr_ROR8(cc,Rd,Rm) _W(((cc) << 28) | (0x6b << 20) | (0xf << 16) | ((Rd) << 12) | (1 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTH_rr_ROR8(Rd,Rm) CC_SXTH_rr_ROR8(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_SXTH_rr_ROR16(cc,Rd,Rm) _W(((cc) << 28) | (0x6b << 20) | (0xf << 16) | ((Rd) << 12) | (2 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTH_rr_ROR16(Rd,Rm) CC_SXTH_rr_ROR16(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_SXTH_rr_ROR24(cc,Rd,Rm) _W(((cc) << 28) | (0x6b << 20) | (0xf << 16) | ((Rd) << 12) | (3 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define SXTH_rr_ROR24(Rd,Rm) CC_SXTH_rr_ROR24(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTB_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x6e << 20) | (0xf << 16) | ((Rd) << 12) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTB_rr(Rd,Rm) CC_UXTB_rr(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTB_rr_ROR8(cc,Rd,Rm) _W(((cc) << 28) | (0x6e << 20) | (0xf << 16) | ((Rd) << 12) | (1 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTB_rr_ROR8(Rd,Rm) CC_UXTB_rr_ROR8(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTB_rr_ROR16(cc,Rd,Rm) _W(((cc) << 28) | (0x6e << 20) | (0xf << 16) | ((Rd) << 12) | (2 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTB_rr_ROR16(Rd,Rm) CC_UXTB_rr_ROR16(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTB_rr_ROR24(cc,Rd,Rm) _W(((cc) << 28) | (0x6e << 20) | (0xf << 16) | ((Rd) << 12) | (3 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTB_rr_ROR24(Rd,Rm) CC_UXTB_rr_ROR24(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTH_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x6f << 20) | (0xf << 16) | ((Rd) << 12) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTH_rr(Rd,Rm) CC_UXTH_rr(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTH_rr_ROR8(cc,Rd,Rm) _W(((cc) << 28) | (0x6f << 20) | (0xf << 16) | ((Rd) << 12) | (1 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTH_rr_ROR8(Rd,Rm) CC_UXTH_rr_ROR8(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTH_rr_ROR16(cc,Rd,Rm) _W(((cc) << 28) | (0x6f << 20) | (0xf << 16) | ((Rd) << 12) | (2 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTH_rr_ROR16(Rd,Rm) CC_UXTH_rr_ROR16(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_UXTH_rr_ROR24(cc,Rd,Rm) _W(((cc) << 28) | (0x6f << 20) | (0xf << 16) | ((Rd) << 12) | (3 << 10) | (0x7 << 4) | SHIFT_REG(Rm))
+#define UXTH_rr_ROR24(Rd,Rm) CC_UXTH_rr_ROR24(NATIVE_CC_AL,Rd,Rm)
+
+/* Byte-reverse: REV (word), REV16 (both halfwords), REVSH (signed hw). */
+#define CC_REV_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x6b << 20) | (0xf << 16) | (0xf << 8) | ((Rd) << 12) | (0x3 << 4) | SHIFT_REG(Rm))
+#define REV_rr(Rd,Rm) CC_REV_rr(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_REV16_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x6b << 20) | (0xf << 16) | (0xf << 8) | ((Rd) << 12) | (0xB << 4) | SHIFT_REG(Rm))
+#define REV16_rr(Rd,Rm) CC_REV16_rr(NATIVE_CC_AL,Rd,Rm)
+
+#define CC_REVSH_rr(cc,Rd,Rm) _W(((cc) << 28) | (0x6f << 20) | (0xf << 16) | (0xf << 8) | ((Rd) << 12) | (0xB << 4) | SHIFT_REG(Rm))
+#define REVSH_rr(Rd,Rm) CC_REVSH_rr(NATIVE_CC_AL,Rd,Rm)
+
+/* Pack halfword: PKHBT (bottom+top), PKHTB (top+bottom, ASR shift). */
+#define CC_PKHBT_rrr(cc,Rd,Rn,Rm) _W(((cc) << 28) | (0x68 << 20) | (Rn << 16) | (Rd << 12) | (0x1 << 4) | (Rm))
+#define CC_PKHBT_rrrLSLi(cc,Rd,Rn,Rm,s) _W(((cc) << 28) | (0x68 << 20) | (Rn << 16) | (Rd << 12) | (0x1 << 4) | SHIFT_PK(Rm, s))
+#define PKHBT_rrr(Rd,Rn,Rm) CC_PKHBT_rrr(NATIVE_CC_AL,Rd,Rn,Rm)
+#define PKHBT_rrrLSLi(Rd,Rn,Rm,s) CC_PKHBT_rrrLSLi(NATIVE_CC_AL,Rd,Rn,Rm,s)
+
+#define CC_PKHTB_rrrASRi(cc,Rd,Rn,Rm,s) _W(((cc) << 28) | (0x68 << 20) | (Rn << 16) | (Rd << 12) | (0x5 << 4) | SHIFT_PK(Rm, s))
+#define PKHTB_rrrASRi(Rd,Rn,Rm,s) CC_PKHTB_rrrASRi(NATIVE_CC_AL,Rd,Rn,Rm,s)
+
+#endif /* ARM_RTASM_H */
--- /dev/null
+/*
+ * compiler/compemu_midfunc_arm.cpp - Native MIDFUNCS for ARM
+ *
+ * Copyright (c) 2014 Jens Heitmann of ARAnyM dev team (see AUTHORS)
+ *
+ * Inspired by Christian Bauer's Basilisk II
+ *
+ * Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
+ *
+ * Adaptation for Basilisk II and improvements, copyright 2000-2002
+ * Gwenole Beauchesne
+ *
+ * Basilisk II (C) 1997-2002 Christian Bauer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Note:
+ * File is included by compemu_support.cpp
+ *
+ */
+
+/********************************************************************
+ * CPU functions exposed to gencomp. Both CREATE and EMIT time *
+ ********************************************************************/
+
+/*
+ * RULES FOR HANDLING REGISTERS:
+ *
+ * * In the function headers, order the parameters
+ * - 1st registers written to
+ * - 2nd read/modify/write registers
+ * - 3rd registers read from
+ * * Before calling raw_*, you must call readreg, writereg or rmw for
+ * each register
+ * * The order for this is
+ * - 1st call remove_offset for all registers written to with size<4
+ * - 2nd call readreg for all registers read without offset
+ * - 3rd call rmw for all rmw registers
+ * - 4th call readreg_offset for all registers that can handle offsets
+ * - 5th call get_offset for all the registers from the previous step
+ * - 6th call writereg for all written-to registers
+ * - 7th call raw_*
+ * - 8th unlock2 all registers that were locked
+ */
+
+/* Mark the emulated flag state as live in host flags: the stacked copy
+ * becomes stale (TRASH) and must be saved before host flags are clobbered. */
+MIDFUNC(0,live_flags,(void))
+{
+	live.flags_on_stack=TRASH;
+	live.flags_in_flags=VALID;
+	live.flags_are_important=1;
+}
+
+/* Tell the compiler the current flag values are dead (no need to save). */
+MIDFUNC(0,dont_care_flags,(void))
+{
+	live.flags_are_important=0;
+}
+
+/* Copy the host carry into the in-memory emulated X flag (FLAGX). */
+MIDFUNC(0,duplicate_carry,(void))
+{
+	evict(FLAGX);
+	make_flags_live_internal();
+	COMPCALL(setcc_m)((uintptr)live.state[FLAGX].mem,NATIVE_CC_CS);
+	log_vwrite(FLAGX);
+}
+
+/* Load the emulated X flag back into the host carry flag. */
+MIDFUNC(0,restore_carry,(void))
+{
+#if defined(USE_JIT2)
+	RR4 r=readreg(FLAGX,4);
+	MRS_CPSR(REG_WORK1);
+	/* TEQ r,1 sets Z iff r==1 -- assumes FLAGX holds 0/1; TODO confirm. */
+	TEQ_ri(r,1);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	MSR_CPSRf_r(REG_WORK1);
+	unlock2(r);
+#else
+	/* NOTE(review): this fallback is inherited x86 code (P6 partial-flag
+	 * stall workaround) -- presumably unreachable in the ARM build; confirm. */
+	if (!have_rat_stall) { /* Not a P6 core, i.e. no partial stalls */
+		bt_l_ri_noclobber(FLAGX,0);
+	}
+	else { /* Avoid the stall the above creates.
+	 This is slow on non-P6, though.
+	 */
+		COMPCALL(rol_b_ri(FLAGX,8));
+		isclean(FLAGX);
+	}
+#endif
+}
+
+/* Bracket a flag-producing sequence: while set, raw ops must preserve flags. */
+MIDFUNC(0,start_needflags,(void))
+{
+	needflags=1;
+}
+
+MIDFUNC(0,end_needflags,(void))
+{
+	needflags=0;
+}
+
+/* Force the emulated flags into the host flag register now. */
+MIDFUNC(0,make_flags_live,(void))
+{
+	make_flags_live_internal();
+}
+
+MIDFUNC(2,bt_l_ri,(RR4 r, IMM i)) /* This is defined as only affecting C */
+{
+	/* Bit index < 16 fits in the low halfword, so a 2-byte read suffices. */
+	int size=4;
+	if (i<16)
+		size=2;
+	CLOBBER_BT;
+	r=readreg(r,size);
+	raw_bt_l_ri(r,i);
+	unlock2(r);
+}
+
+MIDFUNC(2,bt_l_rr,(RR4 r, RR4 b)) /* This is defined as only affecting C */
+{
+	CLOBBER_BT;
+	r=readreg(r,4);
+	b=readreg(b,4);
+	raw_bt_l_rr(r,b);
+	unlock2(r);
+	unlock2(b);
+}
+
+/* Bit test-and-complement: r is read-modify-write. */
+MIDFUNC(2,btc_l_rr,(RW4 r, RR4 b))
+{
+	CLOBBER_BT;
+	b=readreg(b,4);
+	r=rmw(r,4,4);
+	raw_btc_l_rr(r,b);
+	unlock2(r);
+	unlock2(b);
+}
+
+/* Bit test-and-reset. */
+MIDFUNC(2,btr_l_rr,(RW4 r, RR4 b))
+{
+	CLOBBER_BT;
+	b=readreg(b,4);
+	r=rmw(r,4,4);
+	raw_btr_l_rr(r,b);
+	unlock2(r);
+	unlock2(b);
+}
+
+/* Bit test-and-set. */
+MIDFUNC(2,bts_l_rr,(RW4 r, RR4 b))
+{
+	CLOBBER_BT;
+	b=readreg(b,4);
+	r=rmw(r,4,4);
+	raw_bts_l_rr(r,b);
+	unlock2(r);
+	unlock2(b);
+}
+
+/* Load a long from absolute memory address s into virtual register d. */
+MIDFUNC(2,mov_l_rm,(W4 d, IMM s))
+{
+	CLOBBER_MOV;
+	d=writereg(d,4);
+	raw_mov_l_rm(d,s);
+	unlock2(d);
+}
+
+/* Load a long from base + index*factor into d. */
+MIDFUNC(4,mov_l_rm_indexed,(W4 d, IMM base, RR4 index, IMM factor))
+{
+	CLOBBER_MOV;
+	index=readreg(index,4);
+	d=writereg(d,4);
+	raw_mov_l_rm_indexed(d,base,index,factor);
+	unlock2(index);
+	unlock2(d);
+}
+
+/* Store immediate s to absolute address d (long/word/byte variants). */
+MIDFUNC(2,mov_l_mi,(IMM d, IMM s))
+{
+	CLOBBER_MOV;
+	raw_mov_l_mi(d,s);
+}
+
+MIDFUNC(2,mov_w_mi,(IMM d, IMM s))
+{
+	CLOBBER_MOV;
+	raw_mov_w_mi(d,s);
+}
+
+MIDFUNC(2,mov_b_mi,(IMM d, IMM s))
+{
+	CLOBBER_MOV;
+	raw_mov_b_mi(d,s);
+}
+
+/* Rotate-left by immediate.  A zero rotate is a no-op unless the caller
+ * needs the flag side effects (needflags). */
+MIDFUNC(2,rol_b_ri,(RW1 r, IMM i))
+{
+	if (!i && !needflags)
+		return;
+	CLOBBER_ROL;
+	r=rmw(r,1,1);
+	raw_rol_b_ri(r,i);
+	unlock2(r);
+}
+
+MIDFUNC(2,rol_w_ri,(RW2 r, IMM i))
+{
+	if (!i && !needflags)
+		return;
+	CLOBBER_ROL;
+	r=rmw(r,2,2);
+	raw_rol_w_ri(r,i);
+	unlock2(r);
+}
+
+MIDFUNC(2,rol_l_ri,(RW4 r, IMM i))
+{
+	if (!i && !needflags)
+		return;
+	CLOBBER_ROL;
+	r=rmw(r,4,4);
+	raw_rol_l_ri(r,i);
+	unlock2(r);
+}
+
+/* Rotate-left by register; constant counts fall back to the _ri form. */
+MIDFUNC(2,rol_l_rr,(RW4 d, RR1 r))
+{
+	if (isconst(r)) {
+		COMPCALL(rol_l_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_ROL;
+	r=readreg(r,1);
+	d=rmw(d,4,4);
+	raw_rol_l_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,rol_w_rr,(RW2 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+	if (isconst(r)) {
+		COMPCALL(rol_w_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_ROL;
+	r=readreg(r,1);
+	d=rmw(d,2,2);
+	raw_rol_w_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,rol_b_rr,(RW1 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+	if (isconst(r)) {
+		COMPCALL(rol_b_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_ROL;
+	r=readreg(r,1);
+	d=rmw(d,1,1);
+	raw_rol_b_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+/* Logical shift-left by register; constant counts use the _ri fast path. */
+MIDFUNC(2,shll_l_rr,(RW4 d, RR1 r))
+{
+	if (isconst(r)) {
+		COMPCALL(shll_l_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_SHLL;
+	r=readreg(r,1);
+	d=rmw(d,4,4);
+	raw_shll_l_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,shll_w_rr,(RW2 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+	if (isconst(r)) {
+		COMPCALL(shll_w_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_SHLL;
+	r=readreg(r,1);
+	d=rmw(d,2,2);
+	raw_shll_w_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,shll_b_rr,(RW1 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+	if (isconst(r)) {
+		COMPCALL(shll_b_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_SHLL;
+	r=readreg(r,1);
+	d=rmw(d,1,1);
+	raw_shll_b_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+/* Rotate-right by immediate / register.
+ * NOTE(review): these declare their rotated operand as RR* (read-only)
+ * yet call rmw() on it; the rol_* counterparts use RW*.  Presumably they
+ * should be RW1/RW2/RW4 -- confirm against the DECLARE_MIDFUNC
+ * prototypes before changing. */
+MIDFUNC(2,ror_b_ri,(RR1 r, IMM i))
+{
+	if (!i && !needflags)
+		return;
+	CLOBBER_ROR;
+	r=rmw(r,1,1);
+	raw_ror_b_ri(r,i);
+	unlock2(r);
+}
+
+MIDFUNC(2,ror_w_ri,(RR2 r, IMM i))
+{
+	if (!i && !needflags)
+		return;
+	CLOBBER_ROR;
+	r=rmw(r,2,2);
+	raw_ror_w_ri(r,i);
+	unlock2(r);
+}
+
+MIDFUNC(2,ror_l_ri,(RR4 r, IMM i))
+{
+	if (!i && !needflags)
+		return;
+	CLOBBER_ROR;
+	r=rmw(r,4,4);
+	raw_ror_l_ri(r,i);
+	unlock2(r);
+}
+
+MIDFUNC(2,ror_l_rr,(RR4 d, RR1 r))
+{
+	if (isconst(r)) {
+		COMPCALL(ror_l_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_ROR;
+	r=readreg(r,1);
+	d=rmw(d,4,4);
+	raw_ror_l_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,ror_w_rr,(RR2 d, RR1 r))
+{
+	if (isconst(r)) {
+		COMPCALL(ror_w_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_ROR;
+	r=readreg(r,1);
+	d=rmw(d,2,2);
+	raw_ror_w_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,ror_b_rr,(RR1 d, RR1 r))
+{
+	if (isconst(r)) {
+		COMPCALL(ror_b_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+
+	CLOBBER_ROR;
+	r=readreg(r,1);
+	d=rmw(d,1,1);
+	raw_ror_b_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+/* Logical shift-right by register; constant counts use the _ri fast path. */
+MIDFUNC(2,shrl_l_rr,(RW4 d, RR1 r))
+{
+	if (isconst(r)) {
+		COMPCALL(shrl_l_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_SHRL;
+	r=readreg(r,1);
+	d=rmw(d,4,4);
+	raw_shrl_l_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,shrl_w_rr,(RW2 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+	if (isconst(r)) {
+		COMPCALL(shrl_w_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+	CLOBBER_SHRL;
+	r=readreg(r,1);
+	d=rmw(d,2,2);
+	raw_shrl_w_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,shrl_b_rr,(RW1 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+	if (isconst(r)) {
+		COMPCALL(shrl_b_ri)(d,(uae_u8)live.state[r].val);
+		return;
+	}
+
+	CLOBBER_SHRL;
+	r=readreg(r,1);
+	d=rmw(d,1,1);
+	raw_shrl_b_rr(d,r);
+	unlock2(r);
+	unlock2(d);
+}
+
+MIDFUNC(2,shll_l_ri,(RW4 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ if (isconst(r) && !needflags) {
+ live.state[r].val<<=i;
+ return;
+ }
+ CLOBBER_SHLL;
+ r=rmw(r,4,4);
+ raw_shll_l_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shll_w_ri,(RW2 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ CLOBBER_SHLL;
+ r=rmw(r,2,2);
+ raw_shll_w_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shll_b_ri,(RW1 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ CLOBBER_SHLL;
+ r=rmw(r,1,1);
+ raw_shll_b_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shrl_l_ri,(RW4 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ if (isconst(r) && !needflags) {
+ live.state[r].val>>=i;
+ return;
+ }
+ CLOBBER_SHRL;
+ r=rmw(r,4,4);
+ raw_shrl_l_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shrl_w_ri,(RW2 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ CLOBBER_SHRL;
+ r=rmw(r,2,2);
+ raw_shrl_w_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shrl_b_ri,(RW1 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ CLOBBER_SHRL;
+ r=rmw(r,1,1);
+ raw_shrl_b_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shra_l_ri,(RW4 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ CLOBBER_SHRA;
+ r=rmw(r,4,4);
+ raw_shra_l_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shra_w_ri,(RW2 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ CLOBBER_SHRA;
+ r=rmw(r,2,2);
+ raw_shra_w_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shra_b_ri,(RW1 r, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ CLOBBER_SHRA;
+ r=rmw(r,1,1);
+ raw_shra_b_ri(r,i);
+ unlock2(r);
+}
+
+MIDFUNC(2,shra_l_rr,(RW4 d, RR1 r))
+{
+ if (isconst(r)) {
+ COMPCALL(shra_l_ri)(d,(uae_u8)live.state[r].val);
+ return;
+ }
+ CLOBBER_SHRA;
+ r=readreg(r,1);
+ d=rmw(d,4,4);
+ raw_shra_l_rr(d,r);
+ unlock2(r);
+ unlock2(d);
+}
+
+MIDFUNC(2,shra_w_rr,(RW2 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+ if (isconst(r)) {
+ COMPCALL(shra_w_ri)(d,(uae_u8)live.state[r].val);
+ return;
+ }
+ CLOBBER_SHRA;
+ r=readreg(r,1);
+ d=rmw(d,2,2);
+ raw_shra_w_rr(d,r);
+ unlock2(r);
+ unlock2(d);
+}
+
+MIDFUNC(2,shra_b_rr,(RW1 d, RR1 r))
+{ /* Can only do this with r==1, i.e. cl */
+
+ if (isconst(r)) {
+ COMPCALL(shra_b_ri)(d,(uae_u8)live.state[r].val);
+ return;
+ }
+
+ CLOBBER_SHRA;
+ r=readreg(r,1);
+ d=rmw(d,1,1);
+ raw_shra_b_rr(d,r);
+ unlock2(r);
+ unlock2(d);
+}
+
+MIDFUNC(2,setcc,(W1 d, IMM cc))
+{
+ CLOBBER_SETCC;
+ d=writereg(d,1);
+ raw_setcc(d,cc);
+ unlock2(d);
+}
+
+MIDFUNC(2,setcc_m,(IMM d, IMM cc))
+{
+ CLOBBER_SETCC;
+ raw_setcc_m(d,cc);
+}
+
+MIDFUNC(3,cmov_l_rr,(RW4 d, RR4 s, IMM cc))
+{
+ if (d==s)
+ return;
+ CLOBBER_CMOV;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+ raw_cmov_l_rr(d,s,cc);
+ unlock2(s);
+ unlock2(d);
+}
+
+MIDFUNC(2,bsf_l_rr,(W4 d, RR4 s))	/* s is only read (readreg) -- RR4 matches the DECLARE_MIDFUNC prototype */
+{
+	CLOBBER_BSF;	// invalidate cached emulated-flag state before the flag-writing op
+	s = readreg(s, 4);	// map source to a host register (read access)
+	d = writereg(d, 4);	// map destination to a host register (write access)
+	raw_bsf_l_rr(d, s);	// emit the native bit-scan sequence
+	unlock2(s);
+	unlock2(d);
+}
+
+/* Set the Z flag depending on the value in s. Note that the
+   value has to be 0 or -1 (or, more precisely, for non-zero
+   values, bit 14 must be set)! */
+MIDFUNC(2,simulate_bsf,(W4 tmp, RW4 s))
+{
+	CLOBBER_BSF;
+	s=rmw_specific(s,4,4,FLAG_NREG3);	// s must live in the dedicated flag register
+	tmp=writereg(tmp,4);	// scratch register used while deriving Z
+	raw_flags_set_zero(s, tmp);
+	unlock2(tmp);
+	unlock2(s);
+}
+
+MIDFUNC(2,imul_32_32,(RW4 d, RR4 s))
+{
+ CLOBBER_MUL;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+ raw_imul_32_32(d,s);
+ unlock2(s);
+ unlock2(d);
+}
+
+MIDFUNC(2,imul_64_32,(RW4 d, RW4 s))
+{
+ CLOBBER_MUL;
+ s=rmw_specific(s,4,4,MUL_NREG2);
+ d=rmw_specific(d,4,4,MUL_NREG1);
+ raw_imul_64_32(d,s);
+ unlock2(s);
+ unlock2(d);
+}
+
+MIDFUNC(2,mul_64_32,(RW4 d, RW4 s))
+{
+ CLOBBER_MUL;
+ s=rmw_specific(s,4,4,MUL_NREG2);
+ d=rmw_specific(d,4,4,MUL_NREG1);
+ raw_mul_64_32(d,s);
+ unlock2(s);
+ unlock2(d);
+}
+
+MIDFUNC(2,sign_extend_16_rr,(W4 d, RR2 s))	// d = (int32) sign-extended low 16 bits of s
+{
+	int isrmw;	// true when source and destination are the same virtual register
+
+	if (isconst(s)) {	// constant folding: extend at compile time
+		set_const(d,(uae_s32)(uae_s16)live.state[s].val);
+		return;
+	}
+
+	CLOBBER_SE16;
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,2);
+		d=writereg(d,4);
+	}
+	else { /* If we try to lock this twice, with different sizes, we
+	are in trouble! */
+		s=d=rmw(s,4,2);	// single lock: read 2 bytes, write all 4
+	}
+	raw_sign_extend_16_rr(d,s);
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+MIDFUNC(2,sign_extend_8_rr,(W4 d, RR1 s))	// d = (int32) sign-extended low 8 bits of s
+{
+	int isrmw;	// true when source and destination are the same virtual register
+
+	if (isconst(s)) {	// constant folding: extend at compile time
+		set_const(d,(uae_s32)(uae_s8)live.state[s].val);
+		return;
+	}
+
+	isrmw=(s==d);
+	CLOBBER_SE8;
+	if (!isrmw) {
+		s=readreg(s,1);
+		d=writereg(d,4);
+	}
+	else { /* If we try to lock this twice, with different sizes, we
+	are in trouble! */
+		s=d=rmw(s,4,1);	// single lock: read 1 byte, write all 4
+	}
+
+	raw_sign_extend_8_rr(d,s);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+MIDFUNC(2,zero_extend_16_rr,(W4 d, RR2 s))	// d = (uint32) zero-extended low 16 bits of s
+{
+	int isrmw;	// true when source and destination are the same virtual register
+
+	if (isconst(s)) {	// constant folding: extend at compile time
+		set_const(d,(uae_u32)(uae_u16)live.state[s].val);
+		return;
+	}
+
+	isrmw=(s==d);
+	CLOBBER_ZE16;
+	if (!isrmw) {
+		s=readreg(s,2);
+		d=writereg(d,4);
+	}
+	else { /* If we try to lock this twice, with different sizes, we
+	are in trouble! */
+		s=d=rmw(s,4,2);	// single lock: read 2 bytes, write all 4
+	}
+	raw_zero_extend_16_rr(d,s);
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+MIDFUNC(2,zero_extend_8_rr,(W4 d, RR1 s))	// d = (uint32) zero-extended low 8 bits of s
+{
+	int isrmw;	// true when source and destination are the same virtual register
+	if (isconst(s)) {	// constant folding: extend at compile time
+		set_const(d,(uae_u32)(uae_u8)live.state[s].val);
+		return;
+	}
+
+	isrmw=(s==d);
+	CLOBBER_ZE8;
+	if (!isrmw) {
+		s=readreg(s,1);
+		d=writereg(d,4);
+	}
+	else { /* If we try to lock this twice, with different sizes, we
+	are in trouble! */
+		s=d=rmw(s,4,1);	// single lock: read 1 byte, write all 4
+	}
+
+	raw_zero_extend_8_rr(d,s);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+MIDFUNC(2,mov_b_rr,(W1 d, RR1 s))
+{
+ if (d==s)
+ return;
+ if (isconst(s)) {
+ COMPCALL(mov_b_ri)(d,(uae_u8)live.state[s].val);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,1);
+ d=writereg(d,1);
+ raw_mov_b_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,mov_w_rr,(W2 d, RR2 s))
+{
+ if (d==s)
+ return;
+ if (isconst(s)) {
+ COMPCALL(mov_w_ri)(d,(uae_u16)live.state[s].val);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,2);
+ d=writereg(d,2);
+ raw_mov_w_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+/* read the long at the address contained in s+offset and store in d */
+MIDFUNC(3,mov_l_rR,(W4 d, RR4 s, IMM offset))
+{
+ if (isconst(s)) {
+ COMPCALL(mov_l_rm)(d,live.state[s].val+offset);
+ return;
+ }
+ CLOBBER_MOV;
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ raw_mov_l_rR(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+/* read the word at the address contained in s+offset and store in d */
+MIDFUNC(3,mov_w_rR,(W2 d, RR4 s, IMM offset))
+{
+ if (isconst(s)) {
+ COMPCALL(mov_w_rm)(d,live.state[s].val+offset);
+ return;
+ }
+ CLOBBER_MOV;
+ s=readreg(s,4);
+ d=writereg(d,2);
+
+ raw_mov_w_rR(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+/* read the long at the address contained in s+offset and store in d */
+MIDFUNC(3,mov_l_brR,(W4 d, RR4 s, IMM offset))
+{
+ int sreg=s;
+ if (isconst(s)) {
+ COMPCALL(mov_l_rm)(d,live.state[s].val+offset);
+ return;
+ }
+ CLOBBER_MOV;
+ s=readreg_offset(s,4);
+ offset+=get_offset(sreg);
+ d=writereg(d,4);
+
+ raw_mov_l_brR(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+/* read the word at the address contained in s+offset and store in d */
+MIDFUNC(3,mov_w_brR,(W2 d, RR4 s, IMM offset))
+{
+ int sreg=s;
+ if (isconst(s)) {
+ COMPCALL(mov_w_rm)(d,live.state[s].val+offset);
+ return;
+ }
+ CLOBBER_MOV;
+ remove_offset(d,-1);
+ s=readreg_offset(s,4);
+ offset+=get_offset(sreg);
+ d=writereg(d,2);
+
+ raw_mov_w_brR(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+/* read the word at the address contained in s+offset and store in d */
+MIDFUNC(3,mov_b_brR,(W1 d, RR4 s, IMM offset))
+{
+ int sreg=s;
+ if (isconst(s)) {
+ COMPCALL(mov_b_rm)(d,live.state[s].val+offset);
+ return;
+ }
+ CLOBBER_MOV;
+ remove_offset(d,-1);
+ s=readreg_offset(s,4);
+ offset+=get_offset(sreg);
+ d=writereg(d,1);
+
+ raw_mov_b_brR(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,mov_l_Ri,(RR4 d, IMM i, IMM offset))
+{
+ int dreg=d;
+ if (isconst(d)) {
+ COMPCALL(mov_l_mi)(live.state[d].val+offset,i);
+ return;
+ }
+
+ CLOBBER_MOV;
+ d=readreg_offset(d,4);
+ offset+=get_offset(dreg);
+ raw_mov_l_Ri(d,i,offset);
+ unlock2(d);
+}
+
+MIDFUNC(3,mov_w_Ri,(RR4 d, IMM i, IMM offset))
+{
+ int dreg=d;
+ if (isconst(d)) {
+ COMPCALL(mov_w_mi)(live.state[d].val+offset,i);
+ return;
+ }
+
+ CLOBBER_MOV;
+ d=readreg_offset(d,4);
+ offset+=get_offset(dreg);
+ raw_mov_w_Ri(d,i,offset);
+ unlock2(d);
+}
+
+/* Warning! OFFSET is byte sized only! */
+MIDFUNC(3,mov_l_Rr,(RR4 d, RR4 s, IMM offset))
+{
+ if (isconst(d)) {
+ COMPCALL(mov_l_mr)(live.state[d].val+offset,s);
+ return;
+ }
+ if (isconst(s)) {
+ COMPCALL(mov_l_Ri)(d,live.state[s].val,offset);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,4);
+ d=readreg(d,4);
+
+ raw_mov_l_Rr(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,mov_w_Rr,(RR4 d, RR2 s, IMM offset))
+{
+ if (isconst(d)) {
+ COMPCALL(mov_w_mr)(live.state[d].val+offset,s);
+ return;
+ }
+ if (isconst(s)) {
+ COMPCALL(mov_w_Ri)(d,(uae_u16)live.state[s].val,offset);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,2);
+ d=readreg(d,4);
+ raw_mov_w_Rr(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,lea_l_brr,(W4 d, RR4 s, IMM offset))
+{
+ if (isconst(s)) {
+ COMPCALL(mov_l_ri)(d,live.state[s].val+offset);
+ return;
+ }
+#if USE_OFFSET
+ if (d==s) {
+ add_offset(d,offset);
+ return;
+ }
+#endif
+ CLOBBER_LEA;
+ s=readreg(s,4);
+ d=writereg(d,4);
+ raw_lea_l_brr(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(5,lea_l_brr_indexed,(W4 d, RR4 s, RR4 index, IMM factor, IMM offset))
+{
+ if (!offset) {
+ COMPCALL(lea_l_rr_indexed)(d,s,index,factor);
+ return;
+ }
+ CLOBBER_LEA;
+ s=readreg(s,4);
+ index=readreg(index,4);
+ d=writereg(d,4);
+
+ raw_lea_l_brr_indexed(d,s,index,factor,offset);
+ unlock2(d);
+ unlock2(index);
+ unlock2(s);
+}
+
+MIDFUNC(4,lea_l_rr_indexed,(W4 d, RR4 s, RR4 index, IMM factor))
+{
+ CLOBBER_LEA;
+ s=readreg(s,4);
+ index=readreg(index,4);
+ d=writereg(d,4);
+
+ raw_lea_l_rr_indexed(d,s,index,factor);
+ unlock2(d);
+ unlock2(index);
+ unlock2(s);
+}
+
+/* write d to the long at the address contained in s+offset */
+MIDFUNC(3,mov_l_bRr,(RR4 d, RR4 s, IMM offset))
+{
+ int dreg=d;
+ if (isconst(d)) {
+ COMPCALL(mov_l_mr)(live.state[d].val+offset,s);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,4);
+ d=readreg_offset(d,4);
+ offset+=get_offset(dreg);
+
+ raw_mov_l_bRr(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+/* write the word at the address contained in s+offset and store in d */
+MIDFUNC(3,mov_w_bRr,(RR4 d, RR2 s, IMM offset))
+{
+ int dreg=d;
+
+ if (isconst(d)) {
+ COMPCALL(mov_w_mr)(live.state[d].val+offset,s);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,2);
+ d=readreg_offset(d,4);
+ offset+=get_offset(dreg);
+ raw_mov_w_bRr(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,mov_b_bRr,(RR4 d, RR1 s, IMM offset))
+{
+ int dreg=d;
+ if (isconst(d)) {
+ COMPCALL(mov_b_mr)(live.state[d].val+offset,s);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,1);
+ d=readreg_offset(d,4);
+ offset+=get_offset(dreg);
+ raw_mov_b_bRr(d,s,offset);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(1,mid_bswap_32,(RW4 r))
+{
+
+ if (isconst(r)) {
+ uae_u32 oldv=live.state[r].val;
+ live.state[r].val=reverse32(oldv);
+ return;
+ }
+
+ CLOBBER_SW32;
+ r=rmw(r,4,4);
+ raw_bswap_32(r);
+ unlock2(r);
+}
+
+MIDFUNC(1,mid_bswap_16,(RW2 r))
+{
+ if (isconst(r)) {
+ uae_u32 oldv=live.state[r].val;
+ live.state[r].val=((oldv>>8)&0xff) | ((oldv<<8)&0xff00) |
+ (oldv&0xffff0000);
+ return;
+ }
+
+ CLOBBER_SW16;
+ r=rmw(r,2,2);
+
+ raw_bswap_16(r);
+ unlock2(r);
+}
+
+MIDFUNC(2,mov_l_rr,(W4 d, RR4 s))	// register-to-register move via aliasing: d shares s's host register
+{
+	int olds;	// original virtual index of s, saved before remapping
+
+	if (d==s) { /* How pointless! */
+		return;
+	}
+	if (isconst(s)) {	// constant propagation: just copy the known value
+		COMPCALL(mov_l_ri)(d,live.state[s].val);
+		return;
+	}
+	olds=s;
+	disassociate(d);	// detach d from whatever host register it held before
+	s=readreg_offset(s,4);	// s is now a host register index
+	live.state[d].realreg=s;	// alias: d lives in the same host register as s
+	live.state[d].realind=live.nat[s].nholds;
+	live.state[d].val=live.state[olds].val;
+	live.state[d].validsize=4;
+	live.state[d].dirtysize=4;
+	set_status(d,DIRTY);
+
+	live.nat[s].holds[live.nat[s].nholds]=d;	// record d as an extra holder of the native register
+	live.nat[s].nholds++;
+	log_clobberreg(d);
+	D2(panicbug("Added %d to nreg %d(%d), now holds %d regs", d,s,live.state[d].realind,live.nat[s].nholds));
+	unlock2(s);
+}
+
+MIDFUNC(2,mov_l_mr,(IMM d, RR4 s))
+{
+ if (isconst(s)) {
+ COMPCALL(mov_l_mi)(d,live.state[s].val);
+ return;
+ }
+ CLOBBER_MOV;
+ s=readreg(s,4);
+
+ raw_mov_l_mr(d,s);
+ unlock2(s);
+}
+
+MIDFUNC(2,mov_w_mr,(IMM d, RR2 s))
+{
+ if (isconst(s)) {
+ COMPCALL(mov_w_mi)(d,(uae_u16)live.state[s].val);
+ return;
+ }
+ CLOBBER_MOV;
+ s=readreg(s,2);
+
+ raw_mov_w_mr(d,s);
+ unlock2(s);
+}
+
+MIDFUNC(2,mov_w_rm,(W2 d, IMM s))
+{
+ CLOBBER_MOV;
+ d=writereg(d,2);
+
+ raw_mov_w_rm(d,s);
+ unlock2(d);
+}
+
+MIDFUNC(2,mov_b_mr,(IMM d, RR1 s))
+{
+ if (isconst(s)) {
+ COMPCALL(mov_b_mi)(d,(uae_u8)live.state[s].val);
+ return;
+ }
+
+ CLOBBER_MOV;
+ s=readreg(s,1);
+
+ raw_mov_b_mr(d,s);
+ unlock2(s);
+}
+
+MIDFUNC(2,mov_b_rm,(W1 d, IMM s))
+{
+ CLOBBER_MOV;
+ d=writereg(d,1);
+
+ raw_mov_b_rm(d,s);
+ unlock2(d);
+}
+
+MIDFUNC(2,mov_l_ri,(W4 d, IMM s))
+{
+ set_const(d,s);
+ return;
+}
+
+MIDFUNC(2,mov_w_ri,(W2 d, IMM s))
+{
+ CLOBBER_MOV;
+ d=writereg(d,2);
+
+ raw_mov_w_ri(d,s);
+ unlock2(d);
+}
+
+MIDFUNC(2,mov_b_ri,(W1 d, IMM s))
+{
+ CLOBBER_MOV;
+ d=writereg(d,1);
+
+ raw_mov_b_ri(d,s);
+ unlock2(d);
+}
+
+MIDFUNC(2,test_l_ri,(RR4 d, IMM i))
+{
+ CLOBBER_TEST;
+ d=readreg(d,4);
+
+ raw_test_l_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,test_l_rr,(RR4 d, RR4 s))	// 32-bit TEST: set flags from d AND s, operands untouched
+{
+	CLOBBER_TEST;
+	d=readreg(d,4);
+	s=readreg(s,4);
+
+	raw_test_l_rr(d,s);	// stray duplicate ';' removed
+	unlock2(d);
+	unlock2(s);
+}
+
+MIDFUNC(2,test_w_rr,(RR2 d, RR2 s))
+{
+ CLOBBER_TEST;
+ d=readreg(d,2);
+ s=readreg(s,2);
+
+ raw_test_w_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,test_b_rr,(RR1 d, RR1 s))
+{
+ CLOBBER_TEST;
+ d=readreg(d,1);
+ s=readreg(s,1);
+
+ raw_test_b_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,and_l_ri,(RW4 d, IMM i))
+{
+ if (isconst(d) && !needflags) {
+ live.state[d].val &= i;
+ return;
+ }
+
+ CLOBBER_AND;
+ d=rmw(d,4,4);
+
+ raw_and_l_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,and_l,(RW4 d, RR4 s))
+{
+ CLOBBER_AND;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_and_l(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,and_w,(RW2 d, RR2 s))
+{
+ CLOBBER_AND;
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_and_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,and_b,(RW1 d, RR1 s))
+{
+ CLOBBER_AND;
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_and_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,or_l_ri,(RW4 d, IMM i))
+{
+ if (isconst(d) && !needflags) {
+ live.state[d].val|=i;
+ return;
+ }
+ CLOBBER_OR;
+ d=rmw(d,4,4);
+
+ raw_or_l_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,or_l,(RW4 d, RR4 s))
+{
+ if (isconst(d) && isconst(s) && !needflags) {
+ live.state[d].val|=live.state[s].val;
+ return;
+ }
+ CLOBBER_OR;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_or_l(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,or_w,(RW2 d, RR2 s))
+{
+ CLOBBER_OR;
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_or_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,or_b,(RW1 d, RR1 s))
+{
+ CLOBBER_OR;
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_or_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,adc_l,(RW4 d, RR4 s))
+{
+ CLOBBER_ADC;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_adc_l(d,s);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,adc_w,(RW2 d, RR2 s))
+{
+ CLOBBER_ADC;
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_adc_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,adc_b,(RW1 d, RR1 s))
+{
+ CLOBBER_ADC;
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_adc_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,add_l,(RW4 d, RR4 s))
+{
+ if (isconst(s)) {
+ COMPCALL(add_l_ri)(d,live.state[s].val);
+ return;
+ }
+
+ CLOBBER_ADD;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_add_l(d,s);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,add_w,(RW2 d, RR2 s))
+{
+ if (isconst(s)) {
+ COMPCALL(add_w_ri)(d,(uae_u16)live.state[s].val);
+ return;
+ }
+
+ CLOBBER_ADD;
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_add_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,add_b,(RW1 d, RR1 s))
+{
+ if (isconst(s)) {
+ COMPCALL(add_b_ri)(d,(uae_u8)live.state[s].val);
+ return;
+ }
+
+ CLOBBER_ADD;
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_add_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,sub_l_ri,(RW4 d, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ if (isconst(d) && !needflags) {
+ live.state[d].val-=i;
+ return;
+ }
+#if USE_OFFSET
+ if (!needflags) {
+ add_offset(d,-i);
+ return;
+ }
+#endif
+
+ CLOBBER_SUB;
+ d=rmw(d,4,4);
+
+ raw_sub_l_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,sub_w_ri,(RW2 d, IMM i))
+{
+ if (!i && !needflags)
+ return;
+
+ CLOBBER_SUB;
+ d=rmw(d,2,2);
+
+ raw_sub_w_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,sub_b_ri,(RW1 d, IMM i))
+{
+ if (!i && !needflags)
+ return;
+
+ CLOBBER_SUB;
+ d=rmw(d,1,1);
+
+ raw_sub_b_ri(d,i);
+
+ unlock2(d);
+}
+
+MIDFUNC(2,add_l_ri,(RW4 d, IMM i))
+{
+ if (!i && !needflags)
+ return;
+ if (isconst(d) && !needflags) {
+ live.state[d].val+=i;
+ return;
+ }
+#if USE_OFFSET
+ if (!needflags) {
+ add_offset(d,i);
+ return;
+ }
+#endif
+ CLOBBER_ADD;
+ d=rmw(d,4,4);
+ raw_add_l_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,add_w_ri,(RW2 d, IMM i))
+{
+ if (!i && !needflags)
+ return;
+
+ CLOBBER_ADD;
+ d=rmw(d,2,2);
+
+ raw_add_w_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,add_b_ri,(RW1 d, IMM i))
+{
+ if (!i && !needflags)
+ return;
+
+ CLOBBER_ADD;
+ d=rmw(d,1,1);
+
+ raw_add_b_ri(d,i);
+
+ unlock2(d);
+}
+
+MIDFUNC(2,sbb_l,(RW4 d, RR4 s))
+{
+ CLOBBER_SBB;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_sbb_l(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,sbb_w,(RW2 d, RR2 s))
+{
+ CLOBBER_SBB;
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_sbb_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,sbb_b,(RW1 d, RR1 s))
+{
+ CLOBBER_SBB;
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_sbb_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,sub_l,(RW4 d, RR4 s))
+{
+ if (isconst(s)) {
+ COMPCALL(sub_l_ri)(d,live.state[s].val);
+ return;
+ }
+
+ CLOBBER_SUB;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_sub_l(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,sub_w,(RW2 d, RR2 s))
+{
+ if (isconst(s)) {
+ COMPCALL(sub_w_ri)(d,(uae_u16)live.state[s].val);
+ return;
+ }
+
+ CLOBBER_SUB;
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_sub_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,sub_b,(RW1 d, RR1 s))
+{
+ if (isconst(s)) {
+ COMPCALL(sub_b_ri)(d,(uae_u8)live.state[s].val);
+ return;
+ }
+
+ CLOBBER_SUB;
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_sub_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,cmp_l,(RR4 d, RR4 s))
+{
+ CLOBBER_CMP;
+ s=readreg(s,4);
+ d=readreg(d,4);
+
+ raw_cmp_l(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,cmp_w,(RR2 d, RR2 s))
+{
+ CLOBBER_CMP;
+ s=readreg(s,2);
+ d=readreg(d,2);
+
+ raw_cmp_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,cmp_b,(RR1 d, RR1 s))
+{
+ CLOBBER_CMP;
+ s=readreg(s,1);
+ d=readreg(d,1);
+
+ raw_cmp_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,xor_l,(RW4 d, RR4 s))
+{
+ CLOBBER_XOR;
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_xor_l(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,xor_w,(RW2 d, RR2 s))
+{
+ CLOBBER_XOR;
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_xor_w(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,xor_b,(RW1 d, RR1 s))
+{
+ CLOBBER_XOR;
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_xor_b(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+#if defined(UAE)
+MIDFUNC(5,call_r_02,(RR4 r, RR4 in1, RR4 in2, IMM isize1, IMM isize2))	// indirect call through r, two register args, no result
+{
+	clobber_flags();	// the callee may trash the native flags
+	in1=readreg_specific(in1,isize1,REG_PAR1);	// pin arguments to the ABI parameter registers
+	in2=readreg_specific(in2,isize2,REG_PAR2);
+	r=readreg(r,4);
+	prepare_for_call_1();
+	unlock2(r);
+	unlock2(in1);
+	unlock2(in2);
+	prepare_for_call_2();
+	compemu_raw_call_r(r);
+}
+#endif
+
+#if defined(UAE)
+MIDFUNC(5,call_r_11,(W4 out1, RR4 r, RR4 in1, IMM osize, IMM isize))	// indirect call through r: one arg in, result bound to out1
+{
+	clobber_flags();	// the callee may trash the native flags
+
+	if (osize==4) {	// full-width result: old out1 contents are irrelevant
+		if (out1!=in1 && out1!=r) {
+			COMPCALL(forget_about)(out1);
+		}
+	}
+	else {	// partial-width result: flush out1 so untouched bytes survive
+		tomem_c(out1);
+	}
+
+	in1=readreg_specific(in1,isize,REG_PAR1);	// pin the argument to the ABI parameter register
+	r=readreg(r,4);
+
+	prepare_for_call_1();
+	unlock2(in1);
+	unlock2(r);
+
+	prepare_for_call_2();
+
+	compemu_raw_call_r(r);
+
+	live.nat[REG_RESULT].holds[0]=out1;	// bind out1 to the ABI result register
+	live.nat[REG_RESULT].nholds=1;
+	live.nat[REG_RESULT].touched=touchcnt++;
+
+	live.state[out1].realreg=REG_RESULT;
+	live.state[out1].realind=0;
+	live.state[out1].val=0;
+	live.state[out1].validsize=osize;
+	live.state[out1].dirtysize=osize;
+	set_status(out1,DIRTY);
+}
+#endif
+
+MIDFUNC(0,nop,(void))
+{
+ raw_emit_nop();
+}
+
+/* forget_about() takes a mid-layer register */
+MIDFUNC(1,forget_about,(W4 r))
+{
+ if (isinreg(r))
+ disassociate(r);
+ live.state[r].val=0;
+ set_status(r,UNDEF);
+}
+
+MIDFUNC(1,f_forget_about,(FW r))
+{
+ if (f_isinreg(r))
+ f_disassociate(r);
+ live.fate[r].status=UNDEF;
+}
+
+// ARM optimized functions
+
+MIDFUNC(2,arm_ADD_l,(RW4 d, RR4 s))
+{
+ if (isconst(s)) {
+ COMPCALL(arm_ADD_l_ri)(d,live.state[s].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_ADD_l_rr(d,s);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_ADD_l_ri,(RW4 d, IMM i))	// d += i, flags not modelled (ARM fast path)
+{
+	if (!i) return;	// adding zero: nothing to do
+	if (isconst(d)) {	// constant folding
+		live.state[d].val+=i;
+		return;
+	}
+#if USE_OFFSET
+	add_offset(d,i);	// NOTE(review): unconditional return here makes the code below dead when USE_OFFSET is set -- confirm intended
+	return;
+#endif
+	d=rmw(d,4,4);
+
+	raw_LDR_l_ri(REG_WORK1, i);	// load the immediate into a scratch register, then add
+	raw_ADD_l_rr(d,REG_WORK1);
+	unlock2(d);
+}
+
+MIDFUNC(2,arm_ADD_l_ri8,(RW4 d, IMM i))
+{
+ if (!i) return;
+ if (isconst(d)) {
+ live.state[d].val+=i;
+ return;
+ }
+#if USE_OFFSET
+ add_offset(d,i);
+ return;
+#endif
+ d=rmw(d,4,4);
+
+ raw_ADD_l_rri(d,d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,arm_SUB_l_ri8,(RW4 d, IMM i))
+{
+ if (!i) return;
+ if (isconst(d)) {
+ live.state[d].val-=i;
+ return;
+ }
+#if USE_OFFSET
+ add_offset(d,-i);
+ return;
+#endif
+ d=rmw(d,4,4);
+
+ raw_SUB_l_rri(d,d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,arm_AND_l,(RW4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_AND_l_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_AND_w,(RW2 d, RR2 s))
+{
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_AND_w_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_AND_b,(RW1 d, RR1 s))
+{
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_AND_b_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_AND_l_ri8,(RW4 d, IMM i))
+{
+ if (isconst(d)) {
+ live.state[d].val &= i;
+ return;
+ }
+
+ d=rmw(d,4,4);
+
+ raw_AND_l_ri(d,i);
+ unlock2(d);
+}
+
+MIDFUNC(2,arm_EOR_b,(RW1 d, RR1 s))
+{
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_EOR_b_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_EOR_l,(RW4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_EOR_l_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_EOR_w,(RW2 d, RR2 s))
+{
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_EOR_w_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_ORR_b,(RW1 d, RR1 s))
+{
+ s=readreg(s,1);
+ d=rmw(d,1,1);
+
+ raw_ORR_b_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_ORR_l,(RW4 d, RR4 s))
+{
+ if (isconst(d) && isconst(s)) {
+ live.state[d].val|=live.state[s].val;
+ return;
+ }
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ raw_ORR_l_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_ORR_w,(RW2 d, RR2 s))
+{
+ s=readreg(s,2);
+ d=rmw(d,2,2);
+
+ raw_ORR_w_rr(d,s);
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,arm_ROR_l_ri8,(RW4 r, IMM i))
+{
+ if (!i)
+ return;
+
+ r=rmw(r,4,4);
+ raw_ROR_l_ri(r,i);
+ unlock2(r);
+}
+
+// Other
+static inline void flush_cpu_icache(void *start, void *stop)	// sync I-cache with freshly emitted code in [start, stop)
+{
+
+	register void *_beg __asm ("a1") = start;	// syscall arguments pinned to a1-a3
+	register void *_end __asm ("a2") = stop;
+	register void *_flg __asm ("a3") = 0;
+#ifdef __ARM_EABI__
+	register unsigned long _scno __asm ("r7") = 0xf0002;	// __ARM_NR_cacheflush, syscall number in r7 under EABI
+	__asm __volatile ("swi 0x0 @ sys_cacheflush"
+		: "=r" (_beg)
+		: "0" (_beg), "r" (_end), "r" (_flg), "r" (_scno));
+#else
+	__asm __volatile ("swi 0x9f0002 @ sys_cacheflush"
+		: "=r" (_beg)
+		: "0" (_beg), "r" (_end), "r" (_flg));
+#endif
+}
+
+static inline void write_jmp_target(uae_u32* jmpaddr, cpuop_func* a) {	// patch a 32-bit jump-target literal in emitted code
+	*(jmpaddr) = (uae_u32) a;
+	flush_cpu_icache((void *) jmpaddr, (void *) &jmpaddr[1]);	// keep the I-cache coherent with the patched word
+}
+
+static inline void emit_jmp_target(uae_u32 a) {	// emit a jump target as a raw 32-bit literal into the code stream
+	emit_long((uae_u32) a);
+}
--- /dev/null
+/*
+ * compiler/compemu_midfunc_arm.h - Native MIDFUNCS for ARM
+ *
+ * Copyright (c) 2014 Jens Heitmann of ARAnyM dev team (see AUTHORS)
+ *
+ * Inspired by Christian Bauer's Basilisk II
+ *
+ * Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
+ *
+ * Adaptation for Basilisk II and improvements, copyright 2000-2002
+ * Gwenole Beauchesne
+ *
+ * Basilisk II (C) 1997-2002 Christian Bauer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Note:
+ * File is included by compemu.h
+ *
+ */
+
+// Arm optimized midfunc
+DECLARE_MIDFUNC(arm_ADD_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(arm_ADD_l_ri(RW4 d, IMM i));
+DECLARE_MIDFUNC(arm_ADD_l_ri8(RW4 d, IMM i));
+DECLARE_MIDFUNC(arm_SUB_l_ri8(RW4 d, IMM i));
+DECLARE_MIDFUNC(arm_AND_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(arm_AND_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(arm_AND_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(arm_AND_l_ri8(RW4 d, IMM i));
+DECLARE_MIDFUNC(arm_EOR_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(arm_EOR_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(arm_EOR_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(arm_ORR_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(arm_ORR_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(arm_ORR_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(arm_ROR_l_ri8(RW4 r, IMM i));
+
+// Emulated midfunc
+DECLARE_MIDFUNC(bt_l_ri(RR4 r, IMM i));
+DECLARE_MIDFUNC(bt_l_rr(RR4 r, RR4 b));
+DECLARE_MIDFUNC(btc_l_rr(RW4 r, RR4 b));
+DECLARE_MIDFUNC(bts_l_rr(RW4 r, RR4 b));
+DECLARE_MIDFUNC(btr_l_rr(RW4 r, RR4 b));
+DECLARE_MIDFUNC(mov_l_rm(W4 d, IMM s));
+DECLARE_MIDFUNC(mov_l_rm_indexed(W4 d, IMM base, RR4 index, IMM factor));
+DECLARE_MIDFUNC(mov_l_mi(IMM d, IMM s));
+DECLARE_MIDFUNC(mov_w_mi(IMM d, IMM s));
+DECLARE_MIDFUNC(mov_b_mi(IMM d, IMM s));
+DECLARE_MIDFUNC(rol_b_ri(RW1 r, IMM i));
+DECLARE_MIDFUNC(rol_w_ri(RW2 r, IMM i));
+DECLARE_MIDFUNC(rol_l_rr(RW4 d, RR1 r));
+DECLARE_MIDFUNC(rol_w_rr(RW2 d, RR1 r));
+DECLARE_MIDFUNC(rol_b_rr(RW1 d, RR1 r));
+DECLARE_MIDFUNC(rol_l_ri(RW4 r, IMM i));
+DECLARE_MIDFUNC(shll_l_rr(RW4 d, RR1 r));
+DECLARE_MIDFUNC(shll_w_rr(RW2 d, RR1 r));
+DECLARE_MIDFUNC(shll_b_rr(RW1 d, RR1 r));
+DECLARE_MIDFUNC(ror_b_ri(RR1 r, IMM i));
+DECLARE_MIDFUNC(ror_w_ri(RR2 r, IMM i));
+DECLARE_MIDFUNC(ror_l_ri(RR4 r, IMM i));
+DECLARE_MIDFUNC(ror_l_rr(RR4 d, RR1 r));
+DECLARE_MIDFUNC(ror_w_rr(RR2 d, RR1 r));
+DECLARE_MIDFUNC(ror_b_rr(RR1 d, RR1 r));
+DECLARE_MIDFUNC(shrl_l_rr(RW4 d, RR1 r));
+DECLARE_MIDFUNC(shrl_w_rr(RW2 d, RR1 r));
+DECLARE_MIDFUNC(shrl_b_rr(RW1 d, RR1 r));
+DECLARE_MIDFUNC(shra_l_rr(RW4 d, RR1 r));
+DECLARE_MIDFUNC(shra_w_rr(RW2 d, RR1 r));
+DECLARE_MIDFUNC(shra_b_rr(RW1 d, RR1 r));
+DECLARE_MIDFUNC(shll_l_ri(RW4 r, IMM i));
+DECLARE_MIDFUNC(shll_w_ri(RW2 r, IMM i));
+DECLARE_MIDFUNC(shll_b_ri(RW1 r, IMM i));
+DECLARE_MIDFUNC(shrl_l_ri(RW4 r, IMM i));
+DECLARE_MIDFUNC(shrl_w_ri(RW2 r, IMM i));
+DECLARE_MIDFUNC(shrl_b_ri(RW1 r, IMM i));
+DECLARE_MIDFUNC(shra_l_ri(RW4 r, IMM i));
+DECLARE_MIDFUNC(shra_w_ri(RW2 r, IMM i));
+DECLARE_MIDFUNC(shra_b_ri(RW1 r, IMM i));
+DECLARE_MIDFUNC(setcc(W1 d, IMM cc));
+DECLARE_MIDFUNC(setcc_m(IMM d, IMM cc));
+DECLARE_MIDFUNC(cmov_l_rr(RW4 d, RR4 s, IMM cc));
+DECLARE_MIDFUNC(bsf_l_rr(W4 d, RR4 s));
+DECLARE_MIDFUNC(pop_l(W4 d));
+DECLARE_MIDFUNC(push_l(RR4 s));
+DECLARE_MIDFUNC(sign_extend_16_rr(W4 d, RR2 s));
+DECLARE_MIDFUNC(sign_extend_8_rr(W4 d, RR1 s));
+DECLARE_MIDFUNC(zero_extend_16_rr(W4 d, RR2 s));
+DECLARE_MIDFUNC(zero_extend_8_rr(W4 d, RR1 s));
+DECLARE_MIDFUNC(simulate_bsf(W4 tmp, RW4 s));
+DECLARE_MIDFUNC(imul_64_32(RW4 d, RW4 s));
+DECLARE_MIDFUNC(mul_64_32(RW4 d, RW4 s));
+DECLARE_MIDFUNC(imul_32_32(RW4 d, RR4 s));
+DECLARE_MIDFUNC(mov_b_rr(W1 d, RR1 s));
+DECLARE_MIDFUNC(mov_w_rr(W2 d, RR2 s));
+DECLARE_MIDFUNC(mov_l_rR(W4 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(mov_w_rR(W2 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(mov_l_brR(W4 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(mov_w_brR(W2 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(mov_b_brR(W1 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(mov_l_Ri(RR4 d, IMM i, IMM offset));
+DECLARE_MIDFUNC(mov_w_Ri(RR4 d, IMM i, IMM offset));
+DECLARE_MIDFUNC(mov_l_Rr(RR4 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(mov_w_Rr(RR4 d, RR2 s, IMM offset));
+DECLARE_MIDFUNC(lea_l_brr(W4 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(lea_l_brr_indexed(W4 d, RR4 s, RR4 index, IMM factor, IMM offset));
+DECLARE_MIDFUNC(lea_l_rr_indexed(W4 d, RR4 s, RR4 index, IMM factor));
+DECLARE_MIDFUNC(mov_l_bRr(RR4 d, RR4 s, IMM offset));
+DECLARE_MIDFUNC(mov_w_bRr(RR4 d, RR2 s, IMM offset));
+DECLARE_MIDFUNC(mov_b_bRr(RR4 d, RR1 s, IMM offset));
+DECLARE_MIDFUNC(mid_bswap_32(RW4 r));
+DECLARE_MIDFUNC(mid_bswap_16(RW2 r));
+DECLARE_MIDFUNC(mov_l_rr(W4 d, RR4 s));
+DECLARE_MIDFUNC(mov_l_mr(IMM d, RR4 s));
+DECLARE_MIDFUNC(mov_w_mr(IMM d, RR2 s));
+DECLARE_MIDFUNC(mov_w_rm(W2 d, IMM s));
+DECLARE_MIDFUNC(mov_b_mr(IMM d, RR1 s));
+DECLARE_MIDFUNC(mov_b_rm(W1 d, IMM s));
+DECLARE_MIDFUNC(mov_l_ri(W4 d, IMM s));
+DECLARE_MIDFUNC(mov_w_ri(W2 d, IMM s));
+DECLARE_MIDFUNC(mov_b_ri(W1 d, IMM s));
+DECLARE_MIDFUNC(test_l_ri(RR4 d, IMM i));
+DECLARE_MIDFUNC(test_l_rr(RR4 d, RR4 s));
+DECLARE_MIDFUNC(test_w_rr(RR2 d, RR2 s));
+DECLARE_MIDFUNC(test_b_rr(RR1 d, RR1 s));
+DECLARE_MIDFUNC(and_l_ri(RW4 d, IMM i));
+DECLARE_MIDFUNC(and_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(and_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(and_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(or_l_ri(RW4 d, IMM i));
+DECLARE_MIDFUNC(or_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(or_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(or_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(adc_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(adc_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(adc_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(add_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(add_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(add_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(sub_l_ri(RW4 d, IMM i));
+DECLARE_MIDFUNC(sub_w_ri(RW2 d, IMM i));
+DECLARE_MIDFUNC(sub_b_ri(RW1 d, IMM i));
+DECLARE_MIDFUNC(add_l_ri(RW4 d, IMM i));
+DECLARE_MIDFUNC(add_w_ri(RW2 d, IMM i));
+DECLARE_MIDFUNC(add_b_ri(RW1 d, IMM i));
+DECLARE_MIDFUNC(sbb_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(sbb_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(sbb_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(sub_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(sub_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(sub_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(cmp_l(RR4 d, RR4 s));
+DECLARE_MIDFUNC(cmp_w(RR2 d, RR2 s));
+DECLARE_MIDFUNC(cmp_b(RR1 d, RR1 s));
+DECLARE_MIDFUNC(xor_l(RW4 d, RR4 s));
+DECLARE_MIDFUNC(xor_w(RW2 d, RR2 s));
+DECLARE_MIDFUNC(xor_b(RW1 d, RR1 s));
+DECLARE_MIDFUNC(call_r_02(RR4 r, RR4 in1, RR4 in2, IMM isize1, IMM isize2));
+DECLARE_MIDFUNC(call_r_11(W4 out1, RR4 r, RR4 in1, IMM osize, IMM isize));
+DECLARE_MIDFUNC(live_flags(void));
+DECLARE_MIDFUNC(dont_care_flags(void));
+DECLARE_MIDFUNC(duplicate_carry(void));
+DECLARE_MIDFUNC(restore_carry(void));
+DECLARE_MIDFUNC(start_needflags(void));
+DECLARE_MIDFUNC(end_needflags(void));
+DECLARE_MIDFUNC(make_flags_live(void));
+DECLARE_MIDFUNC(forget_about(W4 r));
+DECLARE_MIDFUNC(nop(void));
+
+DECLARE_MIDFUNC(f_forget_about(FW r));
+
+
+
+
--- /dev/null
+/*
+ * compiler/compemu_midfunc_arm.cpp - Native MIDFUNCS for ARM (JIT v2)
+ *
+ * Copyright (c) 2014 Jens Heitmann of ARAnyM dev team (see AUTHORS)
+ *
+ * Inspired by Christian Bauer's Basilisk II
+ *
+ * Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
+ *
+ * Adaptation for Basilisk II and improvements, copyright 2000-2002
+ * Gwenole Beauchesne
+ *
+ * Basilisk II (C) 1997-2002 Christian Bauer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Note:
+ * File is included by compemu_support.cpp
+ *
+ */
+
+// Lookup table: 68k CCR nibble (xNZVC low 4 bits, i.e. C=1, V=2, Z=4, N=8)
+// -> the corresponding set of native ARM CPSR flag bits.  Indexed by the
+// 68k flag combination; X is handled separately (FLAGX byte in memory).
+const uae_u32 ARM_CCR_MAP[] = { 0, ARM_C_FLAG, // 1 C
+		ARM_V_FLAG, // 2 V
+		ARM_C_FLAG | ARM_V_FLAG, // 3 VC
+		ARM_Z_FLAG, // 4 Z
+		ARM_Z_FLAG | ARM_C_FLAG, // 5 ZC
+		ARM_Z_FLAG | ARM_V_FLAG, // 6 ZV
+		ARM_Z_FLAG | ARM_C_FLAG | ARM_V_FLAG, // 7 ZVC
+		ARM_N_FLAG, // 8 N
+		ARM_N_FLAG | ARM_C_FLAG, // 9 NC
+		ARM_N_FLAG | ARM_V_FLAG, // 10 NV
+		ARM_N_FLAG | ARM_C_FLAG | ARM_V_FLAG, // 11 NVC
+		ARM_N_FLAG | ARM_Z_FLAG, // 12 NZ
+		ARM_N_FLAG | ARM_Z_FLAG | ARM_C_FLAG, // 13 NZC
+		ARM_N_FLAG | ARM_Z_FLAG | ARM_V_FLAG, // 14 NZV
+		ARM_N_FLAG | ARM_Z_FLAG | ARM_C_FLAG | ARM_V_FLAG, // 15 NZVC
+	};
+
+// First we start with some helper functions (may be moved to codegen_arm)
+// Emit: load the zero-extended 8-bit immediate v into register r.
+static inline void UNSIGNED8_IMM_2_REG(W4 r, IMM v) {
+	MOV_ri8(r, (uint8) v);
+}
+
+// Emit: load the sign-extended 8-bit immediate v into register r.
+// Negative values use MVN with the bitwise complement so the top 24 bits
+// come out all-ones.
+static inline void SIGNED8_IMM_2_REG(W4 r, IMM v) {
+	if (v & 0x80) {
+		MVN_ri8(r, (uint8) ~v);
+	} else {
+		MOV_ri8(r, (uint8) v);
+	}
+}
+
+// Emit: load the zero-extended 16-bit immediate v into r, built from two
+// 8-bit immediates (low byte, then high byte rotated into position).
+static inline void UNSIGNED16_IMM_2_REG(W4 r, IMM v) {
+	MOV_ri8(r, (uint8) v);
+	ORR_rri8RORi(r, r, (uint8)(v >> 8), 24);
+}
+
+// Emit: load the sign-extended 16-bit immediate v into r.  ARMv6 builds the
+// unsigned value then SXTHs it; pre-v6 assembles the value shifted into the
+// top halfword and arithmetic-shifts it back down.
+static inline void SIGNED16_IMM_2_REG(W4 r, IMM v) {
+#if defined(ARMV6_ASSEMBLY)
+	MOV_ri8(r, (uint8) v);
+	ORR_rri8RORi(r, r, (uint8)(v >> 8), 24);
+	SXTH_rr(r, r);
+#else
+	MOV_ri8(r, (uint8)(v << 16));
+	ORR_rri8RORi(r, r, (uint8)(v >> 8), 8);
+	ASR_rri(r, r, 16);
+#endif
+}
+
+// Emit: d = zero-extended low byte of s (UXTB on ARMv6, shift pair before).
+static inline void UNSIGNED8_REG_2_REG(W4 d, RR4 s) {
+#if defined(ARMV6_ASSEMBLY)
+	UXTB_rr(d, s);
+#else
+	ROR_rri(d, s, 8);
+	LSR_rri(d, d, 24);
+#endif
+}
+
+// Emit: d = sign-extended low byte of s (SXTB on ARMv6, shift pair before).
+static inline void SIGNED8_REG_2_REG(W4 d, RR4 s) {
+#if defined(ARMV6_ASSEMBLY)
+	SXTB_rr(d, s);
+#else
+	ROR_rri(d, s, 8);
+	ASR_rri(d, d, 24);
+#endif
+}
+
+// Emit: d = zero-extended low halfword of s.
+static inline void UNSIGNED16_REG_2_REG(W4 d, RR4 s) {
+#if defined(ARMV6_ASSEMBLY)
+	UXTH_rr(d, s);
+#else
+	LSL_rri(d, s, 16);
+	LSR_rri(d, d, 16);
+#endif
+}
+
+// Emit: d = sign-extended low halfword of s.
+static inline void SIGNED16_REG_2_REG(W4 d, RR4 s) {
+#if defined(ARMV6_ASSEMBLY)
+	SXTH_rr(d, s);
+#else
+	LSL_rri(d, s, 16);
+	ASR_rri(d, d, 16);
+#endif
+}
+
+#define ZERO_EXTEND_8_REG_2_REG(d,s) UNSIGNED8_REG_2_REG(d,s)
+#define ZERO_EXTEND_16_REG_2_REG(d,s) UNSIGNED16_REG_2_REG(d,s)
+#define SIGN_EXTEND_8_REG_2_REG(d,s) SIGNED8_REG_2_REG(d,s)
+#define SIGN_EXTEND_16_REG_2_REG(d,s) SIGNED16_REG_2_REG(d,s)
+
+// Emit code that loads the emulated X flag (FLAGX register) and writes its
+// INVERSE into the native C flag: FLAGX==1 -> C cleared, else C set.
+// Used where the generated code models 68k borrow via inverted ARM carry.
+MIDFUNC(0,restore_inverted_carry,(void))
+{
+	RR4 r=readreg(FLAGX,4);
+	MRS_CPSR(REG_WORK1);
+	TEQ_ri(r,1);
+	CC_BIC_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	MSR_CPSRf_r(REG_WORK1);
+	unlock2(r);
+}
+
+/*
+ * ADD
+ * Operand Syntax: <ea>, Dn
+ * Dn, <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set the same as the carry bit.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if an overflow is generated. Cleared otherwise.
+ * C Set if a carry is generated. Cleared otherwise.
+ *
+ */
+// ADD immediate, no flags needed: d = s + v.  Constant-folds when s is
+// known at compile time (no code emitted at all).
+MIDFUNC(3,jnf_ADD_imm,(W4 d, RR4 s, IMM v))
+{
+	if (isconst(s)) {
+		set_const(d,live.state[s].val+v);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	compemu_raw_mov_l_ri(REG_WORK1, v);
+	ADD_rrr(d,s,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ADD register, no flags needed: d = s + v.  Falls back to the immediate
+// variant when v is a known constant.
+MIDFUNC(3,jnf_ADD,(W4 d, RR4 s, RR4 v))
+{
+	if (isconst(v)) {
+		COMPCALL(jnf_ADD_imm)(d,s,live.state[v].val);
+		return;
+	}
+
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	ADD_rrr(d,s,v);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+// Byte ADD with flags: sign-extend both operands to 32 bits so the native
+// ADDS sets N/Z/V/C exactly as the 8-bit 68k ADD would.
+MIDFUNC(3,jff_ADD_b_imm,(W4 d, RR1 s, IMM v))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_IMM_2_REG(REG_WORK2, (uint8)v);
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	ADDS_rrr(d,REG_WORK1,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// Byte ADD with flags, register operand; constant v routes through the
+// immediate variant.
+MIDFUNC(3,jff_ADD_b,(W4 d, RR1 s, RR1 v))
+{
+	if (isconst(v)) {
+		COMPCALL(jff_ADD_b_imm)(d,s,live.state[v].val);
+		return;
+	}
+
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	SIGNED8_REG_2_REG(REG_WORK2, v);
+	ADDS_rrr(d,REG_WORK1,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+// Word ADD with flags: operands sign-extended to 32 bits before ADDS.
+MIDFUNC(3,jff_ADD_w_imm,(W4 d, RR2 s, IMM v))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_IMM_2_REG(REG_WORK2, (uint16)v);
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	ADDS_rrr(d,REG_WORK1,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// Word ADD with flags, register operand; constant v uses the imm variant.
+MIDFUNC(3,jff_ADD_w,(W4 d, RR2 s, RR2 v))
+{
+	if (isconst(v)) {
+		COMPCALL(jff_ADD_w_imm)(d,s,live.state[v].val);
+		return;
+	}
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	SIGNED16_REG_2_REG(REG_WORK2, v);
+	ADDS_rrr(d,REG_WORK1,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+// Long ADD with flags: native 32-bit ADDS matches 68k semantics directly.
+MIDFUNC(3,jff_ADD_l_imm,(W4 d, RR4 s, IMM v))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	compemu_raw_mov_l_ri(REG_WORK2, v);
+	ADDS_rrr(d,s,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// Long ADD with flags, register operand; constant v uses the imm variant.
+MIDFUNC(3,jff_ADD_l,(W4 d, RR4 s, RR4 v))
+{
+	if (isconst(v)) {
+		COMPCALL(jff_ADD_l_imm)(d,s,live.state[v].val);
+		return;
+	}
+
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	ADDS_rrr(d,s,v);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+/*
+ * ADDA
+ * Operand Syntax: <ea>, An
+ *
+ * Operand Size: 16,32
+ *
+ * Flags: Not affected.
+ *
+ */
+// ADDA byte: d += sign-extended s; address-register adds never touch flags.
+MIDFUNC(2,jnf_ADDA_b,(W4 d, RR1 s))
+{
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	SIGNED8_REG_2_REG(REG_WORK1,s);
+	ADD_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ADDA word: d += sign-extended s; flags untouched.
+MIDFUNC(2,jnf_ADDA_w,(W4 d, RR2 s))
+{
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	SIGNED16_REG_2_REG(REG_WORK1,s);
+	ADD_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ADDA long: d += s; flags untouched.
+MIDFUNC(2,jnf_ADDA_l,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	ADD_rrr(d,d,s);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * ADDX
+ * Operand Syntax: Dy, Dx
+ * -(Ay), -(Ax)
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set the same as the carry bit.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Cleared if the result is nonzero; unchanged otherwise.
+ * V Set if an overflow is generated. Cleared otherwise.
+ * C Set if a carry is generated. Cleared otherwise.
+ *
+ * Attention: Z is cleared only if the result is nonzero. Unchanged otherwise
+ *
+ */
+// ADDX without flag output: d = s + v + C.  Caller must have loaded the
+// emulated X flag into the native carry beforehand.
+MIDFUNC(3,jnf_ADDX,(W4 d, RR4 s, RR4 v))
+{
+	s=readreg(s,4);
+	v=readreg(v,4);
+	d=writereg(d,4);
+
+	ADC_rrr(d,s,v);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+// ADDX byte with flags.  68k ADDX leaves Z unchanged when the result is
+// zero, so a mask is prepared from the incoming Z flag (all-ones if Z was
+// clear, all-ones-minus-Z if set), saved across ADCS, then ANDed into CPSR.
+MIDFUNC(3,jff_ADDX_b,(W4 d, RR1 s, RR1 v))
+{
+	s=readreg(s,4);
+	v=readreg(v,4);
+	d=writereg(d,4);
+
+	CC_MVN_ri(NATIVE_CC_EQ, REG_WORK2, 0);
+	CC_MVN_ri(NATIVE_CC_NE, REG_WORK2, ARM_Z_FLAG);
+	PUSH(REG_WORK2);
+
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	SIGNED8_REG_2_REG(REG_WORK2, v);
+	ADCS_rrr(d,REG_WORK1,REG_WORK2);
+
+	POP(REG_WORK2);
+	MRS_CPSR(REG_WORK1);
+	AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+// ADDX word with flags; same Z-preservation trick as jff_ADDX_b, with
+// operands sign-extended from 16 bits.
+MIDFUNC(3,jff_ADDX_w,(W4 d, RR2 s, RR2 v))
+{
+	s=readreg(s,4);
+	v=readreg(v,4);
+	d=writereg(d,4);
+
+	CC_MVN_ri(NATIVE_CC_EQ, REG_WORK2, 0);
+	CC_MVN_ri(NATIVE_CC_NE, REG_WORK2, ARM_Z_FLAG);
+	PUSH(REG_WORK2);
+
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	SIGNED16_REG_2_REG(REG_WORK2, v);
+	ADCS_rrr(d,REG_WORK1,REG_WORK2);
+
+	POP(REG_WORK2);
+	MRS_CPSR(REG_WORK1);
+	AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+// ADDX long with flags; same Z-preservation trick, full 32-bit operands.
+MIDFUNC(3,jff_ADDX_l,(W4 d, RR4 s, RR4 v))
+{
+	s=readreg(s,4);
+	v=readreg(v,4);
+	d=writereg(d,4);
+
+	CC_MVN_ri(NATIVE_CC_EQ, REG_WORK2, 0);
+	CC_MVN_ri(NATIVE_CC_NE, REG_WORK2, ARM_Z_FLAG);
+	PUSH(REG_WORK2);
+
+	ADCS_rrr(d,s,v);
+
+	POP(REG_WORK2);
+	MRS_CPSR(REG_WORK1);
+	AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(v);
+}
+
+/*
+ * ANDI
+ * Operand Syntax: #<data>, CCR
+ *
+ * Operand Size: 8
+ *
+ * X Cleared if bit 4 of immediate operand is zero. Unchanged otherwise.
+ * N Cleared if bit 3 of immediate operand is zero. Unchanged otherwise.
+ * Z Cleared if bit 2 of immediate operand is zero. Unchanged otherwise.
+ * V Cleared if bit 1 of immediate operand is zero. Unchanged otherwise.
+ * C Cleared if bit 0 of immediate operand is zero. Unchanged otherwise.
+ *
+ */
+/*
+ * ANDI to CCR: AND the native CPSR flag bits with the (pre-translated)
+ * mask s.  x is bit 4 (X) of the 68k immediate; when it is zero the
+ * emulated X flag byte in memory is cleared as well.
+ *
+ * Fix: the MIDFUNC arity was declared as 1 although the function takes
+ * two arguments (IMM s, IMM x) — made consistent with the other
+ * two-argument MIDFUNCs in this file.
+ */
+MIDFUNC(2,jff_ANDSR,(IMM s, IMM x))
+{
+	MRS_CPSR(REG_WORK1);
+	AND_rri(REG_WORK1, REG_WORK1, s);
+	MSR_CPSRf_r(REG_WORK1);
+
+	if (!x) {
+		// X cleared by the immediate: zero the FLAGX byte in memory too.
+		compemu_raw_mov_l_ri(REG_WORK1, (uintptr)live.state[FLAGX].mem);
+		MOV_ri(REG_WORK2, 0);
+		STRB_rR(REG_WORK2, REG_WORK1);
+	}
+}
+
+/*
+ * AND
+ * Operand Syntax: <ea>, Dn
+ * Dn, <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set.
+ * Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+// AND, no flags needed: d = s & v, constant-folded when both are known.
+MIDFUNC(3,jnf_AND,(W4 d, RR4 s, RR4 v))
+{
+	if (isconst(s) && isconst(v)) {
+		set_const(d,
+				live.state[s].val&live.state[v].val);
+		return;
+	}
+
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	AND_rrr(d, s, v);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+// AND byte with flags: V and C cleared up front (MSR 0), then ANDS of the
+// sign-extended operands sets N and Z.
+MIDFUNC(3,jff_AND_b,(W4 d, RR1 s, RR1 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	SIGNED8_REG_2_REG(REG_WORK2, v);
+	MSR_CPSRf_i(0);
+	ANDS_rrr(d, REG_WORK1, REG_WORK2);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+// AND word with flags; same scheme as the byte variant, 16-bit extension.
+MIDFUNC(3,jff_AND_w,(W4 d, RR2 s, RR2 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	SIGNED16_REG_2_REG(REG_WORK2, v);
+	MSR_CPSRf_i(0);
+	ANDS_rrr(d, REG_WORK1, REG_WORK2);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+// AND long with flags; clears V/C then ANDS sets N/Z from the result.
+MIDFUNC(3,jff_AND_l,(W4 d, RR4 s, RR4 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	ANDS_rrr(d, s,v);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * ASL
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ * <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set according to the last bit shifted out of the operand. Unaffected for a shift count of zero.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if the most significant bit is changed at any time during the shift operation. Cleared otherwise.
+ * C Set according to the last bit shifted out of the operand. Unaffected for a shift count of zero.
+ *
+ */
+// ASL byte by immediate, with flags.  Value is shifted into the top byte so
+// LSLS produces the correct N/Z/C; V (set if the sign bit changes at any
+// point of the shift) is computed manually by comparing the top i+1 bits.
+MIDFUNC(3,jff_ASL_b_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d, s, 24);
+	if (i) {
+		MRS_CPSR(REG_WORK1); // store flags
+		BIC_rri(REG_WORK1, REG_WORK1, ARM_N_FLAG|ARM_Z_FLAG|ARM_V_FLAG);// clear N, Z and V; C is kept
+		PUSH(REG_WORK1);
+
+		// Calculate V Flag: V set unless the top i+1 bits are all equal
+		MVN_ri(REG_WORK2, 0);
+		LSR_rri(REG_WORK2, REG_WORK2, (i+1));
+		MVN_rr(REG_WORK2, REG_WORK2);
+		AND_rrr(REG_WORK1, d, REG_WORK2);
+		TST_rr(REG_WORK1, REG_WORK1);
+		CC_TEQ_rr(NATIVE_CC_NE, REG_WORK1, REG_WORK2);
+		POP(REG_WORK1);
+		CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+
+		MSR_CPSRf_r(REG_WORK1);// restore flags
+
+		LSLS_rri(d,d,i);
+	} else {
+		MSR_CPSRf_i(0);
+		TST_rr(d,d);
+	}
+	// NOTE(review): REV moves the result byte (bits 24-31) into the low
+	// byte; upper bits stay zero rather than sign-extended, unlike the
+	// _reg variant which uses ASR #24 — confirm callers only use the low byte.
+	REV_rr(d,d);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASL word by immediate, with flags; same scheme as the byte variant but
+// shifted into the top halfword and moved back with ASR #16.
+MIDFUNC(3,jff_ASL_w_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d, s, 16);
+	if (i) {
+		MRS_CPSR(REG_WORK1); // store flags
+		BIC_rri(REG_WORK1, REG_WORK1, ARM_N_FLAG|ARM_Z_FLAG|ARM_V_FLAG);// clear N, Z and V; C is kept
+		PUSH(REG_WORK1);
+
+		// Calculate V Flag: V set unless the top i+1 bits are all equal
+		MVN_ri(REG_WORK2, 0);
+		LSR_rri(REG_WORK2, REG_WORK2, (i+1));
+		MVN_rr(REG_WORK2, REG_WORK2);
+		AND_rrr(REG_WORK1, d, REG_WORK2);
+		TST_rr(REG_WORK1, REG_WORK1);
+		CC_TEQ_rr(NATIVE_CC_NE, REG_WORK1, REG_WORK2);
+		POP(REG_WORK1);
+		CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+
+		MSR_CPSRf_r(REG_WORK1);// restore flags
+
+		LSLS_rri(d,d,i);
+	} else {
+		MSR_CPSRf_i(0);
+		TST_rr(d,d);
+	}
+	ASR_rri(d,d, 16);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASL long by immediate, with flags; V computed from the top i+1 bits of
+// the unshifted source, then LSLS sets N/Z/C.
+MIDFUNC(3,jff_ASL_l_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	if (i) {
+		MRS_CPSR(REG_WORK1); // store flags
+		BIC_rri(REG_WORK1, REG_WORK1, ARM_N_FLAG|ARM_Z_FLAG|ARM_V_FLAG);// clear N, Z and V; C is kept
+		PUSH(REG_WORK1);
+
+		// Calculate V Flag: V set unless the top i+1 bits are all equal
+		MVN_ri(REG_WORK2, 0);
+		LSR_rri(REG_WORK2, REG_WORK2, (i+1));
+		MVN_rr(REG_WORK2, REG_WORK2);
+		AND_rrr(REG_WORK1, s, REG_WORK2);
+		TST_rr(REG_WORK1, REG_WORK1);
+		CC_TEQ_rr(NATIVE_CC_NE, REG_WORK1, REG_WORK2);
+		POP(REG_WORK1);
+		CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+
+		MSR_CPSRf_r(REG_WORK1);// restore flags
+
+		LSLS_rri(d,s,i);
+	} else {
+		MSR_CPSRf_i(0);
+		MOVS_rr(d, s);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASL byte by register count, with flags.  V mask is built with a
+// register-count LSR (count from i); shift count is masked to 6 bits
+// (68k uses count mod 64).
+MIDFUNC(3,jff_ASL_b_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	// Save incoming flags; clear N, Z and V (C kept)
+	MRS_CPSR(REG_WORK1);// store flags
+	BIC_rri(REG_WORK1, REG_WORK1, ARM_N_FLAG|ARM_Z_FLAG|ARM_V_FLAG);// clear N, Z and V; C is kept
+	PUSH(REG_WORK1);
+
+	LSL_rri(d, s, 24);
+	// Calculate V Flag: V set unless the top i+1 bits are all equal
+	MVN_ri(REG_WORK2, 0);
+	LSR_rrr(REG_WORK2, REG_WORK2, i);
+	LSR_rri(REG_WORK2, REG_WORK2, 1);
+	MVN_rr(REG_WORK2, REG_WORK2);
+	AND_rrr(REG_WORK1, d, REG_WORK2);
+	TST_rr(REG_WORK1, REG_WORK1);
+	CC_TEQ_rr(NATIVE_CC_NE, REG_WORK1, REG_WORK2);
+	POP(REG_WORK1);
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+
+	MSR_CPSRf_r(REG_WORK1);// restore flags
+
+	AND_rri(REG_WORK2, i, 63);
+	LSLS_rrr(d,d,REG_WORK2);
+	ASR_rri(d,d, 24);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+// ASL word by register count, with flags; same scheme as the byte variant
+// with a 16-bit positioning shift.
+MIDFUNC(3,jff_ASL_w_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	// Save incoming flags; clear N, Z and V (C kept)
+	MRS_CPSR(REG_WORK1);// store flags
+	BIC_rri(REG_WORK1, REG_WORK1, ARM_N_FLAG|ARM_Z_FLAG|ARM_V_FLAG);// clear N, Z and V; C is kept
+	PUSH(REG_WORK1);
+
+	LSL_rri(d, s, 16);
+	// Calculate V Flag: V set unless the top i+1 bits are all equal
+	MVN_ri(REG_WORK2, 0);
+	LSR_rrr(REG_WORK2, REG_WORK2, i);
+	LSR_rri(REG_WORK2, REG_WORK2, 1);
+	MVN_rr(REG_WORK2, REG_WORK2);
+	AND_rrr(REG_WORK1, d, REG_WORK2);
+	TST_rr(REG_WORK1, REG_WORK1);
+	CC_TEQ_rr(NATIVE_CC_NE, REG_WORK1, REG_WORK2);
+	POP(REG_WORK1);
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+
+	MSR_CPSRf_r(REG_WORK1);// restore flags
+
+	AND_rri(REG_WORK2, i, 63);
+	LSLS_rrr(d,d,REG_WORK2);
+	ASR_rri(d,d, 16);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+// ASL long by register count, with flags; V computed from the source
+// before the flag-setting LSLS.
+MIDFUNC(3,jff_ASL_l_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	// Save incoming flags; clear N, Z and V (C kept)
+	MRS_CPSR(REG_WORK1);// store flags
+	BIC_rri(REG_WORK1, REG_WORK1, ARM_N_FLAG|ARM_Z_FLAG|ARM_V_FLAG);// clear N, Z and V; C is kept
+	PUSH(REG_WORK1);
+
+	// Calculate V Flag: V set unless the top i+1 bits are all equal
+	MVN_ri(REG_WORK2, 0);
+	LSR_rrr(REG_WORK2, REG_WORK2, i);
+	LSR_rri(REG_WORK2, REG_WORK2, 1);
+	MVN_rr(REG_WORK2, REG_WORK2);
+	AND_rrr(REG_WORK1, s, REG_WORK2);
+	TST_rr(REG_WORK1, REG_WORK1);
+	CC_TEQ_rr(NATIVE_CC_NE, REG_WORK1, REG_WORK2);
+	POP(REG_WORK1);
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+
+	MSR_CPSRf_r(REG_WORK1);// restore flags
+
+	AND_rri(REG_WORK2, i, 63);
+	LSLS_rrr(d,s,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/*
+ * ASLW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Set according to the last bit shifted out of the operand.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if the most significant bit is changed at any time during the shift operation. Cleared otherwise.
+ * C Set according to the last bit shifted out of the operand.
+ *
+ */
+// ASLW (memory shift-by-one), no flags: plain left shift by 1.
+MIDFUNC(2,jnf_ASLW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d,s,1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASLW with flags: shifting by 17 puts the word's sign bit into native C
+// and the result sign into N; V is then derived as N^C (sign changed).
+// NOTE(review): unlike jnf_ASLW the result stays in the upper halfword
+// (no shift back) — confirm the caller extracts the high word on store.
+MIDFUNC(2,jff_ASLW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	LSLS_rri(d,s,17);
+
+	MRS_CPSR(REG_WORK1);
+	CC_ORR_rri(NATIVE_CC_MI, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+	CC_EOR_rri(NATIVE_CC_CS, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+	MSR_CPSRf_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * ASR
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ * <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set according to the last bit shifted out of the operand. Unaffected for a shift count of zero.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if the most significant bit is changed at any time during the shift operation. Cleared otherwise.
+ * C Set according to the last bit shifted out of the operand. Unaffected for a shift count of zero.
+ *
+ */
+// ASR byte by immediate, no flags; a zero count emits nothing (68k: no-op
+// for flags we don't track here).
+MIDFUNC(3,jnf_ASR_b_imm,(W4 d, RR4 s, IMM i))
+{
+	if (!i) return;
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(d, s);
+	ASR_rri(d,d,i);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASR word by immediate, no flags; zero count emits nothing.
+MIDFUNC(3,jnf_ASR_w_imm,(W4 d, RR4 s, IMM i))
+{
+	if (!i) return;
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+	ASR_rri(d,d,i);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASR long by immediate, no flags; zero count emits nothing.
+MIDFUNC(3,jnf_ASR_l_imm,(W4 d, RR4 s, IMM i))
+{
+	if (!i) return;
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	ASR_rri(d,s,i);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASR byte by immediate, with flags.  Count 0: keep C, recompute N/Z only.
+// NOTE(review): CC_MSR_CPSRf_r is the register-operand form but is passed
+// what looks like an immediate (0 / ARM_C_FLAG) — verify this shouldn't be
+// the _i variant.
+MIDFUNC(3,jff_ASR_b_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(d, s);
+	if (i) {
+		MSR_CPSRf_i(0);
+		ASRS_rri(d,d,i);
+	} else {
+		CC_MSR_CPSRf_r(NATIVE_CC_CC, 0); // Clear everything except C
+		CC_MSR_CPSRf_r(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+		TST_rr(d,d);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASR word by immediate, with flags; same zero-count handling as the byte
+// variant (see NOTE there about the CC_MSR_CPSRf_r operand form).
+MIDFUNC(3,jff_ASR_w_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+	if (i) {
+		MSR_CPSRf_i(0);
+		ASRS_rri(d,d,i);
+	} else {
+		CC_MSR_CPSRf_r(NATIVE_CC_CC, 0); // Clear everything except C
+		CC_MSR_CPSRf_r(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+		TST_rr(d,d);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASR long by immediate, with flags; same zero-count handling as the byte
+// variant (see NOTE there about the CC_MSR_CPSRf_r operand form).
+MIDFUNC(3,jff_ASR_l_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	if (i) {
+		MSR_CPSRf_i(0);
+		ASRS_rri(d,s,i);
+	} else {
+		CC_MSR_CPSRf_r(NATIVE_CC_CC, 0); // Clear everything except C
+		CC_MSR_CPSRf_r(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+		TST_rr(s,s);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASR byte by register count, no flags; count masked to 0-63 (68k mod 64).
+MIDFUNC(3,jnf_ASR_b_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(d, s);
+	AND_rri(REG_WORK1, i, 63);
+	ASR_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+// ASR word by register count, no flags; count masked to 0-63.
+MIDFUNC(3,jnf_ASR_w_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+	AND_rri(REG_WORK1, i, 63);
+	ASR_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+// ASR long by register count, no flags; count masked to 0-63.
+MIDFUNC(3,jnf_ASR_l_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 63);
+	ASR_rrr(d,s,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+// ASR byte by register count, with flags (see NOTE in jff_ASR_b_imm about
+// the CC_MSR_CPSRf_r operand form); count masked to 0-63 before ASRS.
+MIDFUNC(3,jff_ASR_b_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(d, s);
+	CC_MSR_CPSRf_r(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_r(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	AND_rri(REG_WORK1, i, 63);
+	ASRS_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+// ASR word by register count, with flags; same scheme as the byte variant.
+MIDFUNC(3,jff_ASR_w_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+	CC_MSR_CPSRf_r(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_r(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	AND_rri(REG_WORK1, i, 63);
+	ASRS_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+// ASR long by register count, with flags; same scheme as the byte variant.
+MIDFUNC(3,jff_ASR_l_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	CC_MSR_CPSRf_r(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_r(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	AND_rri(REG_WORK1, i, 63);
+	ASRS_rrr(d,s,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/*
+ * ASRW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Set according to the last bit shifted out of the operand.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if the most significant bit is changed at any time during the shift operation. Cleared otherwise.
+ * C Set according to the last bit shifted out of the operand.
+ *
+ */
+// ASRW (memory shift-by-one), no flags: sign-extend word, shift right 1.
+MIDFUNC(2,jnf_ASRW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+	ASR_rri(d,d,1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// ASRW with flags: clears flags then shifts.  NOTE(review): the plain
+// ASR_rri (not ASRS) leaves N/Z/C clear rather than set from the result —
+// confirm whether a flag-setting shift was intended.
+MIDFUNC(2,jff_ASRW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+	MSR_CPSRf_i(0);
+	ASR_rri(d,d,1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * BCHG
+ * Operand Syntax: Dn,<ea>
+ * #<data>,<ea>
+ *
+ * Operand Size: 8,32
+ *
+ * X Not affected.
+ * N Not affected.
+ * Z Set if the bit tested is zero. Cleared otherwise.
+ * V Not affected.
+ * C Not affected.
+ *
+ */
+// BCHG #imm, byte: flip bit s (caller pre-masks s to 0-7); no flags.
+MIDFUNC(2,jnf_BCHG_b_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+	EOR_rri(d,d,(1 << s));
+	unlock2(d);
+}
+
+// BCHG #imm, long: flip bit s (caller pre-masks s to 0-31); no flags.
+MIDFUNC(2,jnf_BCHG_l_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+	EOR_rri(d,d,(1 << s));
+	unlock2(d);
+}
+
+// BCHG Dn, byte: flip bit (s mod 8); no flags.  Constant s folds to _imm.
+MIDFUNC(2,jnf_BCHG_b,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jnf_BCHG_b_imm)(d,live.state[s].val&7);
+		return;
+	}
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 7);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	EOR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BCHG Dn, long: flip bit (s mod 32); no flags.  Constant s folds to _imm.
+MIDFUNC(2,jnf_BCHG_l,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jnf_BCHG_l_imm)(d,live.state[s].val&31);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 31);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	EOR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BCHG #imm, byte, with flags: Z = NOT(tested bit), other flags preserved;
+// then the bit is flipped.
+MIDFUNC(2,jff_BCHG_b_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+
+	uae_u32 v = (1 << s);
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,v);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	EOR_rri(d,d,v);
+
+	unlock2(d);
+}
+
+// BCHG #imm, long, with flags: Z = NOT(tested bit); then flip the bit.
+MIDFUNC(2,jff_BCHG_l_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+
+	uae_u32 v = (1 << s);
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,v);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	EOR_rri(d,d,v);
+
+	unlock2(d);
+}
+
+// BCHG Dn, byte, with flags: Z = NOT(tested bit (s mod 8)); flip the bit.
+MIDFUNC(2,jff_BCHG_b,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BCHG_b_imm)(d,live.state[s].val&7);
+		return;
+	}
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 7);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	EOR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BCHG Dn, long, with flags: Z = NOT(tested bit (s mod 32)); flip the bit.
+MIDFUNC(2,jff_BCHG_l,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BCHG_l_imm)(d,live.state[s].val&31);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 31);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	EOR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * BCLR
+ * Operand Syntax: Dn,<ea>
+ * #<data>,<ea>
+ *
+ * Operand Size: 8,32
+ *
+ * X Not affected.
+ * N Not affected.
+ * Z Set if the bit tested is zero. Cleared otherwise.
+ * V Not affected.
+ * C Not affected.
+ *
+ */
+// BCLR #imm, byte: clear bit s (caller pre-masks to 0-7); no flags.
+MIDFUNC(2,jnf_BCLR_b_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+	BIC_rri(d,d,(1 << s));
+	unlock2(d);
+}
+
+// BCLR #imm, long: clear bit s (caller pre-masks to 0-31); no flags.
+MIDFUNC(2,jnf_BCLR_l_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+	BIC_rri(d,d,(1 << s));
+	unlock2(d);
+}
+
+// BCLR Dn, byte: clear bit (s mod 8); no flags.  Constant s folds to _imm.
+MIDFUNC(2,jnf_BCLR_b,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jnf_BCLR_b_imm)(d,live.state[s].val&7);
+		return;
+	}
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 7);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	BIC_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BCLR Dn, long: clear bit (s mod 32); no flags.  Constant s folds to _imm.
+MIDFUNC(2,jnf_BCLR_l,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jnf_BCLR_l_imm)(d,live.state[s].val&31);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 31);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	BIC_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BCLR #imm, byte, with flags: Z = NOT(tested bit); then clear the bit.
+MIDFUNC(2,jff_BCLR_b_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+
+	uae_u32 v = (1 << s);
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,v);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	BIC_rri(d,d,v);
+
+	unlock2(d);
+}
+
+// BCLR #imm, long, with flags: Z = NOT(tested bit); then clear the bit.
+MIDFUNC(2,jff_BCLR_l_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+
+	uae_u32 v = (1 << s);
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,v);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	BIC_rri(d,d,v);
+
+	unlock2(d);
+}
+
+// BCLR Dn, byte, with flags: Z = NOT(tested bit (s mod 8)); clear the bit.
+MIDFUNC(2,jff_BCLR_b,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BCLR_b_imm)(d,live.state[s].val&7);
+		return;
+	}
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 7);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	BIC_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BCLR Dn, long, with flags: Z = NOT(tested bit (s mod 32)); clear the bit.
+MIDFUNC(2,jff_BCLR_l,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BCLR_l_imm)(d,live.state[s].val&31);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 31);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	BIC_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * BSET
+ * Operand Syntax: Dn,<ea>
+ * #<data>,<ea>
+ *
+ * Operand Size: 8,32
+ *
+ * X Not affected.
+ * N Not affected.
+ * Z Set if the bit tested is zero. Cleared otherwise.
+ * V Not affected.
+ * C Not affected.
+ *
+ */
+// BSET #imm, byte: set bit s (caller pre-masks to 0-7); no flags.
+MIDFUNC(2,jnf_BSET_b_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+	ORR_rri(d,d,(1 << s));
+	unlock2(d);
+}
+
+// BSET #imm, long: set bit s (caller pre-masks to 0-31); no flags.
+MIDFUNC(2,jnf_BSET_l_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+	ORR_rri(d,d,(1 << s));
+	unlock2(d);
+}
+
+// BSET Dn, byte: set bit (s mod 8); no flags.  Constant s folds to _imm.
+MIDFUNC(2,jnf_BSET_b,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jnf_BSET_b_imm)(d,live.state[s].val&7);
+		return;
+	}
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 7);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	ORR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BSET Dn, long: set bit (s mod 32); no flags.  Constant s folds to _imm.
+MIDFUNC(2,jnf_BSET_l,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jnf_BSET_l_imm)(d,live.state[s].val&31);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 31);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	ORR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BSET #imm, byte, with flags: Z = NOT(tested bit); then set the bit.
+MIDFUNC(2,jff_BSET_b_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+
+	uae_u32 v = (1 << s);
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,v);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	ORR_rri(d,d,v);
+
+	unlock2(d);
+}
+
+// BSET #imm, long, with flags: Z = NOT(tested bit); then set the bit.
+MIDFUNC(2,jff_BSET_l_imm,(RW4 d, IMM s))
+{
+	d=rmw(d,4,4);
+
+	uae_u32 v = (1 << s);
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,v);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	ORR_rri(d,d,v);
+
+	unlock2(d);
+}
+
+// BSET Dn, byte, with flags: Z = NOT(tested bit (s mod 8)); set the bit.
+MIDFUNC(2,jff_BSET_b,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BSET_b_imm)(d,live.state[s].val&7);
+		return;
+	}
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 7);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	ORR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BSET Dn, long, with flags: Z = NOT(tested bit (s mod 32)); set the bit.
+MIDFUNC(2,jff_BSET_l,(RW4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BSET_l_imm)(d,live.state[s].val&31);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=rmw(d,4,4);
+
+	AND_rri(REG_WORK1, s, 31);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+	ORR_rrr(d,d,REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * BTST
+ * Operand Syntax: Dn,<ea>
+ * #<data>,<ea>
+ *
+ * Operand Size: 8,32
+ *
+ * X Not affected
+ * N Not affected
+ * Z Set if the bit tested is zero. Cleared otherwise
+ * V Not affected
+ * C Not affected
+ *
+ */
+// BTST #imm, byte: Z = NOT(tested bit); operand is not modified.
+MIDFUNC(2,jff_BTST_b_imm,(RR4 d, IMM s))
+{
+	d=readreg(d,4);
+
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,(1 << s));
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+}
+
+// BTST #imm, long: Z = NOT(tested bit); operand is not modified.
+MIDFUNC(2,jff_BTST_l_imm,(RR4 d, IMM s))
+{
+	d=readreg(d,4);
+
+	MRS_CPSR(REG_WORK1);
+	TST_ri(d,(1 << s));
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+}
+
+// BTST Dn, byte: Z = NOT(tested bit (s mod 8)); operand unchanged.
+MIDFUNC(2,jff_BTST_b,(RR4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BTST_b_imm)(d,live.state[s].val&7);
+		return;
+	}
+	s=readreg(s,4);
+	d=readreg(d,4);
+
+	AND_rri(REG_WORK1, s, 7);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+// BTST Dn, long: Z = NOT(tested bit (s mod 32)); operand unchanged.
+MIDFUNC(2,jff_BTST_l,(RR4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_BTST_l_imm)(d,live.state[s].val&31);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=readreg(d,4);
+
+	AND_rri(REG_WORK1, s, 31);
+	MOV_ri(REG_WORK2, 1);
+	LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+	MRS_CPSR(REG_WORK1);
+	TST_rr(d,REG_WORK2);
+	CC_BIC_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	CC_ORR_rri(NATIVE_CC_EQ, REG_WORK1, REG_WORK1, ARM_Z_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * CLR
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Always cleared.
+ * Z Always set.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+// CLR without flags: just zero the destination register.
+MIDFUNC(1,jnf_CLR,(W4 d))
+{
+	d=writereg(d,4);
+	MOV_ri(d,0);
+	unlock2(d);
+}
+
+// CLR with flags: zero the register; 68k CLR sets Z and clears N/V/C
+// (X untouched), written here with a single MSR of just the Z bit.
+MIDFUNC(1,jff_CLR,(W4 d))
+{
+	d=writereg(d,4);
+	MOV_ri(d,0);
+	MSR_CPSR_i(ARM_Z_FLAG);
+	unlock2(d);
+}
+
+/*
+ * CMP
+ * Operand Syntax: <ea>, Dn
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if an overflow occurs. Cleared otherwise.
+ * C Set if a borrow occurs. Cleared otherwise.
+ *
+ */
+/* CMP.B: compare sign-extended byte operands. ARM's CMP leaves
+   C = "no borrow", while the 68k wants C = "borrow", so C is flipped in
+   the CPSR afterwards (see the EOR with ARM_C_FLAG). */
+MIDFUNC(2,jff_CMP_b,(RR1 d, RR1 s))
+{
+	d=readreg(d,4);
+	s=readreg(s,4);
+
+	SIGNED8_REG_2_REG(REG_WORK1, d);
+	SIGNED8_REG_2_REG(REG_WORK2, s);
+	CMP_rr(REG_WORK1,REG_WORK2);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);	// ARM carry -> 68k borrow
+	MSR_CPSR_r(REG_WORK1);
+	// inverted_carry = true;
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* CMP.W: same as CMP.B with word-sized sign extension. */
+MIDFUNC(2,jff_CMP_w,(RR2 d, RR2 s))
+{
+	d=readreg(d,4);
+	s=readreg(s,4);
+
+	SIGNED16_REG_2_REG(REG_WORK1, d);
+	SIGNED16_REG_2_REG(REG_WORK2, s);
+	CMP_rr(REG_WORK1,REG_WORK2);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);	// ARM carry -> 68k borrow
+	MSR_CPSR_r(REG_WORK1);
+	// inverted_carry = true;
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* CMP.L: full 32-bit compare; only the carry convention needs fixing. */
+MIDFUNC(2,jff_CMP_l,(RR4 d, RR4 s))
+{
+	d=readreg(d,4);
+	s=readreg(s,4);
+
+	CMP_rr(d,s);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);	// ARM carry -> 68k borrow
+	MSR_CPSR_r(REG_WORK1);
+	// inverted_carry = true;
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/*
+ * CMPA
+ * Operand Syntax: <ea>, An
+ *
+ * Operand Size: 16,32
+ *
+ * X Not affected.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if an overflow occurs. Cleared otherwise.
+ * C Set if a borrow occurs. Cleared otherwise.
+ *
+ */
+/* CMPA (byte source): only the source is sign-extended; the address
+   register is compared at its full 32-bit value. Carry is flipped to
+   the 68k borrow convention as in jff_CMP_*. */
+MIDFUNC(2,jff_CMPA_b,(RR1 d, RR1 s))
+{
+	d=readreg(d,4);
+	s=readreg(s,4);
+
+	SIGNED8_REG_2_REG(REG_WORK2, s);
+	CMP_rr(d,REG_WORK2);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);	// ARM carry -> 68k borrow
+	MSR_CPSR_r(REG_WORK1);
+	// invertedcarry = true;
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* CMPA.W: word source sign-extended to 32 bits, then full compare. */
+MIDFUNC(2,jff_CMPA_w,(RR2 d, RR2 s))
+{
+	d=readreg(d,4);
+	s=readreg(s,4);
+
+	SIGNED16_REG_2_REG(REG_WORK2, s);
+	CMP_rr(d,REG_WORK2);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);	// ARM carry -> 68k borrow
+	MSR_CPSR_r(REG_WORK1);
+	// invertedcarry = true;
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* CMPA.L: direct 32-bit compare. */
+MIDFUNC(2,jff_CMPA_l,(RR4 d, RR4 s))
+{
+	d=readreg(d,4);
+	s=readreg(s,4);
+
+	CMP_rr(d,s);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);	// ARM carry -> 68k borrow
+	MSR_CPSR_r(REG_WORK1);
+	// invertedcarry = true;
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/*
+ * EOR
+ * Operand Syntax: Dn, <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set.
+ * Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+/* EOR without flags: d = s ^ v, constant-folded when both inputs are known. */
+MIDFUNC(3,jnf_EOR,(W4 d, RR4 s, RR4 v))
+{
+	if (isconst(s) && isconst(v)) {
+		set_const(d,
+				live.state[s].val^live.state[v].val);
+		return;
+	}
+
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	EOR_rrr(d, s, v);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/* EOR.B with flags: sign-extend both operands so N comes out of bit 31,
+   clear all flags first (V/C must end up 0), then use EORS for N/Z. */
+MIDFUNC(3,jff_EOR_b,(W4 d, RR1 s, RR1 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	SIGNED8_REG_2_REG(REG_WORK2, v);
+	MSR_CPSRf_i(0);
+	EORS_rrr(d, REG_WORK1, REG_WORK2);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/* EOR.W with flags: as the byte form with 16-bit sign extension. */
+MIDFUNC(3,jff_EOR_w,(W4 d, RR2 s, RR2 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	SIGNED16_REG_2_REG(REG_WORK2, v);
+	MSR_CPSRf_i(0);
+	EORS_rrr(d, REG_WORK1, REG_WORK2);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/* EOR.L with flags: no extension needed. */
+MIDFUNC(3,jff_EOR_l,(W4 d, RR4 s, RR4 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	EORS_rrr(d, s,v);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * EORI
+ * Operand Syntax: #<data>, CCR
+ *
+ * Operand Size: 8
+ *
+ * X — Changed if bit 4 of immediate operand is one; unchanged otherwise.
+ * N — Changed if bit 3 of immediate operand is one; unchanged otherwise.
+ * Z — Changed if bit 2 of immediate operand is one; unchanged otherwise.
+ * V — Changed if bit 1 of immediate operand is one; unchanged otherwise.
+ * C — Changed if bit 0 of immediate operand is one; unchanged otherwise.
+ *
+ */
+/* EORI to CCR: XOR the pre-translated flag mask s into the host CPSR.
+   If x is nonzero (bit 4 of the 68k immediate), also toggle the X flag,
+   which lives as a byte in memory (live.state[FLAGX].mem).
+   NOTE(review): declared MIDFUNC(1,...) but takes two IMM parameters —
+   confirm the argument count is not used by the MIDFUNC wrapper. */
+MIDFUNC(1,jff_EORSR,(IMM s, IMM x))
+{
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, s);
+	MSR_CPSRf_r(REG_WORK1);
+
+	if (x) {
+		compemu_raw_mov_l_ri(REG_WORK1, (uintptr)live.state[FLAGX].mem);
+		LDRB_rR(REG_WORK2, REG_WORK1);
+		EOR_rri(REG_WORK2, REG_WORK2, 1);	// toggle stored X (0/1)
+		STRB_rR(REG_WORK2, REG_WORK1);
+	}
+}
+
+/*
+ * EXT
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16,32
+ *
+ * X Not affected.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+/* EXT (no flags), byte source: sign-extend the low byte of s into d,
+   constant-folded when s is known. */
+MIDFUNC(2,jnf_EXT_b,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		set_const(d,(uae_s32)(uae_s8)live.state[s].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(d, s);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* EXT.W (no flags): also a byte sign-extension — 68k EXT.W widens
+   byte to word; presumably the word-sized writeback is handled by the
+   caller's size handling — TODO confirm at call sites. */
+MIDFUNC(2,jnf_EXT_w,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		set_const(d,(uae_s32)(uae_s8)live.state[s].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(d, s);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* EXT.L (no flags): sign-extend the low word of s into d. */
+MIDFUNC(2,jnf_EXT_l,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		set_const(d,(uae_s32)(uae_s16)live.state[s].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* EXT (flags), byte source: sign-extend, then clear V/C and derive N/Z
+   from the extended value with TST. */
+MIDFUNC(2,jff_EXT_b,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		d=writereg(d,4);
+		SIGNED8_IMM_2_REG(d, (uint8)live.state[s].val);
+	} else {
+		s=readreg(s,4);
+		d=writereg(d,4);
+		SIGNED8_REG_2_REG(d, s);
+		unlock2(s);
+	}
+
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+}
+
+/* EXT.W (flags): byte sign-extension with flag update (see jnf_EXT_w
+   regarding the byte source). */
+MIDFUNC(2,jff_EXT_w,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		d=writereg(d,4);
+		SIGNED8_IMM_2_REG(d, (uint8)live.state[s].val);
+	} else {
+		s=readreg(s,4);
+		d=writereg(d,4);
+		SIGNED8_REG_2_REG(d, s);
+		unlock2(s);
+	}
+
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+}
+
+/* EXT.L (flags): word-to-long sign-extension with flag update. */
+MIDFUNC(2,jff_EXT_l,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		d=writereg(d,4);
+		SIGNED16_IMM_2_REG(d, (uint16)live.state[s].val);
+	} else {
+		s=readreg(s,4);
+		d=writereg(d,4);
+		SIGNED16_REG_2_REG(d, s);
+		unlock2(s);
+	}
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+}
+
+/*
+ * LSL
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ * <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set according to the last bit shifted out of the operand. Unaffected for a shift count of zero.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit shifted out of the operand. Cleared for a shift count of zero.
+ *
+ */
+/* LSL #imm (no flags).
+   NOTE(review): a zero count returns without writing d — this assumes
+   callers pass d==s (or never a zero count); confirm at call sites. */
+MIDFUNC(3,jnf_LSL_imm,(W4 d, RR4 s, IMM i))
+{
+	if (!i) return;
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d,s,i);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* LSL Dx,Dy (no flags): 68k takes the count mod 64; ARM register shifts
+   of 32..63 produce 0, which matches the 68k result. */
+MIDFUNC(3,jnf_LSL_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 63);	// 68k shift count is mod 64
+	LSL_rrr(d,s,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* LSL.B #imm with flags: REV moves the operand byte into bits 31..24 so
+   LSLS produces N and C from the byte's own top bits; REV moves the
+   shifted byte back. A zero count only sets N/Z via TST. */
+MIDFUNC(3,jff_LSL_b_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	UNSIGNED8_REG_2_REG(d, s);
+	MSR_CPSRf_i(0);
+
+	REV_rr(d,d);
+	if (i) {
+		LSLS_rri(d,d,i);
+	} else {
+		TST_rr(d,d);
+	}
+	REV_rr(d,d);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* LSL.W #imm with flags: pre-shift the word into the top half so the
+   last bit shifted out lands in ARM C; shift back down afterwards. */
+MIDFUNC(3,jff_LSL_w_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+
+	LSL_rri(d,s,16);
+	if (i) {
+		LSLS_rri(d,d,i);
+	} else {
+		TST_rr(d,d);
+	}
+	LSR_rri(d,d,16);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* LSL.L #imm with flags: direct LSLS; zero count copies and tests. */
+MIDFUNC(3,jff_LSL_l_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	if (i) {
+		LSLS_rri(d,s,i);
+	} else {
+		MOV_rr(d,s);
+		TST_rr(d,d);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* LSL.B Dx,Dy with flags: count taken mod 64, byte positioned in the top
+   byte (REV) so LSLS yields N/C from the byte itself. The flags are
+   preset to "only C survives" because an ARM register shift by 0 leaves
+   C unchanged (matching the 68k's X-unaffected rule for zero counts).
+   Fix: the presets are flag-mask immediates (0 / ARM_C_FLAG), so they
+   must use the immediate MSR form, not the register form. */
+MIDFUNC(3,jff_LSL_b_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	UNSIGNED8_REG_2_REG(d,s);
+	CC_MSR_CPSRf_i(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_i(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	REV_rr(d,d);
+	AND_rri(REG_WORK1, i, 63);
+	LSLS_rrr(d,d,REG_WORK1);
+	REV_rr(d,d);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* LSL.W Dx,Dy with flags: word pre-shifted into the top half, count mod
+   64. Presets use the immediate MSR form (operands are flag masks, not
+   registers) — see jff_LSL_b_reg. */
+MIDFUNC(3,jff_LSL_w_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	CC_MSR_CPSRf_i(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_i(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	LSL_rri(d, s, 16);
+	AND_rri(REG_WORK1, i, 63);
+	LSLS_rrr(d,d,REG_WORK1);
+	LSR_rri(d, d, 16);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* LSL.L Dx,Dy with flags: direct LSLS with count mod 64. Presets use the
+   immediate MSR form (operands are flag masks, not registers) — see
+   jff_LSL_b_reg. */
+MIDFUNC(3,jff_LSL_l_reg,(W4 d, RR4 s, RR4 i))
+{
+	i=readreg(i,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	CC_MSR_CPSRf_i(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_i(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	AND_rri(REG_WORK1, i, 63);
+	LSLS_rrr(d,s,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/*
+ * LSLW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Set according to the last bit shifted out of the operand. Unaffected for a shift count of zero.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit shifted out of the operand. Cleared for a shift count of zero.
+ *
+ */
+/* LSL <ea> (memory form, implicit count of 1), no flags. */
+MIDFUNC(2,jnf_LSLW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d,s,1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* LSL <ea> with flags: shifting left by 17 puts bit 15 into ARM C and
+   sets N/Z from the shifted word; shift back down to the low half. */
+MIDFUNC(2,jff_LSLW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	LSLS_rri(d,s,17);	// 16 (position word at top) + 1 (the shift)
+	LSR_rri(d,d,16);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * LSR
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ * <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set according to the last bit shifted out of the operand.
+ * Unaffected for a shift count of zero.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit shifted out of the operand.
+ * Cleared for a shift count of zero.
+ *
+ */
+/* LSR.B #imm (no flags): zero-extend the byte, then shift right.
+   When s and d are the same virtual register a single rmw lock is used
+   instead of a read + write pair. Zero counts return early (d==s case
+   needs no copy; see jnf_LSL_imm for the same pattern). */
+MIDFUNC(3,jnf_LSR_b_imm,(W4 d, RR4 s, IMM i))
+{
+	int isrmw;
+
+	if (!i)
+		return;
+
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED8_REG_2_REG(d, s);
+	LSR_rri(d,d,i);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+/* LSR.W #imm (no flags): word variant of the above. */
+MIDFUNC(3,jnf_LSR_w_imm,(W4 d, RR4 s, IMM i))
+{
+	int isrmw;
+
+	if (!i)
+		return;
+
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED16_REG_2_REG(d, s);
+	LSR_rri(d,d,i);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+/* LSR.L #imm (no flags): no extension needed. */
+MIDFUNC(3,jnf_LSR_l_imm,(W4 d, RR4 s, IMM i))
+{
+	int isrmw;
+
+	if (!i)
+		return;
+
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	LSR_rri(d,s,i);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+/* LSR.B #imm with flags: zero-extend so LSRS's carry-out is the real
+   last bit shifted out; a zero count just sets N/Z via TST. */
+MIDFUNC(3,jff_LSR_b_imm,(W4 d, RR4 s, IMM i))
+{
+	int isrmw;
+
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED8_REG_2_REG(d, s);
+	MSR_CPSRf_i(0);
+	if (i) {
+		LSRS_rri(d,d,i);
+	} else {
+		TST_rr(d,d);
+	}
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+/* LSR.W #imm with flags: word variant of the above. */
+MIDFUNC(3,jff_LSR_w_imm,(W4 d, RR4 s, IMM i))
+{
+	int isrmw;
+
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED16_REG_2_REG(d, s);
+	MSR_CPSRf_i(0);
+	if (i) {
+		LSRS_rri(d,d,i);
+	} else {
+		TST_rr(d,d);
+	}
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+/* LSR.L #imm with flags.
+   NOTE(review): the zero-count path tests s but never writes d — relies
+   on callers using d==s for zero counts; confirm at call sites. */
+MIDFUNC(3,jff_LSR_l_imm,(W4 d, RR4 s, IMM i))
+{
+	int isrmw;
+
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	MSR_CPSRf_i(0);
+	if (i) {
+		LSRS_rri(d,s,i);
+	} else {
+		TST_rr(s,s);
+	}
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+}
+
+/* LSR.B Dx,Dy (no flags): count mod 64; ARM register shifts of 32..63
+   yield 0, matching the 68k. rmw lock when s and d alias. */
+MIDFUNC(3,jnf_LSR_b_reg,(W4 d, RR4 s, RR4 i))
+{
+	int isrmw;
+
+	i=readreg(i,4);
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED8_REG_2_REG(d, s);
+	AND_rri(REG_WORK1, i, 63);
+	LSR_rrr(d,d,REG_WORK1);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+	unlock2(i);
+}
+
+/* LSR.W Dx,Dy (no flags): word variant. */
+MIDFUNC(3,jnf_LSR_w_reg,(W4 d, RR4 s, RR4 i))
+{
+	int isrmw;
+
+	i=readreg(i,4);
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED16_REG_2_REG(d, s);
+	AND_rri(REG_WORK1, i, 63);
+	LSR_rrr(d,d,REG_WORK1);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+	unlock2(i);
+}
+
+/* LSR.L Dx,Dy (no flags). */
+MIDFUNC(3,jnf_LSR_l_reg,(W4 d, RR4 s, RR4 i))
+{
+	int isrmw;
+
+	i=readreg(i,4);
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	AND_rri(REG_WORK1, i, 63);
+	LSR_rrr(d,s,REG_WORK1);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+	unlock2(i);
+}
+
+/* LSR.B Dx,Dy with flags: zero-extend then LSRS with count mod 64.
+   Flags are preset so only C survives (an ARM register shift by 0 leaves
+   C unchanged). Fix: the presets are flag-mask immediates (0 /
+   ARM_C_FLAG), so they must use the immediate MSR form, not the register
+   form. */
+MIDFUNC(3,jff_LSR_b_reg,(W4 d, RR4 s, RR4 i))
+{
+	int isrmw;
+
+	i=readreg(i,4);
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED8_REG_2_REG(d, s);
+	CC_MSR_CPSRf_i(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_i(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	AND_rri(REG_WORK1, i, 63);
+	LSRS_rrr(d,d,REG_WORK1);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+	unlock2(i);
+}
+
+/* LSR.W Dx,Dy with flags: word variant of jff_LSR_b_reg. Presets use the
+   immediate MSR form (operands are flag masks, not registers). */
+MIDFUNC(3,jff_LSR_w_reg,(W4 d, RR4 s, RR4 i))
+{
+	int isrmw;
+
+	i=readreg(i,4);
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	UNSIGNED16_REG_2_REG(d, s);
+	CC_MSR_CPSRf_i(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_i(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	AND_rri(REG_WORK1, i, 63);
+	LSRS_rrr(d,d,REG_WORK1);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+	unlock2(i);
+}
+
+/* LSR.L Dx,Dy with flags: long variant of jff_LSR_b_reg. Presets use the
+   immediate MSR form (operands are flag masks, not registers). */
+MIDFUNC(3,jff_LSR_l_reg,(W4 d, RR4 s, RR4 i))
+{
+	int isrmw;
+
+	i=readreg(i,4);
+	isrmw=(s==d);
+	if (!isrmw) {
+		s=readreg(s,4);
+		d=writereg(d,4);
+	}
+	else {
+		s=d=rmw(s,4,4);
+	}
+
+	CC_MSR_CPSRf_i(NATIVE_CC_CC, 0); // Clear everything except C
+	CC_MSR_CPSRf_i(NATIVE_CC_CS, ARM_C_FLAG);// Clear everything except C
+	AND_rri(REG_WORK1, i, 63);
+	LSRS_rrr(d,s,REG_WORK1);
+
+	if (!isrmw) {
+		unlock2(d);
+		unlock2(s);
+	}
+	else {
+		unlock2(s);
+	}
+	unlock2(i);
+}
+
+/*
+ * LSRW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Set according to the last bit shifted out of the operand. Unaffected for a shift count of zero.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit shifted out of the operand. Cleared for a shift count of zero.
+ *
+ */
+/* LSR <ea> (memory form, implicit count of 1), no flags. */
+MIDFUNC(2,jnf_LSRW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	UNSIGNED16_REG_2_REG(d, s);
+	LSR_rri(d,d,1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* LSR <ea> with flags.
+   NOTE(review): uses plain LSR, not LSRS, so ARM C is not set from the
+   bit shifted out — confirm this is intentional or compensated by the
+   caller. */
+MIDFUNC(2,jff_LSRW,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	UNSIGNED16_REG_2_REG(d, s);
+	MSR_CPSRf_i(0);
+	LSR_rri(d,d,1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * MOVE
+ * Operand Syntax: <ea>, <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+/* MOVE (no flags): plain register copy, constant-folded when possible. */
+MIDFUNC(2,jnf_MOVE,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		set_const(d,live.state[s].val);
+		return;
+	}
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MOV_rr(d, s);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* MOVE.B #imm with flags: materialize the sign-extended byte, clear
+   V/C, derive N/Z via TST. */
+MIDFUNC(2,jff_MOVE_b_imm,(W4 d, IMM s))
+{
+	d=writereg(d,4);
+
+	SIGNED8_IMM_2_REG(d, (uint8)s);
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+}
+
+/* MOVE.W #imm with flags. */
+MIDFUNC(2,jff_MOVE_w_imm,(W4 d, IMM s))
+{
+	d=writereg(d,4);
+
+	SIGNED16_IMM_2_REG(d, (uint16)s);
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+}
+
+/* MOVE.L #imm with flags. */
+MIDFUNC(2,jff_MOVE_l_imm,(W4 d, IMM s))
+{
+	d=writereg(d,4);
+
+	compemu_raw_mov_l_ri(d, s);
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+}
+
+/* MOVE.B with flags: dispatch to the immediate variant when s is known. */
+MIDFUNC(2,jff_MOVE_b,(W4 d, RR1 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_MOVE_b_imm)(d,live.state[s].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(d, s);
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* MOVE.W with flags. */
+MIDFUNC(2,jff_MOVE_w,(W4 d, RR2 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_MOVE_w_imm)(d,live.state[s].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d, s);
+	MSR_CPSRf_i(0);
+	TST_rr(d,d);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* MOVE.L with flags: MOVS sets N/Z directly during the copy. */
+MIDFUNC(2,jff_MOVE_l,(W4 d, RR4 s))
+{
+	if (isconst(s)) {
+		COMPCALL(jff_MOVE_l_imm)(d,live.state[s].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	MOVS_rr(d,s);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * MOVE16
+ *
+ * Flags: Not affected.
+ *
+ */
+/* MOVE16: copy one aligned 16-byte block from emulated address s to d.
+   68040 MOVE16 ignores the low FOUR bits of both addresses (16-byte
+   alignment), so mask with 0x0F; the previous 0xFF mask aligned to 256
+   bytes and could copy the wrong block. The masked addresses are then
+   translated to host addresses via MEMBaseDiff.
+   NOTE(review): the alignment/translation is performed in the locked
+   source/destination registers themselves (readreg, not writereg) —
+   confirm the register cache discards these values afterwards. */
+MIDFUNC(2,jnf_MOVE16,(RR4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=readreg(d,4);
+
+	BIC_rri(s, s, 0x0000000F);
+	BIC_rri(d, d, 0x0000000F);
+
+	compemu_raw_mov_l_ri(REG_WORK1, (IMM)MEMBaseDiff);
+	ADD_rrr(s, s, REG_WORK1);
+	ADD_rrr(d, d, REG_WORK1);
+
+	// Load words 2 and 3 first, park them on the stack while the two
+	// work registers ferry words 0 and 1, then store them last.
+	LDR_rRI(REG_WORK1, s, 8);
+	LDR_rRI(REG_WORK2, s, 12);
+
+	PUSH_REGS((1<<REG_WORK1)|(1<<REG_WORK2)); // May be optimizable
+	LDR_rR(REG_WORK1, s);
+	LDR_rRI(REG_WORK2, s, 4);
+	STR_rR(REG_WORK1, d);
+	STR_rRI(REG_WORK2, d, 4);
+	POP_REGS((1<<REG_WORK1)|(1<<REG_WORK2));
+	STR_rRI(REG_WORK1, d, 8);
+	STR_rRI(REG_WORK2, d, 12);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * MOVEA
+ * Operand Syntax: <ea>, An
+ *
+ * Operand Size: 16,32
+ *
+ * Flags: Not affected.
+ *
+ */
+/* MOVEA.W: word source is sign-extended to 32 bits; flags untouched. */
+MIDFUNC(2,jnf_MOVEA_w,(W4 d, RR2 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(d,s);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* MOVEA.L: straight copy; flags untouched. */
+MIDFUNC(2,jnf_MOVEA_l,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MOV_rr(d,s);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * MULS
+ * Operand Syntax: <ea>, Dn
+ *
+ * Operand Size: 16
+ *
+ * X Not affected.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if overflow. Cleared otherwise. (32 Bit multiply only)
+ * C Always cleared.
+ *
+ */
+/* MULS.W (no flags): 16x16 -> 32 signed multiply, both operands
+   sign-extended in place / into a work register first. */
+MIDFUNC(2,jnf_MULS,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	SIGN_EXTEND_16_REG_2_REG(d,d);
+	SIGN_EXTEND_16_REG_2_REG(REG_WORK1,s);
+	MUL_rrr(d, d, REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULS.W with flags: MULS sets N/Z; V/C pre-cleared (a 16x16 multiply
+   cannot overflow 32 bits). */
+MIDFUNC(2,jff_MULS,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	SIGN_EXTEND_16_REG_2_REG(d,d);
+	SIGN_EXTEND_16_REG_2_REG(REG_WORK1,s);
+
+	MSR_CPSRf_i(0);
+	MULS_rrr(d, d, REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULS.L (32x32 -> 32, no flags). */
+MIDFUNC(2,jnf_MULS32,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	MUL_rrr(d, d, s);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULS.L with flags: compute the full 64-bit product; V is set when the
+   high word differs from the sign-extension of the low word (the 32-bit
+   result overflowed). */
+MIDFUNC(2,jff_MULS32,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	MSR_CPSRf_i(0);
+	// L, H,
+	SMULLS_rrrr(d, REG_WORK2, d, s);
+	MRS_CPSR(REG_WORK1);
+	TEQ_rrASRi(REG_WORK2,d,31);	// high != sign(low) -> overflow
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+	MSR_CPSRf_r(REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULS.L 64-bit form (no flags): low word to d, high word to s. */
+MIDFUNC(2,jnf_MULS64,(RW4 d, RW4 s))
+{
+	s = rmw(s, 4, 4);
+	d = rmw(d, 4, 4);
+
+	// L, H,
+	SMULL_rrrr(d, s, d, s);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULS.L 64-bit form with flags: V set as in jff_MULS32. */
+MIDFUNC(2,jff_MULS64,(RW4 d, RW4 s))
+{
+	s = rmw(s, 4, 4);
+	d = rmw(d, 4, 4);
+
+	MSR_CPSRf_i(0);
+	// L, H,
+	SMULLS_rrrr(d, s, d, s);
+	MRS_CPSR(REG_WORK1);
+	TEQ_rrASRi(s,d,31);	// high != sign(low) -> overflow
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+	MSR_CPSRf_r(REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/*
+ * MULU
+ * Operand Syntax: <ea>, Dn
+ *
+ * Operand Size: 16
+ *
+ * X Not affected.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if overflow. Cleared otherwise. (32 Bit multiply only)
+ * C Always cleared.
+ *
+ */
+/* MULU.W (no flags): 16x16 -> 32 unsigned multiply. */
+MIDFUNC(2,jnf_MULU,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	ZERO_EXTEND_16_REG_2_REG(d,d);
+	ZERO_EXTEND_16_REG_2_REG(REG_WORK1,s);
+
+	MUL_rrr(d, d, REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULU.W with flags: MULS sets N/Z; V/C pre-cleared (cannot overflow). */
+MIDFUNC(2,jff_MULU,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	ZERO_EXTEND_16_REG_2_REG(d,d);
+	ZERO_EXTEND_16_REG_2_REG(REG_WORK1, s);
+
+	MSR_CPSRf_i(0);
+	MULS_rrr(d, d, REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULU.L (32x32 -> 32, no flags). */
+MIDFUNC(2,jnf_MULU32,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	MUL_rrr(d, d, s);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULU.L with flags: full 64-bit unsigned product; V set when the high
+   word is nonzero (the 32-bit result overflowed). */
+MIDFUNC(2,jff_MULU32,(RW4 d, RR4 s))
+{
+	s = readreg(s, 4);
+	d = rmw(d, 4, 4);
+
+	// L, H,
+	MSR_CPSRf_i(0);
+	UMULLS_rrrr(d, REG_WORK2, d, s);
+	MRS_CPSR(REG_WORK1);
+	TST_rr(REG_WORK2,REG_WORK2);	// nonzero high word -> overflow
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+	MSR_CPSRf_r(REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULU.L 64-bit form (no flags): low word to d, high word to s. */
+MIDFUNC(2,jnf_MULU64,(RW4 d, RW4 s))
+{
+	s = rmw(s, 4, 4);
+	d = rmw(d, 4, 4);
+
+	// L, H,
+	UMULL_rrrr(d, s, d, s);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/* MULU.L 64-bit form with flags: V set as in jff_MULU32. */
+MIDFUNC(2,jff_MULU64,(RW4 d, RW4 s))
+{
+	s = rmw(s, 4, 4);
+	d = rmw(d, 4, 4);
+
+	// L, H,
+	MSR_CPSRf_i(0);
+	UMULLS_rrrr(d, s, d, s);
+	MRS_CPSR(REG_WORK1);
+	TST_rr(s,s);	// nonzero high word -> overflow
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK1, REG_WORK1, ARM_V_FLAG);
+	MSR_CPSRf_r(REG_WORK1);
+
+	unlock2(s);
+	unlock2(d);
+}
+
+/*
+ * NEG
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set the same as the carry bit.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if an overflow occurs. Cleared otherwise.
+ * C Cleared if the result is zero. Set otherwise.
+ *
+ */
+/* NEG (no flags): d = 0 - s. */
+MIDFUNC(2,jnf_NEG,(W4 d, RR4 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	RSB_rri(d,s,0);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NEG.B with flags: sign-extend, subtract from zero with flag update,
+   then flip C — ARM's RSBS leaves C = no-borrow, the 68k wants
+   C = borrow (set for any nonzero operand). */
+MIDFUNC(2,jff_NEG_b,(W4 d, RR1 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	RSBS_rri(d,REG_WORK1,0);
+
+	// inverted_carry = true;
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NEG.W with flags: word variant of the above. */
+MIDFUNC(2,jff_NEG_w,(W4 d, RR2 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	RSBS_rri(d,REG_WORK1,0);
+
+	// inverted_carry = true;
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NEG.L with flags. */
+MIDFUNC(2,jff_NEG_l,(W4 d, RR4 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	RSBS_rri(d,s,0);
+
+	// inverted_carry = true;
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * NEGX
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set the same as the carry bit.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Cleared if the result is nonzero; unchanged otherwise.
+ * V Set if an overflow occurs. Cleared otherwise.
+ * C Cleared if the result is zero. Set otherwise.
+ *
+ * Attention: Z is cleared only if the result is nonzero. Unchanged otherwise
+ *
+ */
+/* NEGX (no flags): d = 0 - s - (1 - C) via RSC.
+   Assumes the emulated X flag is already held (inverted) in ARM C at
+   entry — TODO confirm callers restore flags before this midfunc. */
+MIDFUNC(2,jnf_NEGX,(W4 d, RR4 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	RSC_rri(d,s,0);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NEGX.B with flags. NEGX's special Z rule: Z is only CLEARED by a
+   nonzero result, never set. REG_WORK2 becomes an AND-mask: all-ones
+   when Z was set (keep the newly computed Z), ~Z when Z was clear
+   (force Z to stay 0). C is flipped to the 68k borrow convention.
+   NOTE(review): the MRS into REG_WORK2 is fully overwritten by the two
+   conditional MVNs (conditions read CPSR directly) — appears redundant. */
+MIDFUNC(2,jff_NEGX_b,(W4 d, RR1 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	MRS_CPSR(REG_WORK2);
+	CC_MVN_ri(NATIVE_CC_EQ, REG_WORK2, 0);
+	CC_MVN_ri(NATIVE_CC_NE, REG_WORK2, ARM_Z_FLAG);
+
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	RSCS_rri(d,REG_WORK1,0);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);	// ARM carry -> 68k borrow
+	AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);	// apply the Z-preservation mask
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NEGX.W with flags: word variant. */
+MIDFUNC(2,jff_NEGX_w,(W4 d, RR2 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	MRS_CPSR(REG_WORK2);
+	CC_MVN_ri(NATIVE_CC_EQ, REG_WORK2, 0);
+	CC_MVN_ri(NATIVE_CC_NE, REG_WORK2, ARM_Z_FLAG);
+
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	RSCS_rri(d,REG_WORK1,0);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NEGX.L with flags. */
+MIDFUNC(2,jff_NEGX_l,(W4 d, RR4 s))
+{
+	d=writereg(d,4);
+	s=readreg(s,4);
+
+	MRS_CPSR(REG_WORK2);
+	CC_MVN_ri(NATIVE_CC_EQ, REG_WORK2, 0);
+	CC_MVN_ri(NATIVE_CC_NE, REG_WORK2, ARM_Z_FLAG);
+
+	RSCS_rri(d,s,0);
+
+	MRS_CPSR(REG_WORK1);
+	EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+	AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+	MSR_CPSR_r(REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * NOT
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+/* NOT (no flags): bitwise complement. */
+MIDFUNC(2,jnf_NOT,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MVN_rr(d,s);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NOT.B with flags: zero-extend first so MVNS's N/Z reflect the full
+   complemented value; V/C pre-cleared. */
+MIDFUNC(2,jff_NOT_b,(W4 d, RR1 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	UNSIGNED8_REG_2_REG(d,s);
+	MSR_CPSRf_i(0); // Clear flags
+	MVNS_rr(d,d);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NOT.W with flags: word variant. */
+MIDFUNC(2,jff_NOT_w,(W4 d, RR2 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	UNSIGNED16_REG_2_REG(d,s);
+	MSR_CPSRf_i(0); // Clear flags
+	MVNS_rr(d,d);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* NOT.L with flags. */
+MIDFUNC(2,jff_NOT_l,(W4 d, RR4 s))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0); // Clear flags
+	MVNS_rr(d,s);
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * OR
+ * Operand Syntax: <ea>, Dn
+ * Dn, <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+/* OR without flags: d = s | v, constant-folded when both inputs known. */
+MIDFUNC(3,jnf_OR,(W4 d, RR4 s, RR4 v))
+{
+	if (isconst(s) && isconst(v)) {
+		set_const(d,
+				live.state[s].val|live.state[v].val);
+		return;
+	}
+
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	ORR_rrr(d, s, v);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/* OR.B with flags: sign-extend both operands so N comes from bit 31;
+   clear V/C, then ORRS sets N/Z. */
+MIDFUNC(3,jff_OR_b,(W4 d, RR1 s, RR1 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED8_REG_2_REG(REG_WORK1, s);
+	SIGNED8_REG_2_REG(REG_WORK2, v);
+	MSR_CPSRf_i(0);
+	ORRS_rrr(d, REG_WORK1, REG_WORK2);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/* OR.W with flags: word variant. */
+MIDFUNC(3,jff_OR_w,(W4 d, RR2 s, RR2 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	SIGNED16_REG_2_REG(REG_WORK1, s);
+	SIGNED16_REG_2_REG(REG_WORK2, v);
+	MSR_CPSRf_i(0);
+	ORRS_rrr(d, REG_WORK1, REG_WORK2);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/* OR.L with flags. */
+MIDFUNC(3,jff_OR_l,(W4 d, RR4 s, RR4 v))
+{
+	v=readreg(v,4);
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	ORRS_rrr(d, s,v);
+
+	unlock2(v);
+	unlock2(d);
+	unlock2(s);
+}
+
+/*
+ * ORI
+ * Operand Syntax: #<data>, CCR
+ *
+ * Operand Size: 8
+ *
+ * X — Set if bit 4 of immediate operand is one; unchanged otherwise.
+ * N — Set if bit 3 of immediate operand is one; unchanged otherwise.
+ * Z — Set if bit 2 of immediate operand is one; unchanged otherwise.
+ * V — Set if bit 1 of immediate operand is one; unchanged otherwise.
+ * C — Set if bit 0 of immediate operand is one; unchanged otherwise.
+ *
+ */
+/* ORI to CCR: OR the pre-translated flag mask s into the host CPSR.
+   If x is nonzero (bit 4 of the 68k immediate), force the in-memory X
+   flag byte to 1.
+   NOTE(review): declared MIDFUNC(1,...) but takes two IMM parameters —
+   same arity question as jff_EORSR. */
+MIDFUNC(1,jff_ORSR,(IMM s, IMM x))
+{
+	MRS_CPSR(REG_WORK1);
+	ORR_rri(REG_WORK1, REG_WORK1, s);
+	MSR_CPSRf_r(REG_WORK1);
+
+	if (x) {
+		compemu_raw_mov_l_ri(REG_WORK1, (uintptr)live.state[FLAGX].mem);
+		MOV_ri(REG_WORK2, 1);
+		STRB_rR(REG_WORK2, REG_WORK1);
+	}
+}
+
+/*
+ * ROL
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ * <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand. Cleared when the rotate count is zero.
+ *
+ */
+/* ROL.B #imm (no flags): replicate the byte across the whole register so
+   ARM's ROR (right-rotate only) by 32-i implements an i-bit left rotate;
+   the low byte then holds the rotated result. */
+MIDFUNC(3,jnf_ROL_b_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d,s,24);
+	ORR_rrrLSRi(d,d,d,8);
+	ORR_rrrLSRi(d,d,d,16);
+	ROR_rri(d,d,(32-(i&0x1f)));	// left rotate = right rotate by 32-i
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* ROL.W #imm (no flags): replicate the word into both halves. */
+MIDFUNC(3,jnf_ROL_w_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d,s,16);
+	ORR_rrrLSRi(d,d,d,16);
+	ROR_rri(d,d,(32-(i&0x1f)));
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* ROL.L #imm (no flags). */
+MIDFUNC(3,jnf_ROL_l_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	ROR_rri(d,s,(32-(i&0x1f)));
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* ROL.B #imm with flags: as above, plus 68k C = last bit rotated out,
+   which for ROL is the new bit 0 of the result. A zero count only sets
+   N/Z via TST. */
+MIDFUNC(3,jff_ROL_b_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d,s,24);
+	ORR_rrrLSRi(d,d,d,8);
+	ORR_rrrLSRi(d,d,d,16);
+	MSR_CPSRf_i(0);
+	if (i) {
+		RORS_rri(d,d,(32-(i&0x1f)));
+
+		MRS_CPSR(REG_WORK2);
+		TST_ri(d, 1);	// C := new bit 0
+		CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+		CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+		MSR_CPSR_r(REG_WORK2);
+
+	} else {
+		TST_rr(d,d);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* ROL.W #imm with flags. */
+MIDFUNC(3,jff_ROL_w_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	LSL_rri(d,s,16);
+	ORR_rrrLSRi(d,d,d,16);
+	MSR_CPSRf_i(0);
+	if (i) {
+		RORS_rri(d,d,(32-(i&0x1f)));
+
+		MRS_CPSR(REG_WORK2);
+		TST_ri(d, 1);
+		CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+		CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+		MSR_CPSR_r(REG_WORK2);
+
+	} else {
+		TST_rr(d,d);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* ROL.L #imm with flags. */
+MIDFUNC(3,jff_ROL_l_imm,(W4 d, RR4 s, IMM i))
+{
+	s=readreg(s,4);
+	d=writereg(d,4);
+
+	MSR_CPSRf_i(0);
+	if (i) {
+		RORS_rri(d,s,(32-(i&0x1f)));
+
+		MRS_CPSR(REG_WORK2);
+		TST_ri(d, 1);
+		CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+		CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+		MSR_CPSR_r(REG_WORK2);
+
+	} else {
+		MOVS_rr(d,s);
+	}
+
+	unlock2(d);
+	unlock2(s);
+}
+
+/* ROL.B Dx,Dy (no flags): constant-fold to the immediate variant when the
+   count is known; otherwise compute 32-(i mod 32) at runtime and use
+   ARM's right rotate (replication trick as in the immediate forms). */
+MIDFUNC(3,jnf_ROL_b,(W4 d, RR4 s, RR4 i))
+{
+	if (isconst(i)) {
+		COMPCALL(jnf_ROL_b_imm)(d,s,(uae_u8)live.state[i].val);
+		return;
+	}
+	s=readreg(s,4);
+	i=readreg(i,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 0x1f);
+	RSB_rri(REG_WORK1, REG_WORK1, 32);	// left rotate = right rotate by 32-i
+
+	LSL_rri(d,s,24);
+	ORR_rrrLSRi(d,d,d,8);
+	ORR_rrrLSRi(d,d,d,16);
+	ROR_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* ROL.W Dx,Dy (no flags). */
+MIDFUNC(3,jnf_ROL_w,(W4 d, RR4 s, RR4 i))
+{
+	if (isconst(i)) {
+		COMPCALL(jnf_ROL_w_imm)(d,s,(uae_u8)live.state[i].val);
+		return;
+	}
+	s=readreg(s,4);
+	i=readreg(i,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 0x1f);
+	RSB_rri(REG_WORK1, REG_WORK1, 32);
+
+	LSL_rri(d,s,16);
+	ORR_rrrLSRi(d,d,d,16);
+	ROR_rrr(d,d,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* ROL.L Dx,Dy (no flags). */
+MIDFUNC(3,jnf_ROL_l,(W4 d, RR4 s, RR4 i))
+{
+	if (isconst(i)) {
+		COMPCALL(jnf_ROL_l_imm)(d,s,(uae_u8)live.state[i].val);
+		return;
+	}
+	s=readreg(s,4);
+	i=readreg(i,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 0x1f);
+	RSB_rri(REG_WORK1, REG_WORK1, 32);
+
+	ROR_rrr(d,s,REG_WORK1);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* ROL.B Dx,Dy with flags: C is taken from the new bit 0 (the last bit
+   rotated out of a left rotate). */
+MIDFUNC(3,jff_ROL_b,(W4 d, RR4 s, RR4 i))
+{
+	if (isconst(i)) {
+		COMPCALL(jff_ROL_b_imm)(d,s,(uae_u8)live.state[i].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	i=readreg(i,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 0x1f);
+	RSB_rri(REG_WORK1, REG_WORK1, 32);
+
+	LSL_rri(d,s,24);
+	ORR_rrrLSRi(d,d,d,8);
+	ORR_rrrLSRi(d,d,d,16);
+	MSR_CPSRf_i(0);
+	RORS_rrr(d,d,REG_WORK1);
+
+	MRS_CPSR(REG_WORK2);
+	TST_ri(d, 1);	// C := new bit 0
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+	CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+	MSR_CPSR_r(REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* ROL.W Dx,Dy with flags. */
+MIDFUNC(3,jff_ROL_w,(W4 d, RR4 s, RR4 i))
+{
+	if (isconst(i)) {
+		COMPCALL(jff_ROL_w_imm)(d,s,(uae_u8)live.state[i].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	i=readreg(i,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 0x1f);
+	RSB_rri(REG_WORK1, REG_WORK1, 32);
+
+	LSL_rri(d,s,16);
+	ORR_rrrLSRi(d,d,d,16);
+	MSR_CPSRf_i(0);
+	RORS_rrr(d,d,REG_WORK1);
+
+	MRS_CPSR(REG_WORK2);
+	TST_ri(d, 1);
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+	CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+	MSR_CPSR_r(REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/* ROL.L Dx,Dy with flags. */
+MIDFUNC(3,jff_ROL_l,(W4 d, RR4 s, RR4 i))
+{
+	if (isconst(i)) {
+		COMPCALL(jff_ROL_l_imm)(d,s,(uae_u8)live.state[i].val);
+		return;
+	}
+
+	s=readreg(s,4);
+	i=readreg(i,4);
+	d=writereg(d,4);
+
+	AND_rri(REG_WORK1, i, 0x1f);
+	RSB_rri(REG_WORK1, REG_WORK1, 32);
+
+	MSR_CPSRf_i(0);
+	RORS_rrr(d,s,REG_WORK1);
+
+	MRS_CPSR(REG_WORK2);
+	TST_ri(d, 1);
+	CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+	CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+	MSR_CPSR_r(REG_WORK2);
+
+	unlock2(d);
+	unlock2(s);
+	unlock2(i);
+}
+
+/*
+ * ROLW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand. Cleared when the rotate count is zero.
+ *
+ */
+MIDFUNC(2,jnf_ROLW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ ROR_rri(d,d,(32-1));
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,jff_ROLW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ MSR_CPSRf_i(0);
+ RORS_rri(d,d,(32-1));
+
+ MRS_CPSR(REG_WORK2);
+ TST_ri(d, 1);
+ CC_ORR_rri(NATIVE_CC_NE, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+ CC_BIC_rri(NATIVE_CC_EQ, REG_WORK2, REG_WORK2, ARM_C_FLAG);
+ MSR_CPSR_r(REG_WORK2);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+/*
+ * RORW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand.
+ *
+ */
+MIDFUNC(2,jnf_RORW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ ROR_rri(d,d,1);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,jff_RORW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ MSR_CPSRf_i(0);
+ RORS_rri(d,d,1);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+/*
+ * ROXL
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set according to the last bit rotated out of the operand. Cleared when the rotate count is zero.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand. Cleared when the rotate count is zero.
+ *
+ */
+MIDFUNC(3,jnf_ROXL_b_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ UNSIGNED8_REG_2_REG(d,s);
+ LSL_rri(d,d,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (1 << (i - 1)));
+ if (i > 1) ORR_rrrLSRi(d,d,d,9);
+ } else {
+ MOV_rr(d,s);
+ }
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROXL_w_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ UNSIGNED16_REG_2_REG(d,s);
+ LSL_rri(d,d,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (1 << (i - 1)));
+ if (i > 1) ORR_rrrLSRi(d,d,d,17);
+ } else {
+ MOV_rr(d,s);
+ }
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROXL_l_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ LSL_rri(d,s,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (1 << (i - 1)));
+ if (i > 1) ORR_rrrLSRi(d,d,s,(32-i));
+ } else {
+ MOV_rr(d,s);
+ }
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROXL_b_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ UNSIGNED8_REG_2_REG(d,s);
+ LSL_rri(d,d,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (1 << (i - 1)));
+ if (i > 1) ORR_rrrLSRi(d,d,d,9);
+ TST_ri(s, (1<<(8-i)));
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ } else {
+ MOV_rr(d,s);
+ MSR_CPSRf_i(0);
+ }
+
+ SIGNED8_REG_2_REG(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROXL_w_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ UNSIGNED16_REG_2_REG(d,s);
+ LSL_rri(d,d,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (1 << (i - 1)));
+ if (i > 1) ORR_rrrLSRi(d,d,d,17);
+ TST_ri(s, (1<<(16-i)));
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ } else {
+ MOV_rr(d,s);
+ MSR_CPSRf_i(0);
+ }
+
+ SIGNED16_REG_2_REG(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROXL_l_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ LSL_rri(d,s,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (1 << (i - 1)));
+ if (i > 1) ORR_rrrLSRi(d,d,s,(32-i));
+ TST_ri(s, (1<<(32-i)));
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ } else {
+ MOV_rr(d,s);
+ MSR_CPSRf_i(0);
+ }
+
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROXL_b,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROXL_b_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MOV_rr(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 36);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 36);
+ CMP_ri(REG_WORK1, 18);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 18);
+ CMP_ri(REG_WORK1, 9);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 9);
+ CMP_ri(REG_WORK1, 0);
+#if defined(ARMV6_ASSEMBLY)
+ BLE_i(8-1);
+#else
+ BLE_i(9-1);
+#endif
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSL_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,1);
+ LSL_rrr(d, d, REG_WORK1);
+ RSB_rri(REG_WORK1, REG_WORK1, 8);
+#if defined(ARMV6_ASSEMBLY)
+ UXTB_rr(REG_WORK2, s);
+#else
+ ROR_rri(REG_WORK2, s, 8);
+ LSR_rri(REG_WORK2, REG_WORK2, 24);
+#endif
+ ORR_rrrLSRr(d,d,REG_WORK2,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jnf_ROXL_w,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROXL_w_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ UNSIGNED16_REG_2_REG(d,s);
+ MRS_CPSR(REG_WORK2);
+	AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 34);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 34);
+ CMP_ri(REG_WORK1, 17);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 17);
+ CMP_ri(REG_WORK1, 0);
+#if defined(ARMV6_ASSEMBLY)
+ BLE_i(8-1);
+#else
+ BLE_i(9-1);
+#endif
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSL_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,1);
+ LSL_rrr(d, d, REG_WORK1);
+ RSB_rri(REG_WORK1, REG_WORK1, 16);
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK2, s);
+#else
+ LSL_rri(REG_WORK2, s, 16);
+ LSR_rri(REG_WORK2, REG_WORK2, 16);
+#endif
+ ORR_rrrLSRr(d,d,REG_WORK2,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jnf_ROXL_l,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROXL_l_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MOV_rr(d,s);
+ MRS_CPSR(REG_WORK2);
+	AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 33);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 33);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(7-1);
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSL_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,1);
+ LSL_rrr(d, d, REG_WORK1);
+ RSB_rri(REG_WORK1, REG_WORK1, 32);
+ ORR_rrrLSRr(d,d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROXL_b,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROXL_b_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MOV_rr(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 36);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 36);
+ CMP_ri(REG_WORK1, 18);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 18);
+ CMP_ri(REG_WORK1, 9);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 9);
+ CMP_ri(REG_WORK1, 0);
+#if defined(ARMV6_ASSEMBLY)
+ BLE_i(16-1); // label
+#else
+ BLE_i(17-1); // label
+#endif
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSL_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,1);
+ LSL_rrr(d, d, REG_WORK1);
+
+ MOV_ri(REG_WORK2, 0x80);
+ LSR_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+ PUSH(REG_WORK2);
+
+ RSB_rri(REG_WORK1, REG_WORK1, 8);
+#if defined(ARMV6_ASSEMBLY)
+ UXTB_rr(REG_WORK2, s);
+#else
+ ROR_rri(REG_WORK2, s, 8);
+ LSR_rri(REG_WORK2, REG_WORK2, 24);
+#endif
+ ORR_rrrLSRr(d,d,REG_WORK2,REG_WORK1);
+
+ POP(REG_WORK2);
+ TST_rr(s, REG_WORK2);
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ B_i(0); // label2
+
+// label:
+ MSR_CPSRf_i(0);
+
+// label2:
+ raw_sign_extend_8_rr(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROXL_w,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROXL_w_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MOV_rr(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 34);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 34);
+ CMP_ri(REG_WORK1, 17);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 17);
+ CMP_ri(REG_WORK1, 0);
+#if defined(ARMV6_ASSEMBLY)
+ BLE_i(16-1); // label
+#else
+ BLE_i(17-1); // label
+#endif
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSL_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,1);
+ LSL_rrr(d, d, REG_WORK1);
+
+ MOV_ri(REG_WORK2, 0x8000);
+ LSR_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+ PUSH(REG_WORK2);
+
+#if defined(ARMV6_ASSEMBLY)
+ UXTH_rr(REG_WORK2, s);
+#else
+ LSL_rri(REG_WORK2, s, 16);
+ LSR_rri(REG_WORK2, REG_WORK2, 16);
+#endif
+
+ RSB_rri(REG_WORK1, REG_WORK1, 16);
+ ORR_rrrLSRr(d,d,REG_WORK2,REG_WORK1);
+
+ POP(REG_WORK2);
+ TST_rr(s, REG_WORK2);
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ B_i(0); // label2
+
+// label:
+ MSR_CPSRf_i(0);
+
+// label2:
+ SIGNED16_REG_2_REG(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROXL_l,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROXL_l_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MOV_rr(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 33);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 33);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(13-1); // label
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSL_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,1);
+ LSL_rrr(d, d, REG_WORK1);
+
+ MOV_ri(REG_WORK2, 0x80000000);
+ LSR_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+ RSB_rri(REG_WORK1, REG_WORK1, 32);
+ ORR_rrrLSRr(d,d,s,REG_WORK1);
+
+ TST_rr(s, REG_WORK2);
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ B_i(0);// label2
+
+// label:
+ MSR_CPSRf_i(0);
+
+// label2:
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+/*
+ * ROXLW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand.
+ *
+ */
+MIDFUNC(2,jnf_ROXLW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,1);
+ ADC_rri(d,d,0);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,jff_ROXLW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,1);
+ ADC_rri(d,d,0);
+ MSR_CPSRf_i(0);
+ LSLS_rri(d,d,15);
+ LSR_rri(d,d,16);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+/*
+ * ROR
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ * <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand. Cleared when the rotate count is zero.
+ *
+ */
+MIDFUNC(3,jnf_ROR_b_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,24);
+ ORR_rrrLSRi(d,d,d,8);
+ ORR_rrrLSRi(d,d,d,16);
+ ROR_rri(d,d,i);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROR_w_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ ROR_rri(d,d,i);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROR_l_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ ROR_rri(d,s,i);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROR_b_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,24);
+ ORR_rrrLSRi(d,d,d,8);
+ ORR_rrrLSRi(d,d,d,16);
+ MSR_CPSRf_i(0);
+ RORS_rri(d,d,i);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROR_w_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ MSR_CPSRf_i(0);
+	RORS_rri(d,d,i);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROR_l_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ MSR_CPSRf_i(0);
+	RORS_rri(d,s,i);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROR_b,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROR_b_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,24);
+ ORR_rrrLSRi(d,d,d,8);
+ ORR_rrrLSRi(d,d,d,16);
+ ROR_rrr(d,d,i);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jnf_ROR_w,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROR_w_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ ROR_rrr(d,d,i);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jnf_ROR_l,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROR_l_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ ROR_rrr(d,s,i);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROR_b,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROR_b_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,24);
+ ORR_rrrLSRi(d,d,d,8);
+ ORR_rrrLSRi(d,d,d,16);
+ MSR_CPSRf_i(0);
+ AND_rri(REG_WORK1, i, 63);
+ RORS_rrr(d,d,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROR_w,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROR_w_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ ORR_rrrLSRi(d,d,d,16);
+ MSR_CPSRf_i(0);
+ AND_rri(REG_WORK1, i, 63);
+ RORS_rrr(d,d,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROR_l,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROR_l_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MSR_CPSRf_i(0);
+ AND_rri(REG_WORK1, i, 63);
+ RORS_rrr(d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+/*
+ * ROXR
+ * Operand Syntax: Dx, Dy
+ * #<data>, Dy
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set according to the last bit rotated out of the operand. Cleared when the rotate count is zero.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand. Cleared when the rotate count is zero.
+ *
+ */
+MIDFUNC(3,jnf_ROXR_b_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ LSR_rri(d,s,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (0x80 >> (i - 1)));
+ if (i > 1) ORR_rrrLSLi(d,d,s,(9-i));
+ } else {
+ MOV_rr(d,s);
+ }
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROXR_w_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ LSR_rri(d,s,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (0x8000 >> (i - 1)));
+ if (i > 1) ORR_rrrLSLi(d,d,s,(17-i));
+ } else {
+ MOV_rr(d,s);
+ }
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROXR_l_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ LSR_rri(d,s,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (0x80000000 >> (i - 1)));
+ if (i > 1) ORR_rrrLSLi(d,d,s,(33-i));
+ } else {
+ MOV_rr(d,s);
+ }
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROXR_b_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ UNSIGNED8_REG_2_REG(d,s);
+ LSR_rri(d,d,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (0x80 >> (i - 1)));
+ if (i > 1) ORR_rrrLSLi(d,d,s,(9-i));
+ TST_ri(s, (1<<(i-1)));
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ } else {
+ MOV_rr(d,s);
+ MSR_CPSRf_i(0);
+ }
+
+ SIGNED8_REG_2_REG(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROXR_w_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ UNSIGNED16_REG_2_REG(d,s);
+ LSR_rri(d,d,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (0x8000 >> (i - 1)));
+ if (i > 1) ORR_rrrLSLi(d,d,s,(17-i));
+ TST_ri(s, (1<<(i-1)));
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ } else {
+ MOV_rr(d,s);
+ MSR_CPSRf_i(0);
+ }
+
+ SIGNED16_REG_2_REG(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_ROXR_l_imm,(W4 d, RR4 s, IMM i))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ if (i > 0) {
+ LSR_rri(d,s,i);
+ CC_ORR_rri(NATIVE_CC_CS, d,d, (0x80000000 >> (i - 1)));
+ if (i > 1) ORR_rrrLSLi(d,d,s,(33-i));
+ TST_ri(s, (1<<(i-1)));
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ } else {
+ MOV_rr(d,s);
+ MSR_CPSRf_i(0);
+ }
+
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_ROXR_b,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROXR_b_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ UNSIGNED8_REG_2_REG(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 36);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 36);
+ CMP_ri(REG_WORK1, 18);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 18);
+ CMP_ri(REG_WORK1, 9);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 9);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(7-1);
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSR_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,0x80);
+ LSR_rrr(d, d, REG_WORK1);
+ RSB_rri(REG_WORK1, REG_WORK1, 8);
+ ORR_rrrLSLr(d,d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jnf_ROXR_w,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROXR_w_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ UNSIGNED16_REG_2_REG(d,s);
+ MRS_CPSR(REG_WORK2);
+	AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 34);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 34);
+ CMP_ri(REG_WORK1, 17);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 17);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(7-1);
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSR_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,0x8000);
+ LSR_rrr(d, d, REG_WORK1);
+ RSB_rri(REG_WORK1, REG_WORK1, 16);
+ ORR_rrrLSLr(d,d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jnf_ROXR_l,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jnf_ROXR_l_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MOV_rr(d,s);
+ MRS_CPSR(REG_WORK2);
+	AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 33);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 33);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(7-1);
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSR_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,0x80000000);
+ LSR_rrr(d, d, REG_WORK1);
+ RSB_rri(REG_WORK1, REG_WORK1, 32);
+ ORR_rrrLSLr(d,d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROXR_b,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROXR_b_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ UNSIGNED8_REG_2_REG(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 36);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 36);
+ CMP_ri(REG_WORK1, 18);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 18);
+ CMP_ri(REG_WORK1, 9);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 9);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(13-1); // label
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSR_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,0x80);
+ LSR_rrr(d, d, REG_WORK1);
+
+ MOV_ri(REG_WORK2, 1);
+ LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+ RSB_rri(REG_WORK1, REG_WORK1, 8);
+ ORR_rrrLSLr(d,d,s,REG_WORK1);
+
+ TST_rr(s, REG_WORK2);
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ B_i(0);// label2
+
+// label:
+ MSR_CPSRf_i(0);
+
+// label2:
+ SIGNED8_REG_2_REG(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROXR_w,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROXR_w_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ UNSIGNED16_REG_2_REG(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 34);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 34);
+ CMP_ri(REG_WORK1, 17);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 17);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(13-1); // label
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSR_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,0x8000);
+ LSR_rrr(d, d, REG_WORK1);
+
+ MOV_ri(REG_WORK2, 1);
+ LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+ RSB_rri(REG_WORK1, REG_WORK1, 16);
+ ORR_rrrLSLr(d,d,s,REG_WORK1);
+
+ TST_rr(s, REG_WORK2);
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ B_i(0);// label2
+
+// label:
+ MSR_CPSRf_i(0);
+
+// label2:
+ SIGNED16_REG_2_REG(d,d);
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+MIDFUNC(3,jff_ROXR_l,(W4 d, RR4 s, RR4 i))
+{
+ if (isconst(i)) {
+ COMPCALL(jff_ROXR_l_imm)(d,s,(uae_u8)live.state[i].val);
+ return;
+ }
+
+ s=readreg(s,4);
+ i=readreg(i,4);
+ d=writereg(d,4);
+
+ MOV_rr(d,s);
+ MRS_CPSR(REG_WORK2);
+
+ AND_rri(REG_WORK1, i, 0x3f);
+ CMP_ri(REG_WORK1, 33);
+ CC_SUB_rri(NATIVE_CC_GE, REG_WORK1, REG_WORK1, 33);
+ CMP_ri(REG_WORK1, 0);
+ BLE_i(13-1); // label
+
+ SUB_rri(REG_WORK1, REG_WORK1, 1);
+ LSR_rri(d, d, 1);
+ MSR_CPSRf_r(REG_WORK2);
+ CC_ORR_rri(NATIVE_CC_CS, d,d,0x80000000);
+ LSR_rrr(d, d, REG_WORK1);
+
+ MOV_ri(REG_WORK2, 1);
+ LSL_rrr(REG_WORK2, REG_WORK2, REG_WORK1);
+
+ RSB_rri(REG_WORK1, REG_WORK1, 32);
+ ORR_rrrLSLr(d,d,s,REG_WORK1);
+
+ TST_rr(s, REG_WORK2);
+ CC_MSR_CPSRf_i(NATIVE_CC_NE, ARM_C_FLAG);
+ CC_MSR_CPSRf_i(NATIVE_CC_EQ, 0);
+ B_i(0);// label2
+
+// label:
+ MSR_CPSRf_i(0);
+
+// label2:
+ TST_rr(d,d);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(i);
+}
+
+/*
+ * ROXRW
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 16
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the result is set. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Set according to the last bit rotated out of the operand.
+ *
+ */
+MIDFUNC(2,jnf_ROXRW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ RRX_rr(d,d);
+ LSR_rri(d,d,16);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,jff_ROXRW,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ LSL_rri(d,s,16);
+ MSR_CPSRf_i(0);
+ RRXS_rr(d,d);
+ LSR_rri(d,d,16);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+/*
+ * SUB
+ * Operand Syntax: <ea>, Dn
+ * Dn, <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set the same as the carry bit.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Set if the result is zero. Cleared otherwise.
+ * V Set if an overflow is generated. Cleared otherwise.
+ * C Set if a carry is generated. Cleared otherwise.
+ *
+ */
+MIDFUNC(3,jnf_SUB_b_imm,(W4 d, RR4 s, IMM v))
+{
+ if (isconst(s)) {
+ set_const(d,live.state[s].val-v);
+ return;
+ }
+
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ UNSIGNED8_IMM_2_REG(REG_WORK1, (uint8)v);
+ SUB_rrr(d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_SUB_b,(W4 d, RR4 s, RR4 v))
+{
+ if (isconst(v)) {
+ COMPCALL(jnf_SUB_b_imm)(d,s,live.state[v].val);
+ return;
+ }
+
+ // d has to be different to s and v
+ v=readreg(v,4);
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SUB_rrr(d,s,v);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jnf_SUB_w_imm,(W4 d, RR4 s, IMM v))
+{
+ if (isconst(s)) {
+ set_const(d,live.state[s].val-v);
+ return;
+ }
+
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ UNSIGNED16_IMM_2_REG(REG_WORK1, (uint16)v);
+ SUB_rrr(d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_SUB_w,(W4 d, RR4 s, RR4 v))
+{
+ if (isconst(v)) {
+ COMPCALL(jnf_SUB_w_imm)(d,s,live.state[v].val);
+ return;
+ }
+
+ // d has to be different to s and v
+ v=readreg(v,4);
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SUB_rrr(d,s,v);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jnf_SUB_l_imm,(W4 d, RR4 s, IMM v))
+{
+ if (isconst(s)) {
+ set_const(d,live.state[s].val-v);
+ return;
+ }
+
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ compemu_raw_mov_l_ri(REG_WORK1, v);
+ SUB_rrr(d,s,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jnf_SUB_l,(W4 d, RR4 s, RR4 v))
+{
+ if (isconst(v)) {
+ COMPCALL(jnf_SUB_l_imm)(d,s,live.state[v].val);
+ return;
+ }
+
+ // d has to be different to s and v
+ v=readreg(v,4);
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SUB_rrr(d,s,v);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jff_SUB_b_imm,(W4 d, RR1 s, IMM v))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SIGNED8_IMM_2_REG(REG_WORK2, (uint8)v);
+ SIGNED8_REG_2_REG(REG_WORK1, s);
+ SUBS_rrr(d,REG_WORK1,REG_WORK2);
+
+ // Todo: Handle this with inverted carry
+ MRS_CPSR(REG_WORK1);// mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);// eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1);// msr CPSR_fc, r2
+ // inverted_carry = true;
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_SUB_b,(W4 d, RR1 s, RR1 v))
+{
+ if (isconst(v)) {
+ COMPCALL(jff_SUB_b_imm)(d,s,live.state[v].val);
+ return;
+ }
+
+ v=readreg(v,4);
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SIGNED8_REG_2_REG(REG_WORK1, s);
+ SIGNED8_REG_2_REG(REG_WORK2, v);
+ SUBS_rrr(d,REG_WORK1,REG_WORK2);
+
+ // Todo: Handle this with inverted carry
+ MRS_CPSR(REG_WORK1);// mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);// eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1);// msr CPSR_fc, r2
+ // inverted_carry = true;
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jff_SUB_w_imm,(W4 d, RR2 s, IMM v))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SIGNED16_IMM_2_REG(REG_WORK2, (uint16)v);
+ SIGNED16_REG_2_REG(REG_WORK1, s);
+ SUBS_rrr(d,REG_WORK1,REG_WORK2);
+
+ // Todo: Handle this with inverted carry
+ MRS_CPSR(REG_WORK1);// mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);// eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1);// msr CPSR_fc, r2
+ // inverted_carry = true;
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_SUB_w,(W4 d, RR2 s, RR2 v))
+{
+ if (isconst(v)) {
+ COMPCALL(jff_SUB_w_imm)(d,s,live.state[v].val);
+ return;
+ }
+
+ v=readreg(v,4);
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SIGNED16_REG_2_REG(REG_WORK1, s);
+ SIGNED16_REG_2_REG(REG_WORK2, v);
+ SUBS_rrr(d,REG_WORK1,REG_WORK2);
+
+ // Todo: Handle this with inverted carry
+ MRS_CPSR(REG_WORK1);// mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);// eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1);// msr CPSR_fc, r2
+ // inverted_carry = true;
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jff_SUB_l_imm,(W4 d, RR4 s, IMM v))
+{
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ compemu_raw_mov_l_ri(REG_WORK2, v);
+ SUBS_rrr(d,s,REG_WORK2);
+
+ // Todo: Handle this with inverted carry
+ MRS_CPSR(REG_WORK1);// mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);// eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1);// msr CPSR_fc, r2
+ // inverted_carry = true;
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(3,jff_SUB_l,(W4 d, RR4 s, RR4 v))
+{
+ if (isconst(v)) {
+ COMPCALL(jff_SUB_l_imm)(d,s,live.state[v].val);
+ return;
+ }
+
+ v=readreg(v,4);
+ s=readreg(s,4);
+ d=writereg(d,4);
+
+ SUBS_rrr(d,s,v);
+
+ // Todo: Handle this with inverted carry
+ MRS_CPSR(REG_WORK1);// mrs r2, CPSR
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);// eor r2, r2, #0x20000000
+ MSR_CPSR_r(REG_WORK1);// msr CPSR_fc, r2
+ // inverted_carry = true;
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+/*
+ * SUBA
+ *
+ * Operand Syntax: <ea>, Dn
+ *
+ * Operand Size: 16,32
+ *
+ * Flags: Not affected.
+ *
+ */
+MIDFUNC(2,jnf_SUBA_b,(W4 d, RR1 s))
+{
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ SIGNED8_REG_2_REG(REG_WORK1,s);
+ SUB_rrr(d,d,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,jnf_SUBA_w,(W4 d, RR2 s))
+{
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ SIGNED16_REG_2_REG(REG_WORK1,s);
+ SUB_rrr(d,d,REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+MIDFUNC(2,jnf_SUBA_l,(W4 d, RR4 s))
+{
+ s=readreg(s,4);
+ d=rmw(d,4,4);
+
+ SUB_rrr(d,d,s);
+
+ unlock2(d);
+ unlock2(s);
+}
+
+/*
+ * SUBX
+ * Operand Syntax: Dy, Dx
+ * -(Ay), -(Ax)
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Set the same as the carry bit.
+ * N Set if the result is negative. Cleared otherwise.
+ * Z Cleared if the result is nonzero. Unchanged otherwise.
+ * V Set if an overflow is generated. Cleared otherwise.
+ * C Set if a carry is generated. Cleared otherwise.
+ *
+ * Attention: Z is cleared only if the result is nonzero. Unchanged otherwise
+ *
+ */
+MIDFUNC(3,jnf_SUBX,(W4 d, RR4 s, RR4 v))
+{
+ s=readreg(s,4);
+ v=readreg(v,4);
+ d=writereg(d,4);
+
+ SBC_rrr(d,s,v);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jff_SUBX_b,(W4 d, RR1 s, RR1 v))
+{
+ s=readreg(s,4);
+ v=readreg(v,4);
+ d=writereg(d,4);
+
+ MRS_CPSR(REG_WORK1);
+ CC_MVN_ri(NATIVE_CC_EQ, REG_WORK1, 0);
+ CC_MVN_ri(NATIVE_CC_NE, REG_WORK1, ARM_Z_FLAG);
+ PUSH(REG_WORK1);
+
+ SIGNED8_REG_2_REG(REG_WORK1, s);
+ SIGNED8_REG_2_REG(REG_WORK2, v);
+ SBCS_rrr(d,REG_WORK1,REG_WORK2);
+
+ POP(REG_WORK2);
+ MRS_CPSR(REG_WORK1);
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+ AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+ MSR_CPSR_r(REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jff_SUBX_w,(W4 d, RR2 s, RR2 v))
+{
+ s=readreg(s,4);
+ v=readreg(v,4);
+ d=writereg(d,4);
+
+ MRS_CPSR(REG_WORK1);
+ CC_MVN_ri(NATIVE_CC_EQ, REG_WORK1, 0);
+ CC_MVN_ri(NATIVE_CC_NE, REG_WORK1, ARM_Z_FLAG);
+ PUSH(REG_WORK1);
+
+ SIGNED16_REG_2_REG(REG_WORK1, s);
+ SIGNED16_REG_2_REG(REG_WORK2, v);
+ SBCS_rrr(d,REG_WORK1,REG_WORK2);
+
+ POP(REG_WORK2);
+ MRS_CPSR(REG_WORK1);
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+ AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+ MSR_CPSR_r(REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+MIDFUNC(3,jff_SUBX_l,(W4 d, RR4 s, RR4 v))
+{
+ s=readreg(s,4);
+ v=readreg(v,4);
+ d=writereg(d,4);
+
+ MRS_CPSR(REG_WORK2);
+ CC_MVN_ri(NATIVE_CC_EQ, REG_WORK2, 0);
+ CC_MVN_ri(NATIVE_CC_NE, REG_WORK2, ARM_Z_FLAG);
+
+ SBCS_rrr(d,s,v);
+
+ MRS_CPSR(REG_WORK1);
+ EOR_rri(REG_WORK1, REG_WORK1, ARM_C_FLAG);
+ AND_rrr(REG_WORK1, REG_WORK1, REG_WORK2);
+ MSR_CPSR_r(REG_WORK1);
+
+ unlock2(d);
+ unlock2(s);
+ unlock2(v);
+}
+
+/*
+ * SWAP
+ * Operand Syntax: Dn
+ *
+ * Operand Size: 16
+ *
+ * X Not affected.
+ * N Set if the most significant bit of the 32-bit result is set. Cleared otherwise.
+ * Z Set if the 32-bit result is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
+MIDFUNC(1,jnf_SWAP,(RW4 d))
+{
+ d=rmw(d,4,4);
+
+ ROR_rri(d,d,16);
+
+ unlock2(d);
+}
+
+MIDFUNC(1,jff_SWAP,(RW4 d))
+{
+ d=rmw(d,4,4);
+
+ ROR_rri(d,d,16);
+ MSR_CPSRf_i(0);
+ TST_rr(d,d);
+
+ unlock2(d);
+}
+
+/*
+ * TST
+ * Operand Syntax: <ea>
+ *
+ * Operand Size: 8,16,32
+ *
+ * X Not affected.
+ * N Set if the operand is negative. Cleared otherwise.
+ * Z Set if the operand is zero. Cleared otherwise.
+ * V Always cleared.
+ * C Always cleared.
+ *
+ */
/*
 * 68k TST.B: set N/Z from the sign-extended byte operand, clear V/C.
 * Constant operands are folded directly from live.state without
 * allocating the register.
 */
MIDFUNC(1,jff_TST_b,(RR1 s))
{
	if (isconst(s)) {
		SIGNED8_IMM_2_REG(REG_WORK1, (uint8)live.state[s].val);
	} else {
		s=readreg(s,4);
		SIGNED8_REG_2_REG(REG_WORK1, s);
		unlock2(s);
	}
	/* Clear the whole flag field, then let TST set N/Z from the operand. */
	MSR_CPSRf_i(0);
	TST_rr(REG_WORK1,REG_WORK1);
}
+
/* 68k TST.W: set N/Z from the sign-extended word operand, clear V/C.
 * Mirrors jff_TST_b with 16-bit sign extension. */
MIDFUNC(1,jff_TST_w,(RR2 s))
{
	if (isconst(s)) {
		SIGNED16_IMM_2_REG(REG_WORK1, (uint16)live.state[s].val);
	} else {
		s=readreg(s,4);
		SIGNED16_REG_2_REG(REG_WORK1, s);
		unlock2(s);
	}
	MSR_CPSRf_i(0);
	TST_rr(REG_WORK1,REG_WORK1);
}
+
/* 68k TST.L: set N/Z from the 32-bit operand, clear V/C.
 * Flags are cleared up front here (unlike _b/_w); the intermediate raw
 * MOV is presumably flag-neutral (no S suffix) — verify if this is changed. */
MIDFUNC(1,jff_TST_l,(RR4 s))
{
	MSR_CPSRf_i(0);

	if (isconst(s)) {
		compemu_raw_mov_l_ri(REG_WORK1, live.state[s].val);
		TST_rr(REG_WORK1,REG_WORK1);
	}
	else {
		s=readreg(s,4);
		TST_rr(s,s);
		unlock2(s);
	}
}
--- /dev/null
+/*
+ * compiler/compemu_midfunc_arm2.h - Native MIDFUNCS for ARM (JIT v2)
+ *
+ * Copyright (c) 2014 Jens Heitmann of ARAnyM dev team (see AUTHORS)
+ *
+ * Inspired by Christian Bauer's Basilisk II
+ *
+ * Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
+ *
+ * Adaptation for Basilisk II and improvements, copyright 2000-2002
+ * Gwenole Beauchesne
+ *
+ * Basilisk II (C) 1997-2002 Christian Bauer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Note:
+ * File is included by compemu.h
+ *
+ */
+
+// Arm optimized midfunc
+extern const uae_u32 ARM_CCR_MAP[];
+
+DECLARE_MIDFUNC(restore_inverted_carry(void));
+
+// ADD
+DECLARE_MIDFUNC(jnf_ADD(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jnf_ADD_imm(W4 d, RR4 s, IMM v));
+DECLARE_MIDFUNC(jff_ADD_b(W4 d, RR1 s, RR1 v));
+DECLARE_MIDFUNC(jff_ADD_w(W4 d, RR2 s, RR2 v));
+DECLARE_MIDFUNC(jff_ADD_l(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_ADD_b_imm(W4 d, RR1 s, IMM v));
+DECLARE_MIDFUNC(jff_ADD_w_imm(W4 d, RR2 s, IMM v));
+DECLARE_MIDFUNC(jff_ADD_l_imm(W4 d, RR4 s, IMM v));
+
+// ADDA
+DECLARE_MIDFUNC(jnf_ADDA_b(W4 d, RR1 s));
+DECLARE_MIDFUNC(jnf_ADDA_w(W4 d, RR2 s));
+DECLARE_MIDFUNC(jnf_ADDA_l(W4 d, RR4 s));
+
+// ADDX
+DECLARE_MIDFUNC(jnf_ADDX(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_ADDX_b(W4 d, RR1 s, RR4 v));
+DECLARE_MIDFUNC(jff_ADDX_w(W4 d, RR2 s, RR4 v));
+DECLARE_MIDFUNC(jff_ADDX_l(W4 d, RR4 s, RR4 v));
+
+// AND
+DECLARE_MIDFUNC(jnf_AND(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_AND_b(W4 d, RR1 s, RR1 v));
+DECLARE_MIDFUNC(jff_AND_w(W4 d, RR2 s, RR2 v));
+DECLARE_MIDFUNC(jff_AND_l(W4 d, RR4 s, RR4 v));
+
+// ANDSR
+DECLARE_MIDFUNC(jff_ANDSR(IMM s, IMM x));
+
+// ASL
+DECLARE_MIDFUNC(jff_ASL_b_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_ASL_w_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_ASL_l_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_ASL_b_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ASL_w_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ASL_l_reg(W4 d, RR4 s, RR4 i));
+
+// ASLW
+DECLARE_MIDFUNC(jff_ASLW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_ASLW(W4 d, RR4 s));
+
+// ASR
+DECLARE_MIDFUNC(jnf_ASR_b_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jnf_ASR_w_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jnf_ASR_l_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_ASR_b_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_ASR_w_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_ASR_l_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jnf_ASR_b_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ASR_w_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ASR_l_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ASR_b_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ASR_w_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ASR_l_reg(W4 d, RR4 s, RR4 i));
+
+// ASRW
+DECLARE_MIDFUNC(jff_ASRW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_ASRW(W4 d, RR4 s));
+
+// BCHG
+DECLARE_MIDFUNC(jnf_BCHG_b_imm(RW4 d, IMM s));
+DECLARE_MIDFUNC(jnf_BCHG_l_imm(RW4 d, IMM s));
+
+DECLARE_MIDFUNC(jff_BCHG_b_imm(RW4 d, IMM s));
+DECLARE_MIDFUNC(jff_BCHG_l_imm(RW4 d, IMM s));
+
+DECLARE_MIDFUNC(jnf_BCHG_b(RW4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_BCHG_l(RW4 d, RR4 s));
+
+DECLARE_MIDFUNC(jff_BCHG_b(RW4 d, RR4 s));
+DECLARE_MIDFUNC(jff_BCHG_l(RW4 d, RR4 s));
+
+// BCLR
+DECLARE_MIDFUNC(jnf_BCLR_b_imm(RW4 d, IMM s));
+DECLARE_MIDFUNC(jnf_BCLR_l_imm(RW4 d, IMM s));
+
+DECLARE_MIDFUNC(jnf_BCLR_b(RW4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_BCLR_l(RW4 d, RR4 s));
+
+DECLARE_MIDFUNC(jff_BCLR_b_imm(RW4 d, IMM s));
+DECLARE_MIDFUNC(jff_BCLR_l_imm(RW4 d, IMM s));
+
+DECLARE_MIDFUNC(jff_BCLR_b(RW4 d, RR4 s));
+DECLARE_MIDFUNC(jff_BCLR_l(RW4 d, RR4 s));
+
+// BSET
+DECLARE_MIDFUNC(jnf_BSET_b_imm(RW4 d, IMM s));
+DECLARE_MIDFUNC(jnf_BSET_l_imm(RW4 d, IMM s));
+
+DECLARE_MIDFUNC(jnf_BSET_b(RW4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_BSET_l(RW4 d, RR4 s));
+
+DECLARE_MIDFUNC(jff_BSET_b_imm(RW4 d, IMM s));
+DECLARE_MIDFUNC(jff_BSET_l_imm(RW4 d, IMM s));
+
+DECLARE_MIDFUNC(jff_BSET_b(RW4 d, RR4 s));
+DECLARE_MIDFUNC(jff_BSET_l(RW4 d, RR4 s));
+
+// BTST
+DECLARE_MIDFUNC(jff_BTST_b_imm(RR4 d, IMM s));
+DECLARE_MIDFUNC(jff_BTST_l_imm(RR4 d, IMM s));
+
+DECLARE_MIDFUNC(jff_BTST_b(RR4 d, RR4 s));
+DECLARE_MIDFUNC(jff_BTST_l(RR4 d, RR4 s));
+
+// CLR
+DECLARE_MIDFUNC (jnf_CLR(W4 d));
+DECLARE_MIDFUNC (jff_CLR(W4 d));
+
+// CMP
+DECLARE_MIDFUNC(jff_CMP_b(RR1 d, RR1 s));
+DECLARE_MIDFUNC(jff_CMP_w(RR2 d, RR2 s));
+DECLARE_MIDFUNC(jff_CMP_l(RR4 d, RR4 s));
+
+// CMPA
+DECLARE_MIDFUNC(jff_CMPA_b(RR1 d, RR1 s));
+DECLARE_MIDFUNC(jff_CMPA_w(RR2 d, RR2 s));
+DECLARE_MIDFUNC(jff_CMPA_l(RR4 d, RR4 s));
+
+// EOR
+DECLARE_MIDFUNC(jnf_EOR(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_EOR_b(W4 d, RR1 s, RR1 v));
+DECLARE_MIDFUNC(jff_EOR_w(W4 d, RR2 s, RR2 v));
+DECLARE_MIDFUNC(jff_EOR_l(W4 d, RR4 s, RR4 v));
+
+// EORSR
+DECLARE_MIDFUNC(jff_EORSR(IMM s, IMM x));
+
+// EXT
+DECLARE_MIDFUNC(jnf_EXT_b(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_EXT_w(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_EXT_l(W4 d, RR4 s));
+DECLARE_MIDFUNC(jff_EXT_b(W4 d, RR4 s));
+DECLARE_MIDFUNC(jff_EXT_w(W4 d, RR4 s));
+DECLARE_MIDFUNC(jff_EXT_l(W4 d, RR4 s));
+
+// LSL
+DECLARE_MIDFUNC(jnf_LSL_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jnf_LSL_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_LSL_b_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_LSL_w_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_LSL_l_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_LSL_b_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_LSL_w_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_LSL_l_reg(W4 d, RR4 s, RR4 i));
+
+// LSLW
+DECLARE_MIDFUNC(jff_LSLW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_LSLW(W4 d, RR4 s));
+
+// LSR
+DECLARE_MIDFUNC(jnf_LSR_b_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jnf_LSR_w_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jnf_LSR_l_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_LSR_b_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_LSR_w_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jff_LSR_l_imm(W4 d, RR4 s, IMM i));
+DECLARE_MIDFUNC(jnf_LSR_b_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_LSR_w_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_LSR_l_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_LSR_b_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_LSR_w_reg(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_LSR_l_reg(W4 d, RR4 s, RR4 i));
+
+// LSRW
+DECLARE_MIDFUNC(jff_LSRW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_LSRW(W4 d, RR4 s));
+
+// MOVE
+DECLARE_MIDFUNC(jnf_MOVE(W4 d, RR4 s));
+DECLARE_MIDFUNC(jff_MOVE_b_imm(W4 d, IMM i));
+DECLARE_MIDFUNC(jff_MOVE_w_imm(W4 d, IMM i));
+DECLARE_MIDFUNC(jff_MOVE_l_imm(W4 d, IMM i));
+DECLARE_MIDFUNC(jff_MOVE_b(W4 d, RR1 s));
+DECLARE_MIDFUNC(jff_MOVE_w(W4 d, RR2 s));
+DECLARE_MIDFUNC(jff_MOVE_l(W4 d, RR4 s));
+
+// MOVE16
+DECLARE_MIDFUNC(jnf_MOVE16(RR4 d, RR4 s));
+
+// MOVEA
+DECLARE_MIDFUNC(jnf_MOVEA_w(W4 d, RR2 s));
+DECLARE_MIDFUNC(jnf_MOVEA_l(W4 d, RR4 s));
+
+// MULS
+DECLARE_MIDFUNC (jnf_MULS(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jff_MULS(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jnf_MULS32(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jff_MULS32(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jnf_MULS64(RW4 d, RW4 s));
+DECLARE_MIDFUNC (jff_MULS64(RW4 d, RW4 s));
+
+// MULU
+DECLARE_MIDFUNC (jnf_MULU(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jff_MULU(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jnf_MULU32(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jff_MULU32(RW4 d, RR4 s));
+DECLARE_MIDFUNC (jnf_MULU64(RW4 d, RW4 s));
+DECLARE_MIDFUNC (jff_MULU64(RW4 d, RW4 s));
+
+// NEG
+DECLARE_MIDFUNC(jnf_NEG(W4 d, RR4 s));
+DECLARE_MIDFUNC(jff_NEG_b(W4 d, RR1 s));
+DECLARE_MIDFUNC(jff_NEG_w(W4 d, RR2 s));
+DECLARE_MIDFUNC(jff_NEG_l(W4 d, RR4 s));
+
+// NEGX
+DECLARE_MIDFUNC(jnf_NEGX(W4 d, RR4 s));
+DECLARE_MIDFUNC(jff_NEGX_b(W4 d, RR1 s));
+DECLARE_MIDFUNC(jff_NEGX_w(W4 d, RR2 s));
+DECLARE_MIDFUNC(jff_NEGX_l(W4 d, RR4 s));
+
+// NOT
+DECLARE_MIDFUNC(jnf_NOT(W4 d, RR4 s));
+DECLARE_MIDFUNC(jff_NOT_b(W4 d, RR1 s));
+DECLARE_MIDFUNC(jff_NOT_w(W4 d, RR2 s));
+DECLARE_MIDFUNC(jff_NOT_l(W4 d, RR4 s));
+
+// OR
+DECLARE_MIDFUNC(jnf_OR(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_OR_b(W4 d, RR1 s, RR1 v));
+DECLARE_MIDFUNC(jff_OR_w(W4 d, RR2 s, RR2 v));
+DECLARE_MIDFUNC(jff_OR_l(W4 d, RR4 s, RR4 v));
+
+// ORSR
+DECLARE_MIDFUNC(jff_ORSR(IMM s, IMM x));
+
+// ROL
+DECLARE_MIDFUNC(jnf_ROL_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROL_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROL_l(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROL_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROL_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROL_l(W4 d, RR4 s, RR4 i));
+
+// ROLW
+DECLARE_MIDFUNC(jff_ROLW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_ROLW(W4 d, RR4 s));
+
+// RORW
+DECLARE_MIDFUNC(jff_RORW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_RORW(W4 d, RR4 s));
+
+// ROXL
+DECLARE_MIDFUNC(jnf_ROXL_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROXL_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROXL_l(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROXL_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROXL_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROXL_l(W4 d, RR4 s, RR4 i));
+
+// ROXLW
+DECLARE_MIDFUNC(jff_ROXLW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_ROXLW(W4 d, RR4 s));
+
+// ROR
+DECLARE_MIDFUNC(jnf_ROR_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROR_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROR_l(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROR_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROR_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROR_l(W4 d, RR4 s, RR4 i));
+
+// ROXR
+DECLARE_MIDFUNC(jnf_ROXR_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROXR_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jnf_ROXR_l(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROXR_b(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROXR_w(W4 d, RR4 s, RR4 i));
+DECLARE_MIDFUNC(jff_ROXR_l(W4 d, RR4 s, RR4 i));
+
+// ROXRW
+DECLARE_MIDFUNC(jff_ROXRW(W4 d, RR4 s));
+DECLARE_MIDFUNC(jnf_ROXRW(W4 d, RR4 s));
+
+// SUB
+DECLARE_MIDFUNC(jnf_SUB_b_imm(W4 d, RR4 s, IMM v));
+DECLARE_MIDFUNC(jnf_SUB_b(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jnf_SUB_w_imm(W4 d, RR4 s, IMM v));
+DECLARE_MIDFUNC(jnf_SUB_w(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jnf_SUB_l_imm(W4 d, RR4 s, IMM v));
+DECLARE_MIDFUNC(jnf_SUB_l(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_SUB_b(W4 d, RR1 s, RR1 v));
+DECLARE_MIDFUNC(jff_SUB_w(W4 d, RR2 s, RR2 v));
+DECLARE_MIDFUNC(jff_SUB_l(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_SUB_b_imm(W4 d, RR1 s, IMM v));
+DECLARE_MIDFUNC(jff_SUB_w_imm(W4 d, RR2 s, IMM v));
+DECLARE_MIDFUNC(jff_SUB_l_imm(W4 d, RR4 s, IMM v));
+
+// SUBA
+DECLARE_MIDFUNC(jnf_SUBA_b(W4 d, RR1 s));
+DECLARE_MIDFUNC(jnf_SUBA_w(W4 d, RR2 s));
+DECLARE_MIDFUNC(jnf_SUBA_l(W4 d, RR4 s));
+
+// SUBX
+DECLARE_MIDFUNC(jnf_SUBX(W4 d, RR4 s, RR4 v));
+DECLARE_MIDFUNC(jff_SUBX_b(W4 d, RR1 s, RR4 v));
+DECLARE_MIDFUNC(jff_SUBX_w(W4 d, RR2 s, RR4 v));
+DECLARE_MIDFUNC(jff_SUBX_l(W4 d, RR4 s, RR4 v));
+
+// SWAP
+DECLARE_MIDFUNC (jnf_SWAP(RW4 d));
+DECLARE_MIDFUNC (jff_SWAP(RW4 d));
+
+// TST
+DECLARE_MIDFUNC (jff_TST_b(RR1 s));
+DECLARE_MIDFUNC (jff_TST_w(RR2 s));
+DECLARE_MIDFUNC (jff_TST_l(RR4 s));
+
--- /dev/null
+/*
+ * compiler/flags_arm.h - Native flags definitions for ARM
+ *
+ * Copyright (c) 2013 Jens Heitmann of ARAnyM dev team (see AUTHORS)
+ *
+ * Inspired by Christian Bauer's Basilisk II
+ *
+ * Original 68040 JIT compiler for UAE, copyright 2000-2002 Bernd Meyer
+ *
+ * Adaptation for Basilisk II and improvements, copyright 2000-2002
+ * Gwenole Beauchesne
+ *
+ * Basilisk II (C) 1997-2002 Christian Bauer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef NATIVE_FLAGS_ARM_H
+#define NATIVE_FLAGS_ARM_H
+
/* Native integer code conditions: standard ARM condition-code field encodings. */
enum {
	NATIVE_CC_EQ = 0,	/* equal (Z set) */
	NATIVE_CC_NE = 1,	/* not equal (Z clear) */
	NATIVE_CC_CS = 2,	/* carry set / unsigned higher-or-same */
	NATIVE_CC_CC = 3,	/* carry clear / unsigned lower */
	NATIVE_CC_MI = 4,	/* minus (N set) */
	NATIVE_CC_PL = 5,	/* plus (N clear) */
	NATIVE_CC_VS = 6,	/* overflow set */
	NATIVE_CC_VC = 7,	/* overflow clear */
	NATIVE_CC_HI = 8,	/* unsigned higher */
	NATIVE_CC_LS = 9,	/* unsigned lower-or-same */
	NATIVE_CC_GE = 10,	/* signed greater-or-equal */
	NATIVE_CC_LT = 11,	/* signed less-than */
	NATIVE_CC_GT = 12,	/* signed greater-than */
	NATIVE_CC_LE = 13,	/* signed less-or-equal */
	NATIVE_CC_AL = 14	/* always */
};
+
+#endif /* NATIVE_FLAGS_ARM_H */
--- /dev/null
+/*
+ * compiler/gencomp_arm2.c - MC680x0 compilation generator (ARM Adaption JIT v1 & JIT v2)
+ *
+ * Based on work Copyright 1995, 1996 Bernd Schmidt
+ * Changes for UAE-JIT Copyright 2000 Bernd Meyer
+ *
+ * Adaptation for ARAnyM/ARM, copyright 2001-2015
+ * Milan Jurik, Jens Heitmann
+ *
+ * Basilisk II (C) 1997-2005 Christian Bauer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Notes
+ * =====
+ *
+ * Advantages of JIT v2
+ * - Processor independent style
+ * - Reduced overhead
+ * - Easier to understand / read
+ * - Easier to optimize
+ * - More precise flag handling
+ * - Better optimization for different CPU version ARM, ARMv6 etc..
+ *
+ * Disadvantages of JIT v2
+ * - Less auto-generated code
+ * - Requires more code implementation by hand (MidFunc)
+ * - MIDFUNCS are more CPU minded (closer to raw)
+ * - Separate code for each instruction (but this could be also an advantage, because you can concentrate on it)
+ *
+ * Additional note:
+ * - current using jnf_xxx calls for non-flag operations and
+ * jff_xxx for flag operations
+ *
+ * Still todo:
+ * - Optimize genamode, genastore, gen_writeXXX, gen_readXXX, genmovemXXX
+ *
+ */
+
+#define CC_FOR_BUILD 1
+#include "sysconfig.h"
+
+#include "sysdeps.h"
+#include "readcpu.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+#undef abort
+
+#define BOOL_TYPE "int"
+#define failure global_failure=1
+#define FAILURE global_failure=1
+#define isjump global_isjump=1
+#define is_const_jump global_iscjump=1
+#define isaddx global_isaddx=1
+#define uses_cmov global_cmov=1
+#define mayfail global_mayfail=1
+#define uses_fpu global_fpu=1
+
+int hack_opcode;
+
+static int global_failure;
+static int global_isjump;
+static int global_iscjump;
+static int global_isaddx;
+static int global_cmov;
+static int long_opcode;
+static int global_mayfail;
+static int global_fpu;
+
+static char endstr[1000];
+static char lines[100000];
+static int comp_index = 0;
+
+#include "flags_arm.h"
+
+#ifndef __attribute__
+# ifndef __GNUC__
+# define __attribute__(x)
+# endif
+#endif
+
+
/* Maps the sixteen 68k condition codes, in their paired table order
 * (T/F, HI/LS, CC/CS, NE/EQ, VC/VS, PL/MI, GE/LT, GT/LE), to native ARM
 * condition codes. -1 marks 68k 'F' (never), which has no direct ARM
 * counterpart in this table. */
static int cond_codes[] = {
	NATIVE_CC_AL, -1,
	NATIVE_CC_HI, NATIVE_CC_LS,
	NATIVE_CC_CC, NATIVE_CC_CS,
	NATIVE_CC_NE, NATIVE_CC_EQ,
	NATIVE_CC_VC, NATIVE_CC_VS,
	NATIVE_CC_PL, NATIVE_CC_MI,
	NATIVE_CC_GE, NATIVE_CC_LT,
	NATIVE_CC_GT, NATIVE_CC_LE
	};
+
+__attribute__((format(printf, 1, 2)))
+static void comprintf(const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ comp_index += vsprintf(lines + comp_index, format, args);
+ va_end(args);
+}
+
/* Discard any generated code buffered so far. */
static void com_discard(void)
{
	comp_index = 0;
}
+
+static void com_flush(void)
+{
+ int i;
+ for (i = 0; i < comp_index; i++)
+ putchar(lines[i]);
+ com_discard();
+}
+
+
+static FILE *headerfile;
+static FILE *stblfile;
+
+static int using_prefetch;
+static int using_exception_3;
+static int cpu_level;
+static int noflags;
+
+/* For the current opcode, the next lower level that will have different code.
+ * Initialized to -1 for each opcode. If it remains unchanged, indicates we
+ * are done with that opcode. */
+static int next_cpu_level;
+
+static int *opcode_map;
+static int *opcode_next_clev;
+static int *opcode_last_postfix;
+static unsigned long *counts;
+
+static void read_counts(void)
+{
+ FILE *file;
+ unsigned long opcode, count, total;
+ char name[20];
+ int nr = 0;
+ memset(counts, 0, 65536 * sizeof *counts);
+
+ file = fopen("frequent.68k", "r");
+ if (file) {
+ if (fscanf(file, "Total: %lu\n", &total) != 1)
+ {
+ assert(0);
+ }
+ while (fscanf(file, "%lx: %lu %s\n", &opcode, &count, name) == 3) {
+ opcode_next_clev[nr] = 4;
+ opcode_last_postfix[nr] = -1;
+ opcode_map[nr++] = opcode;
+ counts[opcode] = count;
+ }
+ fclose(file);
+ }
+ if (nr == nr_cpuop_funcs)
+ return;
+ for (opcode = 0; opcode < 0x10000; opcode++) {
+ if (table68k[opcode].handler == -1 && table68k[opcode].mnemo != i_ILLG
+ && counts[opcode] == 0) {
+ opcode_next_clev[nr] = 4;
+ opcode_last_postfix[nr] = -1;
+ opcode_map[nr++] = opcode;
+ counts[opcode] = count;
+ }
+ }
+ assert (nr == nr_cpuop_funcs);
+}
+
+static int n_braces = 0;
+static int insn_n_cycles;
+
/* Emit an opening brace into the generated code and track nesting depth. */
static void start_brace(void) {
	n_braces++;
	comprintf("{");
}

/* Emit a closing brace matching the innermost open start_brace(). */
static void close_brace(void) {
	assert(n_braces > 0);
	n_braces--;
	comprintf("}");
}

/* Close every brace still open for the current opcode handler. */
static void finish_braces(void) {
	while (n_braces > 0)
		close_brace();
}
+
/* Hook for updating the cached next-handler pointer; currently a no-op. */
static inline void gen_update_next_handler(void) {
	return; /* Can anything clever be done here? */
}
+
/* The gen_write*/gen_read* helpers each emit a single line of generated C
 * that performs the memory access at compile-emit time; 'address', 'source'
 * and 'dest' are the *names* of variables in the generated code, not values. */

/* Emit a byte store: writebyte(address, source). */
static void gen_writebyte(const char *address, const char *source)
{
	comprintf("\twritebyte(%s, %s, scratchie);\n", address, source);
}

/* Emit a word store: writeword(address, source). */
static void gen_writeword(const char *address, const char *source)
{
	comprintf("\twriteword(%s, %s, scratchie);\n", address, source);
}

/* Emit a long store: writelong(address, source). */
static void gen_writelong(const char *address, const char *source)
{
	comprintf("\twritelong(%s, %s, scratchie);\n", address, source);
}

/* Emit a byte load: readbyte(address) into dest. */
static void gen_readbyte(const char *address, const char* dest)
{
	comprintf("\treadbyte(%s, %s, scratchie);\n", address, dest);
}

/* Emit a word load: readword(address) into dest. */
static void gen_readword(const char *address, const char *dest)
{
	comprintf("\treadword(%s,%s,scratchie);\n", address, dest);
}

/* Emit a long load: readlong(address) into dest. */
static void gen_readlong(const char *address, const char *dest)
{
	comprintf("\treadlong(%s, %s, scratchie);\n", address, dest);
}
+
/*
 * gen_nextilong/word/byte return the text of a C expression that fetches
 * the next long/word/byte of the instruction stream and advances
 * m68k_pc_offset. Each returns a pointer to its own static buffer, so the
 * result must be consumed before the same helper is called again.
 * They also account instruction-fetch cycles and mark the opcode as long.
 */
static const char *
gen_nextilong(void) {
	static char buffer[80];

	sprintf(buffer, "comp_get_ilong((m68k_pc_offset+=4)-4)");
	insn_n_cycles += 4;

	long_opcode = 1;
	return buffer;
}

static const char *
gen_nextiword(void) {
	static char buffer[80];

	sprintf(buffer, "comp_get_iword((m68k_pc_offset+=2)-2)");
	insn_n_cycles += 2;

	long_opcode = 1;
	return buffer;
}

static const char *
gen_nextibyte(void) {
	static char buffer[80];

	sprintf(buffer, "comp_get_ibyte((m68k_pc_offset+=2)-2)");
	insn_n_cycles += 2;

	long_opcode = 1;
	return buffer;
}
+
#if defined(USE_JIT_FPU)
// Only used by FPU (future), get rid of unused warning
/* Emit generated code that byteswaps the opcode on unswapped-word hosts. */
static void
swap_opcode (void)
{
	comprintf("#if defined(HAVE_GET_WORD_UNSWAPPED) && !defined(FULLMMU)\n");
	comprintf("\topcode = do_byteswap_16(opcode);\n");
	comprintf("#endif\n");
}
#endif

/* Emit generated code that flushes the virtual PC to the real m68k PC once
 * the tracked offset grows past SYNC_PC_OFFSET. */
static void sync_m68k_pc(void) {
	comprintf("\t if (m68k_pc_offset>SYNC_PC_OFFSET) sync_m68k_pc();\n");
}
+
/* getv == 1: fetch data; getv != 0: check for odd address. If movem != 0,
 * the calling routine handles Apdi and Aipi modes.
 * gb-- movem == 2 means the same thing but for a MOVE16 instruction */
/* Emits, into the generated compiler, the code that resolves one 68k
 * addressing mode: declares '<name>' (the value) and/or '<name>a' (the
 * address) as scratch registers, fetches the operand when getv == 1, and
 * applies (An)+ / -(An) register fixups unless movem suppresses them. */
static void genamode(amodes mode, const char *reg, wordsizes size, const char *name, int getv, int movem)
{
	start_brace();
	switch (mode)
	{
	case Dreg: /* Dn */ /* Do we need to check dodgy here? */
		assert (!movem);
		if (getv == 1 || getv == 2)
		{
			/* We generate the variable even for getv==2, so we can use
			   it as a destination for MOVE */
			comprintf("\tint %s = %s;\n", name, reg);
		}
		return;

	case Areg: /* An */
		assert (!movem);
		if (getv == 1 || getv == 2)
		{
			/* see above */
			comprintf("\tint %s = dodgy ? scratchie++ : %s + 8;\n", name, reg);
			if (getv == 1)
			{
				comprintf("\tif (dodgy) \n");
				comprintf("\t\tmov_l_rr(%s, %s + 8);\n", name, reg);
			}
		}
		return;

	case Aind: /* (An) */
		comprintf("\tint %sa = dodgy ? scratchie++ : %s + 8;\n", name, reg);
		comprintf("\tif (dodgy)\n");
		comprintf("\t\tmov_l_rr(%sa, %s + 8);\n", name, reg);
		break;
	case Aipi: /* (An)+ — post-increment emitted at the bottom */
		comprintf("\tint %sa = scratchie++;\n", name);
		comprintf("\tmov_l_rr(%sa, %s + 8);\n", name, reg);
		break;
	case Apdi: /* -(An) — pre-decrement emitted here unless movem */
		switch (size)
		{
		case sz_byte:
			if (movem)
			{
				comprintf("\tint %sa = dodgy ? scratchie++ : %s + 8;\n", name, reg);
				comprintf("\tif (dodgy)\n");
				comprintf("\t\tmov_l_rr(%sa, 8 + %s);\n", name, reg);
			} else
			{
				start_brace();
				comprintf("\tint %sa = dodgy ? scratchie++ : %s + 8;\n", name, reg);
				/* byte decrement depends on the register (A7 stays word-aligned) */
				comprintf("\tlea_l_brr(%s + 8, %s + 8, (uae_s32)-areg_byteinc[%s]);\n", reg, reg, reg);
				comprintf("\tif (dodgy)\n");
				comprintf("\t\tmov_l_rr(%sa, 8 + %s);\n", name, reg);
			}
			break;
		case sz_word:
			if (movem)
			{
				comprintf("\tint %sa=dodgy?scratchie++:%s+8;\n", name, reg);
				comprintf("\tif (dodgy) \n");
				comprintf("\tmov_l_rr(%sa,8+%s);\n", name, reg);
			} else
			{
				start_brace();
				comprintf("\tint %sa = dodgy ? scratchie++ : %s + 8;\n", name, reg);
				comprintf("\tlea_l_brr(%s + 8, %s + 8, -2);\n", reg, reg);
				comprintf("\tif (dodgy)\n");
				comprintf("\t\tmov_l_rr(%sa, 8 + %s);\n", name, reg);
			}
			break;
		case sz_long:
			if (movem)
			{
				comprintf("\tint %sa = dodgy ? scratchie++ : %s + 8;\n", name, reg);
				comprintf("\tif (dodgy)\n");
				comprintf("\t\tmov_l_rr(%sa, 8 + %s);\n", name, reg);
			} else
			{
				start_brace();
				comprintf("\tint %sa = dodgy ? scratchie++ : %s + 8;\n", name, reg);
				comprintf("\tlea_l_brr(%s + 8, %s + 8, -4);\n", reg, reg);
				comprintf("\tif (dodgy)\n");
				comprintf("\t\tmov_l_rr(%sa, 8 + %s);\n", name, reg);
			}
			break;
		default:
			assert(0);
			break;
		}
		break;
	case Ad16: /* (d16,An) */
		comprintf("\tint %sa = scratchie++;\n", name);
		comprintf("\tmov_l_rr(%sa, 8 + %s);\n", name, reg);
		comprintf("\tlea_l_brr(%sa, %sa, (uae_s32)(uae_s16)%s);\n", name, name, gen_nextiword());
		break;
	case Ad8r: /* (d8,An,Xn) — full 020-style effective-address calculation */
		comprintf("\tint %sa = scratchie++;\n", name);
		comprintf("\tcalc_disp_ea_020(%s + 8, %s, %sa, scratchie);\n", reg, gen_nextiword(), name);
		break;

	case PC16: /* (d16,PC) */
		comprintf("\tint %sa = scratchie++;\n", name);
		comprintf("\tuae_u32 address = start_pc + ((char *)comp_pc_p - (char *)start_pc_p) + m68k_pc_offset;\n");
		comprintf("\tuae_s32 PC16off = (uae_s32)(uae_s16)%s;\n", gen_nextiword());
		comprintf("\tmov_l_ri(%sa, address + PC16off);\n", name);
		break;

	case PC8r: /* (d8,PC,Xn) */
		comprintf("\tint pctmp = scratchie++;\n");
		comprintf("\tint %sa = scratchie++;\n", name);
		comprintf("\tuae_u32 address = start_pc + ((char *)comp_pc_p - (char *)start_pc_p) + m68k_pc_offset;\n");
		start_brace();
		comprintf("\tmov_l_ri(pctmp,address);\n");

		comprintf("\tcalc_disp_ea_020(pctmp, %s, %sa, scratchie);\n", gen_nextiword(), name);
		break;
	case absw: /* (xxx).W */
		comprintf("\tint %sa = scratchie++;\n", name);
		comprintf("\tmov_l_ri(%sa, (uae_s32)(uae_s16)%s);\n", name, gen_nextiword());
		break;
	case absl: /* (xxx).L */
		comprintf("\tint %sa = scratchie++;\n", name);
		comprintf("\tmov_l_ri(%sa, %s); /* absl */\n", name, gen_nextilong());
		break;
	case imm: /* #imm, sized by 'size' */
		assert (getv == 1);
		switch (size)
		{
		case sz_byte:
			comprintf("\tint %s = scratchie++;\n", name);
			comprintf("\tmov_l_ri(%s, (uae_s32)(uae_s8)%s);\n", name, gen_nextibyte());
			break;
		case sz_word:
			comprintf("\tint %s = scratchie++;\n", name);
			comprintf("\tmov_l_ri(%s, (uae_s32)(uae_s16)%s);\n", name, gen_nextiword());
			break;
		case sz_long:
			comprintf("\tint %s = scratchie++;\n", name);
			comprintf("\tmov_l_ri(%s, %s);\n", name, gen_nextilong());
			break;
		default:
			assert(0);
			break;
		}
		return;
	case imm0: /* forced byte immediate */
		assert (getv == 1);
		comprintf("\tint %s = scratchie++;\n", name);
		comprintf("\tmov_l_ri(%s, (uae_s32)(uae_s8)%s);\n", name, gen_nextibyte());
		return;
	case imm1: /* forced word immediate */
		assert (getv == 1);
		comprintf("\tint %s = scratchie++;\n", name);
		comprintf("\tmov_l_ri(%s, (uae_s32)(uae_s16)%s);\n", name, gen_nextiword());
		return;
	case imm2: /* forced long immediate */
		assert (getv == 1);
		comprintf("\tint %s = scratchie++;\n", name);
		comprintf("\tmov_l_ri(%s, %s);\n", name, gen_nextilong());
		return;
	case immi: /* immediate encoded in the opcode itself ('reg' is the value) */
		assert (getv == 1);
		comprintf("\tint %s = scratchie++;\n", name);
		comprintf("\tmov_l_ri(%s, %s);\n", name, reg);
		return;
	default:
		assert(0);
		break;
	}

	/* We get here for all non-reg non-immediate addressing modes to
	 * actually fetch the value. */
	if (getv == 1)
	{
		char astring[80];
		sprintf(astring, "%sa", name);
		switch (size)
		{
		case sz_byte:
			insn_n_cycles += 2;
			break;
		case sz_word:
			insn_n_cycles += 2;
			break;
		case sz_long:
			insn_n_cycles += 4;
			break;
		default:
			assert(0);
			break;
		}
		start_brace();
		comprintf("\tint %s = scratchie++;\n", name);
		switch (size)
		{
		case sz_byte:
			gen_readbyte(astring, name);
			break;
		case sz_word:
			gen_readword(astring, name);
			break;
		case sz_long:
			gen_readlong(astring, name);
			break;
		default:
			assert(0);
			break;
		}
	}

	/* We now might have to fix up the register for pre-dec or post-inc
	 * addressing modes. */
	if (!movem)
	{
		switch (mode)
		{
		case Aipi: /* post-increment An by the operand size */
			switch (size)
			{
			case sz_byte:
				comprintf("\tlea_l_brr(%s + 8,%s + 8, areg_byteinc[%s]);\n", reg, reg, reg);
				break;
			case sz_word:
				comprintf("\tlea_l_brr(%s + 8, %s + 8, 2);\n", reg, reg);
				break;
			case sz_long:
				comprintf("\tlea_l_brr(%s + 8, %s + 8, 4);\n", reg, reg);
				break;
			default:
				assert(0);
				break;
			}
			break;
		case Apdi: /* pre-decrement was already emitted above */
			break;
		default:
			break;
		}
	}
}
+
/* Emits generated code that stores scratch register '<from>' back to the
 * destination described by (mode, reg, size); for memory modes the address
 * lives in the generated variable '<to>a'. Register stores are guarded by a
 * self-move check; immediate modes can never be stores and assert. */
static void genastore(const char *from, amodes mode, const char *reg, wordsizes size, const char *to)
{
	switch (mode)
	{
	case Dreg: /* store into Dn, width-limited */
		switch (size)
		{
		case sz_byte:
			comprintf("\tif(%s != %s)\n", reg, from);
			comprintf("\t\tmov_b_rr(%s, %s);\n", reg, from);
			break;
		case sz_word:
			comprintf("\tif(%s != %s)\n", reg, from);
			comprintf("\t\tmov_w_rr(%s, %s);\n", reg, from);
			break;
		case sz_long:
			comprintf("\tif(%s != %s)\n", reg, from);
			comprintf("\t\tmov_l_rr(%s, %s);\n", reg, from);
			break;
		default:
			assert(0);
			break;
		}
		break;
	case Areg: /* store into An (regs 8..15); no byte-size stores to An */
		switch (size)
		{
		case sz_word:
			comprintf("\tif(%s + 8 != %s)\n", reg, from);
			comprintf("\t\tmov_w_rr(%s + 8, %s);\n", reg, from);
			break;
		case sz_long:
			comprintf("\tif(%s + 8 != %s)\n", reg, from);
			comprintf("\t\tmov_l_rr(%s + 8, %s);\n", reg, from);
			break;
		default:
			assert(0);
			break;
		}
		break;

	/* All memory destinations share the write path via '<to>a'. */
	case Apdi:
	case absw:
	case PC16:
	case PC8r:
	case Ad16:
	case Ad8r:
	case Aipi:
	case Aind:
	case absl:
	{
		char astring[80];
		sprintf(astring, "%sa", to);

		switch (size)
		{
		case sz_byte:
			insn_n_cycles += 2;
			gen_writebyte(astring, from);
			break;
		case sz_word:
			insn_n_cycles += 2;
			gen_writeword(astring, from);
			break;
		case sz_long:
			insn_n_cycles += 4;
			gen_writelong(astring, from);
			break;
		default:
			assert(0);
			break;
		}
	}
	break;
	case imm:
	case imm0:
	case imm1:
	case imm2:
	case immi:
		assert(0);
		break;
	default:
		assert(0);
		break;
	}
}
+
/* Emits generated code for the 68040 MOVE16 instruction: copy an aligned
 * 16-byte block. 0xf620 is the (Ax)+,(Ay)+ form (both registers bump by 16,
 * only once if Ax == Ay); 0xf600/0xf608 post-increment only src/only dst. */
static void gen_move16(uae_u32 opcode, struct instr *curi) {
#if defined(USE_JIT2)
	comprintf("\tint src=scratchie++;\n");
	comprintf("\tint dst=scratchie++;\n");

	uae_u32 masked_op = (opcode & 0xfff8);
	if (masked_op == 0xf620) {
		// POSTINCREMENT SOURCE AND DESTINATION version
		/* NOTE(review): these emitted strings lack trailing "\n"; the
		 * generated statements run together on one line (still valid C)
		 * — confirm intended. */
		comprintf("\t uae_u16 dstreg = ((%s)>>12) & 0x07;\n", gen_nextiword());
		comprintf("\t jnf_MOVE(src, srcreg + 8);");
		comprintf("\t jnf_MOVE(dst, dstreg + 8);");
		comprintf("\t if (srcreg != dstreg)\n");
		comprintf("\t   jnf_ADD_imm(srcreg + 8, srcreg + 8, 16);");
		comprintf("\t jnf_ADD_imm(dstreg + 8, dstreg + 8, 16);");
	} else {
		/* Other variants */
		genamode(curi->smode, "srcreg", curi->size, "src", 0, 2);
		genamode(curi->dmode, "dstreg", curi->size, "dst", 0, 2);
		switch (masked_op) {
		case 0xf600:
			comprintf("\t jnf_ADD_imm(srcreg + 8, srcreg + 8, 16);");
			break;
		case 0xf608:
			comprintf("\t jnf_ADD_imm(dstreg + 8, dstreg + 8, 16);");
			break;
		}
	}
	comprintf("\t jnf_MOVE16(dst, src);");
#else
	comprintf("\tint src=scratchie++;\n");
	comprintf("\tint dst=scratchie++;\n");

	if ((opcode & 0xfff8) == 0xf620) {
		/* MOVE16 (Ax)+,(Ay)+ */
		comprintf("\tuae_u16 dstreg=((%s)>>12)&0x07;\n", gen_nextiword());
		comprintf("\tmov_l_rr(src,8+srcreg);\n");
		comprintf("\tmov_l_rr(dst,8+dstreg);\n");
	} else {
		/* Other variants */
		genamode(curi->smode, "srcreg", curi->size, "src", 0, 2);
		genamode(curi->dmode, "dstreg", curi->size, "dst", 0, 2);
		comprintf("\tmov_l_rr(src,srca);\n");
		comprintf("\tmov_l_rr(dst,dsta);\n");
	}

	/* Align on 16-byte boundaries */
	comprintf("\tand_l_ri(src,~15);\n");
	comprintf("\tand_l_ri(dst,~15);\n");

	if ((opcode & 0xfff8) == 0xf620) {
		comprintf("\tif (srcreg != dstreg)\n");
		comprintf("\tarm_ADD_l_ri8(srcreg+8,16);\n");
		comprintf("\tarm_ADD_l_ri8(dstreg+8,16);\n");
	} else if ((opcode & 0xfff8) == 0xf600)
		comprintf("\tarm_ADD_l_ri8(srcreg+8,16);\n");
	else if ((opcode & 0xfff8) == 0xf608)
		comprintf("\tarm_ADD_l_ri8(dstreg+8,16);\n");

	start_brace();
	/* Copy 16 bytes through four scratch registers. */
	comprintf("\tint tmp=scratchie;\n");
	comprintf("\tscratchie+=4;\n");

	comprintf("\tget_n_addr(src,src,scratchie);\n"
			"\tget_n_addr(dst,dst,scratchie);\n"
			"\tmov_l_rR(tmp+0,src,0);\n"
			"\tmov_l_rR(tmp+1,src,4);\n"
			"\tmov_l_rR(tmp+2,src,8);\n"
			"\tmov_l_rR(tmp+3,src,12);\n"
			"\tmov_l_Rr(dst,tmp+0,0);\n"
			"\tforget_about(tmp+0);\n"
			"\tmov_l_Rr(dst,tmp+1,4);\n"
			"\tforget_about(tmp+1);\n"
			"\tmov_l_Rr(dst,tmp+2,8);\n"
			"\tforget_about(tmp+2);\n"
			"\tmov_l_Rr(dst,tmp+3,12);\n");
	close_brace();
#endif
}
+
/* Emits generated code for MOVEM <ea>,<register list> (memory to registers):
 * walks the 16-bit mask low-to-high, loading each selected register from the
 * native address; word loads are byteswapped and sign-extended. For (An)+
 * the final offset is written back to the address register. */
static void genmovemel(uae_u16 opcode) {
	comprintf("\tuae_u16 mask = %s;\n", gen_nextiword());
	comprintf("\tint native=scratchie++;\n");
	comprintf("\tint i;\n");
	comprintf("\tsigned char offset=0;\n");
	genamode(table68k[opcode].dmode, "dstreg", table68k[opcode].size, "src", 2,
			1);
	comprintf("\tget_n_addr(srca,native,scratchie);\n");

	comprintf("\tfor (i=0;i<16;i++) {\n"
			"\t\tif ((mask>>i)&1) {\n");
	switch (table68k[opcode].size) {
	case sz_long:
		comprintf("\t\t\tmov_l_rR(i,native,offset);\n"
				"\t\t\tmid_bswap_32(i);\n"
				"\t\t\toffset+=4;\n");
		break;
	case sz_word:
		comprintf("\t\t\tmov_w_rR(i,native,offset);\n"
				"\t\t\tmid_bswap_16(i);\n"
				"\t\t\tsign_extend_16_rr(i,i);\n"
				"\t\t\toffset+=2;\n");
		break;
	default:
		assert(0);
		break;
	}
	comprintf("\t\t}\n"
			"\t}");
	if (table68k[opcode].dmode == Aipi) {
		/* (An)+: advance the address register past everything we loaded. */
		comprintf("\t\t\tlea_l_brr(8+dstreg,srca,offset);\n");
	}
}
+
/*
 * Emit code for the register-to-memory form of MOVEM (reglist,<ea>).
 * For the pre-decrement mode the 68k stores the registers in reverse
 * order (mask bit i then refers to register 15-i) and the offset runs
 * downwards; the updated address is written back to An afterwards.
 */
static void genmovemle(uae_u16 opcode) {
	comprintf("\tuae_u16 mask = %s;\n", gen_nextiword());
	comprintf("\tint native=scratchie++;\n");
	comprintf("\tint i;\n");
	comprintf("\tint tmp=scratchie++;\n");
	comprintf("\tsigned char offset=0;\n");
	genamode(table68k[opcode].dmode, "dstreg", table68k[opcode].size, "src", 2,
			1);

	/* Resolve the 68k address to a host pointer once, before the loop. */
	comprintf("\tget_n_addr(srca,native,scratchie);\n");

	if (table68k[opcode].dmode != Apdi) {
		comprintf("\tfor (i=0;i<16;i++) {\n"
				"\t\tif ((mask>>i)&1) {\n");
		switch (table68k[opcode].size) {
		case sz_long:
			comprintf("\t\t\tmov_l_rr(tmp,i);\n"
					"\t\t\tmid_bswap_32(tmp);\n"
					"\t\t\tmov_l_Rr(native,tmp,offset);\n"
					"\t\t\toffset+=4;\n");
			break;
		case sz_word:
			comprintf("\t\t\tmov_l_rr(tmp,i);\n"
					"\t\t\tmid_bswap_16(tmp);\n"
					"\t\t\tmov_w_Rr(native,tmp,offset);\n"
					"\t\t\toffset+=2;\n");
			break;
		default:
			assert(0);
			break;
		}
	} else { /* Pre-decrement */
		comprintf("\tfor (i=0;i<16;i++) {\n"
				"\t\tif ((mask>>i)&1) {\n");
		switch (table68k[opcode].size) {
		case sz_long:
			comprintf("\t\t\toffset-=4;\n"
					"\t\t\tmov_l_rr(tmp,15-i);\n"
					"\t\t\tmid_bswap_32(tmp);\n"
					"\t\t\tmov_l_Rr(native,tmp,offset);\n");
			break;
		case sz_word:
			comprintf("\t\t\toffset-=2;\n"
					"\t\t\tmov_l_rr(tmp,15-i);\n"
					"\t\t\tmid_bswap_16(tmp);\n"
					"\t\t\tmov_w_Rr(native,tmp,offset);\n");
			break;
		default:
			assert(0);
			break;
		}
	}

	/* Close the emitted if/for — shared by both branches above. */
	comprintf("\t\t}\n"
			"\t}");
	if (table68k[opcode].dmode == Apdi) {
		/* offset is negative here, hence the explicit sign-preserving cast. */
		comprintf("\t\t\tlea_l_brr(8+dstreg,srca,(uae_s32)offset);\n");
	}
}
+
/* Emit code that copies the carry flag into the emulated X flag, but only
 * when the translated instruction actually needs its X result.  (The emitted
 * call resolves to the runtime helper of the same name.) */
static void duplicate_carry(void) {
	comprintf("\tif (needed_flags&FLAG_X) duplicate_carry();\n");
}
+
/* Flag-generation strategies understood by genflags() below: each value
 * selects how the emitted code computes the 68k condition codes for a
 * class of instructions. */
typedef enum {
	flag_logical_noclobber, /* logical-op flags without clobbering operands */
	flag_logical,           /* N/Z from a value test (MOVE/TST-style) */
	flag_add,               /* full ADD semantics incl. C/V and X */
	flag_sub,               /* full SUB semantics incl. C/V and X */
	flag_cmp,               /* SUB semantics, result discarded, no X */
	flag_addx,              /* ADDX: carry-in from X, Z only cleared */
	flag_subx,              /* SUBX: borrow-in from X, Z only cleared */
	flag_zn,                /* Z and N only */
	flag_av,                /* overflow from addition */
	flag_sv,                /* overflow from subtraction */
	flag_and,               /* AND + logical flags */
	flag_or,                /* OR + logical flags */
	flag_eor,               /* EOR + logical flags */
	flag_mov                /* MOVE + logical flags */
} flagtypes;
+
#if !defined(USE_JIT2)
/*
 * Emit both the operation and the condition-code handling for an
 * instruction of the given flag class (see flagtypes above).
 *
 * value     - register holding the result, for pure flag tests
 * src, dst  - operand registers for two-operand forms
 *
 * When the global 'noflags' is set, only the arithmetic itself is
 * emitted; otherwise the start_needflags()/live_flags()/end_needflags()
 * protocol brackets the flag-producing host instruction.  Cases that
 * cannot be handled here mark the opcode as failed via 'failure'.
 */
static void genflags(flagtypes type, wordsizes size, const char *value, const char *src, const char *dst)
{
	if (noflags) {
		/* Flags not needed: emit the bare operation where possible. */
		switch (type) {
		case flag_cmp:
			comprintf("\tdont_care_flags();\n");
			comprintf("/* Weird --- CMP with noflags ;-) */\n");
			return;
		case flag_add:
		case flag_sub:
			comprintf("\tdont_care_flags();\n");
			{
				const char* op;
				switch (type) {
				case flag_add:
					op = "add";
					break; // nf
				case flag_sub:
					op = "sub";
					break; // nf
				default:
					assert(0);
					break;
				}
				switch (size) {
				case sz_byte:
					comprintf("\t%s_b(%s,%s);\n", op, dst, src);
					break;
				case sz_word:
					comprintf("\t%s_w(%s,%s);\n", op, dst, src);
					break;
				case sz_long:
					comprintf("\t%s_l(%s,%s);\n", op, dst, src);
					break;
				}
				return;
			}
			break;

		case flag_and:
			comprintf("\tdont_care_flags();\n");
			switch (size) {
			case sz_byte:
				/* kill_rodent(): the sub-long path below keeps the upper
				 * bits of dst intact by widening src with 1-bits first. */
				comprintf("if (kill_rodent(dst)) {\n");
				comprintf("\tzero_extend_8_rr(scratchie,%s);\n", src);
				comprintf("\tor_l_ri(scratchie,0xffffff00);\n"); // nf
				comprintf("\tarm_AND_l(%s,scratchie);\n", dst);
				comprintf("\tforget_about(scratchie);\n");
				comprintf("\t} else \n"
						"\tarm_AND_b(%s,%s);\n", dst, src);
				break;
			case sz_word:
				comprintf("if (kill_rodent(dst)) {\n");
				comprintf("\tzero_extend_16_rr(scratchie,%s);\n", src);
				comprintf("\tor_l_ri(scratchie,0xffff0000);\n"); // nf
				comprintf("\tarm_AND_l(%s,scratchie);\n", dst);
				comprintf("\tforget_about(scratchie);\n");
				comprintf("\t} else \n"
						"\tarm_AND_w(%s,%s);\n", dst, src);
				break;
			case sz_long:
				comprintf("\tarm_AND_l(%s,%s);\n", dst, src);
				break;
			}
			return;

		case flag_mov:
			comprintf("\tdont_care_flags();\n");
			switch (size) {
			case sz_byte:
				/* Sub-long MOVE: merge the new low bits into dst without
				 * disturbing its upper bits. */
				comprintf("if (kill_rodent(dst)) {\n");
				comprintf("\tzero_extend_8_rr(scratchie,%s);\n", src);
				comprintf("\tand_l_ri(%s,0xffffff00);\n", dst); // nf
				comprintf("\tarm_ORR_l(%s,scratchie);\n", dst);
				comprintf("\tforget_about(scratchie);\n");
				comprintf("\t} else \n"
						"\tmov_b_rr(%s,%s);\n", dst, src);
				break;
			case sz_word:
				comprintf("if (kill_rodent(dst)) {\n");
				comprintf("\tzero_extend_16_rr(scratchie,%s);\n", src);
				comprintf("\tand_l_ri(%s,0xffff0000);\n", dst); // nf
				comprintf("\tarm_ORR_l(%s,scratchie);\n", dst);
				comprintf("\tforget_about(scratchie);\n");
				comprintf("\t} else \n"
						"\tmov_w_rr(%s,%s);\n", dst, src);
				break;
			case sz_long:
				comprintf("\tmov_l_rr(%s,%s);\n", dst, src);
				break;
			}
			return;

		case flag_or:
		case flag_eor:
			comprintf("\tdont_care_flags();\n");
			start_brace();
			{
				const char* op;
				switch (type) {
				case flag_or:
					op = "ORR";
					break; // nf
				case flag_eor:
					op = "EOR";
					break; // nf
				default:
					assert(0);
					break;
				}
				switch (size) {
				case sz_byte:
					comprintf("if (kill_rodent(dst)) {\n");
					comprintf("\tzero_extend_8_rr(scratchie,%s);\n", src);
					comprintf("\tarm_%s_l(%s,scratchie);\n", op, dst);
					comprintf("\tforget_about(scratchie);\n");
					comprintf("\t} else \n"
							"\tarm_%s_b(%s,%s);\n", op, dst, src);
					break;
				case sz_word:
					comprintf("if (kill_rodent(dst)) {\n");
					comprintf("\tzero_extend_16_rr(scratchie,%s);\n", src);
					comprintf("\tarm_%s_l(%s,scratchie);\n", op, dst);
					comprintf("\tforget_about(scratchie);\n");
					comprintf("\t} else \n"
							"\tarm_%s_w(%s,%s);\n", op, dst, src);
					break;
				case sz_long:
					comprintf("\tarm_%s_l(%s,%s);\n", op, dst, src);
					break;
				}
				close_brace();
				return;
			}

		case flag_addx:
		case flag_subx:
			comprintf("\tdont_care_flags();\n");
			{
				const char* op;
				switch (type) {
				case flag_addx:
					op = "adc";
					break;
				case flag_subx:
					op = "sbb";
					break;
				default:
					assert(0);
					break;
				}
				comprintf("\trestore_carry();\n"); /* Reload the X flag into C */
				switch (size) {
				case sz_byte:
					comprintf("\t%s_b(%s,%s);\n", op, dst, src);
					break;
				case sz_word:
					comprintf("\t%s_w(%s,%s);\n", op, dst, src);
					break;
				case sz_long:
					comprintf("\t%s_l(%s,%s);\n", op, dst, src);
					break;
				}
				return;
			}
			break;
		default:
			return;
		}
	}

	/* Need the flags, but possibly not all of them */
	switch (type) {
	case flag_logical_noclobber:
		failure;
		/* fall through */

	case flag_and:
	case flag_or:
	case flag_eor:
		comprintf("\tdont_care_flags();\n");
		start_brace();
		{
			const char* op;
			switch (type) {
			case flag_and:
				op = "and";
				break;
			case flag_or:
				op = "or";
				break;
			case flag_eor:
				op = "xor";
				break;
			default:
				assert(0);
				break;
			}
			switch (size) {
			case sz_byte:
				comprintf("\tstart_needflags();\n"
						"\t%s_b(%s,%s);\n", op, dst, src);
				break;
			case sz_word:
				comprintf("\tstart_needflags();\n"
						"\t%s_w(%s,%s);\n", op, dst, src);
				break;
			case sz_long:
				comprintf("\tstart_needflags();\n"
						"\t%s_l(%s,%s);\n", op, dst, src);
				break;
			}
			comprintf("\tlive_flags();\n");
			comprintf("\tend_needflags();\n");
			close_brace();
			return;
		}

	case flag_mov:
		comprintf("\tdont_care_flags();\n");
		start_brace();
		{
			switch (size) {
			case sz_byte:
				/* When src and dst differ, zero dst and OR src into it so
				 * the host OR sets N/Z; otherwise test after a plain move. */
				comprintf("\tif (%s!=%s) {\n", src, dst);
				comprintf("\tmov_b_ri(%s,0);\n"
						"\tstart_needflags();\n", dst);
				comprintf("\tor_b(%s,%s);\n", dst, src);
				comprintf("\t} else {\n");
				comprintf("\tmov_b_rr(%s,%s);\n", dst, src);
				comprintf("\ttest_b_rr(%s,%s);\n", dst, dst);
				comprintf("\t}\n");
				break;
			case sz_word:
				comprintf("\tif (%s!=%s) {\n", src, dst);
				comprintf("\tmov_w_ri(%s,0);\n"
						"\tstart_needflags();\n", dst);
				comprintf("\tor_w(%s,%s);\n", dst, src);
				comprintf("\t} else {\n");
				comprintf("\tmov_w_rr(%s,%s);\n", dst, src);
				comprintf("\ttest_w_rr(%s,%s);\n", dst, dst);
				comprintf("\t}\n");
				break;
			case sz_long:
				comprintf("\tif (%s!=%s) {\n", src, dst);
				comprintf("\tmov_l_ri(%s,0);\n"
						"\tstart_needflags();\n", dst);
				comprintf("\tor_l(%s,%s);\n", dst, src);
				comprintf("\t} else {\n");
				comprintf("\tmov_l_rr(%s,%s);\n", dst, src);
				comprintf("\ttest_l_rr(%s,%s);\n", dst, dst);
				comprintf("\t}\n");
				break;
			}
			comprintf("\tlive_flags();\n");
			comprintf("\tend_needflags();\n");
			close_brace();
			return;
		}

	case flag_logical:
		comprintf("\tdont_care_flags();\n");
		start_brace();
		switch (size) {
		case sz_byte:
			comprintf("\tstart_needflags();\n"
					"\ttest_b_rr(%s,%s);\n", value, value);
			break;
		case sz_word:
			comprintf("\tstart_needflags();\n"
					"\ttest_w_rr(%s,%s);\n", value, value);
			break;
		case sz_long:
			comprintf("\tstart_needflags();\n"
					"\ttest_l_rr(%s,%s);\n", value, value);
			break;
		}
		comprintf("\tlive_flags();\n");
		comprintf("\tend_needflags();\n");
		close_brace();
		return;

	case flag_add:
	case flag_sub:
	case flag_cmp:
		comprintf("\tdont_care_flags();\n");
		{
			const char* op;
			switch (type) {
			case flag_add:
				op = "add";
				break;
			case flag_sub:
				op = "sub";
				break;
			case flag_cmp:
				op = "cmp";
				break;
			default:
				assert(0);
				break;
			}
			switch (size) {
			case sz_byte:
				comprintf("\tstart_needflags();\n"
						"\t%s_b(%s,%s);\n", op, dst, src);
				break;
			case sz_word:
				comprintf("\tstart_needflags();\n"
						"\t%s_w(%s,%s);\n", op, dst, src);
				break;
			case sz_long:
				comprintf("\tstart_needflags();\n"
						"\t%s_l(%s,%s);\n", op, dst, src);
				break;
			}
			comprintf("\tlive_flags();\n");
			comprintf("\tend_needflags();\n");
			if (type != flag_cmp) {
				/* CMP never updates X. */
				duplicate_carry();
			}
			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");

			return;
		}

	case flag_addx:
	case flag_subx:
		uses_cmov;
		comprintf("\tdont_care_flags();\n");
		{
			const char* op;
			switch (type) {
			case flag_addx:
				op = "adc";
				break;
			case flag_subx:
				op = "sbb";
				break;
			default:
				assert(0);
				break;
			}
			/* X-ops only ever *clear* Z; capture whether Z was already
			 * clear beforehand so it can be merged back in afterwards. */
			start_brace();
			comprintf("\tint zero=scratchie++;\n"
					"\tint one=scratchie++;\n"
					"\tif (needed_flags&FLAG_Z) {\n"
					"\tmov_l_ri(zero,0);\n"
					"\tmov_l_ri(one,-1);\n"
					"\tmake_flags_live();\n"
					"\tcmov_l_rr(zero,one,%d);\n"
					"\t}\n", NATIVE_CC_NE);
			comprintf("\trestore_carry();\n"); /* Reload the X flag into C */
			switch (size) {
			case sz_byte:
				comprintf("\tstart_needflags();\n"
						"\t%s_b(%s,%s);\n", op, dst, src);
				break;
			case sz_word:
				comprintf("\tstart_needflags();\n"
						"\t%s_w(%s,%s);\n", op, dst, src);
				break;
			case sz_long:
				comprintf("\tstart_needflags();\n"
						"\t%s_l(%s,%s);\n", op, dst, src);
				break;
			}
			comprintf("\tlive_flags();\n");
			comprintf("\tif (needed_flags&FLAG_Z) {\n"
					"\tcmov_l_rr(zero,one,%d);\n"
					"\tset_zero(zero, one);\n" /* No longer need one */
					"\tlive_flags();\n"
					"\t}\n", NATIVE_CC_NE);
			comprintf("\tend_needflags();\n");
			duplicate_carry();
			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
			return;
		}
	default:
		failure;
		break;
	}
}
#endif
+
+static void gen_abcd(uae_u32 opcode, struct instr *curi, const char* ssize) {
+#if 0
+#else
+ (void) opcode;
+ (void) curi;
+ (void) ssize;
+ failure;
+ /* No BCD maths for me.... */
+#endif
+}
+
/*
 * ADD <ea>,Dn / ADD Dn,<ea>: emit the addition and full CZNVX flag
 * handling.  The USE_JIT2 path calls the ARM helper stubs; the fallback
 * path lets genflags() emit the operation together with the flags.
 */
static void gen_add(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
#if defined(USE_JIT2)
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);

	comprintf("\t dont_care_flags();\n");
	start_brace();
	comprintf("\t int tmp=scratchie++;\n");
	// Use tmp register to avoid destroying upper part in .B., .W cases
	if (!noflags) {
		comprintf("\t start_needflags();\n");
		comprintf("\t jff_ADD_%s(tmp,dst,src);\n", ssize);
		comprintf("\t live_flags();\n");
		comprintf("\t end_needflags();\n");
		duplicate_carry();
		comprintf(
				"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
	} else {
		comprintf("\t jnf_ADD(tmp,dst,src);\n");
	}
	genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
#else
	(void) ssize;
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
	genflags(flag_add, curi->size, "", "src", "dst");
	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
#endif
}
+
/*
 * ADDA <ea>,An: address-register add.  Never affects flags; the source
 * is sign-extended to a long before being added to the full An.
 */
static void gen_adda(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
#if defined(USE_JIT2)
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", sz_long, "dst", 1, 0);
	start_brace();
	comprintf("\t jnf_ADDA_%s(dst, src);\n", ssize);
	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
#else
	(void) ssize;
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", sz_long, "dst", 1, 0);
	start_brace();
	comprintf("\tint tmp=scratchie++;\n");
	switch (curi->size) {
	case sz_byte:
		comprintf("\tsign_extend_8_rr(tmp,src);\n");
		break;
	case sz_word:
		comprintf("\tsign_extend_16_rr(tmp,src);\n");
		break;
	case sz_long:
		/* Already a full long: alias tmp to src in the emitted code. */
		comprintf("\ttmp=src;\n");
		break;
	default:
		assert(0);
		break;
	}
	comprintf("\tarm_ADD_l(dst,tmp);\n");
	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
#endif
}
+
/*
 * ADDX: add with the X flag as carry-in.  restore_carry() reloads X
 * into the host carry before the operation; the flagful path then
 * regenerates CZNVX (Z is only ever cleared, handled inside jff_ADDX /
 * genflags(flag_addx)).
 */
static void gen_addx(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
#if defined(USE_JIT2)
	isaddx;
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
	start_brace();

	// Use tmp register to avoid destroying upper part in .B., .W cases
	comprintf("\t dont_care_flags();\n");
	comprintf("\t int tmp=scratchie++;\n");
	if (!noflags) {
		comprintf("\t make_flags_live();\n");
		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
		comprintf("\t start_needflags();\n");
		comprintf("\t jff_ADDX_%s(tmp,dst,src);\n", ssize);
		comprintf("\t live_flags();\n");
		comprintf("\t end_needflags();\n");
		duplicate_carry();
		comprintf("\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
	} else {
		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
		comprintf("\t jnf_ADDX(tmp,dst,src);\n");
	}
	genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
#else
	(void) ssize;
	isaddx;
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
	start_brace();
	genflags(flag_addx, curi->size, "", "src", "dst");
	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
#endif
}
+
+static void gen_and(uae_u32 opcode, struct instr *curi, const char* ssize) {
+ (void) opcode;
+#if defined(USE_JIT2)
+ genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+ genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+
+ comprintf("\t dont_care_flags();\n");
+ comprintf("\t int tmp=scratchie++;\n");
+ start_brace();
+ if (!noflags) {
+ comprintf("\t jff_AND_%s(tmp,dst,src);\n", ssize);
+ comprintf("\t live_flags();\n");
+ comprintf("\t end_needflags();\n");
+ } else {
+ comprintf("\t jnf_AND(tmp,dst,src);\n");
+ }
+ genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
+#else
+ (void) ssize;
+ genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+ genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+ genflags(flag_and, curi->size, "", "src", "dst");
+ genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
/*
 * AND #imm,CCR: combine an immediate with the condition-code register.
 * The USE_JIT2 path remaps the 68k CCR bits to host flag order via
 * ARM_CCR_MAP and handles X (bit 4) separately; the fallback simply
 * refuses the opcode (marked as a jump so the block ends here).
 */
static void gen_andsr(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
	(void) ssize;
#if defined(USE_JIT2)
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	if (!noflags) {
		comprintf("\t make_flags_live();\n");
		comprintf("\t start_needflags();\n");
		comprintf("\t jff_ANDSR(ARM_CCR_MAP[src & 0xF], (src & 0x10));\n");
		comprintf("\t live_flags();\n");
		comprintf("\t end_needflags();\n");
	}
#else
	(void) curi;
	failure;
	isjump;
#endif
}
+
/*
 * ASL (arithmetic shift left).  Register-counted shifts where the count
 * register equals the data register are refused (FAIL) in both paths.
 * The non-JIT2 path also bails out whenever the V flag is needed: ASL's
 * V semantics (set if any shifted-out bit differs from the sign) are
 * too awkward to emit, so the interpreter handles that case.  Otherwise
 * the result is identical to LSL; C/X come from the last bit shifted
 * out, which is recovered by re-shifting a copy by (count-1).
 */
static void gen_asl(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
#if defined(USE_JIT2)
	mayfail;
	if (curi->smode == Dreg) {
		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
				"  FAIL(1);\n"
				"  return;\n"
				"} \n");
		start_brace();
	}
	comprintf("\t dont_care_flags();\n");
	comprintf("\t int tmp=scratchie++;\n");

	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);

	if (curi->smode != immi) {
		if (!noflags) {
			start_brace();
			comprintf("\t make_flags_live();\n");
			comprintf("\t start_needflags();\n");
			comprintf("\t jff_ASL_%s_reg(tmp,data,cnt);\n", ssize);
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf(
					"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
		} else {
			start_brace();
			/* Without flags ASL degenerates to LSL. */
			comprintf("\t jnf_LSL_reg(tmp,data,cnt);\n");
		}
	} else {
		start_brace();
		if (!noflags) {
			comprintf("\t make_flags_live();\n");
			comprintf("\t start_needflags();\n");
			comprintf("\t jff_ASL_%s_imm(tmp,data,srcreg);\n", ssize);
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf(
					"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
		} else {
			comprintf("\t jnf_LSL_imm(tmp,data,srcreg);\n");
		}
	}
	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
#else
	(void) ssize;

	mayfail;
	if (curi->smode == Dreg) {
		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
				"  FAIL(1);\n"
				"  return;\n"
				"} \n");
		start_brace();
	}
	comprintf("\tdont_care_flags();\n");
	/* Except for the handling of the V flag, this is identical to
	 LSL. The handling of V is, uhm, unpleasant, so if it's needed,
	 let the normal emulation handle it. Shoulders of giants kinda
	 thing ;-) */
	comprintf("if (needed_flags & FLAG_V) {\n"
			"  FAIL(1);\n"
			"  return;\n"
			"} \n");

	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
	if (curi->smode != immi) {
		if (!noflags) {
			uses_cmov;
			start_brace();
			comprintf("\tint highmask;\n"
					"\tint cdata=scratchie++;\n"
					"\tint tmpcnt=scratchie++;\n");
			comprintf("\tmov_l_rr(tmpcnt,cnt);\n"
					"\tand_l_ri(tmpcnt,63);\n"
					"\tmov_l_ri(cdata,0);\n"
					"\tcmov_l_rr(cdata,data,%d);\n", NATIVE_CC_NE);
			/* cdata is now either data (for shift count!=0) or
			 0 (for shift count==0) */
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshll_b_rr(data,cnt);\n"
						"\thighmask=0x38;\n");
				break;
			case sz_word:
				comprintf("\tshll_w_rr(data,cnt);\n"
						"\thighmask=0x30;\n");
				break;
			case sz_long:
				comprintf("\tshll_l_rr(data,cnt);\n"
						"\thighmask=0x20;\n");
				break;
			default:
				assert(0);
				break;
			}
			/* Host shifters mask the count; force the result to 0 when
			 * the 68k count was >= the operand width. */
			comprintf("test_l_ri(cnt,highmask);\n"
					"mov_l_ri(scratchie,0);\n"
					"cmov_l_rr(scratchie,data,%d);\n", NATIVE_CC_EQ);
			switch (curi->size) {
			case sz_byte:
				comprintf("\tmov_b_rr(data,scratchie);\n");
				break;
			case sz_word:
				comprintf("\tmov_w_rr(data,scratchie);\n");
				break;
			case sz_long:
				comprintf("\tmov_l_rr(data,scratchie);\n");
				break;
			default:
				assert(0);
				break;
			}
			/* Result of shift is now in data. Now we need to determine
			 the carry by shifting cdata one less */
			comprintf("\tsub_l_ri(tmpcnt,1);\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshll_b_rr(cdata,tmpcnt);\n");
				break;
			case sz_word:
				comprintf("\tshll_w_rr(cdata,tmpcnt);\n");
				break;
			case sz_long:
				comprintf("\tshll_l_rr(cdata,tmpcnt);\n");
				break;
			default:
				assert(0);
				break;
			}
			comprintf("test_l_ri(tmpcnt,highmask);\n"
					"mov_l_ri(scratchie,0);\n"
					"cmov_l_rr(cdata,scratchie,%d);\n", NATIVE_CC_NE);
			/* And create the flags */
			comprintf("\tstart_needflags();\n");

			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\t  test_b_rr(data,data);\n");
				comprintf("\t  bt_l_ri(cdata,7);\n");
				break;
			case sz_word:
				comprintf("\t  test_w_rr(data,data);\n");
				comprintf("\t  bt_l_ri(cdata,15);\n");
				break;
			case sz_long:
				comprintf("\t  test_l_rr(data,data);\n");
				comprintf("\t  bt_l_ri(cdata,31);\n");
				break;
			}
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
			genastore("data", curi->dmode, "dstreg", curi->size, "data");
		} else {
			uses_cmov;
			start_brace();
			comprintf("\tint highmask;\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshll_b_rr(data,cnt);\n"
						"\thighmask=0x38;\n");
				break;
			case sz_word:
				comprintf("\tshll_w_rr(data,cnt);\n"
						"\thighmask=0x30;\n");
				break;
			case sz_long:
				comprintf("\tshll_l_rr(data,cnt);\n"
						"\thighmask=0x20;\n");
				break;
			default:
				assert(0);
				break;
			}
			comprintf("test_l_ri(cnt,highmask);\n"
					"mov_l_ri(scratchie,0);\n"
					"cmov_l_rr(scratchie,data,%d);\n", NATIVE_CC_EQ);
			switch (curi->size) {
			case sz_byte:
				comprintf("\tmov_b_rr(data,scratchie);\n");
				break;
			case sz_word:
				comprintf("\tmov_w_rr(data,scratchie);\n");
				break;
			case sz_long:
				comprintf("\tmov_l_rr(data,scratchie);\n");
				break;
			default:
				assert(0);
				break;
			}
			genastore("data", curi->dmode, "dstreg", curi->size, "data");
		}
	} else {
		/* Immediate count (1..8): keep a copy, shift, and recover C/X
		 * from bit (width - count) of the original value. */
		start_brace();
		comprintf("\tint tmp=scratchie++;\n"
				"\tint bp;\n"
				"\tmov_l_rr(tmp,data);\n");
		switch (curi->size) {
		case sz_byte:
			comprintf("\tshll_b_ri(data,srcreg);\n"
					"\tbp=8-srcreg;\n");
			break;
		case sz_word:
			comprintf("\tshll_w_ri(data,srcreg);\n"
					"\tbp=16-srcreg;\n");
			break;
		case sz_long:
			comprintf("\tshll_l_ri(data,srcreg);\n"
					"\tbp=32-srcreg;\n");
			break;
		default:
			assert(0);
			break;
		}

		if (!noflags) {
			comprintf("\tstart_needflags();\n");
			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\t  test_b_rr(data,data);\n");
				break;
			case sz_word:
				comprintf("\t  test_w_rr(data,data);\n");
				break;
			case sz_long:
				comprintf("\t  test_l_rr(data,data);\n");
				break;
			}
			comprintf("\t bt_l_ri(tmp,bp);\n"); /* Set C */
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
		}
		genastore("data", curi->dmode, "dstreg", curi->size, "data");
	}
#endif
}
+
/*
 * ASL <ea> (memory form, shift by one, word-sized).  Only implemented
 * for the USE_JIT2 helper path; the fallback refuses the opcode.
 */
static void gen_aslw(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
	(void) ssize;
#if defined(USE_JIT2)
	comprintf("\t dont_care_flags();\n");
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	start_brace();
	comprintf("\t int tmp=scratchie++;\n");
	if (!noflags) {
		comprintf("\t start_needflags();\n");
		comprintf("\t jff_ASLW(tmp,src);\n");
		comprintf("\t live_flags();\n");
		comprintf("\t end_needflags();\n");
	} else {
		comprintf("\t jnf_ASLW(tmp,src);\n");
	}
	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
#else
	(void) curi;
	failure;
#endif
}
+
/*
 * ASR (arithmetic shift right).  Register-counted shifts where the
 * count register equals the data register are refused (FAIL).  In the
 * non-JIT2 path, counts >= the operand width are emulated by two extra
 * shifts of width/2 each (host shifters mask the count), which also
 * naturally replicates the sign bit; C/X come from the last bit shifted
 * out, recovered by shifting a copy by (count-1).
 */
static void gen_asr(uae_u32 opcode, struct instr *curi, const char* ssize) {
#if defined(USE_JIT2)
	(void)opcode;

	mayfail;
	if (curi->smode == Dreg) {
		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
				"  FAIL(1);\n"
				"  return;\n"
				"} \n");
		start_brace();
	}
	comprintf("\t dont_care_flags();\n");

	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);

	start_brace();
	comprintf("\t int tmp=scratchie++;\n");
	if (curi->smode != immi) {
		if (!noflags) {
			comprintf("\t make_flags_live();\n");
			comprintf("\t start_needflags();\n");
			comprintf("\t jff_ASR_%s_reg(tmp,data,cnt);\n", ssize);
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf(
					"if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
		} else {
			comprintf("\t jnf_ASR_%s_reg(tmp,data,cnt);\n", ssize);
		}
	} else {
		char *op;
		if (!noflags) {
			comprintf("\t make_flags_live();\n");
			comprintf("\t start_needflags();\n");
			op = "ff";
		} else
			op = "nf";

		comprintf("\t j%s_ASR_%s_imm(tmp,data,srcreg);\n", op, ssize);
		if (!noflags) {
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf(
					"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
		}
	}
	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
#else
	(void) opcode;
	(void) ssize;

	mayfail;
	if (curi->smode == Dreg) {
		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
				"  FAIL(1);\n"
				"  return;\n"
				"} \n");
		start_brace();
	}
	comprintf("\tdont_care_flags();\n");

	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
	if (curi->smode != immi) {
		if (!noflags) {
			uses_cmov;
			start_brace();
			comprintf("\tint highmask;\n"
					"\tint width;\n"
					"\tint cdata=scratchie++;\n"
					"\tint tmpcnt=scratchie++;\n"
					"\tint highshift=scratchie++;\n");
			comprintf("\tmov_l_rr(tmpcnt,cnt);\n"
					"\tand_l_ri(tmpcnt,63);\n"
					"\tmov_l_ri(cdata,0);\n"
					"\tcmov_l_rr(cdata,data,%d);\n", NATIVE_CC_NE);
			/* cdata is now either data (for shift count!=0) or
			 0 (for shift count==0) */
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshra_b_rr(data,cnt);\n"
						"\thighmask=0x38;\n"
						"\twidth=8;\n");
				break;
			case sz_word:
				comprintf("\tshra_w_rr(data,cnt);\n"
						"\thighmask=0x30;\n"
						"\twidth=16;\n");
				break;
			case sz_long:
				comprintf("\tshra_l_rr(data,cnt);\n"
						"\thighmask=0x20;\n"
						"\twidth=32;\n");
				break;
			default:
				assert(0);
				break;
			}
			comprintf("test_l_ri(cnt,highmask);\n"
					"mov_l_ri(highshift,0);\n"
					"mov_l_ri(scratchie,width/2);\n"
					"cmov_l_rr(highshift,scratchie,%d);\n", NATIVE_CC_NE);
			/* The x86 masks out bits, so we now make sure that things
			 really get shifted as much as planned */
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshra_b_rr(data,highshift);\n");
				break;
			case sz_word:
				comprintf("\tshra_w_rr(data,highshift);\n");
				break;
			case sz_long:
				comprintf("\tshra_l_rr(data,highshift);\n");
				break;
			default:
				assert(0);
				break;
			}
			/* And again */
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshra_b_rr(data,highshift);\n");
				break;
			case sz_word:
				comprintf("\tshra_w_rr(data,highshift);\n");
				break;
			case sz_long:
				comprintf("\tshra_l_rr(data,highshift);\n");
				break;
			default:
				assert(0);
				break;
			}

			/* Result of shift is now in data. Now we need to determine
			 the carry by shifting cdata one less */
			comprintf("\tsub_l_ri(tmpcnt,1);\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshra_b_rr(cdata,tmpcnt);\n");
				break;
			case sz_word:
				comprintf("\tshra_w_rr(cdata,tmpcnt);\n");
				break;
			case sz_long:
				comprintf("\tshra_l_rr(cdata,tmpcnt);\n");
				break;
			default:
				assert(0);
				break;
			}
			/* If the shift count was higher than the width, we need
			 to pick up the sign from data */
			comprintf("test_l_ri(tmpcnt,highmask);\n"
					"cmov_l_rr(cdata,data,%d);\n", NATIVE_CC_NE);
			/* And create the flags */
			comprintf("\tstart_needflags();\n");
			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\t  test_b_rr(data,data);\n");
				break;
			case sz_word:
				comprintf("\t  test_w_rr(data,data);\n");
				break;
			case sz_long:
				comprintf("\t  test_l_rr(data,data);\n");
				break;
			}
			comprintf("\t bt_l_ri(cdata,0);\n"); /* Set C */
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
			genastore("data", curi->dmode, "dstreg", curi->size, "data");
		} else {
			uses_cmov;
			start_brace();
			comprintf("\tint highmask;\n"
					"\tint width;\n"
					"\tint highshift=scratchie++;\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshra_b_rr(data,cnt);\n"
						"\thighmask=0x38;\n"
						"\twidth=8;\n");
				break;
			case sz_word:
				comprintf("\tshra_w_rr(data,cnt);\n"
						"\thighmask=0x30;\n"
						"\twidth=16;\n");
				break;
			case sz_long:
				comprintf("\tshra_l_rr(data,cnt);\n"
						"\thighmask=0x20;\n"
						"\twidth=32;\n");
				break;
			default:
				assert(0);
				break;
			}
			comprintf("test_l_ri(cnt,highmask);\n"
					"mov_l_ri(highshift,0);\n"
					"mov_l_ri(scratchie,width/2);\n"
					"cmov_l_rr(highshift,scratchie,%d);\n", NATIVE_CC_NE);
			/* The x86 masks out bits, so we now make sure that things
			 really get shifted as much as planned */
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshra_b_rr(data,highshift);\n");
				break;
			case sz_word:
				comprintf("\tshra_w_rr(data,highshift);\n");
				break;
			case sz_long:
				comprintf("\tshra_l_rr(data,highshift);\n");
				break;
			default:
				assert(0);
				break;
			}
			/* And again */
			switch (curi->size) {
			case sz_byte:
				comprintf("\tshra_b_rr(data,highshift);\n");
				break;
			case sz_word:
				comprintf("\tshra_w_rr(data,highshift);\n");
				break;
			case sz_long:
				comprintf("\tshra_l_rr(data,highshift);\n");
				break;
			default:
				assert(0);
				break;
			}
			genastore("data", curi->dmode, "dstreg", curi->size, "data");
		}
	} else {
		/* Immediate count (1..8): keep a copy, shift, and recover C/X
		 * from bit (count-1) of the original value. */
		start_brace();
		comprintf("\tint tmp=scratchie++;\n"
				"\tint bp;\n"
				"\tmov_l_rr(tmp,data);\n");
		switch (curi->size) {
		case sz_byte:
			comprintf("\tshra_b_ri(data,srcreg);\n"
					"\tbp=srcreg-1;\n");
			break;
		case sz_word:
			comprintf("\tshra_w_ri(data,srcreg);\n"
					"\tbp=srcreg-1;\n");
			break;
		case sz_long:
			comprintf("\tshra_l_ri(data,srcreg);\n"
					"\tbp=srcreg-1;\n");
			break;
		default:
			assert(0);
			break;
		}

		if (!noflags) {
			comprintf("\tstart_needflags();\n");
			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
			switch (curi->size) {
			case sz_byte:
				comprintf("\t  test_b_rr(data,data);\n");
				break;
			case sz_word:
				comprintf("\t  test_w_rr(data,data);\n");
				break;
			case sz_long:
				comprintf("\t  test_l_rr(data,data);\n");
				break;
			}
			comprintf("\t bt_l_ri(tmp,bp);\n"); /* Set C */
			comprintf("\t live_flags();\n");
			comprintf("\t end_needflags();\n");
			comprintf("\t duplicate_carry();\n");
			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
		}
		genastore("data", curi->dmode, "dstreg", curi->size, "data");
	}
#endif
}
+
/*
 * ASR <ea> (memory form, shift by one, word-sized).  Only implemented
 * for the USE_JIT2 helper path; the fallback refuses the opcode.
 */
static void gen_asrw(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
	(void) ssize;
#if defined(USE_JIT2)
	comprintf("\t dont_care_flags();\n");
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	start_brace();
	comprintf("\t int tmp = scratchie++;\n");

	if (!noflags) {
		comprintf("\t start_needflags();\n");
		comprintf("\t jff_ASRW(tmp,src);\n");
		comprintf("\t live_flags();\n");
		comprintf("\t end_needflags();\n");
	} else {
		comprintf("\t jnf_ASRW(tmp,src);\n");
	}
	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
#else
	(void) curi;
	failure;
#endif
}
+
/*
 * BCHG: test a bit and complement it.  Z reflects the bit's state
 * BEFORE the change.  Bit numbering is modulo 8 for byte (memory)
 * operands, modulo 32 for long (register) operands.
 */
static void gen_bchg(uae_u32 opcode, struct instr *curi, const char* ssize) {
	(void) opcode;
#if defined(USE_JIT2)
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
	start_brace();

	if (!noflags) {
		comprintf("\t make_flags_live();\n");
		comprintf("\t start_needflags();\n");
		comprintf("\t jff_BCHG_%s(dst,src);\n", ssize);
		comprintf("\t live_flags();\n");
		comprintf("\t end_needflags();\n");
	} else {
		comprintf("\t jnf_BCHG_%s(dst,src);\n", ssize);
		comprintf("\t dont_care_flags();\n");
	}
	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
#else
	(void) ssize;
	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
	start_brace();
	comprintf("\tint s=scratchie++;\n"
			"\tint tmp=scratchie++;\n"
			"\tmov_l_rr(s,src);\n");
	if (curi->size == sz_byte)
		comprintf("\tand_l_ri(s,7);\n");
	else
		comprintf("\tand_l_ri(s,31);\n");

	/* bit-test-and-complement; Z is then derived from the old bit. */
	comprintf("\tbtc_l_rr(dst,s);\n" /* Answer now in C */
			"\tsbb_l(s,s);\n" /* s is 0 if bit was 0, -1 otherwise */
			"\tmake_flags_live();\n" /* Get the flags back */
			"\tdont_care_flags();\n");
	if (!noflags) {
		comprintf("\tstart_needflags();\n"
				"\tset_zero(s,tmp);\n"
				"\tlive_flags();\n"
				"\tend_needflags();\n");
	}
	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
#endif
}
+
+/* BCLR: test a bit in dst and clear it.  Same structure as gen_bchg except
+ * the legacy path uses btr (reset) instead of btc (complement). */
+static void gen_bclr(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_BCLR_%s(dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_BCLR_%s(dst,src);\n", ssize);
+		comprintf("\t dont_care_flags();\n");
+	}
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+	comprintf("\tint s=scratchie++;\n"
+			"\tint tmp=scratchie++;\n"
+			"\tmov_l_rr(s,src);\n");
+	if (curi->size == sz_byte)
+		comprintf("\tand_l_ri(s,7);\n");
+	else
+		comprintf("\tand_l_ri(s,31);\n");
+
+	comprintf("\tbtr_l_rr(dst,s);\n" /* Answer now in C */
+			"\tsbb_l(s,s);\n" /* s is 0 if bit was 0, -1 otherwise */
+			"\tmake_flags_live();\n" /* Get the flags back */
+			"\tdont_care_flags();\n");
+	if (!noflags) {
+		comprintf("\tstart_needflags();\n"
+				"\tset_zero(s,tmp);\n"
+				"\tlive_flags();\n"
+				"\tend_needflags();\n");
+	}
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
+/* BSET: test a bit in dst and set it.  Same structure as gen_bchg/gen_bclr;
+ * legacy path uses bts (set). */
+static void gen_bset(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_BSET_%s(dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_BSET_%s(dst,src);\n", ssize);
+		comprintf("\t dont_care_flags();\n");
+	}
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+	comprintf("\tint s=scratchie++;\n"
+			"\tint tmp=scratchie++;\n"
+			"\tmov_l_rr(s,src);\n");
+	if (curi->size == sz_byte)
+		comprintf("\tand_l_ri(s,7);\n");
+	else
+		comprintf("\tand_l_ri(s,31);\n");
+
+	comprintf("\tbts_l_rr(dst,s);\n" /* Answer now in C */
+			"\tsbb_l(s,s);\n" /* s is 0 if bit was 0, -1 otherwise */
+			"\tmake_flags_live();\n" /* Get the flags back */
+			"\tdont_care_flags();\n");
+	if (!noflags) {
+		comprintf("\tstart_needflags();\n"
+				"\tset_zero(s,tmp);\n"
+				"\tlive_flags();\n"
+				"\tend_needflags();\n");
+	}
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
+/* BTST: test a bit in dst without modifying it, so no genastore of dst is
+ * needed.  When flags are not wanted, the JIT2 path emits nothing at all
+ * beyond the addressing-mode code. */
+static void gen_btst(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+
+	// If we are not interested in flags it is not necessary to do
+	// anything with the data
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_BTST_%s(dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t dont_care_flags();\n");
+	}
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+	comprintf("\tint s=scratchie++;\n"
+			"\tint tmp=scratchie++;\n"
+			"\tmov_l_rr(s,src);\n");
+	if (curi->size == sz_byte)
+		comprintf("\tand_l_ri(s,7);\n");
+	else
+		comprintf("\tand_l_ri(s,31);\n");
+
+	comprintf("\tbt_l_rr(dst,s);\n" /* Answer now in C */
+			"\tsbb_l(s,s);\n" /* s is 0 if bit was 0, -1 otherwise */
+			"\tmake_flags_live();\n" /* Get the flags back */
+			"\tdont_care_flags();\n");
+	if (!noflags) {
+		comprintf("\tstart_needflags();\n"
+				"\tset_zero(s,tmp);\n"
+				"\tlive_flags();\n"
+				"\tend_needflags();\n");
+	}
+#endif
+}
+
+/* CLR: write zero to the effective address.  genamode is called with
+ * getv==2 (address only, value not fetched).  Legacy path materializes a
+ * zero register and derives NZVC via genflags(flag_logical). */
+static void gen_clr(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 2, 0);
+	comprintf("\t dont_care_flags();\n");
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_CLR(tmp);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_CLR(tmp);\n");
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	genamode(curi->smode, "srcreg", curi->size, "src", 2, 0);
+	start_brace();
+	comprintf("\tint dst=scratchie++;\n");
+	comprintf("\tmov_l_ri(dst,0);\n");
+	genflags(flag_logical, curi->size, "dst", "", "");
+	genastore("dst", curi->smode, "srcreg", curi->size, "src");
+#endif
+}
+
+/* CMP: compare src with dst; only flags are produced, no result is stored.
+ * With noflags set the instruction degenerates to a no-op (see the emitted
+ * comment). */
+static void gen_cmp(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+	comprintf("\t dont_care_flags();\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_CMP_%s(dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		comprintf("\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+	} else {
+		comprintf("/* Weird --- CMP with noflags ;-) */\n");
+	}
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+	genflags(flag_cmp, curi->size, "", "src", "dst");
+#endif
+}
+
+/* CMPA: compare against an address register.  Per 68k semantics the source
+ * is sign-extended to 32 bits and the comparison is always long-sized; the
+ * legacy path does the extension explicitly before genflags(flag_cmp). */
+static void gen_cmpa(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_long, "dst", 1, 0);
+	start_brace();
+	if (!noflags) {
+		comprintf("\t dont_care_flags();\n");
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_CMPA_%s(dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		comprintf("\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+	} else {
+		comprintf("\tdont_care_flags();\n");
+		comprintf("/* Weird --- CMP with noflags ;-) */\n");
+	}
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_long, "dst", 1, 0);
+	start_brace();
+	comprintf("\tint tmps=scratchie++;\n");
+	switch (curi->size) {
+	case sz_byte:
+		comprintf("\tsign_extend_8_rr(tmps,src);\n");
+		break;
+	case sz_word:
+		comprintf("\tsign_extend_16_rr(tmps,src);\n");
+		break;
+	case sz_long:
+		/* Long source needs no extension; alias tmps to src. */
+		comprintf("tmps=src;\n");
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	genflags(flag_cmp, sz_long, "", "tmps", "dst");
+#endif
+}
+
+/* DBcc: decrement-and-branch.  This generator folds the (immediate) branch
+ * offset into a native target address, zeroes m68k_pc_offset, and then
+ * emits either a register_branch (DBF / cc==1) or a cmov-based fall-through
+ * selection for the general condition codes.  The #if 0 branch is an
+ * abandoned JIT2 variant kept for reference.  NOTE(review): order of the
+ * emitted statements is load-bearing (PC bookkeeping before start_brace);
+ * do not reorder. */
+static void gen_dbcc(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+	(void) ssize;
+#if 0
+	isjump;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "offs", 1, 0);
+
+	comprintf("uae_u32 voffs;\n");
+	comprintf("voffs = get_const(offs);\n");
+	/* That offs is an immediate, so we can clobber it with abandon */
+	switch (curi->size) {
+	case sz_word:
+		comprintf("\t voffs = (uae_s32)((uae_s16)voffs);\n");
+		break;
+	default:
+		assert(0); /* Seems this only comes in word flavour */
+		break;
+	}
+	comprintf("\t voffs -= m68k_pc_offset - m68k_pc_offset_thisinst - 2;\n");
+	comprintf("\t voffs += (uintptr)comp_pc_p + m68k_pc_offset;\n");
+
+	comprintf("\t add_const_v(PC_P, m68k_pc_offset);\n");
+	comprintf("\t m68k_pc_offset = 0;\n");
+
+	start_brace();
+
+	if (curi->cc >= 2) {
+		comprintf("\t make_flags_live();\n"); /* Load the flags */
+	}
+
+	assert(curi->size == sz_word);
+
+	switch (curi->cc) {
+	case 0: /* This is an elaborate nop? */
+		break;
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+	case 8:
+	case 9:
+	case 10:
+	case 11:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jnf_DBcc(src,voffs,%d);\n", curi->cc);
+		comprintf("\t end_needflags();\n");
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	genastore("src", curi->smode, "srcreg", curi->size, "src");
+	gen_update_next_handler();
+#else
+	isjump;
+	uses_cmov;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "offs", 1, 0);
+
+	/* That offs is an immediate, so we can clobber it with abandon */
+	switch (curi->size) {
+	case sz_word:
+		comprintf("\tsign_extend_16_rr(offs,offs);\n");
+		break;
+	default:
+		assert(0); /* Seems this only comes in word flavour */
+		break;
+	}
+	comprintf("\tsub_l_ri(offs,m68k_pc_offset-m68k_pc_offset_thisinst-2);\n");
+	comprintf("\tarm_ADD_l_ri(offs,(uintptr)comp_pc_p);\n");
+	/* New PC,
+	 once the
+	 offset_68k is
+	 * also added */
+	/* Let's fold in the m68k_pc_offset at this point */
+	comprintf("\tarm_ADD_l_ri(offs,m68k_pc_offset);\n");
+	comprintf("\tarm_ADD_l_ri(PC_P,m68k_pc_offset);\n");
+	comprintf("\tm68k_pc_offset=0;\n");
+
+	start_brace();
+	comprintf("\tint nsrc=scratchie++;\n");
+
+	if (curi->cc >= 2) {
+		comprintf("\tmake_flags_live();\n"); /* Load the flags */
+	}
+
+	assert (curi->size == sz_word);
+
+	switch (curi->cc) {
+	case 0: /* This is an elaborate nop? */
+		break;
+	case 1:
+		/* DBF/DBRA: unconditional decrement; branch while no borrow. */
+		comprintf("\tstart_needflags();\n");
+		comprintf("\tsub_w_ri(src,1);\n");
+		comprintf("\t end_needflags();\n");
+		start_brace();
+		comprintf("\tuae_u32 v2,v;\n"
+				"\tuae_u32 v1=get_const(PC_P);\n");
+		comprintf("\tv2=get_const(offs);\n"
+				"\tregister_branch(v1,v2,%d);\n", NATIVE_CC_CC);
+		break;
+
+	case 8:
+		failure;
+		break; /* Work out details! FIXME */
+	case 9:
+		failure;
+		break; /* Not critical, though! */
+
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+	case 10:
+	case 11:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+		comprintf("\tmov_l_rr(nsrc,src);\n");
+		comprintf("\tlea_l_brr(scratchie,src,(uae_s32)-1);\n"
+				"\tmov_w_rr(src,scratchie);\n");
+		comprintf("\tcmov_l_rr(offs,PC_P,%d);\n", cond_codes[curi->cc]);
+		comprintf("\tcmov_l_rr(src,nsrc,%d);\n", cond_codes[curi->cc]);
+		/* OK, now for cc=true, we have src==nsrc and offs==PC_P,
+		 so whether we move them around doesn't matter. However,
+		 if cc=false, we have offs==jump_pc, and src==nsrc-1 */
+
+		comprintf("\t start_needflags();\n");
+		comprintf("\ttest_w_rr(nsrc,nsrc);\n");
+		comprintf("\t end_needflags();\n");
+		comprintf("\tcmov_l_rr(PC_P,offs,%d);\n", NATIVE_CC_NE);
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	genastore("src", curi->smode, "srcreg", curi->size, "src");
+	gen_update_next_handler();
+#endif
+}
+
+/* EOR: exclusive-or src into dst.  JIT2 path uses jff_/jnf_EOR helpers;
+ * legacy path goes through genflags(flag_eor). */
+static void gen_eor(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+
+	comprintf("\t dont_care_flags();\n");
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		/* Fix: open the flag-critical region before the flag-producing
+		 * helper; the original emitted end_needflags() with no matching
+		 * start_needflags(), unlike every sibling generator. */
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_EOR_%s(tmp,dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_EOR(tmp,dst,src);\n");
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	genflags(flag_eor, curi->size, "", "src", "dst");
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
+/* EOR to SR/CCR: xor an immediate into the condition codes via the
+ * jff_EORSR helper; the CCR bit layout is remapped through ARM_CCR_MAP and
+ * the X bit (bit 4) is passed separately.  Not compiled without USE_JIT2. */
+static void gen_eorsr(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_EORSR(ARM_CCR_MAP[src & 0xF], ((src & 0x10) >> 4));\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	}
+#else
+	(void) curi;
+	failure;
+	isjump;
+#endif
+}
+
+/* EXG: exchange two registers through a scratch temporary.  Flags are
+ * unaffected by 68k EXG, so no flag code is emitted. */
+static void gen_exg(uae_u32 opcode, struct instr *curi, const char* ssize) {
+#if 0
+#else
+	(void) opcode;
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+	comprintf("\tint tmp=scratchie++;\n"
+			"\tmov_l_rr(tmp,src);\n");
+	genastore("dst", curi->smode, "srcreg", curi->size, "src");
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
+/* EXT: sign-extend a data register.  curi->size encodes the destination
+ * width: sz_word means byte->word (sign_extend_8), sz_long means
+ * word->long (sign_extend_16).  The sz_byte case presumably corresponds to
+ * the EXTB.L encoding path — TODO confirm against the opcode table. */
+static void gen_ext(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", sz_long, "src", 1, 0);
+	comprintf("\t dont_care_flags();\n");
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_EXT_%s(tmp,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_EXT_%s(tmp,src);\n", ssize);
+	}
+	genastore("tmp", curi->smode, "srcreg",
+			curi->size == sz_word ? sz_word : sz_long, "src");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", sz_long, "src", 1, 0);
+	comprintf("\tdont_care_flags();\n");
+	start_brace();
+	switch (curi->size) {
+	case sz_byte:
+		comprintf("\tint dst = src;\n"
+				"\tsign_extend_8_rr(src,src);\n");
+		break;
+	case sz_word:
+		comprintf("\tint dst = scratchie++;\n"
+				"\tsign_extend_8_rr(dst,src);\n");
+		break;
+	case sz_long:
+		comprintf("\tint dst = src;\n"
+				"\tsign_extend_16_rr(src,src);\n");
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	genflags(flag_logical, curi->size == sz_word ? sz_word : sz_long, "dst", "",
+			"");
+	genastore("dst", curi->smode, "srcreg",
+			curi->size == sz_word ? sz_word : sz_long, "src");
+#endif
+}
+
+/* LSL: logical shift left by register (Dreg/other) or immediate (immi).
+ * Register counts are taken mod 64; shifts >= operand width must yield 0,
+ * which the legacy path implements with the highmask test + cmov.  The
+ * carry flag is the last bit shifted out, recovered by re-shifting a copy
+ * (cdata) by count-1 and testing its top bit.  smode==Dreg with
+ * srcreg==dstreg is refused at runtime (FAIL) because the shift would
+ * clobber its own count.  NOTE(review): statement order in the emitted
+ * sequences is load-bearing; do not reorder. */
+static void gen_lsl(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
+				"  FAIL(1);\n"
+				"  return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\tdont_care_flags();\n");
+
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	comprintf("\t int tmp=scratchie++;\n");
+	if (curi->smode != immi) {
+		if (!noflags) {
+			start_brace();
+			comprintf("\t make_flags_live();\n");
+			comprintf("\t start_needflags();\n");
+			comprintf("\t jff_LSL_%s_reg(tmp,data,cnt);\n", ssize);
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf(
+					"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+		} else {
+			start_brace();
+			comprintf("\t jnf_LSL_reg(tmp,data,cnt);\n");
+		}
+	} else {
+		start_brace();
+		if (!noflags) {
+			comprintf("\t make_flags_live();\n");
+			comprintf("\t start_needflags();\n");
+			comprintf("\t jff_LSL_%s_imm(tmp,data,srcreg);\n", ssize);
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf(
+					"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+		} else {
+			comprintf("\t jnf_LSL_imm(tmp,data,srcreg);\n");
+		}
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
+#else
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
+				"  FAIL(1);\n"
+				"  return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\tdont_care_flags();\n");
+
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	if (curi->smode != immi) {
+		if (!noflags) {
+			uses_cmov;
+			start_brace();
+			comprintf("\tint highmask;\n"
+					"\tint cdata=scratchie++;\n"
+					"\tint tmpcnt=scratchie++;\n");
+			comprintf("\tmov_l_rr(tmpcnt,cnt);\n"
+					"\tand_l_ri(tmpcnt,63);\n"
+					"\tmov_l_ri(cdata,0);\n"
+					"\tcmov_l_rr(cdata,data,%d);\n", NATIVE_CC_NE);
+			/* cdata is now either data (for shift count!=0) or
+			 0 (for shift count==0) */
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tshll_b_rr(data,cnt);\n"
+						"\thighmask=0x38;\n");
+				break;
+			case sz_word:
+				comprintf("\tshll_w_rr(data,cnt);\n"
+						"\thighmask=0x30;\n");
+				break;
+			case sz_long:
+				comprintf("\tshll_l_rr(data,cnt);\n"
+						"\thighmask=0x20;\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			/* Shift counts >= operand width force a zero result. */
+			comprintf("test_l_ri(cnt,highmask);\n"
+					"mov_l_ri(scratchie,0);\n"
+					"cmov_l_rr(scratchie,data,%d);\n", NATIVE_CC_EQ);
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tmov_b_rr(data,scratchie);\n");
+				break;
+			case sz_word:
+				comprintf("\tmov_w_rr(data,scratchie);\n");
+				break;
+			case sz_long:
+				comprintf("\tmov_l_rr(data,scratchie);\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			/* Result of shift is now in data. Now we need to determine
+			 the carry by shifting cdata one less */
+			comprintf("\tsub_l_ri(tmpcnt,1);\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tshll_b_rr(cdata,tmpcnt);\n");
+				break;
+			case sz_word:
+				comprintf("\tshll_w_rr(cdata,tmpcnt);\n");
+				break;
+			case sz_long:
+				comprintf("\tshll_l_rr(cdata,tmpcnt);\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			comprintf("test_l_ri(tmpcnt,highmask);\n"
+					"mov_l_ri(scratchie,0);\n"
+					"cmov_l_rr(cdata,scratchie,%d);\n", NATIVE_CC_NE);
+			/* And create the flags */
+			comprintf("\tstart_needflags();\n");
+			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\t  test_b_rr(data,data);\n");
+				comprintf("\t bt_l_ri(cdata,7);\n");
+				break;
+			case sz_word:
+				comprintf("\t  test_w_rr(data,data);\n");
+				comprintf("\t bt_l_ri(cdata,15);\n");
+				break;
+			case sz_long:
+				comprintf("\t  test_l_rr(data,data);\n");
+				comprintf("\t bt_l_ri(cdata,31);\n");
+				break;
+			}
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+			genastore("data", curi->dmode, "dstreg", curi->size, "data");
+		} else {
+			uses_cmov;
+			start_brace();
+			comprintf("\tint highmask;\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tshll_b_rr(data,cnt);\n"
+						"\thighmask=0x38;\n");
+				break;
+			case sz_word:
+				comprintf("\tshll_w_rr(data,cnt);\n"
+						"\thighmask=0x30;\n");
+				break;
+			case sz_long:
+				comprintf("\tshll_l_rr(data,cnt);\n"
+						"\thighmask=0x20;\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			comprintf("test_l_ri(cnt,highmask);\n"
+					"mov_l_ri(scratchie,0);\n"
+					"cmov_l_rr(scratchie,data,%d);\n", NATIVE_CC_EQ);
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tmov_b_rr(data,scratchie);\n");
+				break;
+			case sz_word:
+				comprintf("\tmov_w_rr(data,scratchie);\n");
+				break;
+			case sz_long:
+				comprintf("\tmov_l_rr(data,scratchie);\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			genastore("data", curi->dmode, "dstreg", curi->size, "data");
+		}
+	} else {
+		/* Immediate shift count (1..8): carry is bit (width - count) of
+		 the pre-shift value, saved in tmp. */
+		start_brace();
+		comprintf("\tint tmp=scratchie++;\n"
+				"\tint bp;\n"
+				"\tmov_l_rr(tmp,data);\n");
+		switch (curi->size) {
+		case sz_byte:
+			comprintf("\tshll_b_ri(data,srcreg);\n"
+					"\tbp=8-srcreg;\n");
+			break;
+		case sz_word:
+			comprintf("\tshll_w_ri(data,srcreg);\n"
+					"\tbp=16-srcreg;\n");
+			break;
+		case sz_long:
+			comprintf("\tshll_l_ri(data,srcreg);\n"
+					"\tbp=32-srcreg;\n");
+			break;
+		default:
+			assert(0);
+			break;
+		}
+
+		if (!noflags) {
+			comprintf("\tstart_needflags();\n");
+			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\t  test_b_rr(data,data);\n");
+				break;
+			case sz_word:
+				comprintf("\t  test_w_rr(data,data);\n");
+				break;
+			case sz_long:
+				comprintf("\t  test_l_rr(data,data);\n");
+				break;
+			}
+			comprintf("\t bt_l_ri(tmp,bp);\n"); /* Set C */
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+		}
+		genastore("data", curi->dmode, "dstreg", curi->size, "data");
+	}
+#endif
+}
+
+/* LSLW: logical-shift-left-by-one, word form (presumably the
+ * memory-operand encoding — TODO confirm).  JIT2 only; otherwise falls
+ * back to the interpreter. */
+static void gen_lslw(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_LSLW(tmp,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_LSLW(tmp,src);\n");
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) curi;
+	failure;
+#endif
+}
+
+/* LSR: logical shift right; mirror image of gen_lsl.  Counts taken mod 64,
+ * over-width shifts forced to zero via highmask+cmov, and carry recovered
+ * by shifting a copy (cdata) by count-1 and testing bit 0 (for the
+ * immediate form, bit srcreg-1 of the pre-shift value).  srcreg==dstreg
+ * with Dreg count is refused (FAIL).  NOTE(review): emitted statement
+ * order is load-bearing; do not reorder. */
+static void gen_lsr(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
+				"  FAIL(1);\n"
+				"  return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\t dont_care_flags();\n");
+
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	comprintf("\t int tmp=scratchie++;\n");
+	if (curi->smode != immi) {
+		if (!noflags) {
+			start_brace();
+			comprintf("\t make_flags_live();\n");
+			comprintf("\t start_needflags();\n");
+			comprintf("\t jff_LSR_%s_reg(tmp,data,cnt);\n", ssize);
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+		} else {
+			start_brace();
+			comprintf("\t jnf_LSR_%s_reg(tmp,data,cnt);\n", ssize);
+		}
+	} else {
+		start_brace();
+		char *op;
+		if (!noflags) {
+			comprintf("\t make_flags_live();\n");
+			comprintf("\t start_needflags();\n");
+			op = "ff";
+		} else
+			op = "nf";
+
+		comprintf("\t j%s_LSR_%s_imm(tmp,data,srcreg);\n", op, ssize);
+
+		if (!noflags) {
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf(
+					"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+		}
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
+#else
+	(void) ssize;
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
+				"  FAIL(1);\n"
+				"  return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\tdont_care_flags();\n");
+
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	if (curi->smode != immi) {
+		if (!noflags) {
+			uses_cmov;
+			start_brace();
+			comprintf("\tint highmask;\n"
+					"\tint cdata=scratchie++;\n"
+					"\tint tmpcnt=scratchie++;\n");
+			comprintf("\tmov_l_rr(tmpcnt,cnt);\n"
+					"\tand_l_ri(tmpcnt,63);\n"
+					"\tmov_l_ri(cdata,0);\n"
+					"\tcmov_l_rr(cdata,data,%d);\n", NATIVE_CC_NE);
+			/* cdata is now either data (for shift count!=0) or
+			 0 (for shift count==0) */
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tshrl_b_rr(data,cnt);\n"
+						"\thighmask=0x38;\n");
+				break;
+			case sz_word:
+				comprintf("\tshrl_w_rr(data,cnt);\n"
+						"\thighmask=0x30;\n");
+				break;
+			case sz_long:
+				comprintf("\tshrl_l_rr(data,cnt);\n"
+						"\thighmask=0x20;\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			/* Shift counts >= operand width force a zero result. */
+			comprintf("test_l_ri(cnt,highmask);\n"
+					"mov_l_ri(scratchie,0);\n"
+					"cmov_l_rr(scratchie,data,%d);\n", NATIVE_CC_EQ);
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tmov_b_rr(data,scratchie);\n");
+				break;
+			case sz_word:
+				comprintf("\tmov_w_rr(data,scratchie);\n");
+				break;
+			case sz_long:
+				comprintf("\tmov_l_rr(data,scratchie);\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			/* Result of shift is now in data. Now we need to determine
+			 the carry by shifting cdata one less */
+			comprintf("\tsub_l_ri(tmpcnt,1);\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tshrl_b_rr(cdata,tmpcnt);\n");
+				break;
+			case sz_word:
+				comprintf("\tshrl_w_rr(cdata,tmpcnt);\n");
+				break;
+			case sz_long:
+				comprintf("\tshrl_l_rr(cdata,tmpcnt);\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			comprintf("test_l_ri(tmpcnt,highmask);\n"
+					"mov_l_ri(scratchie,0);\n"
+					"cmov_l_rr(cdata,scratchie,%d);\n", NATIVE_CC_NE);
+			/* And create the flags */
+			comprintf("\tstart_needflags();\n");
+			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\t  test_b_rr(data,data);\n");
+				break;
+			case sz_word:
+				comprintf("\t  test_w_rr(data,data);\n");
+				break;
+			case sz_long:
+				comprintf("\t  test_l_rr(data,data);\n");
+				break;
+			}
+			comprintf("\t bt_l_ri(cdata,0);\n"); /* Set C */
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+			genastore("data", curi->dmode, "dstreg", curi->size, "data");
+		} else {
+			uses_cmov;
+			start_brace();
+			comprintf("\tint highmask;\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tshrl_b_rr(data,cnt);\n"
+						"\thighmask=0x38;\n");
+				break;
+			case sz_word:
+				comprintf("\tshrl_w_rr(data,cnt);\n"
+						"\thighmask=0x30;\n");
+				break;
+			case sz_long:
+				comprintf("\tshrl_l_rr(data,cnt);\n"
+						"\thighmask=0x20;\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			comprintf("test_l_ri(cnt,highmask);\n"
+					"mov_l_ri(scratchie,0);\n"
+					"cmov_l_rr(scratchie,data,%d);\n", NATIVE_CC_EQ);
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\tmov_b_rr(data,scratchie);\n");
+				break;
+			case sz_word:
+				comprintf("\tmov_w_rr(data,scratchie);\n");
+				break;
+			case sz_long:
+				comprintf("\tmov_l_rr(data,scratchie);\n");
+				break;
+			default:
+				assert(0);
+				break;
+			}
+			genastore("data", curi->dmode, "dstreg", curi->size, "data");
+		}
+	} else {
+		/* Immediate count: carry is bit srcreg-1 of the pre-shift value,
+		 saved in tmp. */
+		start_brace();
+		comprintf("\tint tmp=scratchie++;\n"
+				"\tint bp;\n"
+				"\tmov_l_rr(tmp,data);\n");
+		switch (curi->size) {
+		case sz_byte:
+			comprintf("\tshrl_b_ri(data,srcreg);\n"
+					"\tbp=srcreg-1;\n");
+			break;
+		case sz_word:
+			comprintf("\tshrl_w_ri(data,srcreg);\n"
+					"\tbp=srcreg-1;\n");
+			break;
+		case sz_long:
+			comprintf("\tshrl_l_ri(data,srcreg);\n"
+					"\tbp=srcreg-1;\n");
+			break;
+		default:
+			assert(0);
+			break;
+		}
+
+		if (!noflags) {
+			comprintf("\tstart_needflags();\n");
+			comprintf("\tif (needed_flags & FLAG_ZNV)\n");
+			switch (curi->size) {
+			case sz_byte:
+				comprintf("\t  test_b_rr(data,data);\n");
+				break;
+			case sz_word:
+				comprintf("\t  test_w_rr(data,data);\n");
+				break;
+			case sz_long:
+				comprintf("\t  test_l_rr(data,data);\n");
+				break;
+			}
+			comprintf("\t bt_l_ri(tmp,bp);\n"); /* Set C */
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+			comprintf("\t duplicate_carry();\n");
+			comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+		}
+		genastore("data", curi->dmode, "dstreg", curi->size, "data");
+	}
+#endif
+}
+
+/* LSRW: logical-shift-right-by-one, word form (presumably the
+ * memory-operand encoding — TODO confirm).  JIT2 only; otherwise falls
+ * back to the interpreter. */
+static void gen_lsrw(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int tmp = scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_LSRW(tmp,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_LSRW(tmp,src);\n");
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) curi;
+	failure;
+#endif
+}
+
+/* MOVE: the register-destination path sets flags from the moved value
+ * (except for Areg destinations, i.e. MOVEA-like behavior where flags are
+ * untouched); the memory-destination path tests the source and stores it. */
+static void gen_move(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	switch (curi->dmode) {
+	case Dreg:
+	case Areg:
+		genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+		genamode(curi->dmode, "dstreg", curi->size, "dst", 2, 0);
+		comprintf("\t dont_care_flags();\n");
+		start_brace();
+		comprintf("\t int tmp=scratchie++;\n");
+		if (!noflags && curi->dmode == Dreg) {
+			comprintf("\t start_needflags();\n");
+			comprintf("\t jff_MOVE_%s(tmp, src);\n", ssize);
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+		} else {
+			comprintf("\t tmp = src;\n");
+		}
+		genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
+		break;
+
+	default: /* It goes to memory, not a register */
+		genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+		genamode(curi->dmode, "dstreg", curi->size, "dst", 2, 0);
+		comprintf("\t dont_care_flags();\n");
+		start_brace();
+		if (!noflags) {
+			comprintf("\t start_needflags();\n");
+			comprintf("\t jff_TST_%s(src);\n", ssize);
+			comprintf("\t live_flags();\n");
+			comprintf("\t end_needflags();\n");
+		}
+		genastore("src", curi->dmode, "dstreg", curi->size, "dst");
+		break;
+	}
+#else
+	(void) ssize;
+
+	switch (curi->dmode) {
+	case Dreg:
+	case Areg:
+		genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+		genamode(curi->dmode, "dstreg", curi->size, "dst", 2, 0);
+		genflags(flag_mov, curi->size, "", "src", "dst");
+		genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+		break;
+	default: /* It goes to memory, not a register */
+		genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+		genamode(curi->dmode, "dstreg", curi->size, "dst", 2, 0);
+		genflags(flag_logical, curi->size, "src", "", "");
+		genastore("src", curi->dmode, "dstreg", curi->size, "dst");
+		break;
+	}
+#endif
+}
+
+/* MOVEA: move to address register; word sources are sign-extended to long
+ * and flags are never affected.  NOTE(review): the legacy path allocates a
+ * scratch register `tmps` that is never used — harmless but wastes one
+ * scratchie slot; kept as-is to preserve the patch verbatim. */
+static void gen_movea(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 2, 0);
+
+	start_brace();
+	comprintf("\t jnf_MOVEA_%s(dst, src);\n", ssize);
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+#else
+	(void) ssize;
+
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 2, 0);
+
+	start_brace();
+	comprintf("\tint tmps=scratchie++;\n");
+	switch (curi->size) {
+	case sz_word:
+		comprintf("\tsign_extend_16_rr(dst,src);\n");
+		break;
+	case sz_long:
+		comprintf("\tmov_l_rr(dst,src);\n");
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+#endif
+}
+
+/* MULL: 68020+ long multiply (MULS.L/MULU.L) with an extension word.
+ * Bit 10 of the extension word selects a 64-bit result (second destination
+ * register in bits 0-2), bit 11 selects signed vs unsigned.  The legacy
+ * path cannot produce flags and falls back to the interpreter when flags
+ * are needed. */
+static void gen_mull(uae_u32 opcode, struct instr *curi, const char* ssize) {
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	comprintf("\t uae_u16 extra=%s;\n", gen_nextiword());
+	comprintf("\t int r2=(extra>>12)&7;\n"
+			"\t int tmp=scratchie++;\n");
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	/* The two operands are in dst and r2 */
+	if (!noflags) {
+		comprintf("\t if (extra & 0x0400) {\n"); /* Need full 64 bit result */
+		comprintf("\t   int r3=(extra & 7);\n");
+		comprintf("\t   mov_l_rr(r3,dst);\n"); /* operands now in r3 and r2 */
+		comprintf("\t   if (extra & 0x0800) { \n"); /* signed */
+		comprintf("\t\t   jff_MULS64(r2,r3);\n");
+		comprintf("\t   } else { \n");
+		comprintf("\t\t   jff_MULU64(r2,r3);\n");
+		comprintf("\t   } \n"); /* The result is in r2/r3, with r2 holding the lower 32 bits */
+		comprintf("\t } else {\n"); /* Only want 32 bit result */
+		/* operands in dst and r2, result goes into r2 */
+		/* shouldn't matter whether it's signed or unsigned?!? */
+		comprintf("\t   if (extra & 0x0800) { \n"); /* signed */
+		comprintf("\t     jff_MULS32(r2,dst);\n");
+		comprintf("\t   } else { \n");
+		comprintf("\t\t   jff_MULU32(r2,dst);\n");
+		comprintf("\t   } \n"); /* The result is in r2, with r2 holding the lower 32 bits */
+		comprintf("\t }\n");
+	} else {
+		comprintf("\t if (extra & 0x0400) {\n"); /* Need full 64 bit result */
+		comprintf("\t   int r3=(extra & 7);\n");
+		comprintf("\t   mov_l_rr(r3,dst);\n"); /* operands now in r3 and r2 */
+		comprintf("\t   if (extra & 0x0800) { \n"); /* signed */
+		comprintf("\t\t   jnf_MULS64(r2,r3);\n");
+		comprintf("\t   } else { \n");
+		comprintf("\t\t   jnf_MULU64(r2,r3);\n");
+		comprintf("\t   } \n"); /* The result is in r2/r3, with r2 holding the lower 32 bits */
+		comprintf("\t } else {\n"); /* Only want 32 bit result */
+		/* operands in dst and r2, result goes into r2 */
+		/* shouldn't matter whether it's signed or unsigned?!? */
+		comprintf("\t   if (extra & 0x0800) { \n"); /* signed */
+		comprintf("\t     jnf_MULS32(r2,dst);\n");
+		comprintf("\t   } else { \n");
+		comprintf("\t\t   jnf_MULU32(r2,dst);\n");
+		comprintf("\t   } \n"); /* The result is in r2, with r2 holding the lower 32 bits */
+		comprintf("\t }\n");
+	}
+#else
+	if (!noflags) {
+		failure;
+		return;
+	}
+	comprintf("\tuae_u16 extra=%s;\n", gen_nextiword());
+	comprintf("\tint r2=(extra>>12)&7;\n"
+			"\tint tmp=scratchie++;\n");
+
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	/* The two operands are in dst and r2 */
+	comprintf("\tif (extra&0x0400) {\n" /* Need full 64 bit result */
+			"\tint r3=(extra&7);\n"
+			"\tmov_l_rr(r3,dst);\n"); /* operands now in r3 and r2 */
+	comprintf("\tif (extra&0x0800) { \n" /* signed */
+			"\t\timul_64_32(r2,r3);\n"
+			"\t} else { \n"
+			"\t\tmul_64_32(r2,r3);\n"
+			"\t} \n");
+	/* The result is in r2/tmp, with r2 holding the lower 32 bits */
+	comprintf("\t} else {\n"); /* Only want 32 bit result */
+	/* operands in dst and r2, result goes into r2 */
+	/* shouldn't matter whether it's signed or unsigned?!? */
+	comprintf("\timul_32_32(r2,dst);\n"
+			"\t}\n");
+#endif
+}
+
+static void gen_muls(uae_u32 opcode, struct instr *curi, const char* ssize) { /* MULS.W: signed 16x16 -> 32-bit multiply into dst */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", sz_word, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_word, "dst", 1, 0);
+	start_brace();
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_MULS(dst,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_MULS(dst,src);\n");
+	}
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+#else
+	comprintf("\tdont_care_flags();\n");
+	genamode(curi->smode, "srcreg", sz_word, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_word, "dst", 1, 0);
+	comprintf("\tsign_extend_16_rr(scratchie,src);\n" /* widen both 16-bit operands before a 32x32 multiply */
+			"\tsign_extend_16_rr(dst,dst);\n"
+			"\timul_32_32(dst,scratchie);\n");
+	genflags(flag_logical, sz_long, "dst", "", "");
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+#endif
+}
+
+static void gen_mulu(uae_u32 opcode, struct instr *curi, const char* ssize) { /* MULU.W: unsigned 16x16 -> 32-bit multiply into dst */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", sz_word, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_word, "dst", 1, 0);
+	start_brace();
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_MULU(dst,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_MULU(dst,src);\n");
+	}
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+#else
+	comprintf("\tdont_care_flags();\n");
+	genamode(curi->smode, "srcreg", sz_word, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_word, "dst", 1, 0);
+	/* To do 16x16 unsigned multiplication, we actually use
+	 32x32 signed, and zero-extend the registers first.
+	 That solves the problem of MUL needing dedicated registers
+	 on the x86 */
+	comprintf("\tzero_extend_16_rr(scratchie,src);\n"
+			"\tzero_extend_16_rr(dst,dst);\n"
+			"\timul_32_32(dst,scratchie);\n");
+	genflags(flag_logical, sz_long, "dst", "", "");
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+
+#endif
+}
+
+static void gen_nbcd(uae_u32 opcode, struct instr *curi, const char* ssize) { /* NBCD: not compiled -- always falls back to the interpreter */
+#if 0
+#else
+	(void) opcode;
+	(void) curi;
+	(void) ssize;
+	failure;
+	/* Nope! */
+#endif
+}
+
+static void gen_neg(uae_u32 opcode, struct instr *curi, const char* ssize) { /* NEG: two's-complement negate of src, in place */
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_NEG_%s(tmp,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+		comprintf("\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+	} else {
+		comprintf("\t jnf_NEG(tmp,src);\n");
+	}
+
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\tint dst=scratchie++;\n");
+	comprintf("\tmov_l_ri(dst,0);\n"); /* NEG is implemented as 0 - src so flag_sub can set the flags */
+	genflags(flag_sub, curi->size, "", "src", "dst");
+	genastore("dst", curi->smode, "srcreg", curi->size, "src");
+#endif
+}
+
+static void gen_negx(uae_u32 opcode, struct instr *curi, const char* ssize) { /* NEGX: negate with extend (X flag), in place */
+	(void) opcode;
+#if defined(USE_JIT2)
+	isaddx;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int dst=scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t restore_inverted_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_NEGX_%s(dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+		comprintf("\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+	} else {
+		comprintf("\t restore_inverted_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t jnf_NEGX(dst,src);\n");
+	}
+
+	genastore("dst", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) ssize;
+	isaddx;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\tint dst=scratchie++;\n");
+	comprintf("\tmov_l_ri(dst,0);\n"); /* NEGX is 0 - src - X, handled by flag_subx */
+	genflags(flag_subx, curi->size, "", "src", "dst");
+	genastore("dst", curi->smode, "srcreg", curi->size, "src");
+#endif
+}
+
+static void gen_not(uae_u32 opcode, struct instr *curi, const char* ssize) { /* NOT: bitwise complement of src, in place */
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	comprintf("\t dont_care_flags();\n");
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_NOT_%s(tmp,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_NOT(tmp,src);\n"); /* fixed: was passing a stray ssize arg with no %s in the format */
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\tint dst=scratchie++;\n");
+	comprintf("\tmov_l_ri(dst,0xffffffff);\n"); /* NOT is implemented as src ^ 0xffffffff */
+	genflags(flag_eor, curi->size, "", "src", "dst");
+	genastore("dst", curi->smode, "srcreg", curi->size, "src");
+#endif
+}
+
+static void gen_or(uae_u32 opcode, struct instr *curi, const char* ssize) { /* OR: bitwise or of src into dst */
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+
+	comprintf("\t dont_care_flags();\n");
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n"); comprintf("\t jff_OR_%s(tmp, dst,src);\n", ssize); /* start_needflags added: end_needflags below was unbalanced */
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_OR(tmp, dst,src);\n");
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	genflags(flag_or, curi->size, "", "src", "dst");
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
+static void gen_orsr(uae_u32 opcode, struct instr *curi, const char* ssize) { /* OR #imm,CCR: or immediate into the condition codes */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ORSR(ARM_CCR_MAP[src & 0xF], ((src & 0x10) >> 4));\n"); /* low nibble -> CZNV (remapped to ARM order), bit 4 -> X */
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	}
+#else
+	(void) curi;
+	failure;
+	isjump;
+#endif
+}
+
+static void gen_rol(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROL: rotate left by register/immediate count */
+	(void) opcode;
+#if defined(USE_JIT2)
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n" /* count and data in the same register is not supported: fall back */
+				" FAIL(1);\n"
+				" return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ROL_%s(tmp,data,cnt);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_ROL_%s(tmp,data,cnt);\n", ssize);
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
+#else
+	(void) ssize;
+
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
+				" FAIL(1);\n"
+				" return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\tdont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	start_brace();
+
+	switch (curi->size) {
+	case sz_long:
+		comprintf("\t rol_l_rr(data,cnt);\n");
+		break;
+	case sz_word:
+		comprintf("\t rol_w_rr(data,cnt);\n");
+		break;
+	case sz_byte:
+		comprintf("\t rol_b_rr(data,cnt);\n");
+		break;
+	}
+
+	if (!noflags) {
+		comprintf("\tstart_needflags();\n");
+		comprintf("\tif (needed_flags & FLAG_ZNV)\n");
+		switch (curi->size) {
+		case sz_byte:
+			comprintf("\t test_b_rr(data,data);\n");
+			break;
+		case sz_word:
+			comprintf("\t test_w_rr(data,data);\n");
+			break;
+		case sz_long:
+			comprintf("\t test_l_rr(data,data);\n");
+			break;
+		}
+		comprintf("\t bt_l_ri(data,0x00);\n"); /* Set C */
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	}
+	genastore("data", curi->dmode, "dstreg", curi->size, "data");
+#endif
+}
+
+static void gen_rolw(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROL.W <ea>: memory rotate left by one */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int tmp = scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ROLW(tmp,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_ROLW(tmp,src);\n");
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) curi;
+	failure; /* no legacy implementation: interpreter handles it */
+#endif
+}
+
+static void gen_ror(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROR: rotate right by register/immediate count */
+	(void) opcode;
+#if defined(USE_JIT2)
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n" /* count and data in the same register is not supported: fall back */
+				" FAIL(1);\n"
+				" return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ROR_%s(tmp,data,cnt);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_ROR_%s(tmp,data,cnt);\n", ssize);
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
+#else
+	(void) ssize;
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n"
+				" FAIL(1);\n"
+				" return;\n"
+				"} \n");
+		start_brace();
+	}
+	comprintf("\tdont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	start_brace();
+
+	switch (curi->size) {
+	case sz_long:
+		comprintf("\t ror_l_rr(data,cnt);\n");
+		break;
+	case sz_word:
+		comprintf("\t ror_w_rr(data,cnt);\n");
+		break;
+	case sz_byte:
+		comprintf("\t ror_b_rr(data,cnt);\n");
+		break;
+	}
+
+	if (!noflags) {
+		comprintf("\tstart_needflags();\n");
+		comprintf("\tif (needed_flags & FLAG_ZNV)\n");
+		switch (curi->size) {
+		case sz_byte:
+			comprintf("\t test_b_rr(data,data);\n");
+			break;
+		case sz_word:
+			comprintf("\t test_w_rr(data,data);\n");
+			break;
+		case sz_long:
+			comprintf("\t test_l_rr(data,data);\n");
+			break;
+		}
+		switch (curi->size) { /* C is a copy of the most significant bit rotated into bit 0's place */
+		case sz_byte:
+			comprintf("\t bt_l_ri(data,0x07);\n");
+			break;
+		case sz_word:
+			comprintf("\t bt_l_ri(data,0x0f);\n");
+			break;
+		case sz_long:
+			comprintf("\t bt_l_ri(data,0x1f);\n");
+			break;
+		}
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	}
+	genastore("data", curi->dmode, "dstreg", curi->size, "data");
+#endif
+}
+
+static void gen_rorw(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROR.W <ea>: memory rotate right by one */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int tmp = scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_RORW(tmp,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	} else {
+		comprintf("\t jnf_RORW(tmp,src);\n");
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) curi;
+	failure; /* no legacy implementation: interpreter handles it */
+#endif
+}
+
+static void gen_roxl(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROXL: rotate left through the X flag */
+	(void) opcode;
+#if defined(USE_JIT2)
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n" /* count and data in the same register is not supported: fall back */
+				" FAIL(1);\n"
+				" return;\n"
+				"} \n");
+		start_brace();
+	}
+	isaddx;
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ROXL_%s(tmp,data,cnt);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+	} else {
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t jnf_ROXL_%s(tmp,data,cnt);\n", ssize);
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
+#else
+	(void) curi;
+	(void) ssize;
+	failure; /* no legacy implementation: interpreter handles it */
+#endif
+}
+
+static void gen_roxlw(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROXL.W <ea>: memory rotate left through X by one */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	isaddx;
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int tmp = scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ROXLW(tmp,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+	} else {
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t jnf_ROXLW(tmp,src);\n");
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) curi;
+	failure; /* no legacy implementation: interpreter handles it */
+#endif
+}
+
+static void gen_roxr(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROXR: rotate right through the X flag */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	mayfail;
+	if (curi->smode == Dreg) {
+		comprintf("if ((uae_u32)srcreg==(uae_u32)dstreg) {\n" /* count and data in the same register is not supported: fall back */
+				" FAIL(1);\n"
+				" return;\n"
+				"} \n");
+		start_brace();
+	}
+	isaddx;
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "cnt", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "data", 1, 0);
+	start_brace();
+	comprintf("\t int tmp=scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ROXR_%s(tmp,data,cnt);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+	} else {
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t jnf_ROXR_%s(tmp,data,cnt);\n", ssize);
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "data");
+#else
+	(void) curi;
+	failure; /* no legacy implementation: interpreter handles it */
+#endif
+}
+
+static void gen_roxrw(uae_u32 opcode, struct instr *curi, const char* ssize) { /* ROXR.W <ea>: memory rotate right through X by one */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	isaddx;
+	comprintf("\t dont_care_flags();\n");
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	start_brace();
+	comprintf("\t int tmp = scratchie++;\n");
+
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_ROXRW(tmp,src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+	} else {
+		comprintf("\t restore_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t jnf_ROXRW(tmp,src);\n");
+	}
+	genastore("tmp", curi->smode, "srcreg", curi->size, "src");
+#else
+	(void) curi;
+	failure; /* no legacy implementation: interpreter handles it */
+#endif
+}
+
+static void gen_sbcd(uae_u32 opcode, struct instr *curi, const char* ssize) { /* SBCD: not compiled -- always falls back to the interpreter */
+#if 0
+#else
+	(void) opcode;
+	(void) curi;
+	(void) ssize;
+	failure;
+	/* I don't think so! */
+#endif
+}
+
+static void gen_scc(uae_u32 opcode, struct instr *curi, const char* ssize) { /* Scc: set byte to 0xff if condition cc holds, else 0x00 */
+	(void) opcode;
+	(void) ssize;
+#if 0
+	genamode(curi->smode, "srcreg", curi->size, "src", 2, 0);
+	start_brace();
+	comprintf("\t int val = scratchie++;\n");
+	switch (curi->cc) {
+	case 0: /* Unconditional set */
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+	case 8:
+	case 9:
+	case 10:
+	case 11:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+		comprintf("\t make_flags_live();\n"); /* Load the flags */
+		comprintf("\t jnf_Scc_ri(val,%d);\n", curi->cc);
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	genastore("val", curi->smode, "srcreg", curi->size, "src");
+#else
+	genamode(curi->smode, "srcreg", curi->size, "src", 2, 0);
+	start_brace();
+	comprintf("\tint val = scratchie++;\n");
+
+	/* We set val to 0 if we really should use 255, and to 1 for real 0 */
+	switch (curi->cc) {
+	case 0: /* Unconditional set */
+		comprintf("\tmov_l_ri(val,0);\n");
+		break;
+	case 1:
+		/* Unconditional not-set */
+		comprintf("\tmov_l_ri(val,1);\n");
+		break;
+	case 8: /* VC */
+		failure;
+		break; /* Work out details! FIXME */
+	case 9: /* VS */
+		failure;
+		break; /* Not critical, though! */
+
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+	case 10:
+	case 11:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+		comprintf("\tmake_flags_live();\n"); /* Load the flags */
+		/* All condition codes can be inverted by changing the LSB */
+		comprintf("\tsetcc(val,%d);\n", cond_codes[curi->cc] ^ 1);
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	comprintf("\tsub_b_ri(val,1);\n"); /* maps the inverted 0/1 to the final 0xff/0x00 */
+	genastore("val", curi->smode, "srcreg", curi->size, "src");
+#endif
+}
+
+static void gen_sub(uae_u32 opcode, struct instr *curi, const char* ssize) { /* SUB: dst -= src */
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+
+	comprintf("\t dont_care_flags();\n");
+	start_brace();
+	// Use tmp register to avoid destroying upper part in .B., .W cases
+	comprintf("\t int tmp=scratchie++;\n");
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_SUB_%s(tmp,dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+		comprintf(
+				"\t if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+	} else {
+		comprintf("\t jnf_SUB_%s(tmp,dst,src);\n", ssize);
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	genflags(flag_sub, curi->size, "", "src", "dst");
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
+static void gen_suba(uae_u32 opcode, struct instr *curi, const char* ssize) { /* SUBA: subtract sign-extended src from address register; flags untouched */
+	(void) opcode;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_long, "dst", 1, 0);
+	start_brace();
+	comprintf("\t jnf_SUBA_%s(dst, src);\n", ssize);
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+#else
+	(void) ssize;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", sz_long, "dst", 1, 0);
+	start_brace();
+	comprintf("\tint tmp=scratchie++;\n");
+	switch (curi->size) {
+	case sz_byte:
+		comprintf("\tsign_extend_8_rr(tmp,src);\n");
+		break;
+	case sz_word:
+		comprintf("\tsign_extend_16_rr(tmp,src);\n");
+		break;
+	case sz_long:
+		comprintf("\ttmp=src;\n"); /* already full width: alias, no copy */
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	comprintf("\tsub_l(dst,tmp);\n");
+	genastore("dst", curi->dmode, "dstreg", sz_long, "dst");
+#endif
+}
+
+static void gen_subx(uae_u32 opcode, struct instr *curi, const char* ssize) { /* SUBX: dst = dst - src - X */
+	(void) opcode;
+#if defined(USE_JIT2)
+	isaddx;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	start_brace();
+	comprintf("\tint tmp=scratchie++;\n");
+	comprintf("\tdont_care_flags();\n");
+	if (!noflags) {
+		comprintf("\t make_flags_live();\n");
+		comprintf("\t restore_inverted_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_SUBX_%s(tmp,dst,src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		duplicate_carry();
+		comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+	} else {
+		comprintf("\t restore_inverted_carry();\n"); /* Reload the X flag into C */
+		comprintf("\t jnf_SUBX(tmp,dst,src);\n");
+	}
+	genastore("tmp", curi->dmode, "dstreg", curi->size, "dst");
+#else
+	(void) ssize;
+	isaddx;
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genamode(curi->dmode, "dstreg", curi->size, "dst", 1, 0);
+	genflags(flag_subx, curi->size, "", "src", "dst");
+	genastore("dst", curi->dmode, "dstreg", curi->size, "dst");
+#endif
+}
+
+static void gen_swap(uae_u32 opcode, struct instr *curi, const char* ssize) { /* SWAP: exchange upper and lower 16 bits of a data register */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", sz_long, "src", 1, 0);
+	comprintf("\t dont_care_flags();\n");
+	start_brace();
+
+	if (!noflags) {
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_SWAP(src);\n");
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+		comprintf("if (!(needed_flags & FLAG_CZNV)) dont_care_flags();\n");
+	} else {
+		comprintf("\t jnf_SWAP(src);\n");
+	}
+	genastore("src", curi->smode, "srcreg", sz_long, "src");
+#else
+	genamode(curi->smode, "srcreg", sz_long, "src", 1, 0);
+	comprintf("\tdont_care_flags();\n");
+	comprintf("\tarm_ROR_l_ri8(src,16);\n"); /* rotate by 16 swaps the halves */
+	genflags(flag_logical, sz_long, "src", "", "");
+	genastore("src", curi->smode, "srcreg", sz_long, "src");
+#endif
+}
+
+static void gen_tst(uae_u32 opcode, struct instr *curi, const char* ssize) { /* TST: set NZ (clear CV) from the operand; no result stored */
+	(void) opcode;
+	(void) ssize;
+#if defined(USE_JIT2)
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	comprintf("\t dont_care_flags();\n");
+	if (!noflags) {
+		start_brace();
+		comprintf("\t start_needflags();\n");
+		comprintf("\t jff_TST_%s(src);\n", ssize);
+		comprintf("\t live_flags();\n");
+		comprintf("\t end_needflags();\n");
+	}
+#else
+	genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+	genflags(flag_logical, curi->size, "src", "", "");
+#endif
+}
+
+static int /* returns zero for success, non-zero for failure */
+gen_opcode(unsigned long int opcode) {
+ struct instr *curi = table68k + opcode;
+ const char* ssize = NULL;
+
+ insn_n_cycles = 2;
+ global_failure = 0;
+ long_opcode = 0;
+ global_isjump = 0;
+ global_iscjump = 0;
+ global_isaddx = 0;
+ global_cmov = 0;
+ global_fpu = 0;
+ global_mayfail = 0;
+ hack_opcode = opcode;
+ endstr[0] = 0;
+
+ start_brace();
+ comprintf("\tuae_u8 scratchie=S1;\n");
+ switch (curi->plev) {
+ case 0: /* not privileged */
+ break;
+ case 1: /* unprivileged only on 68000 */
+ if (cpu_level == 0)
+ break;
+ if (next_cpu_level < 0)
+ next_cpu_level = 0;
+
+ /* fall through */
+ case 2: /* priviledged */
+ failure; /* Easy ones first */
+ break;
+ case 3: /* privileged if size == word */
+ if (curi->size == sz_byte)
+ break;
+ failure;
+ break;
+ }
+ switch (curi->size) {
+ case sz_byte:
+ ssize = "b";
+ break;
+ case sz_word:
+ ssize = "w";
+ break;
+ case sz_long:
+ ssize = "l";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ (void) ssize;
+
+ switch (curi->mnemo) {
+ case i_AND:
+ gen_and(opcode, curi, ssize);
+ break;
+
+ case i_OR:
+ gen_or(opcode, curi, ssize);
+ break;
+
+ case i_EOR:
+ gen_eor(opcode, curi, ssize);
+ break;
+
+ case i_ORSR:
+ gen_orsr(opcode, curi, ssize);
+ break;
+
+ case i_EORSR:
+ gen_eorsr(opcode, curi, ssize);
+ break;
+
+ case i_ANDSR:
+ gen_andsr(opcode, curi, ssize);
+ break;
+
+ case i_SUB:
+ gen_sub(opcode, curi, ssize);
+ break;
+
+ case i_SUBA:
+ gen_suba(opcode, curi, ssize);
+ break;
+
+ case i_SUBX:
+ gen_subx(opcode, curi, ssize);
+ break;
+
+ case i_SBCD:
+ gen_sbcd(opcode, curi, ssize);
+ break;
+
+ case i_ADD:
+ gen_add(opcode, curi, ssize);
+ break;
+
+ case i_ADDA:
+ gen_adda(opcode, curi, ssize);
+ break;
+
+ case i_ADDX:
+ gen_addx(opcode, curi, ssize);
+ break;
+
+ case i_ABCD:
+ gen_abcd(opcode, curi, ssize);
+ break;
+
+ case i_NEG:
+ gen_neg(opcode, curi, ssize);
+ break;
+
+ case i_NEGX:
+ gen_negx(opcode, curi, ssize);
+ break;
+
+ case i_NBCD:
+ gen_nbcd(opcode, curi, ssize);
+ break;
+
+ case i_CLR:
+ gen_clr(opcode, curi, ssize);
+ break;
+
+ case i_NOT:
+ gen_not(opcode, curi, ssize);
+ break;
+
+ case i_TST:
+ gen_tst(opcode, curi, ssize);
+ break;
+
+ case i_BCHG:
+ gen_bchg(opcode, curi, ssize);
+ break;
+
+ case i_BCLR:
+ gen_bclr(opcode, curi, ssize);
+ break;
+
+ case i_BSET:
+ gen_bset(opcode, curi, ssize);
+ break;
+
+ case i_BTST:
+ gen_btst(opcode, curi, ssize);
+ break;
+
+ case i_CMPM:
+ case i_CMP:
+ gen_cmp(opcode, curi, ssize);
+ break;
+
+ case i_CMPA:
+ gen_cmpa(opcode, curi, ssize);
+ break;
+
+ /* The next two are coded a little unconventional, but they are doing
+ * weird things... */
+ case i_MVPRM:
+ isjump;
+ failure;
+ break;
+
+ case i_MVPMR:
+ isjump;
+ failure;
+ break;
+
+ case i_MOVE:
+ gen_move(opcode, curi, ssize);
+ break;
+
+ case i_MOVEA:
+ gen_movea(opcode, curi, ssize);
+ break;
+
+ case i_MVSR2:
+ isjump;
+ failure;
+ break;
+
+ case i_MV2SR:
+ isjump;
+ failure;
+ break;
+
+ case i_SWAP:
+ gen_swap(opcode, curi, ssize);
+ break;
+
+ case i_EXG:
+ gen_exg(opcode, curi, ssize);
+ break;
+
+ case i_EXT:
+ gen_ext(opcode, curi, ssize);
+ break;
+
+ case i_MVMEL:
+ genmovemel(opcode);
+ break;
+
+ case i_MVMLE:
+ genmovemle(opcode);
+ break;
+
+ case i_TRAP:
+ isjump;
+ failure;
+ break;
+
+ case i_MVR2USP:
+ isjump;
+ failure;
+ break;
+
+ case i_MVUSP2R:
+ isjump;
+ failure;
+ break;
+
+ case i_RESET:
+ isjump;
+ failure;
+ break;
+
+ case i_NOP:
+ break;
+
+ case i_STOP:
+ isjump;
+ failure;
+ break;
+
+ case i_RTE:
+ isjump;
+ failure;
+ break;
+
+ case i_RTD:
+ genamode(curi->smode, "srcreg", curi->size, "offs", 1, 0);
+ /* offs is constant */
+ comprintf("\tarm_ADD_l_ri8(offs,4);\n");
+ start_brace();
+ comprintf("\tint newad=scratchie++;\n"
+ "\treadlong(15,newad,scratchie);\n"
+ "\tmov_l_mr((uintptr)®s.pc,newad);\n"
+ "\tget_n_addr_jmp(newad,PC_P,scratchie);\n"
+ "\tmov_l_mr((uintptr)®s.pc_oldp,PC_P);\n"
+ "\tm68k_pc_offset=0;\n"
+ "\tarm_ADD_l(15,offs);\n");
+ gen_update_next_handler();
+ isjump;
+ break;
+
+ case i_LINK:
+ genamode(curi->smode, "srcreg", sz_long, "src", 1, 0);
+ genamode(curi->dmode, "dstreg", curi->size, "offs", 1, 0);
+ comprintf("\tsub_l_ri(15,4);\n"
+ "\twritelong_clobber(15,src,scratchie);\n"
+ "\tmov_l_rr(src,15);\n");
+ if (curi->size == sz_word)
+ comprintf("\tsign_extend_16_rr(offs,offs);\n");
+ comprintf("\tarm_ADD_l(15,offs);\n");
+ genastore("src", curi->smode, "srcreg", sz_long, "src");
+ break;
+
+ case i_UNLK:
+ genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+ comprintf("\tmov_l_rr(15,src);\n"
+ "\treadlong(15,src,scratchie);\n"
+ "\tarm_ADD_l_ri8(15,4);\n");
+ genastore("src", curi->smode, "srcreg", curi->size, "src");
+ break;
+
+ case i_RTS:
+ comprintf("\tint newad=scratchie++;\n"
+ "\treadlong(15,newad,scratchie);\n"
+ "\tmov_l_mr((uintptr)®s.pc,newad);\n"
+ "\tget_n_addr_jmp(newad,PC_P,scratchie);\n"
+ "\tmov_l_mr((uintptr)®s.pc_oldp,PC_P);\n"
+ "\tm68k_pc_offset=0;\n"
+ "\tlea_l_brr(15,15,4);\n");
+ gen_update_next_handler();
+ isjump;
+ break;
+
+ case i_TRAPV:
+ isjump;
+ failure;
+ break;
+
+ case i_RTR:
+ isjump;
+ failure;
+ break;
+
+ case i_JSR:
+ isjump;
+ genamode(curi->smode, "srcreg", curi->size, "src", 0, 0);
+ start_brace();
+ comprintf(
+ "\tuae_u32 retadd=start_pc+((char *)comp_pc_p-(char *)start_pc_p)+m68k_pc_offset;\n");
+ comprintf("\tint ret=scratchie++;\n"
+ "\tmov_l_ri(ret,retadd);\n"
+ "\tsub_l_ri(15,4);\n"
+ "\twritelong_clobber(15,ret,scratchie);\n");
+ comprintf("\tmov_l_mr((uintptr)®s.pc,srca);\n"
+ "\tget_n_addr_jmp(srca,PC_P,scratchie);\n"
+ "\tmov_l_mr((uintptr)®s.pc_oldp,PC_P);\n"
+ "\tm68k_pc_offset=0;\n");
+ gen_update_next_handler();
+ break;
+
+ case i_JMP:
+ isjump;
+ genamode(curi->smode, "srcreg", curi->size, "src", 0, 0);
+ comprintf("\tmov_l_mr((uintptr)®s.pc,srca);\n"
+ "\tget_n_addr_jmp(srca,PC_P,scratchie);\n"
+ "\tmov_l_mr((uintptr)®s.pc_oldp,PC_P);\n"
+ "\tm68k_pc_offset=0;\n");
+ gen_update_next_handler();
+ break;
+
+ case i_BSR:
+ is_const_jump;
+ genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+ start_brace();
+ comprintf(
+ "\tuae_u32 retadd=start_pc+((char *)comp_pc_p-(char *)start_pc_p)+m68k_pc_offset;\n");
+ comprintf("\tint ret=scratchie++;\n"
+ "\tmov_l_ri(ret,retadd);\n"
+ "\tsub_l_ri(15,4);\n"
+ "\twritelong_clobber(15,ret,scratchie);\n");
+ comprintf("\tarm_ADD_l_ri(src,m68k_pc_offset_thisinst+2);\n");
+ comprintf("\tm68k_pc_offset=0;\n");
+ comprintf("\tarm_ADD_l(PC_P,src);\n");
+ comprintf("\tcomp_pc_p=(uae_u8*)get_const(PC_P);\n");
+ break;
+
+ case i_Bcc:
+ comprintf("\tuae_u32 v,v1,v2;\n");
+ genamode(curi->smode, "srcreg", curi->size, "src", 1, 0);
+ /* That source is an immediate, so we can clobber it with abandon */
+ switch (curi->size) {
+ case sz_byte:
+ comprintf("\tsign_extend_8_rr(src,src);\n");
+ break;
+ case sz_word:
+ comprintf("\tsign_extend_16_rr(src,src);\n");
+ break;
+ case sz_long:
+ break;
+ }
+ comprintf(
+ "\tsub_l_ri(src,m68k_pc_offset-m68k_pc_offset_thisinst-2);\n");
+ /* Leave the following as "add" --- it will allow it to be optimized
+ away due to src being a constant ;-) */
+ comprintf("\tarm_ADD_l_ri(src,(uintptr)comp_pc_p);\n");
+ comprintf("\tmov_l_ri(PC_P,(uintptr)comp_pc_p);\n");
+ /* Now they are both constant. Might as well fold in m68k_pc_offset */
+ comprintf("\tarm_ADD_l_ri(src,m68k_pc_offset);\n");
+ comprintf("\tarm_ADD_l_ri(PC_P,m68k_pc_offset);\n");
+ comprintf("\tm68k_pc_offset=0;\n");
+
+ if (curi->cc >= 2) {
+ comprintf("\tv1=get_const(PC_P);\n"
+ "\tv2=get_const(src);\n"
+ "\tregister_branch(v1,v2,%d);\n", cond_codes[curi->cc]);
+ comprintf("\tmake_flags_live();\n"); /* Load the flags */
+ isjump;
+ } else {
+ is_const_jump;
+ }
+
+ switch (curi->cc) {
+ case 0: /* Unconditional jump */
+ comprintf("\tmov_l_rr(PC_P,src);\n");
+ comprintf("\tcomp_pc_p=(uae_u8*)get_const(PC_P);\n");
+ break;
+ case 1:
+ break; /* This is silly! */
+ case 8:
+ failure;
+ break; /* Work out details! FIXME */
+ case 9:
+ failure;
+ break; /* Not critical, though! */
+
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ break;
+
+ case i_LEA:
+ genamode(curi->smode, "srcreg", curi->size, "src", 0, 0);
+ genamode(curi->dmode, "dstreg", curi->size, "dst", 2, 0);
+ genastore("srca", curi->dmode, "dstreg", curi->size, "dst");
+ break;
+
+ case i_PEA:
+ if (table68k[opcode].smode == Areg || table68k[opcode].smode == Aind
+ || table68k[opcode].smode == Aipi
+ || table68k[opcode].smode == Apdi
+ || table68k[opcode].smode == Ad16
+ || table68k[opcode].smode == Ad8r)
+ comprintf("if (srcreg==7) dodgy=1;\n");
+
+ genamode(curi->smode, "srcreg", curi->size, "src", 0, 0);
+ genamode(Apdi, "7", sz_long, "dst", 2, 0);
+ genastore("srca", Apdi, "7", sz_long, "dst");
+ break;
+
+ case i_DBcc:
+ gen_dbcc(opcode, curi, ssize);
+ break;
+
+ case i_Scc:
+ gen_scc(opcode, curi, ssize);
+ break;
+
+ case i_DIVU:
+ isjump;
+ failure;
+ break;
+
+ case i_DIVS:
+ isjump;
+ failure;
+ break;
+
+ case i_MULU:
+ gen_mulu(opcode, curi, ssize);
+ break;
+
+ case i_MULS:
+ gen_muls(opcode, curi, ssize);
+ break;
+
+ case i_CHK:
+ isjump;
+ failure;
+ break;
+
+ case i_CHK2:
+ isjump;
+ failure;
+ break;
+
+ case i_ASR:
+ gen_asr(opcode, curi, ssize);
+ break;
+
+ case i_ASL:
+ gen_asl(opcode, curi, ssize);
+ break;
+
+ case i_LSR:
+ gen_lsr(opcode, curi, ssize);
+ break;
+
+ case i_LSL:
+ gen_lsl(opcode, curi, ssize);
+ break;
+
+ case i_ROL:
+ gen_rol(opcode, curi, ssize);
+ break;
+
+ case i_ROR:
+ gen_ror(opcode, curi, ssize);
+ break;
+
+ case i_ROXL:
+ gen_roxl(opcode, curi, ssize);
+ break;
+
+ case i_ROXR:
+ gen_roxr(opcode, curi, ssize);
+ break;
+
+ case i_ASRW:
+ gen_asrw(opcode, curi, ssize);
+ break;
+
+ case i_ASLW:
+ gen_aslw(opcode, curi, ssize);
+ break;
+
+ case i_LSRW:
+ gen_lsrw(opcode, curi, ssize);
+ break;
+
+ case i_LSLW:
+ gen_lslw(opcode, curi, ssize);
+ break;
+
+ case i_ROLW:
+ gen_rolw(opcode, curi, ssize);
+ break;
+
+ case i_RORW:
+ gen_rorw(opcode, curi, ssize);
+ break;
+
+ case i_ROXLW:
+ gen_roxlw(opcode, curi, ssize);
+ break;
+
+ case i_ROXRW:
+ gen_roxrw(opcode, curi, ssize);
+ break;
+
+ case i_MOVEC2:
+ isjump;
+ failure;
+ break;
+
+ case i_MOVE2C:
+ isjump;
+ failure;
+ break;
+
+ case i_CAS:
+ failure;
+ break;
+
+ case i_CAS2:
+ failure;
+ break;
+
+ case i_MOVES:
+ /* ignore DFC and SFC because we have no MMU */
+ isjump;
+ failure;
+ break;
+
+ case i_BKPT:
+ /* only needed for hardware emulators */
+ isjump;
+ failure;
+ break;
+
+ case i_CALLM:
+ /* not present in 68030 */
+ isjump;
+ failure;
+ break;
+
+ case i_RTM:
+ /* not present in 68030 */
+ isjump;
+ failure;
+ break;
+
+ case i_TRAPcc:
+ isjump;
+ failure;
+ break;
+
+ case i_DIVL:
+ isjump;
+ failure;
+ break;
+
+ case i_MULL:
+ gen_mull(opcode, curi, ssize);
+ break;
+
+ case i_BFTST:
+ case i_BFEXTU:
+ case i_BFCHG:
+ case i_BFEXTS:
+ case i_BFCLR:
+ case i_BFFFO:
+ case i_BFSET:
+ case i_BFINS:
+ failure;
+ break;
+ case i_PACK:
+ failure;
+ break;
+ case i_UNPK:
+ failure;
+ break;
+ case i_TAS:
+ failure;
+ break;
+ case i_FPP:
+ uses_fpu;
+#ifdef USE_JIT_FPU
+ mayfail;
+ comprintf("\tuae_u16 extra=%s;\n",gen_nextiword());
+ swap_opcode();
+ comprintf("\tcomp_fpp_opp(opcode,extra);\n");
+#else
+ failure;
+#endif
+ break;
+ case i_FBcc:
+ uses_fpu;
+#ifdef USE_JIT_FPU
+ isjump;
+ uses_cmov;
+ mayfail;
+ swap_opcode();
+ comprintf("\tcomp_fbcc_opp(opcode);\n");
+#else
+ isjump;
+ failure;
+#endif
+ break;
+ case i_FDBcc:
+ uses_fpu;
+ isjump;
+ failure;
+ break;
+ case i_FScc:
+ uses_fpu;
+#ifdef USE_JIT_FPU
+ mayfail;
+ uses_cmov;
+ comprintf("\tuae_u16 extra=%s;\n",gen_nextiword());
+ swap_opcode();
+ comprintf("\tcomp_fscc_opp(opcode,extra);\n");
+#else
+ failure;
+#endif
+ break;
+ case i_FTRAPcc:
+ uses_fpu;
+ isjump;
+ failure;
+ break;
+ case i_FSAVE:
+ uses_fpu;
+ failure;
+ break;
+ case i_FRESTORE:
+ uses_fpu;
+ failure;
+ break;
+
+ case i_CINVL:
+ case i_CINVP:
+ case i_CINVA:
+ isjump; /* Not really, but it's probably a good idea to stop
+ translating at this point */
+ failure;
+ comprintf("\tflush_icache();\n"); /* Differentiate a bit more? */
+ break;
+ case i_CPUSHL:
+ case i_CPUSHP:
+ case i_CPUSHA:
+ isjump; /* Not really, but it's probably a good idea to stop
+ translating at this point */
+ failure;
+ break;
+
+ case i_MOVE16:
+ gen_move16(opcode, curi);
+ break;
+
+ case i_EMULOP_RETURN:
+ isjump;
+ failure;
+ break;
+
+ case i_EMULOP:
+ failure;
+ break;
+
+ case i_NATFEAT_ID:
+ case i_NATFEAT_CALL:
+ failure;
+ break;
+
+ case i_MMUOP:
+ isjump;
+ failure;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ comprintf("%s", endstr);
+ finish_braces();
+ sync_m68k_pc();
+ if (global_mayfail)
+ comprintf("\tif (failure) m68k_pc_offset=m68k_pc_offset_thisinst;\n");
+ return global_failure;
+}
+
+static void generate_includes(FILE * f) { // write the common #include header into a generated source file (used for both compemu.cpp and compstbl.cpp)
+	fprintf(f, "#include \"sysdeps.h\"\n");
+	fprintf(f, "#include \"m68k.h\"\n");
+	fprintf(f, "#include \"memory-uae.h\"\n");
+	fprintf(f, "#include \"readcpu.h\"\n");
+	fprintf(f, "#include \"newcpu.h\"\n");
+	fprintf(f, "#include \"comptbl.h\"\n");
+	fprintf(f, "#include \"debug.h\"\n");
+}
+
+static int postfix; // numeric suffix for generated op_%x_%d_comp_%s names; set per CPU-level pass in generate_func()
+
+static char *decodeEA (amodes mode, wordsizes size) // render a 68k addressing mode as assembler-style text (e.g. "(d16,An)") for comment output
+{
+	static char buffer[80]; // static scratch buffer: returned to the caller, so this function is not reentrant
+
+	buffer[0] = 0;
+	switch (mode){
+	case Dreg:
+		strcpy (buffer,"Dn");
+		break;
+	case Areg:
+		strcpy (buffer,"An");
+		break;
+	case Aind:
+		strcpy (buffer,"(An)");
+		break;
+	case Aipi:
+		strcpy (buffer,"(An)+");
+		break;
+	case Apdi:
+		strcpy (buffer,"-(An)");
+		break;
+	case Ad16:
+		strcpy (buffer,"(d16,An)");
+		break;
+	case Ad8r:
+		strcpy (buffer,"(d8,An,Xn)");
+		break;
+	case PC16:
+		strcpy (buffer,"(d16,PC)");
+		break;
+	case PC8r:
+		strcpy (buffer,"(d8,PC,Xn)");
+		break;
+	case absw:
+		strcpy (buffer,"(xxx).W");
+		break;
+	case absl:
+		strcpy (buffer,"(xxx).L");
+		break;
+	case imm:
+		switch (size){ // generic immediate: width comes from the operation size
+		case sz_byte:
+			strcpy (buffer,"#<data>.B");
+			break;
+		case sz_word:
+			strcpy (buffer,"#<data>.W");
+			break;
+		case sz_long:
+			strcpy (buffer,"#<data>.L");
+			break;
+		default:
+			break;
+		}
+		break;
+	case imm0: // imm0/imm1/imm2 carry a fixed width independent of the operation size
+		strcpy (buffer,"#<data>.B");
+		break;
+	case imm1:
+		strcpy (buffer,"#<data>.W");
+		break;
+	case imm2:
+		strcpy (buffer,"#<data>.L");
+		break;
+	case immi: // immediate embedded in the opcode word (quick forms)
+		strcpy (buffer,"#<data>");
+		break;
+
+	default:
+		break;
+	}
+	return buffer;
+}
+
+static char *outopcode (const char *name, int opcode) // build a disassembly-style description like "MOVE.W Dn,(An)" for a generated-code comment
+{
+	static char out[100]; // static scratch buffer: returned to the caller, not reentrant
+	struct instr *ins;
+
+	ins = &table68k[opcode];
+	strcpy (out, name);
+	if (ins->smode == immi) // quick-immediate forms get the conventional "Q" suffix (MOVEQ, ADDQ, ...)
+		strcat (out, "Q");
+	if (ins->size == sz_byte)
+		strcat (out,".B");
+	if (ins->size == sz_word)
+		strcat (out,".W");
+	if (ins->size == sz_long)
+		strcat (out,".L");
+	strcat (out," ");
+	if (ins->suse)
+		strcat (out, decodeEA (ins->smode, ins->size));
+	if (ins->duse) {
+		if (ins->suse) strcat (out,","); // separator only when both operands are present
+		strcat (out, decodeEA (ins->dmode, ins->size));
+	}
+	return out;
+}
+
+
+static void generate_one_opcode(int rp, int noflags) { // emit the JIT compile handler for opcode_map[rp] plus its comptbl table entry and header prototype
+	int i;
+	uae_u16 smsk, dmsk;
+	int opcode = opcode_map[rp];
+	int aborted = 0;
+	int have_srcreg = 0;
+	int have_dstreg = 0;
+	const char *name;
+
+	if (table68k[opcode].mnemo == i_ILLG || table68k[opcode].clev > cpu_level) // nothing to emit for illegal opcodes or ones newer than this CPU level
+		return;
+
+	for (i = 0; lookuptab[i].name[0]; i++) { // locate the mnemonic's printable name
+		if (table68k[opcode].mnemo == lookuptab[i].mnemo)
+			break;
+	}
+
+	if (table68k[opcode].handler != -1) // presumably this opcode is routed to another opcode's handler -- skip; TODO confirm against readcpu
+		return;
+
+	switch (table68k[opcode].stype) { // source-field type determines the bit mask used to extract it from the opcode word
+	case 0:
+		smsk = 7;
+		break;
+	case 1:
+		smsk = 255;
+		break;
+	case 2:
+		smsk = 15;
+		break;
+	case 3:
+		smsk = 7;
+		break;
+	case 4:
+		smsk = 7;
+		break;
+	case 5:
+		smsk = 63;
+		break;
+	case 6:
+		smsk = 255;
+		break;
+	case 7:
+		smsk = 3;
+		break;
+	default:
+		assert(0);
+		break;
+	}
+	dmsk = 7; // destination field is always a 3-bit register number
+
+	next_cpu_level = -1;
+	if (table68k[opcode].suse && table68k[opcode].smode != imm
+			&& table68k[opcode].smode != imm0 && table68k[opcode].smode != imm1
+			&& table68k[opcode].smode != imm2 && table68k[opcode].smode != absw
+			&& table68k[opcode].smode != absl && table68k[opcode].smode != PC8r
+			&& table68k[opcode].smode != PC16) { // only register-based source modes need a srcreg variable
+		have_srcreg = 1;
+		if (table68k[opcode].spos == -1) { // register number is fixed in the decode table, not encoded in the opcode word
+			if (((int) table68k[opcode].sreg) >= 128)
+				comprintf("\tuae_s32 srcreg = (uae_s32)(uae_s8)%d;\n",
+						(int) table68k[opcode].sreg);
+			else
+				comprintf("\tuae_s32 srcreg = %d;\n",
+						(int) table68k[opcode].sreg);
+		} else {
+			char source[100];
+			int pos = table68k[opcode].spos;
+
+			comprintf(
+					"#if defined(HAVE_GET_WORD_UNSWAPPED) && !defined(FULLMMU)\n"); // opcode fetched byte-swapped: field position is corrected with pos ^ 8
+
+			if (pos < 8 && (smsk >> (8 - pos)) != 0)
+				sprintf(source, "(((opcode >> %d) | (opcode << %d)) & %d)",
+						pos ^ 8, 8 - pos, dmsk); // NOTE(review): masks with dmsk while the sibling branches use smsk -- looks like a copy bug (matches upstream gencomp); verify
+			else if (pos != 8)
+				sprintf(source, "((opcode >> %d) & %d)", pos ^ 8, smsk);
+			else
+				sprintf(source, "(opcode & %d)", smsk);
+
+			if (table68k[opcode].stype == 3)
+				comprintf("\tuae_u32 srcreg = imm8_table[%s];\n", source); // stype 3: 8 -> value table for ADDQ/SUBQ-style immediates
+			else if (table68k[opcode].stype == 1)
+				comprintf("\tuae_u32 srcreg = (uae_s32)(uae_s8)%s;\n", source); // stype 1: sign-extend the 8-bit field
+			else
+				comprintf("\tuae_u32 srcreg = %s;\n", source);
+
+			comprintf("#else\n");
+
+			if (pos)
+				sprintf(source, "((opcode >> %d) & %d)", pos, smsk);
+			else
+				sprintf(source, "(opcode & %d)", smsk);
+
+			if (table68k[opcode].stype == 3)
+				comprintf("\tuae_s32 srcreg = imm8_table[%s];\n", source);
+			else if (table68k[opcode].stype == 1)
+				comprintf("\tuae_s32 srcreg = (uae_s32)(uae_s8)%s;\n", source);
+			else
+				comprintf("\tuae_s32 srcreg = %s;\n", source);
+
+			comprintf("#endif\n");
+		}
+	}
+	if (table68k[opcode].duse
+	/* Yes, the dmode can be imm, in case of LINK or DBcc */
+	&& table68k[opcode].dmode != imm && table68k[opcode].dmode != imm0
+			&& table68k[opcode].dmode != imm1 && table68k[opcode].dmode != imm2
+			&& table68k[opcode].dmode != absw
+			&& table68k[opcode].dmode != absl) {
+		have_dstreg = 1;
+		if (table68k[opcode].dpos == -1) { // fixed destination register from the decode table
+			if (((int) table68k[opcode].dreg) >= 128)
+				comprintf("\tuae_s32 dstreg = (uae_s32)(uae_s8)%d;\n",
+						(int) table68k[opcode].dreg);
+			else
+				comprintf("\tuae_s32 dstreg = %d;\n",
+						(int) table68k[opcode].dreg);
+		} else {
+			int pos = table68k[opcode].dpos;
+
+			comprintf(
+					"#if defined(HAVE_GET_WORD_UNSWAPPED) && !defined(FULLMMU)\n"); // same byte-swapped-fetch correction as for the source field
+
+			if (pos < 8 && (dmsk >> (8 - pos)) != 0)
+				comprintf(
+						"\tuae_u32 dstreg = ((opcode >> %d) | (opcode << %d)) & %d;\n",
+						pos ^ 8, 8 - pos, dmsk);
+			else if (pos != 8)
+				comprintf("\tuae_u32 dstreg = (opcode >> %d) & %d;\n", pos ^ 8,
+						dmsk);
+			else
+				comprintf("\tuae_u32 dstreg = opcode & %d;\n", dmsk);
+
+			comprintf("#else\n");
+
+			if (pos)
+				comprintf("\tuae_u32 dstreg = (opcode >> %d) & %d;\n", pos,
+						dmsk);
+			else
+				comprintf("\tuae_u32 dstreg = opcode & %d;\n", dmsk);
+
+			comprintf("#endif\n");
+		}
+	}
+
+	if (have_srcreg && have_dstreg
+			&& (table68k[opcode].dmode == Areg || table68k[opcode].dmode == Aind
+					|| table68k[opcode].dmode == Aipi
+					|| table68k[opcode].dmode == Apdi
+					|| table68k[opcode].dmode == Ad16
+					|| table68k[opcode].dmode == Ad8r)
+			&& (table68k[opcode].smode == Areg || table68k[opcode].smode == Aind
+					|| table68k[opcode].smode == Aipi
+					|| table68k[opcode].smode == Apdi
+					|| table68k[opcode].smode == Ad16
+					|| table68k[opcode].smode == Ad8r)) {
+		comprintf("\tuae_u32 dodgy=(srcreg==(uae_s32)dstreg);\n"); // "dodgy": source and destination may name the same address register
+	} else {
+		comprintf("\tuae_u32 dodgy=0;\n");
+	}
+	comprintf("\tuae_u32 m68k_pc_offset_thisinst=m68k_pc_offset;\n"); // generated code records the pc offset at the start of this instruction
+	comprintf("\tm68k_pc_offset+=2;\n");
+
+	aborted = gen_opcode(opcode);
+	{
+		int flags = 0; // pack generator attributes into the comptbl flags word
+		if (global_isjump)
+			flags |= 1;
+		if (long_opcode)
+			flags |= 2;
+		if (global_cmov)
+			flags |= 4;
+		if (global_isaddx)
+			flags |= 8;
+		if (global_iscjump)
+			flags |= 16;
+		if (global_fpu)
+			flags |= 32;
+
+		comprintf("}\n");
+
+		name = lookuptab[i].name;
+		if (aborted) { // generation failed: emit a NULL table stub and drop the buffered code
+			fprintf(stblfile, "{ NULL, 0x%08x, %d }, /* %s */\n", opcode, flags, name); // NOTE(review): opcode/flags printed hex,dec here but dec,hex in the success path -- cosmetic inconsistency
+			com_discard();
+		} else {
+			const char *tbl = noflags ? "nf" : "ff";
+			fprintf(stblfile,
+					"{ op_%x_%d_comp_%s, %d, 0x%08x }, /* %s */\n",
+					opcode, postfix, tbl, opcode, flags, name);
+			fprintf(headerfile, "extern compop_func op_%x_%d_comp_%s;\n",
+					opcode, postfix, tbl);
+			printf ("/* %s */\n", outopcode (name, opcode));
+			printf(
+					"void REGPARAM2 op_%x_%d_comp_%s(uae_u32 opcode) /* %s */\n{\n",
+					opcode, postfix, tbl, name);
+			com_flush(); // write the buffered handler body to stdout (compemu.cpp)
+		}
+	}
+	opcode_next_clev[rp] = next_cpu_level;
+	opcode_last_postfix[rp] = postfix;
+}
+
+static void generate_func(int noflags) { // generate one full opcode table variant: ff (flag-generating) when noflags==0, nf otherwise
+	int i, j, rp;
+	const char *tbl = noflags ? "nf" : "ff";
+
+	using_prefetch = 0; // JIT handlers never use the prefetch/exception-3 emulation paths
+	using_exception_3 = 0;
+	for (i = 0; i < 1; i++) /* We only do one level! */
+	{
+		cpu_level = 4 - i; // single pass at the highest supported CPU level
+		postfix = i;
+
+		fprintf(stblfile, "const struct comptbl op_smalltbl_%d_comp_%s[] = {\n",
+				postfix, tbl);
+
+		/* sam: this is for people with low memory (eg. me :)) */
+		printf("\n"
+				"#if !defined(PART_1) && !defined(PART_2) && "
+				"!defined(PART_3) && !defined(PART_4) && "
+				"!defined(PART_5) && !defined(PART_6) && "
+				"!defined(PART_7) && !defined(PART_8)"
+				"\n"
+				"#define PART_1 1\n"
+				"#define PART_2 1\n"
+				"#define PART_3 1\n"
+				"#define PART_4 1\n"
+				"#define PART_5 1\n"
+				"#define PART_6 1\n"
+				"#define PART_7 1\n"
+				"#define PART_8 1\n"
+				"#endif\n\n");
+
+		rp = 0;
+		for (j = 1; j <= 8; ++j) { // split output into 8 #ifdef PART_n chunks so it can be compiled piecewise
+			int k = (j * nr_cpuop_funcs) / 8;
+			printf("#ifdef PART_%d\n", j);
+			for (; rp < k; rp++)
+				generate_one_opcode(rp, noflags);
+			printf("#endif\n\n");
+		}
+
+		fprintf(stblfile, "{ 0, 65536, 0 }};\n"); // table terminator entry
+	}
+
+}
+
+#if (defined(OS_cygwin) || defined(OS_mingw)) && defined(EXTENDED_SIGSEGV)
+void cygwin_mingw_abort() // forward to the real abort() after undoing any abort macro redirection on cygwin/mingw builds
+{
+#undef abort
+	abort();
+}
+#endif
+
+int main(void) // generator entry point: writes comptbl.h, compstbl.cpp and (via stdout redirection) compemu.cpp
+{
+	init_table68k();
+
+	opcode_map = (int *) malloc(sizeof(int) * nr_cpuop_funcs); // NOTE(review): malloc results are not checked before use
+	opcode_last_postfix = (int *) malloc(sizeof(int) * nr_cpuop_funcs);
+	opcode_next_clev = (int *) malloc(sizeof(int) * nr_cpuop_funcs);
+	counts = (unsigned long *) malloc(65536 * sizeof(unsigned long));
+	read_counts();
+
+	/* It would be a lot nicer to put all in one file (we'd also get rid of
+	 * cputbl.h that way), but cpuopti can't cope. That could be fixed, but
+	 * I don't dare to touch the 68k version. */
+
+	headerfile = fopen("comptbl.h", "wb"); // NOTE(review): fopen result unchecked (also for compstbl.cpp below)
+	fprintf (headerfile, ""
+	"extern const struct comptbl op_smalltbl_0_comp_nf[];\n"
+	"extern const struct comptbl op_smalltbl_0_comp_ff[];\n"
+	"");
+
+	stblfile = fopen("compstbl.cpp", "wb");
+	if (freopen("compemu.cpp", "wb", stdout) == NULL) // all printf() output from here on lands in compemu.cpp
+	{
+		assert(0);
+	}
+
+	generate_includes(stdout);
+	generate_includes(stblfile);
+
+	printf("#include \"compiler/compemu.h\"\n");
+
+	noflags = 0; // pass 1: flag-generating ("ff") handlers
+	generate_func(noflags);
+
+	free(opcode_map);
+	free(opcode_last_postfix);
+	free(opcode_next_clev);
+	free(counts);
+
+	opcode_map = (int *) malloc(sizeof(int) * nr_cpuop_funcs); // rebuild the tables so read_counts() starts fresh for the second pass
+	opcode_last_postfix = (int *) malloc(sizeof(int) * nr_cpuop_funcs);
+	opcode_next_clev = (int *) malloc(sizeof(int) * nr_cpuop_funcs);
+	counts = (unsigned long *) malloc(65536 * sizeof(unsigned long));
+	read_counts();
+	noflags = 1; // pass 2: no-flags ("nf") handlers
+	generate_func(noflags);
+
+	free(opcode_map);
+	free(opcode_last_postfix);
+	free(opcode_next_clev);
+	free(counts);
+
+	free(table68k);
+	fclose(stblfile);
+	fclose(headerfile);
+	return 0;
+}
#define DRIVESOUND
#define GFXFILTER
+#if defined(_M_ARM64) || defined(_M_ARM64EC)
+#define __arm__
+#define MSVC_LONG_DOUBLE
+#else
#define X86_MSVC_ASSEMBLY
//#define OPTIMIZED_FLAGS
#define MSVC_LONG_DOUBLE
#ifndef __i386__
#define __i386__
#endif
+#endif
#define WINDOWS
#define ZLIB_WINAPI
#define PACKAGE_STRING "WinUAE"
return 0;
}
-uae_u64 spincount;
+uae_s64 spincount;
extern bool calculated_scanline;
void target_spin(int total)
if (total > 10)
total = 10;
while (total-- >= 0) {
- uae_u64 v1 = __rdtsc();
+ uae_s64 v1 = read_processor_time_rdtsc();
v1 += spincount;
- while (v1 > __rdtsc());
+ while (v1 > read_processor_time_rdtsc());
}
}
return -13;
}
-extern uae_u64 spincount;
+extern uae_s64 spincount;
bool calculated_scanline = true;
int target_get_display_scanline(int displayindex)
sl = -1;
return sl;
} else {
- static uae_u64 lastrdtsc;
+ static uae_s64 lastrdtsc;
static int lastvpos;
if (spincount == 0 || currprefs.m68k_speed >= 0) {
lastrdtsc = 0;
lastvpos = target_get_display_scanline2(displayindex);
return lastvpos;
}
- uae_u64 v = __rdtsc();
+ uae_s64 v = read_processor_time_rdtsc();
if (lastrdtsc > v)
return lastvpos;
lastvpos = target_get_display_scanline2(displayindex);
- lastrdtsc = __rdtsc() + spincount * 4;
+ lastrdtsc = read_processor_time_rdtsc() + spincount * 4;
return lastvpos;
}
}
return t;
}
-static frame_time_t read_processor_time_rdtsc(void)
+uae_s64 read_processor_time_rdtsc(void) // now non-static with a 64-bit return; declared in the uae_time header (see header hunk above in this patch)
{
-	uae_u32 foo = 0;
-#if defined(X86_MSVC_ASSEMBLY)
-	uae_u32 bar;
-	__asm
-	{
-		rdtsc
-		mov foo, eax
-		mov bar, edx
-	}
-	/* very high speed CPU's RDTSC might overflow without this.. */
-	frame_time_t out;
-	out = ((uae_u64)foo << 32) | bar;
-	out >>= 6;
+#ifdef __arm__
+	return read_processor_time_qpf(); // no x86 TSC on ARM builds: fall back to the QPF-based timer
+#else
+	return __rdtsc(); // compiler intrinsic replaces the 32-bit inline-asm path, which returned only the truncated low word
#endif
-	return foo;
}
uae_time_t uae_time(void)