FPP_TO_INT fpp_to_int;
FPP_FROM_INT fpp_from_int;
-FPP_TO_SINGLE fpp_to_single_xn;
-FPP_TO_SINGLE fpp_to_single_x;
-FPP_FROM_SINGLE fpp_from_single_x;
-
-FPP_TO_DOUBLE fpp_to_double_xn;
-FPP_TO_DOUBLE fpp_to_double_x;
-FPP_FROM_DOUBLE fpp_from_double_x;
-
-FPP_TO_EXTEN fpp_to_exten_x;
-FPP_FROM_EXTEN fpp_from_exten_x;
+FPP_TO_SINGLE fpp_to_single;
+FPP_FROM_SINGLE fpp_from_single;
+FPP_TO_DOUBLE fpp_to_double;
+FPP_FROM_DOUBLE fpp_from_double;
+FPP_TO_EXTEN fpp_to_exten;
+FPP_FROM_EXTEN fpp_from_exten;
+FPP_TO_EXTEN fpp_to_exten_fmovem;
+FPP_FROM_EXTEN fpp_from_exten_fmovem;
FPP_A fpp_normalize;
FPP_AB fpp_sgldiv;
FPP_AB fpp_sglmul;
FPP_AB fpp_cmp;
+FPP_AB fpp_tst;
+FPP_AB fpp_move;
#define DEBUG_FPP 0
#define EXCEPTION_FPP 1
}
}
-void to_single(fpdata *fpd, uae_u32 wrd1)
-{
-#if 0 // now done in get_fp_value
- // automatically fix denormals if 6888x
- if (currprefs.fpu_model == 68881 || currprefs.fpu_model == 68882)
- fpp_to_single_xn(fpd, wrd1);
- else
-#endif
- fpp_to_single_x(fpd, wrd1);
-}
-static uae_u32 from_single(fpdata *fpd)
-{
- return fpp_from_single_x(fpd);
-}
-void to_double(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2)
-{
-#if 0 // now done in get_fp_value
- // automatically fix denormals if 6888x
- if (currprefs.fpu_model == 68881 || currprefs.fpu_model == 68882)
- fpp_to_double_xn(fpd, wrd1, wrd2);
- else
-#endif
- fpp_to_double_x(fpd, wrd1, wrd2);
-}
-static void from_double(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2)
-{
- fpp_from_double_x(fpd, wrd1, wrd2);
-}
-
-void to_exten(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
-{
-#if 0 // now done in get_fp_value
- // automatically fix unnormals if 6888x
- if (currprefs.fpu_model == 68881 || currprefs.fpu_model == 68882) {
- normalize_exten(&wrd1, &wrd2, &wrd3);
- }
-#endif
- fpp_to_exten_x(fpd, wrd1, wrd2, wrd3);
-}
-static void to_exten_fmovem(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
-{
- fpp_to_exten_x(fpd, wrd1, wrd2, wrd3);
-}
-static void from_exten(fpdata *fpd, uae_u32 * wrd1, uae_u32 * wrd2, uae_u32 * wrd3)
-{
- fpp_from_exten_x(fpd, wrd1, wrd2, wrd3);
-}
-
/* Floating Point Control Register (FPCR)
*
* Exception Enable Byte
int vector = 0;
int vtable[8] = { 49, 49, 50, 51, 53, 52, 54, 48 };
int i;
- for (i = 7; i >= 0; i--) {
+ // BSUN is handled separately
+ for (i = 6; i >= 0; i--) {
if (exception & (1 << i)) {
vector = vtable[i];
break;
}
}
-
static void fpsr_set_result(fpdata *result)
{
#ifdef JIT
regs.fpsr &= 0x00fffff8; // clear cc
if (fpp_is_nan (result)) {
regs.fpsr |= FPSR_CC_NAN;
- // check if result is signaling nan
- if (fpp_is_snan(result))
- regs.fpsr |= FPSR_SNAN;
- } else {
- if (fpp_is_zero(result))
- regs.fpsr |= FPSR_CC_Z;
- if (fpp_is_infinity (result))
- regs.fpsr |= FPSR_CC_I;
+ } else if (fpp_is_zero(result)) {
+ regs.fpsr |= FPSR_CC_Z;
+ } else if (fpp_is_infinity (result)) {
+ regs.fpsr |= FPSR_CC_I;
}
if (fpp_is_neg(result))
regs.fpsr |= FPSR_CC_N;
fpp_clear_status();
}
-static void fpsr_make_status(void)
+static uae_u32 fpsr_make_status(void)
{
// get external status
fpp_get_status(®s.fpsr);
if (regs.fpsr & (FPSR_OVFL | FPSR_INEX2 | FPSR_INEX1))
regs.fpsr |= FPSR_AE_INEX; // INEX = INEX1 || INEX2 || OVFL
- fpsr_check_exception();
+ return (regs.fpsr & regs.fpcr & (FPSR_SNAN | FPSR_OPERR | FPSR_DZ));
}
static int fpsr_set_bsun(void)
static void fpnan (fpdata *fpd)
{
- to_exten(fpd, xhex_nan[0], xhex_nan[1], xhex_nan[2]);
+ fpp_to_exten(fpd, xhex_nan[0], xhex_nan[1], xhex_nan[2]);
}
static void fpclear (fpdata *fpd)
f[2] += fpp_cr[entry].rndoff[(regs.fpcr >> 4) & 3];
}
- to_exten_fmovem(fpd, f[0], f[1], f[2]);
+ fpp_to_exten_fmovem(fpd, f[0], f[1], f[2]);
if (((regs.fpcr >> 6) & 3) == 1)
fpp_roundsgl(fpd);
if (((wrd[0] >> 16) & 0x7fff) == 0x7fff) {
// infinity has extended exponent and all 0 packed fraction
// nans are copies bit by bit
- to_exten_fmovem(fpd, wrd[0], wrd[1], wrd[2]);
+ fpp_to_exten_fmovem(fpd, wrd[0], wrd[1], wrd[2]);
return;
}
if (!(wrd[0] & 0xf) && !wrd[1] && !wrd[2]) {
// exponent is not cared about, if mantissa is zero
wrd[0] &= 0x80000000;
- to_exten_fmovem(fpd, wrd[0], wrd[1], wrd[2]);
+ fpp_to_exten_fmovem(fpd, wrd[0], wrd[1], wrd[2]);
return;
}
if (fpp_is_nan (src)) {
// copied bit by bit, no conversion
- from_exten(src, &wrd[0], &wrd[1], &wrd[2]);
+ fpp_from_exten_fmovem(src, &wrd[0], &wrd[1], &wrd[2]);
return;
}
if (fpp_is_infinity (src)) {
// extended exponent and all 0 packed fraction
- from_exten(src, &wrd[0], &wrd[1], &wrd[2]);
+ fpp_from_exten_fmovem(src, &wrd[0], &wrd[1], &wrd[2]);
wrd[1] = wrd[2] = 0;
return;
}
fpset(src, (uae_s32) m68k_dreg (regs, reg));
break;
case 1:
- to_single (src, m68k_dreg (regs, reg));
+ fpp_to_single (src, m68k_dreg (regs, reg));
if (normalize_or_fault_if_no_denormal_support_pre(opcode, extra, 0, oldpc, src, 0))
return -1;
break;
fpset(src, (uae_s32) (doext ? exts[0] : x_cp_get_long (ad)));
break;
case 1:
- to_single (src, (doext ? exts[0] : x_cp_get_long (ad)));
+ fpp_to_single (src, (doext ? exts[0] : x_cp_get_long (ad)));
if (normalize_or_fault_if_no_denormal_support_pre(opcode, extra, 0, oldpc, src, 0))
return -1;
break;
wrd2 = (doext ? exts[1] : x_cp_get_long (ad));
ad += 4;
wrd3 = (doext ? exts[2] : x_cp_get_long (ad));
- to_exten (src, wrd1, wrd2, wrd3);
+ fpp_to_exten (src, wrd1, wrd2, wrd3);
if (normalize_or_fault_if_no_denormal_support_pre(opcode, extra, 0, oldpc, src, 2))
return -1;
}
wrd1 = (doext ? exts[0] : x_cp_get_long (ad));
ad += 4;
wrd2 = (doext ? exts[1] : x_cp_get_long (ad));
- to_double (src, wrd1, wrd2);
+ fpp_to_double (src, wrd1, wrd2);
if (normalize_or_fault_if_no_denormal_support_pre(opcode, extra, 0, oldpc, src, 1))
return -1;
}
m68k_dreg (regs, reg) = (uae_u32)fpp_to_int (value, 2);
break;
case 1:
- m68k_dreg (regs, reg) = from_single (value);
+ m68k_dreg (regs, reg) = fpp_from_single (value);
break;
default:
return 0;
case 1:
if (normalize_or_fault_if_no_denormal_support_pre(opcode, extra, ad, oldpc, value, 2))
return -1;
- x_cp_put_long(ad, from_single(value));
+ x_cp_put_long(ad, fpp_from_single(value));
break;
case 2:
{
uae_u32 wrd1, wrd2, wrd3;
if (normalize_or_fault_if_no_denormal_support_pre(opcode, extra, ad, oldpc, value, 2))
return 1;
- from_exten(value, &wrd1, &wrd2, &wrd3);
+ fpp_from_exten(value, &wrd1, &wrd2, &wrd3);
x_cp_put_long (ad, wrd1);
ad += 4;
x_cp_put_long (ad, wrd2);
uae_u32 wrd1, wrd2;
if (normalize_or_fault_if_no_denormal_support_pre(opcode, extra, ad, oldpc, value, 1))
return -1;
- from_double(value, &wrd1, &wrd2);
+ fpp_from_double(value, &wrd1, &wrd2);
x_cp_put_long (ad, wrd1);
ad += 4;
x_cp_put_long (ad, wrd2);
if (regs.fpu_exp_state > 1) {
uae_u32 src1[3];
- from_exten (®s.exp_src1, &src1[0], &src1[1], &src1[2]);
+ fpp_from_exten_fmovem (®s.exp_src1, &src1[0], &src1[1], &src1[2]);
frame_id = 0x0000e000 | src1[0];
frame_v1 = src1[1];
frame_v2 = src1[2];
uae_u32 stag, dtag;
uae_u32 extra = regs.exp_extra;
- from_exten(®s.exp_src1, &src1[0], &src1[1], &src1[2]);
- from_exten(®s.exp_src2, &src2[0], &src2[1], &src2[2]);
+ fpp_from_exten_fmovem(®s.exp_src1, &src1[0], &src1[1], &src1[2]);
+ fpp_from_exten_fmovem(®s.exp_src2, &src2[0], &src2[1], &src2[2]);
stag = get_ftag(src1[0], src1[1], src1[2], regs.exp_size);
dtag = get_ftag(src2[0], src2[1], src2[2], -1);
if ((extra & 0x7f) == 4) // FSQRT 4->5
regs.fpu_exp_state = 0;
} else if (ff == 0xe0) {
regs.fpu_exp_state = 1;
- to_exten (®s.exp_src1, d & 0xffff0000, v1, v2);
+ fpp_to_exten (®s.exp_src1, d & 0xffff0000, v1, v2);
} else if (ff) {
write_log (_T("FRESTORE invalid frame format %X!\n"), (d >> 8) & 0xff);
} else {
else
reg = r;
if (list & 0x80) {
- from_exten(®s.fp[reg], &wrd[0], &wrd[1], &wrd[2]);
+ fpp_from_exten_fmovem(®s.fp[reg], &wrd[0], &wrd[1], &wrd[2]);
if (incr < 0)
ad -= 3 * 4;
for (int i = 0; i < 3; i++) {
else
reg = r;
if (list & 0x80) {
- from_exten(®s.fp[reg], &wrd1, &wrd2, &wrd3);
+ fpp_from_exten_fmovem(®s.fp[reg], &wrd1, &wrd2, &wrd3);
if (incr < 0)
ad -= 3 * 4;
x_put_long(ad + 0, wrd1);
mmu030_fmovem_store[i] = wrd[i];
mmu030_state[0]++;
if (i == 2)
- to_exten (®s.fp[reg], mmu030_fmovem_store[0], mmu030_fmovem_store[1], wrd[2]);
+ fpp_to_exten (®s.fp[reg], mmu030_fmovem_store[0], mmu030_fmovem_store[1], wrd[2]);
}
}
if (incr > 0)
wrd3 = x_get_long (ad + 8);
if (incr > 0)
ad += 3 * 4;
- to_exten (®s.fp[reg], wrd1, wrd2, wrd3);
+ fpp_to_exten (®s.fp[reg], wrd1, wrd2, wrd3);
}
list <<= 1;
}
return ad;
}
-static bool arithmetic(fpdata *src, int reg, int extra)
+static bool arithmetic(fpdata *src, fpdata *dst, int extra)
{
uae_u64 q = 0;
uae_u8 s = 0;
- fpdata *dst = ®s.fp[reg];
-
- // SNAN -> QNAN if SNAN interrupt is not enabled
- if (fpp_is_snan(src) && !(regs.fpcr & 0x4000)) {
- fpp_unset_snan(src);
- }
switch (extra & 0x7f)
{
case 0x00: /* FMOVE */
case 0x40:
case 0x44:
- *dst = *src;
+ fpp_move(src, dst);
break;
case 0x01: /* FINT */
fpp_int(src, dst);
break;
case 0x21: /* FMOD */
fpp_mod(dst, src, &q, &s);
+ if (fpsr_make_status())
+ return false;
fpsr_set_quotient(q, s);
break;
case 0x22: /* FADD */
break;
case 0x24: /* FSGLDIV */
fpp_sgldiv(dst, src);
+ if (fpsr_make_status())
+ return false;
fpsr_set_result(dst);
return true;
case 0x25: /* FREM */
fpp_rem(dst, src, &q, &s);
+ if (fpsr_make_status())
+ return false;
fpsr_set_quotient(q, s);
break;
case 0x26: /* FSCALE */
break;
case 0x27: /* FSGLMUL */
fpp_sglmul(dst, src);
+ if (fpsr_make_status())
+ return false;
fpsr_set_result(dst);
return true;
case 0x28: /* FSUB */
case 0x35: /* FSINCOS */
case 0x36: /* FSINCOS */
case 0x37: /* FSINCOS */
- fpp_cos(src, ®s.fp[extra & 7]);
- fpp_sin(src, dst);
+ fpp_cos(src, dst);
if (((regs.fpcr >> 6) & 3) == 1)
- fpp_round32(®s.fp[extra & 7]);
+ fpp_round32(dst);
else if (((regs.fpcr >> 6) & 3) == 2)
- fpp_round64(®s.fp[extra & 7]);
+ fpp_round64(dst);
+ regs.fp[extra & 7] = *dst;
+ fpp_sin(src, dst);
break;
case 0x38: /* FCMP */
{
- fpdata v = *dst;
- fpp_cmp(&v, src);
- fpsr_set_result(&v);
- return true;
+ fpp_cmp(dst, src);
+ if (fpsr_make_status())
+ return false;
+ fpsr_set_result(dst);
+ return false;
}
case 0x3a: /* FTST */
{
- fpsr_set_result(src);
- return true;
+ fpp_tst(dst, src);
+ if (fpsr_make_status())
+ return false;
+ fpsr_set_result(dst);
+ return false;
}
default:
+ write_log (_T("Unknown FPU arithmetic function (%02x)\n"), extra & 0x7f);
return false;
}
fpp_round64(dst);
}
+ if (fpsr_make_status())
+ return false;
+
fpsr_set_result(dst);
return true;
}
{
int reg = -1;
int v;
- fpdata srcd;
+ fpdata src, dst;
uaecptr pc = m68k_getpc () - 4;
uaecptr ad = 0;
if ((extra & 0xfc00) == 0x5c00) {
if (fault_if_no_fpu (opcode, extra, 0, pc))
return;
- if (fault_if_unimplemented_680x0 (opcode, extra, ad, pc, &srcd, reg))
+ if (fault_if_unimplemented_680x0 (opcode, extra, ad, pc, &src, reg))
return;
fpsr_clear_status();
fpu_get_constant(®s.fp[reg], extra);
if (fault_if_unimplemented_6888x (opcode, extra, pc))
return;
- v = get_fp_value (opcode, extra, &srcd, pc, &ad);
+ fpsr_clear_status();
+
+ v = get_fp_value (opcode, extra, &src, pc, &ad);
if (v <= 0) {
if (v == 0)
fpu_noinst (opcode, pc);
}
// get_fp_value() checked this, but only if EA was nonzero (non-register)
- if (fault_if_unimplemented_680x0 (opcode, extra, ad, pc, &srcd, reg))
+ if (fault_if_unimplemented_680x0 (opcode, extra, ad, pc, &src, reg))
return;
regs.fpiar = pc;
+ dst = regs.fp[reg];
+
if((extra & 0x30) == 0x20 || (extra & 0x7f) == 0x38) { // dyadic operation
- if(normalize_or_fault_if_no_denormal_support_pre(opcode, extra, ad, pc, ®s.fp[reg], 2))
+ if(normalize_or_fault_if_no_denormal_support_pre(opcode, extra, ad, pc, &dst, 2))
return;
}
fpsr_clear_status();
- v = arithmetic(&srcd, reg, extra);
- if (!v)
- fpu_noinst (opcode, pc);
- fpsr_make_status();
+ v = arithmetic(&src, &dst, extra);
+ if (v)
+ regs.fp[reg] = dst;
+ fpsr_check_exception();
return;
default:
break;
w1 = restore_u16 () << 16;
w2 = restore_u32 ();
w3 = restore_u32 ();
- to_exten (®s.fp[i], w1, w2, w3);
+ fpp_to_exten_fmovem(®s.fp[i], w1, w2, w3);
}
regs.fpcr = restore_u32 ();
regs.fpsr = restore_u32 ();
w1 = restore_u16() << 16;
w2 = restore_u32();
w3 = restore_u32();
- to_exten(®s.exp_src1, w1, w2, w3);
+ fpp_to_exten_fmovem(®s.exp_src1, w1, w2, w3);
w1 = restore_u16() << 16;
w2 = restore_u32();
w3 = restore_u32();
- to_exten(®s.exp_src2, w1, w2, w3);
+ fpp_to_exten_fmovem(®s.exp_src2, w1, w2, w3);
regs.exp_pack[0] = restore_u32();
regs.exp_pack[1] = restore_u32();
regs.exp_pack[2] = restore_u32();
save_u32 (currprefs.fpu_model);
save_u32 (0x80000000 | 0x40000000 | (regs.fpu_state == 0 ? 1 : 0) | (regs.fpu_exp_state ? 2 : 0) | (regs.fpu_exp_state > 1 ? 4 : 0));
for (i = 0; i < 8; i++) {
- from_exten (®s.fp[i], &w1, &w2, &w3);
+ fpp_from_exten_fmovem(®s.fp[i], &w1, &w2, &w3);
save_u16 (w1 >> 16);
save_u32 (w2);
save_u32 (w3);
save_u32 (-1);
save_u32 (0);
- from_exten(®s.exp_src1, &w1, &w2, &w3);
+ fpp_from_exten_fmovem(®s.exp_src1, &w1, &w2, &w3);
save_u16(w1 >> 16);
save_u32(w2);
save_u32(w3);
- from_exten(®s.exp_src2, &w1, &w2, &w3);
+ fpp_from_exten_fmovem(®s.exp_src2, &w1, &w2, &w3);
save_u16(w1 >> 16);
save_u32(w2);
save_u32(w3);
fpd->fp = fp;
}
-static void fp_to_single_xn(fpdata *fpd, uae_u32 wrd1)
+static void fp_to_single(fpdata *fpd, uae_u32 wrd1)
{
union {
float f;
val.u = wrd1;
fpd->fp = (fptype) val.f;
}
-static void fp_to_single_x(fpdata *fpd, uae_u32 wrd1)
-{
- union {
- float f;
- uae_u32 u;
- } val;
-
- val.u = wrd1;
- fpd->fp = (fptype) val.f;
-}
-static uae_u32 fp_from_single_x(fpdata *fpd)
+static uae_u32 fp_from_single(fpdata *fpd)
{
union {
float f;
return val.u;
}
-static void fp_to_double_xn(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2)
-{
- union {
- double d;
- uae_u32 u[2];
- } val;
-
-#ifdef WORDS_BIGENDIAN
- val.u[0] = wrd1;
- val.u[1] = wrd2;
-#else
- val.u[1] = wrd1;
- val.u[0] = wrd2;
-#endif
- fpd->fp = (fptype) val.d;
-}
-static void fp_to_double_x(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2)
+static void fp_to_double(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2)
{
union {
double d;
#endif
fpd->fp = (fptype) val.d;
}
-static void fp_from_double_x(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2)
+static void fp_from_double(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2)
{
union {
double d;
#endif
}
#ifdef USE_LONG_DOUBLE
-static void fp_to_exten_x(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
+static void fp_to_exten(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
{
union {
long double ld;
#endif
fpd->fp = val.ld;
}
-static void fp_from_exten_x(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2, uae_u32 *wrd3)
+static void fp_from_exten(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2, uae_u32 *wrd3)
{
union {
long double ld;
#endif
}
#else // if !USE_LONG_DOUBLE
-static void fp_to_exten_x(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
+static void fp_to_exten(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
{
#if 1
floatx80 fx80;
fx80.high = wrd1 >> 16;
fx80.low = (((uae_u64)wrd2) << 32) | wrd3;
float64 f = floatx80_to_float64(fx80, &fs);
- fp_to_double_x(fpd, f >> 32, (uae_u32)f);
+ fp_to_double(fpd, f >> 32, (uae_u32)f);
#else
double frac;
if ((wrd1 & 0x7fff0000) == 0 && wrd2 == 0 && wrd3 == 0) {
fpd->fp = ldexp (frac, ((wrd1 >> 16) & 0x7fff) - 16383);
#endif
}
-static void fp_from_exten_x(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2, uae_u32 *wrd3)
+static void fp_from_exten(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2, uae_u32 *wrd3)
{
#if 1
uae_u32 w1, w2;
- fp_from_double_x(fpd, &w1, &w2);
+ fp_from_double(fpd, &w1, &w2);
floatx80 f = float64_to_floatx80(((uae_u64)w1 << 32) | w2, &fs);
*wrd1 = f.high << 16;
*wrd2 = f.low >> 32;
fptype fp = src->fp;
if (fp_is_nan(src)) {
uae_u32 w1, w2, w3;
- fp_from_exten_x(src, &w1, &w2, &w3);
+ fp_from_exten(src, &w1, &w2, &w3);
uae_s64 v = 0;
fpsr_set_exception(FPSR_OPERR);
// return mantissa
a->fp = v;
}
+/* FTST for the native backend: copy the operand into the destination so
+ * fpsr_set_result() can classify it. The native backend keeps its value in
+ * fpd->fp (see fp_move below and the fp_to_*/fp_from_* helpers), so the
+ * fp member must be copied — fpx is the softfloat-only representation and
+ * copying it here would leave the native value stale. */
+static void fp_tst(fpdata *a, fpdata *b)
+{
+ a->fp = b->fp;
+}
+
+/* FMOVE for the native backend: plain value copy, no rounding applied. */
+static void fp_move(fpdata *src, fpdata *dst)
+{
+ dst->fp = src->fp;
+}
+
void fp_init_native(void)
{
set_floatx80_rounding_precision(80, &fs);
fpp_to_int = fp_to_int;
fpp_from_int = fp_from_int;
- fpp_to_single_xn = fp_to_single_xn;
- fpp_to_single_x = fp_to_single_x;
- fpp_from_single_x = fp_from_single_x;
-
- fpp_to_double_xn = fp_to_double_xn;
- fpp_to_double_x = fp_to_double_x;
- fpp_from_double_x = fp_from_double_x;
-
- fpp_to_exten_x = fp_to_exten_x;
- fpp_from_exten_x = fp_from_exten_x;
+ fpp_to_single = fp_to_single;
+ fpp_from_single = fp_from_single;
+ fpp_to_double = fp_to_double;
+ fpp_from_double = fp_from_double;
+ fpp_to_exten = fp_to_exten;
+ fpp_from_exten = fp_from_exten;
+ fpp_to_exten_fmovem = fp_to_exten;
+ fpp_from_exten_fmovem = fp_from_exten;
fpp_roundsgl = fp_roundsgl;
fpp_rounddbl = fp_rounddbl;
fpp_sgldiv = fp_sgldiv;
fpp_sglmul = fp_sglmul;
fpp_cmp = fp_cmp;
+ fpp_tst = fp_tst;
+ fpp_move = fp_move;
}
* Andreas Grabher and Toni Wilen
*
*/
-
#define __USE_ISOC9X /* We might be able to pick up a NaN */
#define SOFTFLOAT_FAST_INT64
static void fp_get_status(uae_u32 *status)
{
- if (fs.float_exception_flags & float_flag_invalid)
- *status |= 0x2000;
- if (fs.float_exception_flags & float_flag_divbyzero)
- *status |= 0x0400;
- if (fs.float_exception_flags & float_flag_overflow)
- *status |= 0x1000;
- if (fs.float_exception_flags & float_flag_underflow)
- *status |= 0x0800;
- if (fs.float_exception_flags & float_flag_inexact)
- *status |= 0x0200;
+ if (fs.float_exception_flags & float_flag_signaling) {
+ *status |= FPSR_SNAN;
+ } else {
+ if (fs.float_exception_flags & float_flag_invalid)
+ *status |= FPSR_OPERR;
+ if (fs.float_exception_flags & float_flag_divbyzero)
+ *status |= FPSR_DZ;
+ if (fs.float_exception_flags & float_flag_overflow)
+ *status |= FPSR_OVFL;
+ if (fs.float_exception_flags & float_flag_underflow)
+ *status |= FPSR_UNFL;
+ if (fs.float_exception_flags & float_flag_inexact)
+ *status |= FPSR_INEX2;
+ }
}
STATIC_INLINE void fp_clear_status(void)
{
return fsout;
}
-static void softfloat_set(fpdata *fpd, uae_u32 *f)
-{
- fpd->fpx.high = (uae_u16)(f[0] >> 16);
- fpd->fpx.low = ((uae_u64)f[1] << 32) | f[2];
-}
-
-static void softfloat_get(fpdata *fpd, uae_u32 *f)
-{
- f[0] = (uae_u32)(fpd->fpx.high << 16);
- f[1] = fpd->fpx.low >> 32;
- f[2] = (uae_u32)fpd->fpx.low;
-}
-
/* Functions for detecting float type */
static bool fp_is_snan(fpdata *fpd)
{
}
}
-static void to_single_xn(fpdata *fpd, uae_u32 wrd1)
-{
- float32 f = wrd1;
- fpd->fpx = float32_to_floatx80(f, &fs); // automatically fix denormals
-}
-static void to_single_x(fpdata *fpd, uae_u32 wrd1)
+static void to_single(fpdata *fpd, uae_u32 wrd1)
{
float32 f = wrd1;
fpd->fpx = float32_to_floatx80_allowunnormal(f, &fs);
}
-static uae_u32 from_single_x(fpdata *fpd)
+static uae_u32 from_single(fpdata *fpd)
{
float32 f = floatx80_to_float32(fpd->fpx, &fs);
return f;
}
-static void to_double_xn(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2)
-{
- float64 f = ((float64)wrd1 << 32) | wrd2;
- fpd->fpx = float64_to_floatx80(f, &fs); // automatically fix denormals
-}
-static void to_double_x(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2)
+static void to_double(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2)
{
float64 f = ((float64)wrd1 << 32) | wrd2;
fpd->fpx = float64_to_floatx80_allowunnormal(f, &fs);
}
-static void from_double_x(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2)
+static void from_double(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2)
{
float64 f = floatx80_to_float64(fpd->fpx, &fs);
*wrd1 = f >> 32;
*wrd2 = (uae_u32)f;
}
-static void to_exten_x(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
+static void to_exten(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
+{
+ fpd->fpx.high = (uae_u16)(wrd1 >> 16);
+ fpd->fpx.low = ((uae_u64)wrd2 << 32) | wrd3;
+}
+static void from_exten(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2, uae_u32 *wrd3)
{
- uae_u32 wrd[3] = { wrd1, wrd2, wrd3 };
- softfloat_set(fpd, wrd);
+ floatx80 f = floatx80_to_floatx80(fpd->fpx, &fs);
+ *wrd1 = (uae_u32)(f.high << 16);
+ *wrd2 = f.low >> 32;
+ *wrd3 = (uae_u32)f.low;
}
-static void from_exten_x(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2, uae_u32 *wrd3)
+
+/* FMOVEM pack: store the raw 96-bit extended image bit-for-bit, with no
+ * normalization or NaN fixing — FMOVEM must never alter the bit pattern. */
+static void to_exten_fmovem(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3)
{
- uae_u32 wrd[3];
- softfloat_get(fpd, wrd);
- *wrd1 = wrd[0];
- *wrd2 = wrd[1];
- *wrd3 = wrd[2];
+ fpd->fpx.high = (uae_u16)(wrd1 >> 16);
+ fpd->fpx.low = ((uae_u64)wrd2 << 32) | wrd3;
}
+/* FMOVEM unpack: bit-exact inverse of to_exten_fmovem; sign/exponent go to
+ * the upper 16 bits of the first word, mantissa to the remaining two words. */
+static void from_exten_fmovem(fpdata *fpd, uae_u32 *wrd1, uae_u32 *wrd2, uae_u32 *wrd3)
+{
+ *wrd1 = (uae_u32)(fpd->fpx.high << 16);
+ *wrd2 = fpd->fpx.low >> 32;
+ *wrd3 = (uae_u32)fpd->fpx.low;
+}
static uae_s64 to_int(fpdata *src, int size)
{
// round to float
static void fp_round32(fpdata *fpd)
{
+ if (fp_is_nan(fpd))
+ return;
float32 f = floatx80_to_float32(fpd->fpx, &fs);
fpd->fpx = float32_to_floatx80(f, &fs);
}
// round to double
static void fp_round64(fpdata *fpd)
{
+ if (fp_is_nan(fpd))
+ return;
float64 f = floatx80_to_float64(fpd->fpx, &fs);
fpd->fpx = float64_to_floatx80(f, &fs);
}
/* Arithmetic functions */
+/* FMOVE via softfloat: routed through floatx80_move() so the active
+ * rounding precision is honored and exception flags are raised in fs. */
+static void fp_move(fpdata *src, fpdata *dst)
+{
+ dst->fpx = floatx80_move(src->fpx, &fs);
+}
+
static void fp_int(fpdata *a, fpdata *dst)
{
dst->fpx = floatx80_round_to_int(a->fpx, &fs);
{
a->fpx = floatx80_cmp(a->fpx, b->fpx, &fs);
}
+/* FTST via softfloat: floatx80_tst() passes the value through while
+ * raising the SNAN flag in fs for signaling NaN operands. */
+static void fp_tst(fpdata *a, fpdata *b)
+{
+ a->fpx = floatx80_tst(b->fpx, &fs);
+}
/* FIXME: create softfloat functions for following arithmetics */
fpp_to_int = to_int;
fpp_from_int = from_int;
- fpp_to_single_xn = to_single_xn;
- fpp_to_single_x = to_single_x;
- fpp_from_single_x = from_single_x;
-
- fpp_to_double_xn = to_double_xn;
- fpp_to_double_x = to_double_x;
- fpp_from_double_x = from_double_x;
-
- fpp_to_exten_x = to_exten_x;
- fpp_from_exten_x = from_exten_x;
+ fpp_to_single = to_single;
+ fpp_from_single = from_single;
+ fpp_to_double = to_double;
+ fpp_from_double = from_double;
+ fpp_to_exten = to_exten;
+ fpp_from_exten = from_exten;
+ fpp_to_exten_fmovem = to_exten_fmovem;
+ fpp_from_exten_fmovem = from_exten_fmovem;
fpp_roundsgl = fp_roundsgl;
fpp_rounddbl = fp_rounddbl;
fpp_sgldiv = fp_sgldiv;
fpp_sglmul = fp_sglmul;
fpp_cmp = fp_cmp;
+ fpp_tst = fp_tst;
+ fpp_move = fp_move;
}
extern void init_fpucw_x87(void);
#endif
-void to_single(fpdata *fpd, uae_u32 wrd1);
-void to_double(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2);
-void to_exten(fpdata *fpd, uae_u32 wrd1, uae_u32 wrd2, uae_u32 wrd3);
-
typedef void (*FPP_ABQS)(fpdata*, fpdata*, uae_u64*, uae_u8*);
typedef void (*FPP_AB)(fpdata*, fpdata*);
typedef void (*FPP_A)(fpdata*);
extern FPP_TO_INT fpp_to_int;
extern FPP_FROM_INT fpp_from_int;
-extern FPP_TO_SINGLE fpp_to_single_xn;
-extern FPP_TO_SINGLE fpp_to_single_x;
-extern FPP_FROM_SINGLE fpp_from_single_x;
-
-extern FPP_TO_DOUBLE fpp_to_double_xn;
-extern FPP_TO_DOUBLE fpp_to_double_x;
-extern FPP_FROM_DOUBLE fpp_from_double_x;
-
-extern FPP_TO_EXTEN fpp_to_exten_x;
-extern FPP_FROM_EXTEN fpp_from_exten_x;
+extern FPP_TO_SINGLE fpp_to_single;
+extern FPP_FROM_SINGLE fpp_from_single;
+extern FPP_TO_DOUBLE fpp_to_double;
+extern FPP_FROM_DOUBLE fpp_from_double;
+extern FPP_TO_EXTEN fpp_to_exten;
+extern FPP_FROM_EXTEN fpp_from_exten;
+extern FPP_TO_EXTEN fpp_to_exten_fmovem;
+extern FPP_FROM_EXTEN fpp_from_exten_fmovem;
extern FPP_A fpp_roundsgl;
extern FPP_A fpp_rounddbl;
extern FPP_AB fpp_sgldiv;
extern FPP_AB fpp_sglmul;
extern FPP_AB fpp_cmp;
+extern FPP_AB fpp_tst;
+extern FPP_AB fpp_move;
{
if (status->flush_inputs_to_zero) {
if (extractFloat32Exp(a) == 0 && extractFloat32Frac(a) != 0) {
- float_raise(float_flag_input_denormal, status);
+ //float_raise(float_flag_input_denormal, status);
return make_float32(float32_val(a) & 0x80000000);
}
}
}
if ( zExp < 0 ) {
if (status->flush_to_zero) {
- float_raise(float_flag_output_denormal, status);
+ //float_raise(float_flag_output_denormal, status);
return packFloat32(zSign, 0, 0);
}
isTiny =
{
if (status->flush_inputs_to_zero) {
if (extractFloat64Exp(a) == 0 && extractFloat64Frac(a) != 0) {
- float_raise(float_flag_input_denormal, status);
+ //float_raise(float_flag_input_denormal, status);
return make_float64(float64_val(a) & (1ULL << 63));
}
}
}
if ( zExp < 0 ) {
if (status->flush_to_zero) {
- float_raise(float_flag_output_denormal, status);
+ //float_raise(float_flag_output_denormal, status);
return packFloat64(zSign, 0, 0);
}
isTiny =
if ( zExp <= 0 ) {
#endif
if (status->flush_to_zero) {
- float_raise(float_flag_output_denormal, status);
+ //float_raise(float_flag_output_denormal, status);
return packFloatx80(zSign, 0, 0);
}
isTiny =
}
if ( zExp < 0 ) {
if (status->flush_to_zero) {
- float_raise(float_flag_output_denormal, status);
+ //float_raise(float_flag_output_denormal, status);
return packFloat128(zSign, 0, 0, 0);
}
isTiny =
aExp = extractFloat32Exp( a );
aSign = extractFloat32Sign( a );
if ( aExp == 0xFF ) {
- if (aSig) {
- return commonNaNToFloatx80(float32ToCommonNaN(a, status), status);
- }
- return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
+ aSig |= 0x00800000;
+ return packFloatx80( aSign, 0x7FFF, ( (uint64_t) aSig )<<40 );
}
if ( aExp == 0 ) {
if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 );
}
if ( aExp == 0 ) {
if (status->flush_to_zero) {
- if (aSig | bSig) {
- float_raise(float_flag_output_denormal, status);
- }
+// if (aSig | bSig) {
+// float_raise(float_flag_output_denormal, status);
+// }
return packFloat32(zSign, 0, 0);
}
return packFloat32( zSign, 0, ( aSig + bSig )>>6 );
}
/* Exact zero plus a denorm */
if (status->flush_to_zero) {
- float_raise(float_flag_output_denormal, status);
+ //float_raise(float_flag_output_denormal, status);
return packFloat32(cSign ^ signflip, 0, 0);
}
}
aExp = extractFloat64Exp( a );
aSign = extractFloat64Sign( a );
if ( aExp == 0x7FF ) {
- if (aSig) {
- return commonNaNToFloatx80(float64ToCommonNaN(a, status), status);
- }
- return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
+ return packFloatx80( aSign, 0x7FFF, ( aSig | LIT64( 0x0010000000000000 ) )<<11 );
}
if ( aExp == 0 ) {
if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 );
}
if ( aExp == 0 ) {
if (status->flush_to_zero) {
- if (aSig | bSig) {
- float_raise(float_flag_output_denormal, status);
- }
+// if (aSig | bSig) {
+// float_raise(float_flag_output_denormal, status);
+// }
return packFloat64(zSign, 0, 0);
}
return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
}
/* Exact zero plus a denorm */
if (status->flush_to_zero) {
- float_raise(float_flag_output_denormal, status);
+// float_raise(float_flag_output_denormal, status);
return packFloat64(cSign ^ signflip, 0, 0);
}
}
}
+#ifdef SOFTFLOAT_68K // 31-01-2017
+/*----------------------------------------------------------------------------
+ | Returns the result of converting the extended double-precision floating-
+ | point value `a' to the extended double-precision floating-point format.
+ | The conversion is performed according to the IEC/IEEE Standard for Binary
+ | Floating-Point Arithmetic.
+ *----------------------------------------------------------------------------*/
+
+floatx80 floatx80_to_floatx80( floatx80 a, float_status *status )
+{
+    flag aSign = extractFloatx80Sign( a );
+    int32_t aExp = extractFloatx80Exp( a );
+    uint64_t aSig = extractFloatx80Frac( a );
+
+    /* NaN operands (quiet or signaling) go through the usual propagation */
+    if ( aExp == 0x7FFF && (uint64_t) ( aSig<<1 ) ) {
+        return propagateFloatx80NaN( a, a, status );
+    }
+    /* Denormals are normalized, then rounded to the active precision */
+    if ( aExp == 0 && aSig != 0 ) {
+        return normalizeRoundAndPackFloatx80( status->floatx80_rounding_precision, aSign, aExp, aSig, 0, status );
+    }
+    /* Everything else is already in canonical extended format */
+    return a;
+
+}
+#endif
+
/*----------------------------------------------------------------------------
| Returns the result of converting the extended double-precision floating-
| point value `a' to the quadruple-precision floating-point format. The
shiftCount = countLeadingZeros64( aSig );
- if ( shiftCount > aExp ) {
- shiftCount = aExp;
- aExp = 0;
- } else {
- aExp -= shiftCount;
- }
+ if ( shiftCount > aExp ) shiftCount = aExp;
+
+ aExp -= shiftCount;
aSig <<= shiftCount;
return packFloatx80( aSign, aExp, aSig );
if ( ( aExp == 0x7FFF && (uint64_t) ( aSig<<1 ) ) ||
( bExp == 0x7FFF && (uint64_t) ( bSig<<1 ) ) ) {
+ if ( floatx80_is_signaling_nan( a, status ) || floatx80_is_signaling_nan( b, status ) )
+ float_raise( float_flag_signaling, status );
return packFloatx80(0, 0x7FFF, floatx80_default_nan_low); }
if ( bExp < aExp ) return packFloatx80( aSign, 0x3FFF, LIT64( 0x8000000000000000 ) );
return packFloatx80( aSign, 0x3FFF, LIT64( 0x8000000000000000 ) );
}
+
+floatx80 floatx80_tst( floatx80 a, float_status *status )
+{
+    /* FTST only sets condition codes; the operand is returned untouched.
+     * A signaling NaN still raises the SNAN exception flag. */
+    if ( floatx80_is_signaling_nan( a, status ) ) {
+        float_raise( float_flag_signaling, status );
+    }
+    return a;
+}
+
+/*----------------------------------------------------------------------------
+ | FMOVE: returns `a' rounded to the current rounding precision. NaNs are
+ | propagated, zeros and normal values pass through, and denormals are
+ | normalized, rounded and repacked (raising underflow/inexact as needed).
+ *----------------------------------------------------------------------------*/
+
+floatx80 floatx80_move( floatx80 a, float_status *status )
+{
+ flag aSign;
+ int32_t aExp;
+ uint64_t aSig;
+ aSig = extractFloatx80Frac( a );
+ aExp = extractFloatx80Exp( a );
+ aSign = extractFloatx80Sign( a );
+
+ if ( aExp == 0x7FFF ) {
+ if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, a, status );
+ return a;
+ }
+ if ( aExp == 0 ) {
+ if ( aSig == 0 ) return a;
+ /* Result was previously computed and discarded: the rounded value must
+ be returned, otherwise FMOVE of a denormal ignores the rounding
+ precision and the unrounded input leaks through. */
+ return normalizeRoundAndPackFloatx80( status->floatx80_rounding_precision, aSign, aExp, aSig, 0, status );
+ }
+ return a;
+}
+
#endif // End of addition for Previous
/*----------------------------------------------------------------------------
float_flag_overflow = 8,
float_flag_underflow = 16,
float_flag_inexact = 32,
- float_flag_input_denormal = 64,
- float_flag_output_denormal = 128
+ float_flag_signaling = 64
+// float_flag_input_denormal = 64,
+// float_flag_output_denormal = 128
};
typedef struct float_status {
int64_t floatx80_to_int64_round_to_zero(floatx80, float_status *status);
float32 floatx80_to_float32(floatx80, float_status *status);
float64 floatx80_to_float64(floatx80, float_status *status);
+#ifdef SOFTFLOAT_68K
+floatx80 floatx80_to_floatx80( floatx80, float_status *status);
+#endif
float128 floatx80_to_float128(floatx80, float_status *status);
floatx80 floatx80_round_to_int_toward_zero( floatx80 a, float_status *status);
floatx80 floatx80_sglmul( floatx80 a, floatx80 b, float_status *status);
floatx80 floatx80_sgldiv( floatx80 a, floatx80 b, float_status *status);
floatx80 floatx80_cmp( floatx80 a, floatx80 b, float_status *status);
+floatx80 floatx80_tst( floatx80 a, float_status *status );
+floatx80 floatx80_move( floatx80 a, float_status *status);
/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision operations.