--- /dev/null
+/* Accurate fp support for CGEN-based simulators.
+ Copyright (C) 1999 Cygnus Solutions.
+
+ This implementation assumes fp values are carried as raw bit patterns:
+ typedef USI SF;
+ typedef UDI DF;
+
+ TODO:
+ - lazy encoding/decoding
+ - status checking via the error callback for every operation
+ (currently only addsf and adddf report errors)
+ - proper rounding
+*/
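+
+/* For illustration only (not used by the code below): under the typedefs
+ above the SF value 1.0f travels as the USI bit pattern 0x3f800000, so
+ e.g. addsf (fpu, 0x3f800000, 0x3f800000) yields 0x40000000 (2.0f).
+ Every routine below decodes its operands with sim_fpu_32to/sim_fpu_64to
+ and re-encodes its result with sim_fpu_to32/sim_fpu_to64. */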
+
+#include "sim-main.h"
+#include "sim-fpu.h"
+
+/* SF mode support */
+
+static SF
+addsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned32 res;
+ sim_fpu_status status;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ status = sim_fpu_add (&ans, &op1, &op2);
+ if (status != 0)
+ (*fpu->ops->error) (fpu, status);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+subsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ sim_fpu_sub (&ans, &op1, &op2);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+mulsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ sim_fpu_mul (&ans, &op1, &op2);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+divsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ sim_fpu_div (&ans, &op1, &op2);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+negsf (CGEN_FPU* fpu, SF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_neg (&ans, &op1);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+abssf (CGEN_FPU* fpu, SF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_abs (&ans, &op1);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+sqrtsf (CGEN_FPU* fpu, SF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_sqrt (&ans, &op1);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+invsf (CGEN_FPU* fpu, SF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_inv (&ans, &op1);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+minsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ sim_fpu_min (&ans, &op1, &op2);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static SF
+maxsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ sim_fpu_max (&ans, &op1, &op2);
+ sim_fpu_to32 (&res, &ans);
+
+ return res;
+}
+
+static CGEN_FP_CMP
+cmpsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+
+ if (sim_fpu_is_nan (&op1)
+ || sim_fpu_is_nan (&op2))
+ return FP_CMP_NAN;
+
+ /* Compare the decoded values; the raw bit patterns do not order
+ correctly once signs are involved. */
+ if (sim_fpu_is_lt (&op1, &op2))
+ return FP_CMP_LT;
+ if (sim_fpu_is_gt (&op1, &op2))
+ return FP_CMP_GT;
+ return FP_CMP_EQ;
+}
+
+static int
+eqsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ return sim_fpu_is_eq (&op1, &op2);
+}
+
+static int
+nesf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ return sim_fpu_is_ne (&op1, &op2);
+}
+
+static int
+ltsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ return sim_fpu_is_lt (&op1, &op2);
+}
+
+static int
+lesf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ return sim_fpu_is_le (&op1, &op2);
+}
+
+static int
+gtsf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ return sim_fpu_is_gt (&op1, &op2);
+}
+
+static int
+gesf (CGEN_FPU* fpu, SF x, SF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_32to (&op2, y);
+ return sim_fpu_is_ge (&op1, &op2);
+}
+
+static SF
+floatsisf (CGEN_FPU* fpu, SI x)
+{
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_i32to (&ans, x, sim_fpu_round_near);
+ sim_fpu_to32 (&res, &ans);
+ return res;
+}
+
+static SF
+ufloatsisf (CGEN_FPU* fpu, USI x)
+{
+ sim_fpu ans;
+ unsigned32 res;
+
+ sim_fpu_u32to (&ans, x, sim_fpu_round_near);
+ sim_fpu_to32 (&res, &ans);
+ return res;
+}
+
+static SI
+fixsfsi (CGEN_FPU* fpu, SF x)
+{
+ sim_fpu op1;
+ signed32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_to32i (&res, &op1, sim_fpu_round_near);
+ return res;
+}
+
+static USI
+ufixsfsi (CGEN_FPU* fpu, SF x)
+{
+ sim_fpu op1;
+ unsigned32 res;
+
+ sim_fpu_32to (&op1, x);
+ sim_fpu_to32u (&res, &op1, sim_fpu_round_near);
+ return res;
+}
+\f
+/* DF mode support */
+
+static DF
+adddf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned64 res;
+ sim_fpu_status status;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ status = sim_fpu_add (&ans, &op1, &op2);
+ if (status != 0)
+ (*fpu->ops->error) (fpu, status);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+subdf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ sim_fpu_sub (&ans, &op1, &op2);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+muldf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ sim_fpu_mul (&ans, &op1, &op2);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+divdf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ sim_fpu_div (&ans, &op1, &op2);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+negdf (CGEN_FPU* fpu, DF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_neg (&ans, &op1);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+absdf (CGEN_FPU* fpu, DF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_abs (&ans, &op1);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+sqrtdf (CGEN_FPU* fpu, DF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_sqrt (&ans, &op1);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+invdf (CGEN_FPU* fpu, DF x)
+{
+ sim_fpu op1;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_inv (&ans, &op1);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+mindf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ sim_fpu_min (&ans, &op1, &op2);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static DF
+maxdf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+ sim_fpu ans;
+ unsigned64 res;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ sim_fpu_max (&ans, &op1, &op2);
+ sim_fpu_to64 (&res, &ans);
+
+ return res;
+}
+
+static CGEN_FP_CMP
+cmpdf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+
+ if (sim_fpu_is_nan (&op1)
+ || sim_fpu_is_nan (&op2))
+ return FP_CMP_NAN;
+
+ /* Compare the decoded values; the raw bit patterns do not order
+ correctly once signs are involved. */
+ if (sim_fpu_is_lt (&op1, &op2))
+ return FP_CMP_LT;
+ if (sim_fpu_is_gt (&op1, &op2))
+ return FP_CMP_GT;
+ return FP_CMP_EQ;
+}
+
+static int
+eqdf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ return sim_fpu_is_eq (&op1, &op2);
+}
+
+static int
+nedf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ return sim_fpu_is_ne (&op1, &op2);
+}
+
+static int
+ltdf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ return sim_fpu_is_lt (&op1, &op2);
+}
+
+static int
+ledf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ return sim_fpu_is_le (&op1, &op2);
+}
+
+static int
+gtdf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ return sim_fpu_is_gt (&op1, &op2);
+}
+
+static int
+gedf (CGEN_FPU* fpu, DF x, DF y)
+{
+ sim_fpu op1;
+ sim_fpu op2;
+
+ sim_fpu_64to (&op1, x);
+ sim_fpu_64to (&op2, y);
+ return sim_fpu_is_ge (&op1, &op2);
+}
+\f
+/* Initialize FPU to use the accurate fp library. */
+
+void
+cgen_init_accurate_fpu (SIM_CPU* cpu, CGEN_FPU* fpu, CGEN_FPU_ERROR_FN* error)
+{
+ CGEN_FP_OPS* o;
+
+ fpu->owner = cpu;
+ /* ??? small memory leak, not freed by sim_close */
+ fpu->ops = (CGEN_FP_OPS*) xmalloc (sizeof (CGEN_FP_OPS));
+
+ o = fpu->ops;
+ memset (o, 0, sizeof (*o));
+
+ o->error = error;
+
+ o->addsf = addsf;
+ o->subsf = subsf;
+ o->mulsf = mulsf;
+ o->divsf = divsf;
+ o->negsf = negsf;
+ o->abssf = abssf;
+ o->sqrtsf = sqrtsf;
+ o->invsf = invsf;
+ o->minsf = minsf;
+ o->maxsf = maxsf;
+ o->cmpsf = cmpsf;
+ o->eqsf = eqsf;
+ o->nesf = nesf;
+ o->ltsf = ltsf;
+ o->lesf = lesf;
+ o->gtsf = gtsf;
+ o->gesf = gesf;
+
+ o->adddf = adddf;
+ o->subdf = subdf;
+ o->muldf = muldf;
+ o->divdf = divdf;
+ o->negdf = negdf;
+ o->absdf = absdf;
+ o->sqrtdf = sqrtdf;
+ o->invdf = invdf;
+ o->mindf = mindf;
+ o->maxdf = maxdf;
+ o->cmpdf = cmpdf;
+ o->eqdf = eqdf;
+ o->nedf = nedf;
+ o->ltdf = ltdf;
+ o->ledf = ledf;
+ o->gtdf = gtdf;
+ o->gedf = gedf;
+ o->floatsisf = floatsisf;
+ o->ufloatsisf = ufloatsisf;
+ o->fixsfsi = fixsfsi;
+ o->ufixsfsi = ufixsfsi;
+}
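+
+/* Usage sketch (illustrative only, not part of this file): a port would
+ typically install the accurate fpu from its cpu initialization code,
+ supplying either its own error handler or cgen_fpu_ignore_errors from
+ cgen-fpu.h. The names my_fp_error, my_cpu_init and my_cpu_fpu (an
+ accessor returning the port's CGEN_FPU*) are hypothetical.
+
+ static void
+ my_fp_error (CGEN_FPU* fpu, int status)
+ {
+ sim_io_eprintf (CPU_STATE ((SIM_CPU*) fpu->owner),
+ "fp operation reported status %#x\n", status);
+ }
+
+ static void
+ my_cpu_init (SIM_CPU* cpu)
+ {
+ cgen_init_accurate_fpu (cpu, my_cpu_fpu (cpu), my_fp_error);
+ }
+*/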
--- /dev/null
+/* CGEN fpu support
+ Copyright (C) 1999 Cygnus Solutions. */
+
+#ifndef CGEN_FPU_H
+#define CGEN_FPU_H
+
+/* Floating point support is a little more involved.
+ We want to support using either host fp insns or an accurate fp library,
+ and we also want to support easily added variants (e.g. modified IEEE).
+ This is done by vectoring all fp operations through a table of
+ function pointers. */
+
+typedef USI SF;
+typedef UDI DF;
+typedef struct { SI parts[3]; } XF;
+typedef struct { SI parts[4]; } TF;
+
+#ifndef TARGET_EXT_FP_WORDS
+#define TARGET_EXT_FP_WORDS 4
+#endif
+
+/* Forward declaration. */
+typedef struct cgen_fp_ops CGEN_FP_OPS;
+
+/* Instance of an fpu. */
+
+typedef struct {
+ /* Usually a pointer back to the SIM_CPU struct. */
+ void* owner;
+ /* Pointer to ops struct, rather than copy of it, to avoid bloating
+ SIM_CPU struct. */
+ CGEN_FP_OPS* ops;
+} CGEN_FPU;
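+
+/* For instance (illustrative only), code holding a CGEN_FPU performs an
+ SF add by vectoring through the table:
+
+ SF sum = (*fpu->ops->addsf) (fpu, a, b);
+
+ so choosing host fp insns, the accurate library, or a variant is purely
+ a matter of which functions were installed in fpu->ops. */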
+
+/* Result of an fp compare. */
+
+typedef enum {
+ /* ??? May wish to distinguish qnan/snan here. */
+ FP_CMP_EQ, FP_CMP_LT, FP_CMP_GT, FP_CMP_NAN
+} CGEN_FP_CMP;
+
+/* Error handler. An implementation calls this with a status code when an
+ operation reports a problem. */
+
+typedef void (CGEN_FPU_ERROR_FN) (CGEN_FPU*, int);
+
+/* fpu operation table */
+
+struct cgen_fp_ops {
+
+ /* Error (e.g. signalling NaN) handler, supplied by the owner. */
+
+ CGEN_FPU_ERROR_FN *error;
+
+ /* basic SF ops */
+
+ SF (*addsf) (CGEN_FPU*, SF, SF);
+ SF (*subsf) (CGEN_FPU*, SF, SF);
+ SF (*mulsf) (CGEN_FPU*, SF, SF);
+ SF (*divsf) (CGEN_FPU*, SF, SF);
+ SF (*negsf) (CGEN_FPU*, SF);
+ SF (*abssf) (CGEN_FPU*, SF);
+ SF (*sqrtsf) (CGEN_FPU*, SF);
+ SF (*invsf) (CGEN_FPU*, SF);
+ SF (*cossf) (CGEN_FPU*, SF);
+ SF (*sinsf) (CGEN_FPU*, SF);
+ SF (*minsf) (CGEN_FPU*, SF, SF);
+ SF (*maxsf) (CGEN_FPU*, SF, SF);
+
+ /* ??? to be revisited */
+ CGEN_FP_CMP (*cmpsf) (CGEN_FPU*, SF, SF);
+ int (*eqsf) (CGEN_FPU*, SF, SF);
+ int (*nesf) (CGEN_FPU*, SF, SF);
+ int (*ltsf) (CGEN_FPU*, SF, SF);
+ int (*lesf) (CGEN_FPU*, SF, SF);
+ int (*gtsf) (CGEN_FPU*, SF, SF);
+ int (*gesf) (CGEN_FPU*, SF, SF);
+
+ /* basic DF ops */
+
+ DF (*adddf) (CGEN_FPU*, DF, DF);
+ DF (*subdf) (CGEN_FPU*, DF, DF);
+ DF (*muldf) (CGEN_FPU*, DF, DF);
+ DF (*divdf) (CGEN_FPU*, DF, DF);
+ DF (*negdf) (CGEN_FPU*, DF);
+ DF (*absdf) (CGEN_FPU*, DF);
+ DF (*sqrtdf) (CGEN_FPU*, DF);
+ DF (*invdf) (CGEN_FPU*, DF);
+ DF (*cosdf) (CGEN_FPU*, DF);
+ DF (*sindf) (CGEN_FPU*, DF);
+ DF (*mindf) (CGEN_FPU*, DF, DF);
+ DF (*maxdf) (CGEN_FPU*, DF, DF);
+
+ /* ??? to be revisited */
+ CGEN_FP_CMP (*cmpdf) (CGEN_FPU*, DF, DF);
+ int (*eqdf) (CGEN_FPU*, DF, DF);
+ int (*nedf) (CGEN_FPU*, DF, DF);
+ int (*ltdf) (CGEN_FPU*, DF, DF);
+ int (*ledf) (CGEN_FPU*, DF, DF);
+ int (*gtdf) (CGEN_FPU*, DF, DF);
+ int (*gedf) (CGEN_FPU*, DF, DF);
+
+ /* SF/DF conversion ops */
+
+ DF (*extsfdf) (CGEN_FPU*, SF);
+ SF (*truncdfsf) (CGEN_FPU*, DF);
+
+ SF (*floatsisf) (CGEN_FPU*, SI);
+ SF (*floatdisf) (CGEN_FPU*, DI);
+ SF (*ufloatsisf) (CGEN_FPU*, USI);
+ SF (*ufloatdisf) (CGEN_FPU*, UDI);
+
+ SI (*fixsfsi) (CGEN_FPU*, SF);
+ DI (*fixsfdi) (CGEN_FPU*, SF);
+ USI (*ufixsfsi) (CGEN_FPU*, SF);
+ UDI (*ufixsfdi) (CGEN_FPU*, SF);
+
+ DF (*floatsidf) (CGEN_FPU*, SI);
+ DF (*floatdidf) (CGEN_FPU*, DI);
+ DF (*ufloatsidf) (CGEN_FPU*, USI);
+ DF (*ufloatdidf) (CGEN_FPU*, UDI);
+
+ SI (*fixdfsi) (CGEN_FPU*, DF);
+ DI (*fixdfdi) (CGEN_FPU*, DF);
+ USI (*ufixdfsi) (CGEN_FPU*, DF);
+ UDI (*ufixdfdi) (CGEN_FPU*, DF);
+
+ /* XF mode support (kept separate since it is not always present). */
+
+ XF (*addxf) (CGEN_FPU*, XF, XF);
+ XF (*subxf) (CGEN_FPU*, XF, XF);
+ XF (*mulxf) (CGEN_FPU*, XF, XF);
+ XF (*divxf) (CGEN_FPU*, XF, XF);
+ XF (*negxf) (CGEN_FPU*, XF);
+ XF (*absxf) (CGEN_FPU*, XF);
+ XF (*sqrtxf) (CGEN_FPU*, XF);
+ XF (*invxf) (CGEN_FPU*, XF);
+ XF (*cosxf) (CGEN_FPU*, XF);
+ XF (*sinxf) (CGEN_FPU*, XF);
+ XF (*minxf) (CGEN_FPU*, XF, XF);
+ XF (*maxxf) (CGEN_FPU*, XF, XF);
+
+ CGEN_FP_CMP (*cmpxf) (CGEN_FPU*, XF, XF);
+ int (*eqxf) (CGEN_FPU*, XF, XF);
+ int (*nexf) (CGEN_FPU*, XF, XF);
+ int (*ltxf) (CGEN_FPU*, XF, XF);
+ int (*lexf) (CGEN_FPU*, XF, XF);
+ int (*gtxf) (CGEN_FPU*, XF, XF);
+ int (*gexf) (CGEN_FPU*, XF, XF);
+
+ XF (*extsfxf) (CGEN_FPU*, SF);
+ XF (*extdfxf) (CGEN_FPU*, DF);
+ SF (*truncxfsf) (CGEN_FPU*, XF);
+ DF (*truncxfdf) (CGEN_FPU*, XF);
+
+ XF (*floatsixf) (CGEN_FPU*, SI);
+ XF (*floatdixf) (CGEN_FPU*, DI);
+ XF (*ufloatsixf) (CGEN_FPU*, USI);
+ XF (*ufloatdixf) (CGEN_FPU*, UDI);
+
+ SI (*fixxfsi) (CGEN_FPU*, XF);
+ DI (*fixxfdi) (CGEN_FPU*, XF);
+ USI (*ufixxfsi) (CGEN_FPU*, XF);
+ UDI (*ufixxfdi) (CGEN_FPU*, XF);
+
+ /* TF mode support (kept separate since it is not always present). */
+
+ TF (*addtf) (CGEN_FPU*, TF, TF);
+ TF (*subtf) (CGEN_FPU*, TF, TF);
+ TF (*multf) (CGEN_FPU*, TF, TF);
+ TF (*divtf) (CGEN_FPU*, TF, TF);
+ TF (*negtf) (CGEN_FPU*, TF);
+ TF (*abstf) (CGEN_FPU*, TF);
+ TF (*sqrttf) (CGEN_FPU*, TF);
+ TF (*invtf) (CGEN_FPU*, TF);
+ TF (*costf) (CGEN_FPU*, TF);
+ TF (*sintf) (CGEN_FPU*, TF);
+ TF (*mintf) (CGEN_FPU*, TF, TF);
+ TF (*maxtf) (CGEN_FPU*, TF, TF);
+
+ CGEN_FP_CMP (*cmptf) (CGEN_FPU*, TF, TF);
+ int (*eqtf) (CGEN_FPU*, TF, TF);
+ int (*netf) (CGEN_FPU*, TF, TF);
+ int (*lttf) (CGEN_FPU*, TF, TF);
+ int (*letf) (CGEN_FPU*, TF, TF);
+ int (*gttf) (CGEN_FPU*, TF, TF);
+ int (*getf) (CGEN_FPU*, TF, TF);
+
+ TF (*extsftf) (CGEN_FPU*, SF);
+ TF (*extdftf) (CGEN_FPU*, DF);
+ SF (*trunctfsf) (CGEN_FPU*, TF);
+ DF (*trunctfdf) (CGEN_FPU*, TF);
+
+ TF (*floatsitf) (CGEN_FPU*, SI);
+ TF (*floatditf) (CGEN_FPU*, DI);
+ TF (*ufloatsitf) (CGEN_FPU*, USI);
+ TF (*ufloatditf) (CGEN_FPU*, UDI);
+
+ SI (*fixtfsi) (CGEN_FPU*, TF);
+ DI (*fixtfdi) (CGEN_FPU*, TF);
+ USI (*ufixtfsi) (CGEN_FPU*, TF);
+ UDI (*ufixtfdi) (CGEN_FPU*, TF);
+
+};
+
+extern void cgen_init_accurate_fpu (SIM_CPU*, CGEN_FPU*, CGEN_FPU_ERROR_FN*);
+
+/* Return non-zero if the argument is a signalling NaN. */
+extern BI cgen_sf_snan_p (CGEN_FPU*, SF);
+extern BI cgen_df_snan_p (CGEN_FPU*, DF);
+
+/* No-op fp error handler, for use when fp errors are to be ignored. */
+extern CGEN_FPU_ERROR_FN cgen_fpu_ignore_errors;
+
+#endif /* CGEN_FPU_H */