#define invalid_pc(pc) ((pc) & 1)
/* Convenience wrappers to simplify softfloat code sequences */
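/* A value narrower than the 128-bit freg_t is "NaN-boxed": every bit above
   its width must be 1, otherwise the default NaN for that width is used. */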
+#define isBoxedF16(r) (isBoxedF32(r) && ((uint16_t)((r.v[0] >> 16) + 1) == 0))
+#define unboxF16(r) (isBoxedF16(r) ? (uint16_t)r.v[0] : defaultNaNF16UI)
#define isBoxedF32(r) (isBoxedF64(r) && ((uint32_t)((r.v[0] >> 32) + 1) == 0))
#define unboxF32(r) (isBoxedF32(r) ? (uint32_t)r.v[0] : defaultNaNF32UI)
#define isBoxedF64(r) ((r.v[1] + 1) == 0)
#define unboxF64(r) (isBoxedF64(r) ? r.v[0] : defaultNaNF64UI)
typedef float128_t freg_t;
+inline float16_t f16(uint16_t v) { return { v }; }
inline float32_t f32(uint32_t v) { return { v }; }
inline float64_t f64(uint64_t v) { return { v }; }
+inline float16_t f16(freg_t r) { return f16(unboxF16(r)); }
inline float32_t f32(freg_t r) { return f32(unboxF32(r)); }
inline float64_t f64(freg_t r) { return f64(unboxF64(r)); }
inline float128_t f128(freg_t r) { return r; }
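/* A minimal standalone sketch (not part of the patch) of the NaN-boxing
   convention the macros above test: a 16-bit payload is "boxed" when every
   bit above bit 15 of the 128-bit register is 1.  The struct and helper
   names below are illustrative stand-ins, not the softfloat/spike ones. */
#include <cstdint>
#include <cassert>

struct reg128 { uint64_t v[2]; };           // stand-in for freg_t

// Box a raw 16-bit float payload into a 128-bit register.
static reg128 box16(uint16_t h)
{
    return { { UINT64_C(0xFFFFFFFFFFFF0000) | h, UINT64_C(0xFFFFFFFFFFFFFFFF) } };
}

// Same checks as isBoxedF64 / isBoxedF32 / isBoxedF16 above, chained.
static bool boxed16(const reg128 &r)
{
    return (r.v[1] + 1) == 0 &&
           (uint32_t)((r.v[0] >> 32) + 1) == 0 &&
           (uint16_t)((r.v[0] >> 16) + 1) == 0;
}

int main()
{
    reg128 r = box16(0x3C00);               // 1.0 in IEEE binary16
    assert(boxed16(r));
    assert((uint16_t)r.v[0] == 0x3C00);     // what unboxF16 would return
    r.v[0] &= ~UINT64_C(0x10000);           // clear one upper bit...
    assert(!boxed16(r));                    // ...and it is no longer boxed
    return 0;
}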
sv_float32_t (sv_proc_t::f32)(sv_freg_t x)
{
+ switch (x.get_elwidth())
+ {
+ // 8-bit
+ case 1: throw trap_illegal_instruction(0); // XXX for now
+ // 16-bit data, up-convert to f32
+ case 2:
+ return f16_to_f32(f16(x.to_uint32()));
+ // 0 and 3 are 32-bit
+ default: break;
+ }
return ::f32(x);
}
sv_float32_t (sv_proc_t::f32)(sv_reg_t const& v)
{
uint64_t x = v;
+ switch (v.get_elwidth())
+ {
+ // 8-bit
+ case 1: throw trap_illegal_instruction(0); // XXX for now
+ // 16-bit data, up-convert to f32
+ case 2:
+ return f16_to_f32(f16(x));
+ // 0 and 3 are 32-bit
+ default: break;
+ }
return ::f32(x);
}
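/* An illustrative sketch (not part of the patch) of the element-width
   dispatch used in both functions above: a 16-bit element (elwidth == 2)
   is widened to binary32 with softfloat's f16_to_f32, 32-bit elements
   (elwidth 0 or 3) pass through, and 8-bit elements are rejected.
   Assumes the Berkeley SoftFloat-3 headers; widen_to_f32 is a made-up
   name for this example only. */
#include <cstdint>
#include <stdexcept>
#include "softfloat.h"

static float32_t widen_to_f32(uint64_t element, int elwidth)
{
    switch (elwidth)
    {
    case 1:                                  // 8-bit: no FP8 format
        throw std::invalid_argument("8-bit FP elements not supported");
    case 2: {                                // 16-bit: up-convert f16 -> f32
        float16_t h = { (uint16_t)element };
        return f16_to_f32(h);
    }
    default: {                               // 0 and 3 are 32-bit: pass through
        float32_t s = { (uint32_t)element };
        return s;
    }
    }
}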