arch-riscv: Clean up new FP code in arch/registers.hh.

Delete unused macros, turn macros into inline functions, simplify them,
comment them, replace custom sign extension with the bitfield.hh
version.

Change-Id: I5962c1f0ac62245385052082e5897e14e4b5adf1
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/41735
Maintainer: Gabe Black <gabe.black@gmail.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
This commit is contained in:
Gabe Black
2021-02-22 00:40:58 -08:00
parent 8866d766ad
commit 92fe77320f
2 changed files with 65 additions and 70 deletions

View File

@@ -909,7 +909,8 @@ decode QUADRANT default Unknown::unknown() {
freg_t fd;
fd = freg(f32_mulAdd(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits)),
f32(f32(freg(Fs3_bits)).v ^ F32_SIGN)));
f32(f32(freg(Fs3_bits)).v ^
mask(31, 31))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x1: fmsub_d({{
@@ -917,7 +918,8 @@ decode QUADRANT default Unknown::unknown() {
freg_t fd;
fd = freg(f64_mulAdd(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits)),
f64(f64(freg(Fs3_bits)).v ^ F64_SIGN)));
f64(f64(freg(Fs3_bits)).v ^
mask(63, 63))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
}
@@ -925,7 +927,8 @@ decode QUADRANT default Unknown::unknown() {
0x0: fnmsub_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_mulAdd(f32(f32(freg(Fs1_bits)).v ^ F32_SIGN),
fd = freg(f32_mulAdd(f32(f32(freg(Fs1_bits)).v ^
mask(31, 31)),
f32(freg(Fs2_bits)),
f32(freg(Fs3_bits))));
Fd_bits = fd.v;
@@ -933,7 +936,8 @@ decode QUADRANT default Unknown::unknown() {
0x1: fnmsub_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_mulAdd(f64(f64(freg(Fs1_bits)).v ^ F64_SIGN),
fd = freg(f64_mulAdd(f64(f64(freg(Fs1_bits)).v ^
mask(63, 63)),
f64(freg(Fs2_bits)),
f64(freg(Fs3_bits))));
Fd_bits = fd.v;
@@ -943,17 +947,21 @@ decode QUADRANT default Unknown::unknown() {
0x0: fnmadd_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_mulAdd(f32(f32(freg(Fs1_bits)).v ^ F32_SIGN),
fd = freg(f32_mulAdd(f32(f32(freg(Fs1_bits)).v ^
mask(31, 31)),
f32(freg(Fs2_bits)),
f32(f32(freg(Fs3_bits)).v ^ F32_SIGN)));
f32(f32(freg(Fs3_bits)).v ^
mask(31, 31))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x1: fnmadd_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_mulAdd(f64(f64(freg(Fs1_bits)).v ^ F64_SIGN),
fd = freg(f64_mulAdd(f64(f64(freg(Fs1_bits)).v ^
mask(63, 63)),
f64(freg(Fs2_bits)),
f64(f64(freg(Fs3_bits)).v ^ F64_SIGN)));
f64(f64(freg(Fs3_bits)).v ^
mask(63, 63))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
}
@@ -1016,42 +1024,29 @@ decode QUADRANT default Unknown::unknown() {
}}, FloatDivOp);
0x10: decode ROUND_MODE {
0x0: fsgnj_s({{
freg_t fd;
fd = freg(fsgnj32(freg(Fs1_bits), freg(Fs2_bits),
false, false));
Fd_bits = fd.v;
Fd_bits = boxF32(insertBits(unboxF32(Fs2_bits), 30, 0,
unboxF32(Fs1_bits)));
}}, FloatMiscOp);
0x1: fsgnjn_s({{
freg_t fd;
fd = freg(fsgnj32(freg(Fs1_bits), freg(Fs2_bits),
true, false));
Fd_bits = fd.v;
Fd_bits = boxF32(insertBits(unboxF32(~Fs2_bits), 30, 0,
unboxF32(Fs1_bits)));
}}, FloatMiscOp);
0x2: fsgnjx_s({{
freg_t fd;
fd = freg(fsgnj32(freg(Fs1_bits), freg(Fs2_bits),
false, true));
Fd_bits = fd.v;
Fd_bits = boxF32(insertBits(
unboxF32(Fs1_bits) ^ unboxF32(Fs2_bits),
30, 0, unboxF32(Fs1_bits)));
}}, FloatMiscOp);
}
0x11: decode ROUND_MODE {
0x0: fsgnj_d({{
freg_t fd;
fd = freg(fsgnj64(freg(Fs1_bits), freg(Fs2_bits),
false, false));
Fd_bits = fd.v;
Fd_bits = insertBits(Fs2_bits, 62, 0, Fs1_bits);
}}, FloatMiscOp);
0x1: fsgnjn_d({{
freg_t fd;
fd = freg(fsgnj64(freg(Fs1_bits), freg(Fs2_bits),
true, false));
Fd_bits = fd.v;
Fd_bits = insertBits(~Fs2_bits, 62, 0, Fs1_bits);
}}, FloatMiscOp);
0x2: fsgnjx_d({{
freg_t fd;
fd = freg(fsgnj64(freg(Fs1_bits), freg(Fs2_bits),
false, true));
Fd_bits = fd.v;
Fd_bits = insertBits(
Fs1_bits ^ Fs2_bits, 62, 0, Fs1_bits);
}}, FloatMiscOp);
}
0x14: decode ROUND_MODE {
@@ -1060,7 +1055,7 @@ decode QUADRANT default Unknown::unknown() {
f32(freg(Fs2_bits))) ||
(f32_eq(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits))) &&
(f32(freg(Fs1_bits)).v & F32_SIGN));
bits(f32(freg(Fs1_bits)).v, 31));
Fd_bits = less ||
isNaNF32UI(f32(freg(Fs2_bits)).v) ?
@@ -1074,7 +1069,7 @@ decode QUADRANT default Unknown::unknown() {
f32(freg(Fs1_bits))) ||
(f32_eq(f32(freg(Fs2_bits)),
f32(freg(Fs1_bits))) &&
(f32(freg(Fs2_bits)).v & F32_SIGN));
bits(f32(freg(Fs2_bits)).v, 31));
Fd_bits = greater ||
isNaNF32UI(f32(freg(Fs2_bits)).v) ?
@@ -1090,7 +1085,7 @@ decode QUADRANT default Unknown::unknown() {
f64(freg(Fs2_bits))) ||
(f64_eq(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits))) &&
(f64(freg(Fs1_bits)).v & F64_SIGN));
bits(f64(freg(Fs1_bits)).v, 63));
Fd_bits = less ||
isNaNF64UI(f64(freg(Fs2_bits)).v) ?
@@ -1105,7 +1100,7 @@ decode QUADRANT default Unknown::unknown() {
f64(freg(Fs1_bits))) ||
(f64_eq(f64(freg(Fs2_bits)),
f64(freg(Fs1_bits))) &&
(f64(freg(Fs2_bits)).v & F64_SIGN));
bits(f64(freg(Fs2_bits)).v, 63));
Fd_bits = greater ||
isNaNF64UI(f64(freg(Fs2_bits)).v) ?
@@ -1180,13 +1175,13 @@ decode QUADRANT default Unknown::unknown() {
0x60: decode CONV_SGN {
0x0: fcvt_w_s({{
RM_REQUIRED;
Rd_sd = sext32(f32_to_i32(f32(freg(Fs1_bits)), rm,
true));
Rd_sd = sext<32>(f32_to_i32(f32(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x1: fcvt_wu_s({{
RM_REQUIRED;
Rd = sext32(f32_to_ui32(f32(freg(Fs1_bits)), rm,
true));
Rd = sext<32>(f32_to_ui32(f32(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x2: fcvt_l_s({{
RM_REQUIRED;
@@ -1200,13 +1195,13 @@ decode QUADRANT default Unknown::unknown() {
0x61: decode CONV_SGN {
0x0: fcvt_w_d({{
RM_REQUIRED;
Rd_sd = sext32(f64_to_i32(f64(freg(Fs1_bits)), rm,
true));
Rd_sd = sext<32>(f64_to_i32(f64(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x1: fcvt_wu_d({{
RM_REQUIRED;
Rd = sext32(f64_to_ui32(f64(freg(Fs1_bits)), rm,
true));
Rd = sext<32>(f64_to_ui32(f64(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x2: fcvt_l_d({{
RM_REQUIRED;

View File

@@ -62,34 +62,34 @@
namespace RiscvISA
{
/* Convenience wrappers to simplify softfloat code sequences */
#define isBoxedF32(r) ((uint32_t)((r.v >> 32) + 1) == 0)
#define unboxF32(r) (isBoxedF32(r) ? (uint32_t)r.v : defaultNaNF32UI)
#define unboxF64(r) (r.v)
/* Conversion functions for working with softfloat. */
typedef int64_t sreg_t;
typedef uint64_t reg_t;
typedef float64_t freg_t;
inline float32_t f32(uint32_t v) { return { v }; }
inline float64_t f64(uint64_t v) { return { v }; }
inline float32_t f32(freg_t r) { return f32(unboxF32(r)); }
inline float64_t f64(freg_t r) { return f64(unboxF64(r)); }
inline freg_t freg(float32_t f) { return {((uint64_t)-1 << 32) | f.v}; }
inline freg_t freg(float64_t f) { return {f}; }
inline freg_t freg(uint_fast16_t f) { return {f}; }
#define F32_SIGN ((uint32_t)1 << 31)
#define F64_SIGN ((uint64_t)1 << 63)
#define fsgnj32(a, b, n, x) \
f32((f32(a).v & ~F32_SIGN) | \
((((x) ? f32(a).v : (n) ? F32_SIGN : 0) ^ f32(b).v) & F32_SIGN))
#define fsgnj64(a, b, n, x) \
f64((f64(a).v & ~F64_SIGN) | \
((((x) ? f64(a).v : (n) ? F64_SIGN : 0) ^ f64(b).v) & F64_SIGN))
// Generic floating point value type.
using freg_t = float64_t;
#define sext32(x) ((sreg_t)(int32_t)(x))
#define zext32(x) ((reg_t)(uint32_t)(x))
#define sext_xlen(x) (((sreg_t)(x) << (64-xlen)) >> (64-xlen))
#define zext_xlen(x) (((reg_t)(x) << (64-xlen)) >> (64-xlen))
// Extract a 32 bit float packed into a 64 bit value.
static constexpr uint32_t
unboxF32(uint64_t v)
{
    // A properly NaN-boxed single has all ones in the upper 32 bits;
    // any other pattern decodes as the canonical (default) NaN.
    return bits(v, 63, 32) == mask(32) ? bits(v, 31, 0) : defaultNaNF32UI;
}
static constexpr uint64_t boxF32(uint32_t v) { return {mask(63, 32) | v}; }
// Create fixed size floats from raw bytes or generic floating point values.
// From raw bit patterns:
static constexpr float32_t f32(uint32_t v) { return {v}; }
static constexpr float64_t f64(uint64_t v) { return {v}; }
// From the generic 64 bit freg_t: a single must be unboxed first; a double
// already uses the register's native representation.
static constexpr float32_t f32(freg_t r) { return {unboxF32(r.v)}; }
static constexpr float64_t f64(freg_t r) { return r; }
// Create generic floating point values from fixed size floats.
// A single is NaN-boxed so it occupies the full 64 bit register canonically.
static constexpr freg_t freg(float32_t f) { return {boxF32(f.v)}; }
// A double already fills the register; pass it through unchanged.
static constexpr freg_t freg(float64_t f) { return f; }
// NOTE(review): presumably accepts narrow integer results (e.g. softfloat
// comparison outputs) and widens them — verify against callers.
static constexpr freg_t freg(uint_fast16_t f) { return {f}; }
// Vector registers are not applicable to RISC-V here, so plug in the
// placeholder element type.
using VecElem = ::DummyVecElem;