From e4fe343e5823fea50116218124d97651f10d3879 Mon Sep 17 00:00:00 2001
From: Luke Kenneth Casson Leighton
Date: Sun, 21 Oct 2018 09:14:21 +0100
Subject: [PATCH] calculate src bitwidth - very time-consuming, optimise later

---
 riscv/insn_template_sv.cc |  2 +-
 riscv/sv.cc               | 50 +++++++++++++++++++++++++++++++++++++--
 riscv/sv_decode.h         |  6 +++++
 3 files changed, 55 insertions(+), 3 deletions(-)

diff --git a/riscv/insn_template_sv.cc b/riscv/insn_template_sv.cc
index 1bc3721..5bf0a0f 100644
--- a/riscv/insn_template_sv.cc
+++ b/riscv/insn_template_sv.cc
@@ -63,7 +63,7 @@ reg_t sv_proc_t::FN(processor_t* p, insn_t s_insn, reg_t pc)
   reg_t target_pred = ~0x0;
   bool zeroingtarg = false;
 #endif
-  sv_insn_t insn(p, sv_enabled, bits, floatintmap, PRED_ARGS, OFFS_ARGS);
+  sv_insn_t insn(p, sv_enabled, bits, floatintmap, xlen, PRED_ARGS, OFFS_ARGS);
   p->s.set_insn(&insn, xlen);
 #ifdef USING_NOREGS
 #include INCLUDEFILE
diff --git a/riscv/sv.cc b/riscv/sv.cc
index 9d6a637..3947fc6 100644
--- a/riscv/sv.cc
+++ b/riscv/sv.cc
@@ -2,13 +2,24 @@
 #include "sv_decode.h"
 #include "processor.h"
 
+static int get_bitwidth(uint8_t elwidth, int xlen)
+{
+  switch (elwidth) {
+  case 0: return xlen;
+  case 1: return xlen / 2;
+  case 2: return xlen * 2;
+  default: return 8;
+  }
+}
+
 sv_insn_t::sv_insn_t(processor_t *pr, bool _sv_enabled,
-                     insn_bits_t bits, unsigned int f,
+                     insn_bits_t bits, unsigned int f, int xlen,
                      uint64_t &p_rd, uint64_t &p_rs1, uint64_t &p_rs2,
                      uint64_t &p_rs3, uint64_t &p_sp, uint64_t *p_im,
                      int *o_rd, int *o_rs1, int *o_rs2, int *o_rs3, int *o_sp,
                      int *o_imm) :
-            insn_t(bits), p(pr), sv_enabled(_sv_enabled), vloop_continue(false),
+            insn_t(bits), p(pr), src_bitwidth(0),
+            sv_enabled(_sv_enabled), vloop_continue(false),
             at_least_one_reg_vectorised(false),
             fimap(f),
             offs_rd(o_rd), offs_rs1(o_rs1), offs_rs2(o_rs2), offs_rs3(o_rs3), offs_sp(o_sp),
@@ -16,6 +27,41 @@ sv_insn_t::sv_insn_t(processor_t *pr, bool _sv_enabled,
             prd(p_rd), prs1(p_rs1), prs2(p_rs2), prs3(p_rs3), psp(p_sp),
             save_branch_addr(0)
 {
+  // work out the source element width based on what is used
+  // note that this has to match with id_regs.py patterns
+
+  unsigned int bm=2;
+  for (int i = 1; i < 12; i++, bm<<=1)
+  {
+    sv_reg_entry* r = NULL;
+    if (bm == (REG_RS1 & fimap)) {
+      r = get_regentry(rs1(), true);
+    } else if (bm == (REG_RS2 & fimap)) {
+      r = get_regentry(rs2(), true);
+    } else if (bm == (REG_RS3 & fimap)) {
+      r = get_regentry(rs3(), true);
+    } else if (bm == (REG_RVC_RS1 & fimap)) {
+      r = get_regentry(rvc_rs1(), true);
+    } else if (bm == (REG_RVC_RS2 & fimap)) {
+      r = get_regentry(rvc_rs2(), true);
+    } else if (bm == (REG_RVC_RS1S & fimap)) {
+      r = get_regentry(rvc_rs1s(), true);
+    } else if (bm == (REG_RVC_RS2S & fimap)) {
+      r = get_regentry(rvc_rs2s(), true);
+    } else if (bm == (REG_FRS1 & fimap)) {
+      r = get_regentry(rs1(), false);
+    } else if (bm == (REG_FRS2 & fimap)) {
+      r = get_regentry(rs2(), false);
+    } else if (bm == (REG_FRS3 & fimap)) {
+      r = get_regentry(rs3(), false);
+    }
+    if (r == NULL || !r->active) {
+      continue;
+    }
+    uint8_t elwidth = r->elwidth;
+    uint8_t bitwidth = get_bitwidth(elwidth, xlen);
+    src_bitwidth = std::max(src_bitwidth, bitwidth);
+  }
 }
 
 sv_pred_entry* sv_insn_t::get_predentry(uint64_t reg, bool intreg)
diff --git a/riscv/sv_decode.h b/riscv/sv_decode.h
index 9dfb890..f8701af 100644
--- a/riscv/sv_decode.h
+++ b/riscv/sv_decode.h
@@ -18,6 +18,10 @@
 #define REG_RVC_RS2 0x20
 #define REG_RVC_RS1S 0x40
 #define REG_RVC_RS2S 0x80
+#define REG_FRD 0x100
+#define REG_FRS1 0x200
+#define REG_FRS2 0x400
+#define REG_FRS3 0x800
 
 class processor_t;
 
@@ -26,6 +30,7 @@ class sv_insn_t: public insn_t {
 public:
   sv_insn_t(processor_t *pr, bool _sv_enabled,
             insn_bits_t bits, unsigned int f,
+            int xlen,
             uint64_t &p_rd, uint64_t &p_rs1, uint64_t &p_rs2,
             uint64_t &p_rs3, uint64_t &p_sp, uint64_t *p_im,
             int *o_rd, int *o_rs1, int *o_rs2, int *o_rs3, int *o_sp,
@@ -86,6 +91,7 @@ public:
   bool stop_vloop(void);
 
   processor_t *p;
+  uint8_t src_bitwidth;
   bool sv_enabled;
 
   // cached version of remap: if remap is called multiple times
-- 
2.30.2
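
Note (not part of the patch): the new get_bitwidth() helper in riscv/sv.cc maps the per-register elwidth encoding onto a concrete element width relative to XLEN (0 = default/XLEN, 1 = half, 2 = double, anything else = 8 bits), and the constructor loop keeps the maximum over all active source operands in src_bitwidth. The standalone sketch below, with hypothetical elwidth inputs, merely illustrates that mapping and reduction outside the simulator:

// Standalone sketch (assumption: not part of the patch) of the
// elwidth -> element-bitwidth mapping used by get_bitwidth() in riscv/sv.cc
// and the max-over-active-source-operands reduction that fills src_bitwidth.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static int get_bitwidth(uint8_t elwidth, int xlen)
{
  switch (elwidth) {
  case 0: return xlen;      // default: element width equals XLEN
  case 1: return xlen / 2;  // half-width elements
  case 2: return xlen * 2;  // double-width elements
  default: return 8;        // 8-bit elements
  }
}

int main()
{
  const int xlen = 64;
  // hypothetical elwidth settings of three active source registers
  const uint8_t src_elwidths[] = { 1, 0, 3 };
  int src_bitwidth = 0;
  for (uint8_t ew : src_elwidths)
    src_bitwidth = std::max(src_bitwidth, get_bitwidth(ew, xlen));
  std::printf("src_bitwidth = %d\n", src_bitwidth);  // prints 64
  return 0;
}

With every source elwidth left at 0, the reduction simply yields XLEN.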