def bitfield ROUND_MODE <14:12>;
def bitfield CONV_SGN <24:20>;
def bitfield FUNCT2 <26:25>;
+
+// AMO
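+// These bitfields follow the RISC-V AMO encoding (opcode 0x2f): funct5 in
+// bits <31:27> selects the operation, while aq (bit 26) and rl (bit 25)
+// carry the acquire/release memory-ordering hints.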
+def bitfield AMOFUNCT <31:27>;
+def bitfield AQ <26>;
+def bitfield RL <25>;
}
}
+ 0x2f: decode FUNCT3 {
+ 0x2: decode AMOFUNCT {
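+            // lr.w takes a reservation on the loaded address; a matching
+            // sc.w succeeds only while that reservation is still held and,
+            // per the RISC-V spec, writes 0 to rd on success and a nonzero
+            // code on failure.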
+ 0x2: LoadReserved::lr_w({{
+ Rd_sd = Mem_sw;
+ }}, mem_flags=LLSC, aq=AQ, rl=RL);
+ 0x3: StoreCond::sc_w({{
+ Mem_uw = Rs2_uw;
+ }}, {{
+ Rd = result;
+ }}, inst_flags=IsStoreConditional, mem_flags=LLSC, aq=AQ, rl=RL);
+ format AtomicMemOp {
+ 0x0: amoadd_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_sw + Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x1: amoswap_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_uw;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x4: amoxor_w({{Rt_sd = Mem_sw;}}, {{
+                    Mem_sw = Rs2_uw ^ Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x8: amoor_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_uw | Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0xc: amoand_w({{Rt_sd = Mem_sw;}}, {{
+                    Mem_sw = Rs2_uw & Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x10: amomin_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = std::min<int32_t>(Rs2_sw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x14: amomax_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = std::max<int32_t>(Rs2_sw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x18: amominu_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = std::min<uint32_t>(Rs2_uw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x1c: amomaxu_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = std::max<uint32_t>(Rs2_uw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ }
+ }
+ 0x3: decode AMOFUNCT {
+            0x2: LoadReserved::lr_d({{
+                Rd_sd = Mem_sd;
+            }}, mem_flags=LLSC, aq=AQ, rl=RL);
+            0x3: StoreCond::sc_d({{
+                Mem = Rs2;
+            }}, {{
+                Rd = result;
+            }}, inst_flags=IsStoreConditional, mem_flags=LLSC, aq=AQ, rl=RL);
+ format AtomicMemOp {
+ 0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{
+ Mem_sd = Rs2_sd + Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x1: amoswap_d({{Rt = Mem;}}, {{
+ Mem = Rs2;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x4: amoxor_d({{Rt = Mem;}}, {{
+                    Mem = Rs2 ^ Rt;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x8: amoor_d({{Rt = Mem;}}, {{
+ Mem = Rs2 | Rt;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0xc: amoand_d({{Rt = Mem;}}, {{
+                    Mem = Rs2 & Rt;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x10: amomin_d({{Rt_sd = Mem_sd;}}, {{
+ Mem_sd = std::min(Rs2_sd, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x14: amomax_d({{Rt_sd = Mem_sd;}}, {{
+ Mem_sd = std::max(Rs2_sd, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x18: amominu_d({{Rt = Mem;}}, {{
+ Mem = std::min(Rs2, Rt);
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x1c: amomaxu_d({{Rt = Mem;}}, {{
+ Mem = std::max(Rs2, Rt);
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ }
+ }
+ }
0x33: decode FUNCT3 {
format ROp {
0x0: decode FUNCT7 {
--- /dev/null
+// -*- mode:c++ -*-
+
+// Copyright (c) 2015 Riscv Developers
+// Copyright (c) 2016 The University of Virginia
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met: redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer;
+// redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution;
+// neither the name of the copyright holders nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Alec Roelke
+
+////////////////////////////////////////////////////////////////////
+//
+// Atomic memory operation instructions
+//
+output header {{
+ class AtomicMemOp : public RiscvMacroInst
+ {
+ protected:
+ /// Constructor
+ // Each AtomicMemOp has a load and a store phase
+ AtomicMemOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass)
+ : RiscvMacroInst(mnem, _machInst, __opClass)
+ {}
+
+ std::string generateDisassembly(Addr pc,
+ const SymbolTable *symtab) const;
+ };
+
+ class AtomicMemOpMicro : public RiscvMicroInst
+ {
+ protected:
+ /// Memory request flags. See mem/request.hh.
+ Request::Flags memAccessFlags;
+
+ /// Constructor
+ AtomicMemOpMicro(const char *mnem, ExtMachInst _machInst,
+ OpClass __opClass)
+ : RiscvMicroInst(mnem, _machInst, __opClass)
+ {}
+
+ std::string generateDisassembly(Addr pc,
+ const SymbolTable *symtab) const;
+ };
+}};
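+
+// Each AtomicMemOp macroop thus splits into two microops that communicate
+// through the Rt scratch register (AMOTempReg): e.g., for amoadd_w, a load
+// microop (Rt_sd = Mem_sw) followed by a store microop
+// (Mem_sw = Rs2_sw + Rt_sd; Rd_sd = Rt_sd).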
+
+output decoder {{
+ std::string AtomicMemOp::generateDisassembly(Addr pc,
+ const SymbolTable *symtab) const
+ {
+ std::stringstream ss;
+ ss << csprintf("0x%08x", machInst) << ' ';
+ ss << mnemonic << ' ' << regName(_destRegIdx[0]) << ", "
+ << regName(_srcRegIdx[1]) << ", ("
+ << regName(_srcRegIdx[0]) << ')';
+ return ss.str();
+ }
+
+ std::string AtomicMemOpMicro::generateDisassembly(Addr pc,
+ const SymbolTable *symtab) const
+ {
+ std::stringstream ss;
+ ss << csprintf("0x%08x", machInst) << ' ' << mnemonic;
+ return ss.str();
+ }
+}};
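+
+// For illustration, amoadd_w with rd=a0, rs2=a1, rs1=a2 (raw encoding
+// 0x00b6252f) is meant to disassemble as:
+//     0x00b6252f amoadd_w a0, a1, (a2)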
+
+def template AtomicMemOpDeclare {{
+ /**
+ * Static instruction class for an AtomicMemOp operation
+ */
+ class %(class_name)s : public %(base_class)s
+ {
+ public:
+ // Constructor
+ %(class_name)s(ExtMachInst machInst);
+
+ protected:
+
+ class %(class_name)sLoad : public %(base_class)sMicro
+ {
+ public:
+ // Constructor
+ %(class_name)sLoad(ExtMachInst machInst, %(class_name)s *_p);
+
+ %(BasicExecDeclare)s
+
+ %(EACompDeclare)s
+
+ %(InitiateAccDeclare)s
+
+ %(CompleteAccDeclare)s
+ };
+
+ class %(class_name)sStore : public %(base_class)sMicro
+ {
+ public:
+ // Constructor
+ %(class_name)sStore(ExtMachInst machInst, %(class_name)s *_p);
+
+ %(BasicExecDeclare)s
+
+ %(EACompDeclare)s
+
+ %(InitiateAccDeclare)s
+
+ %(CompleteAccDeclare)s
+ };
+ };
+}};
+
+def template AtomicMemOpMacroConstructor {{
+ %(class_name)s::%(class_name)s(ExtMachInst machInst)
+ : %(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
+ {
+ %(constructor)s;
+ microops = {new %(class_name)sLoad(machInst, this),
+ new %(class_name)sStore(machInst, this)};
+ }
+}};
+
+def template AtomicMemOpLoadConstructor {{
+ %(class_name)s::%(class_name)sLoad::%(class_name)sLoad(
+ ExtMachInst machInst, %(class_name)s *_p)
+ : %(base_class)s("%(mnemonic)s[l]", machInst, %(op_class)s)
+ {
+ %(constructor)s;
+ flags[IsFirstMicroop] = true;
+ flags[IsDelayedCommit] = true;
+ if (AQ)
+ memAccessFlags = Request::ACQUIRE;
+ }
+}};
+
+def template AtomicMemOpStoreConstructor {{
+ %(class_name)s::%(class_name)sStore::%(class_name)sStore(
+ ExtMachInst machInst, %(class_name)s *_p)
+ : %(base_class)s("%(mnemonic)s[s]", machInst, %(op_class)s)
+ {
+ %(constructor)s;
+ flags[IsLastMicroop] = true;
+ flags[IsNonSpeculative] = true;
+ if (RL)
+ memAccessFlags = Request::RELEASE;
+ }
+}};
+
+def template AtomicMemOpMacroDecode {{
+ return new %(class_name)s(machInst);
+}};
+
+def template AtomicMemOpLoadExecute {{
+ Fault %(class_name)s::%(class_name)sLoad::execute(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ fault = readMemAtomic(xc, traceData, EA, Mem, memAccessFlags);
+ }
+
+ if (fault == NoFault) {
+ %(code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
+def template AtomicMemOpStoreExecute {{
+ Fault %(class_name)s::%(class_name)sStore::execute(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(code)s;
+ }
+
+ if (fault == NoFault) {
+ fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
+ nullptr);
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
+def template AtomicMemOpLoadEACompExecute {{
+ Fault %(class_name)s::%(class_name)sLoad::eaComp(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ xc->setEA(EA);
+ }
+
+ return fault;
+ }
+}};
+
+def template AtomicMemOpStoreEACompExecute {{
+ Fault %(class_name)s::%(class_name)sStore::eaComp(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ xc->setEA(EA);
+ }
+
+ return fault;
+ }
+}};
+
+def template AtomicMemOpLoadInitiateAcc {{
+ Fault %(class_name)s::%(class_name)sLoad::initiateAcc(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_src_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ fault = initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
+ }
+
+ return fault;
+ }
+}};
+
+def template AtomicMemOpStoreInitiateAcc {{
+ Fault %(class_name)s::%(class_name)sStore::initiateAcc(
+ CPU_EXEC_CONTEXT *xc, Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(code)s;
+ }
+
+ if (fault == NoFault) {
+ fault = writeMemTiming(xc, traceData, Mem, EA, memAccessFlags,
+ nullptr);
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
+def template AtomicMemOpLoadCompleteAcc {{
+ Fault %(class_name)s::%(class_name)sLoad::completeAcc(PacketPtr pkt,
+ CPU_EXEC_CONTEXT *xc, Trace::InstRecord *traceData) const
+ {
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+
+ getMem(pkt, Mem, traceData);
+
+ if (fault == NoFault) {
+ %(code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
+def template AtomicMemOpStoreCompleteAcc {{
+ Fault %(class_name)s::%(class_name)sStore::completeAcc(PacketPtr pkt,
+ CPU_EXEC_CONTEXT *xc, Trace::InstRecord *traceData) const
+ {
+ return NoFault;
+ }
+}};
+
+def format AtomicMemOp(load_code, store_code, ea_code, load_flags=[],
+ store_flags=[], inst_flags=[]) {{
+ macro_iop = InstObjParams(name, Name, 'AtomicMemOp', ea_code, inst_flags)
+ header_output = AtomicMemOpDeclare.subst(macro_iop)
+ decoder_output = AtomicMemOpMacroConstructor.subst(macro_iop)
+ decode_block = AtomicMemOpMacroDecode.subst(macro_iop)
+ exec_output = ''
+
+ load_inst_flags = makeList(inst_flags) + ["IsMemRef", "IsLoad"]
+ load_iop = InstObjParams(name, Name, 'AtomicMemOpMicro',
+ {'ea_code': ea_code, 'code': load_code}, load_inst_flags)
+ decoder_output += AtomicMemOpLoadConstructor.subst(load_iop)
+ exec_output += AtomicMemOpLoadExecute.subst(load_iop) \
+ + AtomicMemOpLoadEACompExecute.subst(load_iop) \
+ + AtomicMemOpLoadInitiateAcc.subst(load_iop) \
+ + AtomicMemOpLoadCompleteAcc.subst(load_iop)
+
+ store_inst_flags = makeList(inst_flags) + ["IsMemRef", "IsStore"]
+ store_iop = InstObjParams(name, Name, 'AtomicMemOpMicro',
+ {'ea_code': ea_code, 'code': store_code}, store_inst_flags)
+ decoder_output += AtomicMemOpStoreConstructor.subst(store_iop)
+ exec_output += AtomicMemOpStoreExecute.subst(store_iop) \
+ + AtomicMemOpStoreEACompExecute.subst(store_iop) \
+ + AtomicMemOpStoreInitiateAcc.subst(store_iop) \
+ + AtomicMemOpStoreCompleteAcc.subst(store_iop)
+}};
##include "type.isa"
##include "mem.isa"
##include "fp.isa"
+##include "amo.isa"
// Include the unknown
##include "unknown.isa"
-
# select templates
+    # StoreCond reuses the InitiateAcc template of the corresponding
+    # Store template.
+ StoreCondInitiateAcc = StoreInitiateAcc
+
fullExecTemplate = eval(exec_template_base + 'Execute')
initiateAccTemplate = eval(exec_template_base + 'InitiateAcc')
completeAccTemplate = eval(exec_template_base + 'CompleteAcc')
}
}};
+def template StoreCondExecute {{
+ Fault %(class_name)s::execute(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+ uint64_t result;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(memacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
+ &result);
+ }
+
+ if (fault == NoFault) {
+ %(postacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
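+
+// 'result' is filled in by the memory system through the res pointer (and
+// surfaces as the request's extra data in timing mode) with the outcome of
+// the conditional store; the postacc_code snippet maps it onto Rd.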
+
+def template StoreCondCompleteAcc {{
+    Fault %(class_name)s::completeAcc(PacketPtr pkt, CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Fault fault = NoFault;
+
+ %(op_dest_decl)s;
+
+ uint64_t result = pkt->req->getExtraData();
+
+ if (fault == NoFault) {
+ %(postacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
def format Load(memacc_code, ea_code = {{EA = Rs1 + ldisp;}}, mem_flags=[],
        inst_flags=[]) {{
    (header_output, decoder_output, decode_block, exec_output) = \
        LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
        'Load', exec_template_base='Load')
}};

def format Store(memacc_code, ea_code = {{EA = Rs1 + sdisp;}}, mem_flags=[],
        inst_flags=[]) {{
    (header_output, decoder_output, decode_block, exec_output) = \
        LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
        'Store', exec_template_base='Store')
}};
+
+def format StoreCond(memacc_code, postacc_code, ea_code={{EA = Rs1;}},
+ mem_flags=[], inst_flags=[], aq=0, rl=0) {{
+ if aq:
+ mem_flags = makeList(mem_flags) + ["ACQUIRE"]
+ if rl:
+ mem_flags = makeList(mem_flags) + ["RELEASE"]
+ (header_output, decoder_output, decode_block, exec_output) = LoadStoreBase(
+ name, Name, ea_code, memacc_code, mem_flags, inst_flags, 'Store',
+ postacc_code, exec_template_base='StoreCond')
+}};
+
+def format LoadReserved(memacc_code, ea_code={{EA = Rs1;}}, mem_flags=[],
+ inst_flags=[], aq=0, rl=0) {{
+ if aq:
+ mem_flags = makeList(mem_flags) + ["ACQUIRE"]
+ if rl:
+ mem_flags = makeList(mem_flags) + ["RELEASE"]
+ (header_output, decoder_output, decode_block, exec_output) = LoadStoreBase(
+ name, Name, ea_code, memacc_code, mem_flags, inst_flags, 'Load',
+ exec_template_base='Load')
+}};
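+
+// Note that LoadReserved reuses the plain Load templates: the reservation
+// itself is requested through the LLSC (plus optional ACQUIRE/RELEASE)
+// mem_flags rather than through custom execute code.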
//Include the base class for riscv instructions, and some support code
##include "base.isa"
+// Include the base class for instructions with micro code
+##include "micro.isa"
+
//Include the definitions for the instruction formats
##include "formats/formats.isa"
--- /dev/null
+// -*- mode:c++ -*-
+
+// Copyright (c) 2015 Riscv Developers
+// Copyright (c) 2016 The University of Virginia
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met: redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer;
+// redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution;
+// neither the name of the copyright holders nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Alec Roelke
+
+def template MacroInitiateAcc {{
+ Fault initiateAcc(%(CPU_exec_context)s *xc,
+ Trace::InstRecord *traceData) const
+ {
+ panic("Tried to execute a macroop directly!\n");
+ return NoFault;
+ }
+}};
+
+def template MacroCompleteAcc {{
+ Fault completeAcc(PacketPtr pkt, %(CPU_exec_context)s *xc,
+ Trace::InstRecord *traceData) const
+ {
+ panic("Tried to execute a macroop directly!\n");
+ return NoFault;
+ }
+}};
+
+def template MacroExecute {{
+ Fault execute(%(CPU_exec_context)s *xc, Trace::InstRecord *traceData) const
+ {
+ panic("Tried to execute a macroop directly!\n");
+ return NoFault;
+ }
+}};
+
+output header {{
+ /**
+ * Base class for all RISC-V Macroops
+ */
+ class RiscvMacroInst : public RiscvStaticInst
+ {
+ protected:
+ std::vector<StaticInstPtr> microops;
+
+ // Constructor
+ RiscvMacroInst(const char *mnem, ExtMachInst _machInst,
+ OpClass __opClass)
+ : RiscvStaticInst(mnem, _machInst, __opClass)
+ {
+ flags[IsMacroop] = true;
+ }
+
+ ~RiscvMacroInst()
+ {
+ microops.clear();
+ }
+
+ StaticInstPtr fetchMicroop(MicroPC upc) const
+ {
+ return microops[upc];
+ }
+
+ %(MacroInitiateAcc)s
+
+ %(MacroCompleteAcc)s
+
+ %(MacroExecute)s
+ };
+
+ /**
+ * Base class for all RISC-V Microops
+ */
+ class RiscvMicroInst : public RiscvStaticInst
+ {
+ protected:
+ // Constructor
+ RiscvMicroInst(const char *mnem, ExtMachInst _machInst,
+ OpClass __opClass)
+ : RiscvStaticInst(mnem, _machInst, __opClass)
+ {
+ flags[IsMicroop] = true;
+ }
+
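+        // uAdvance() steps to the next microop within this macroop;
+        // uEnd() (on the last microop) moves on to the next macro
+        // instruction, which is why types.hh switches PCState to UPCState.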
+ void advancePC(RiscvISA::PCState &pcState) const
+ {
+ if (flags[IsLastMicroop]) {
+ pcState.uEnd();
+ } else {
+ pcState.uAdvance();
+ }
+ }
+ };
+}};
'Rd': ('IntReg', 'ud', 'RD', 'IsInteger', 1),
'Rs1': ('IntReg', 'ud', 'RS1', 'IsInteger', 2),
'Rs2': ('IntReg', 'ud', 'RS2', 'IsInteger', 3),
+ 'Rt': ('IntReg', 'ud', 'AMOTempReg', 'IsInteger', 4),
'Fd': ('FloatReg', 'df', 'FD', 'IsFloating', 1),
'Fd_bits': ('FloatReg', 'ud', 'FD', 'IsFloating', 1),
typedef uint64_t MiscReg;
const int NumIntArchRegs = 32;
-const int NumIntRegs = NumIntArchRegs;
+const int NumMicroIntRegs = 1;
+const int NumIntRegs = NumIntArchRegs + NumMicroIntRegs;
const int NumFloatRegs = 32;
const int NumCCRegs = 0;
const int NumMiscRegs = 4096;
const int Misc_Reg_Base = CC_Reg_Base + NumCCRegs;
const int Max_Reg_Index = Misc_Reg_Base + NumMiscRegs;
-
// Semantically meaningful register indices
const int ZeroReg = 0;
const int ReturnAddrReg = 1;
const int ReturnValueRegs[] = {10, 11};
const int ReturnValueReg = ReturnValueRegs[0];
const int ArgumentRegs[] = {10, 11, 12, 13, 14, 15, 16, 17};
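+// Index of the scratch register the AMO microops use for their Rt operand;
+// it sits just past the 32 architectural integer registers.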
+const int AMOTempReg = 32;
const char* const RegisterNames[] = {"zero", "ra", "sp", "gp",
"tp", "t0", "t1", "t2",
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
+ * Copyright (c) 2016 The University of Virginia
+ * All rights reserved.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
*
* Authors: Andreas Hansson
* Sven Karlsson
+ * Alec Roelke
*/
#ifndef __ARCH_RISCV_TYPES_HH__
typedef uint32_t MachInst;
typedef uint64_t ExtMachInst;
-typedef GenericISA::SimplePCState<MachInst> PCState;
+typedef GenericISA::UPCState<MachInst> PCState;
}