}}, {{
Rd = result;
}}, inst_flags=IsStoreConditional, mem_flags=LLSC);
- format AtomicMemOp {
- 0x0: amoadd_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_sw + Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x1: amoswap_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x4: amoxor_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw^Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x8: amoor_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw | Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0xc: amoand_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw&Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x10: amomin_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = min<int32_t>(Rs2_sw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x14: amomax_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = max<int32_t>(Rs2_sw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x18: amominu_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = min<uint32_t>(Rs2_uw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x1c: amomaxu_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = max<uint32_t>(Rs2_uw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- }
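+ // Each word AMO now provides two code blocks: memacc_code, which copies the
+ // old memory value into Rd, and amoop_code, which builds an AtomicGenericOp
+ // functor that the memory system applies atomically. The ATOMIC_RETURN_OP
+ // flag asks for the pre-update value to be returned to the instruction.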
+ 0x0: AtomicMemOp::amoadd_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<int32_t> *amo_op =
+ new AtomicGenericOp<int32_t>(Rs2_sw,
+ [](int32_t* b, int32_t a){ *b += a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x1: AtomicMemOp::amoswap_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<uint32_t> *amo_op =
+ new AtomicGenericOp<uint32_t>(Rs2_uw,
+ [](uint32_t* b, uint32_t a){ *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x4: AtomicMemOp::amoxor_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<uint32_t> *amo_op =
+ new AtomicGenericOp<uint32_t>(Rs2_uw,
+ [](uint32_t* b, uint32_t a){ *b ^= a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x8: AtomicMemOp::amoor_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<uint32_t> *amo_op =
+ new AtomicGenericOp<uint32_t>(Rs2_uw,
+ [](uint32_t* b, uint32_t a){ *b |= a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0xc: AtomicMemOp::amoand_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<uint32_t> *amo_op =
+ new AtomicGenericOp<uint32_t>(Rs2_uw,
+ [](uint32_t* b, uint32_t a){ *b &= a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x10: AtomicMemOp::amomin_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<int32_t> *amo_op =
+ new AtomicGenericOp<int32_t>(Rs2_sw,
+ [](int32_t* b, int32_t a){ if (a < *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x14: AtomicMemOp::amomax_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<int32_t> *amo_op =
+ new AtomicGenericOp<int32_t>(Rs2_sw,
+ [](int32_t* b, int32_t a){ if (a > *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x18: AtomicMemOp::amominu_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<uint32_t> *amo_op =
+ new AtomicGenericOp<uint32_t>(Rs2_uw,
+ [](uint32_t* b, uint32_t a){ if (a < *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x1c: AtomicMemOp::amomaxu_w({{
+ Rd_sd = Mem_sw;
+ }}, {{
+ TypedAtomicOpFunctor<uint32_t> *amo_op =
+ new AtomicGenericOp<uint32_t>(Rs2_uw,
+ [](uint32_t* b, uint32_t a){ if (a > *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
}
0x3: decode AMOFUNCT {
0x2: LoadReserved::lr_d({{
}}, {{
Rd = result;
}}, mem_flags=LLSC, inst_flags=IsStoreConditional);
- format AtomicMemOp {
- 0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{
- Mem_sd = Rs2_sd + Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x1: amoswap_d({{Rt = Mem;}}, {{
- Mem = Rs2;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x4: amoxor_d({{Rt = Mem;}}, {{
- Mem = Rs2^Rt;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x8: amoor_d({{Rt = Mem;}}, {{
- Mem = Rs2 | Rt;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0xc: amoand_d({{Rt = Mem;}}, {{
- Mem = Rs2&Rt;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x10: amomin_d({{Rt_sd = Mem_sd;}}, {{
- Mem_sd = min(Rs2_sd, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x14: amomax_d({{Rt_sd = Mem_sd;}}, {{
- Mem_sd = max(Rs2_sd, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x18: amominu_d({{Rt = Mem;}}, {{
- Mem = min(Rs2, Rt);
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x1c: amomaxu_d({{Rt = Mem;}}, {{
- Mem = max(Rs2, Rt);
- Rd = Rt;
- }}, {{EA = Rs1;}});
- }
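+ // The doubleword AMOs below mirror the word forms, using 64-bit functor
+ // types and the _sd/_ud operand views.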
+ 0x0: AtomicMemOp::amoadd_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<int64_t> *amo_op =
+ new AtomicGenericOp<int64_t>(Rs2_sd,
+ [](int64_t* b, int64_t a){ *b += a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x1: AtomicMemOp::amoswap_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<uint64_t> *amo_op =
+ new AtomicGenericOp<uint64_t>(Rs2_ud,
+ [](uint64_t* b, uint64_t a){ *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x4: AtomicMemOp::amoxor_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<uint64_t> *amo_op =
+ new AtomicGenericOp<uint64_t>(Rs2_ud,
+ [](uint64_t* b, uint64_t a){ *b ^= a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x8: AtomicMemOp::amoor_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<uint64_t> *amo_op =
+ new AtomicGenericOp<uint64_t>(Rs2_ud,
+ [](uint64_t* b, uint64_t a){ *b |= a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0xc: AtomicMemOp::amoand_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<uint64_t> *amo_op =
+ new AtomicGenericOp<uint64_t>(Rs2_ud,
+ [](uint64_t* b, uint64_t a){ *b &= a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x10: AtomicMemOp::amomin_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<int64_t> *amo_op =
+ new AtomicGenericOp<int64_t>(Rs2_sd,
+ [](int64_t* b, int64_t a){ if (a < *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x14: AtomicMemOp::amomax_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<int64_t> *amo_op =
+ new AtomicGenericOp<int64_t>(Rs2_sd,
+ [](int64_t* b, int64_t a){ if (a > *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x18: AtomicMemOp::amominu_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<uint64_t> *amo_op =
+ new AtomicGenericOp<uint64_t>(Rs2_ud,
+ [](uint64_t* b, uint64_t a){ if (a < *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
+ 0x1c: AtomicMemOp::amomaxu_d({{
+ Rd_sd = Mem_sd;
+ }}, {{
+ TypedAtomicOpFunctor<uint64_t> *amo_op =
+ new AtomicGenericOp<uint64_t>(Rs2_ud,
+ [](uint64_t* b, uint64_t a){ if (a > *b) *b = a; });
+ }}, mem_flags=ATOMIC_RETURN_OP);
}
}
0x0c: decode FUNCT3 {
//
// Authors: Alec Roelke
-////////////////////////////////////////////////////////////////////
-//
-// Atomic memory operation instructions
-//
+// Declaration templates
def template AtomicMemOpDeclare {{
/**
* Static instruction class for an AtomicMemOp operation
protected:
- class %(class_name)sLoad : public %(base_class)sMicro
+ /*
+ * The main RMW part of an AMO
+ */
+ class %(class_name)sRMW : public %(base_class)sMicro
{
public:
// Constructor
- %(class_name)sLoad(ExtMachInst machInst, %(class_name)s *_p);
+ %(class_name)sRMW(ExtMachInst machInst, %(class_name)s *_p);
Fault execute(ExecContext *, Trace::InstRecord *) const override;
Fault initiateAcc(ExecContext *,
Fault completeAcc(PacketPtr, ExecContext *,
Trace::InstRecord *) const override;
};
+ };
+}};
+
+def template LRSCDeclare {{
+ /**
+ * Static instruction class for a load-reserved/store-conditional operation
+ */
+ class %(class_name)s : public %(base_class)s
+ {
+ public:
+ // Constructor
+ %(class_name)s(ExtMachInst machInst);
+
+ protected:
- class %(class_name)sStore : public %(base_class)sMicro
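+ /*
+ * The memory microop that performs the actual LR/SC access
+ */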
+ class %(class_name)sMicro : public %(base_class)sMicro
{
public:
// Constructor
- %(class_name)sStore(ExtMachInst machInst, %(class_name)s *_p);
+ %(class_name)sMicro(ExtMachInst machInst, %(class_name)s *_p);
Fault execute(ExecContext *, Trace::InstRecord *) const override;
Fault initiateAcc(ExecContext *,
};
}};
-def template LRSCConstructor {{
+// Constructor templates
+def template LRSCMacroConstructor {{
%(class_name)s::%(class_name)s(ExtMachInst machInst):
%(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
{
%(constructor)s;
- if (AQ)
- memAccessFlags = memAccessFlags | Request::ACQUIRE;
- if (RL)
- memAccessFlags = memAccessFlags | Request::RELEASE;
+
+ StaticInstPtr rel_fence;
+ StaticInstPtr lrsc;
+ StaticInstPtr acq_fence;
+
+ // set up release fence
+ if (RL) {
+ rel_fence = new MemFenceMicro(machInst, No_OpClass);
+ rel_fence->setFlag(IsFirstMicroop);
+ rel_fence->setFlag(IsMemBarrier);
+ rel_fence->setFlag(IsDelayedCommit);
+ }
+
+ // set up the LR/SC memory op
+ lrsc = new %(class_name)sMicro(machInst, this);
+
+ if (!RL) {
+ lrsc->setFlag(IsFirstMicroop);
+ }
+
+ if (!AQ) {
+ lrsc->setFlag(IsLastMicroop);
+ } else {
+ lrsc->setFlag(IsDelayedCommit);
+ }
+
+ // set up acquire fence
+ if (AQ) {
+ acq_fence = new MemFenceMicro(machInst, No_OpClass);
+ acq_fence->setFlag(IsLastMicroop);
+ acq_fence->setFlag(IsMemBarrier);
+ }
+
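+ // compose the macroop: optional release fence, the LR/SC access, and an
+ // optional acquire fence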
+ if (RL && AQ) {
+ microops = {rel_fence, lrsc, acq_fence};
+ } else if (RL) {
+ microops = {rel_fence, lrsc};
+ } else if (AQ) {
+ microops = {lrsc, acq_fence};
+ } else {
+ microops = {lrsc};
+ }
+ }
+}};
+
+def template LRSCMicroConstructor {{
+ %(class_name)s::%(class_name)sMicro::%(class_name)sMicro(
+ ExtMachInst machInst, %(class_name)s *_p)
+ : %(base_class)sMicro("%(mnemonic)s", machInst, %(op_class)s)
+ {
+ %(constructor)s;
}
}};
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
{
%(constructor)s;
- microops = {new %(class_name)sLoad(machInst, this),
- new %(class_name)sStore(machInst, this)};
+
+ StaticInstPtr rel_fence;
+ StaticInstPtr rmw_op;
+ StaticInstPtr acq_fence;
+
+ // set up release fence
+ if (RL) {
+ rel_fence = new MemFenceMicro(machInst, No_OpClass);
+ rel_fence->setFlag(IsFirstMicroop);
+ rel_fence->setFlag(IsMemBarrier);
+ rel_fence->setFlag(IsDelayedCommit);
+ }
+
+ // set up atomic rmw op
+ rmw_op = new %(class_name)sRMW(machInst, this);
+
+ if (!RL) {
+ rmw_op->setFlag(IsFirstMicroop);
+ }
+
+ if (!AQ) {
+ rmw_op->setFlag(IsLastMicroop);
+ } else {
+ rmw_op->setFlag(IsDelayedCommit);
+ }
+
+ // set up acquire fence
+ if (AQ) {
+ acq_fence = new MemFenceMicro(machInst, No_OpClass);
+ acq_fence->setFlag(IsLastMicroop);
+ acq_fence->setFlag(IsMemBarrier);
+ }
+
+ if (RL && AQ) {
+ microops = {rel_fence, rmw_op, acq_fence};
+ } else if (RL) {
+ microops = {rel_fence, rmw_op};
+ } else if (AQ) {
+ microops = {rmw_op, acq_fence};
+ } else {
+ microops = {rmw_op};
+ }
}
}};
-def template AtomicMemOpLoadConstructor {{
- %(class_name)s::%(class_name)sLoad::%(class_name)sLoad(
+def template AtomicMemOpRMWConstructor {{
+ %(class_name)s::%(class_name)sRMW::%(class_name)sRMW(
ExtMachInst machInst, %(class_name)s *_p)
: %(base_class)s("%(mnemonic)s[l]", machInst, %(op_class)s)
{
%(constructor)s;
- flags[IsFirstMicroop] = true;
- flags[IsDelayedCommit] = true;
- if (AQ)
- memAccessFlags = Request::ACQUIRE;
+
+ // overwrite default flags
+ flags[IsMemRef] = true;
+ flags[IsLoad] = false;
+ flags[IsStore] = false;
+ flags[IsAtomic] = true;
}
}};
-def template AtomicMemOpStoreConstructor {{
- %(class_name)s::%(class_name)sStore::%(class_name)sStore(
- ExtMachInst machInst, %(class_name)s *_p)
- : %(base_class)s("%(mnemonic)s[s]", machInst, %(op_class)s)
+// execute() templates
+
+def template LoadReservedExecute {{
+ Fault
+ %(class_name)s::%(class_name)sMicro::execute(
+ ExecContext *xc, Trace::InstRecord *traceData) const
{
- %(constructor)s;
- flags[IsLastMicroop] = true;
- flags[IsNonSpeculative] = true;
- if (RL)
- memAccessFlags = Request::RELEASE;
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ fault = readMemAtomic(xc, traceData, EA, Mem, memAccessFlags);
+ %(memacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
}
}};
def template StoreCondExecute {{
- Fault %(class_name)s::execute(ExecContext *xc,
+ Fault %(class_name)s::%(class_name)sMicro::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Addr EA;
}
}};
-def template AtomicMemOpLoadExecute {{
- Fault %(class_name)s::%(class_name)sLoad::execute(ExecContext *xc,
+def template AtomicMemOpRMWExecute {{
+ Fault %(class_name)s::%(class_name)sRMW::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Addr EA;
%(op_decl)s;
%(op_rd)s;
%(ea_code)s;
+ %(amoop_code)s;
+
+ assert(amo_op);
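+ // amoMemAtomic hands the functor to the memory system, which applies it
+ // atomically; Mem receives the value read before the update, so the
+ // memacc_code below can copy it into Rd.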
if (fault == NoFault) {
- fault = readMemAtomic(xc, traceData, EA, Mem, memAccessFlags);
+ fault = amoMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
+ amo_op);
+ %(memacc_code)s;
}
if (fault == NoFault) {
- %(code)s;
+ %(postacc_code)s;
}
if (fault == NoFault) {
}
}};
-def template AtomicMemOpStoreExecute {{
- Fault %(class_name)s::%(class_name)sStore::execute(ExecContext *xc,
+// initiateAcc() templates
+
+def template LoadReservedInitiateAcc {{
+ Fault
+ %(class_name)s::%(class_name)sMicro::initiateAcc(ExecContext *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_src_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ fault = initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
+ }
+
+ return fault;
+ }
+}};
+
+def template StoreCondInitiateAcc {{
+ Fault
+ %(class_name)s::%(class_name)sMicro::initiateAcc(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Addr EA;
%(ea_code)s;
if (fault == NoFault) {
- %(code)s;
+ %(memacc_code)s;
}
if (fault == NoFault) {
- fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
- nullptr);
+ fault = writeMemTiming(xc, traceData, Mem, EA,
+ memAccessFlags, nullptr);
}
if (fault == NoFault) {
}
}};
-def template AtomicMemOpLoadInitiateAcc {{
- Fault %(class_name)s::%(class_name)sLoad::initiateAcc(ExecContext *xc,
+def template AtomicMemOpRMWInitiateAcc {{
+ Fault
+ %(class_name)s::%(class_name)sRMW::initiateAcc(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Addr EA;
%(op_src_decl)s;
%(op_rd)s;
%(ea_code)s;
+ %(amoop_code)s;
+
+ assert(amo_op);
if (fault == NoFault) {
- fault = initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
+ fault = initiateMemAMO(xc, traceData, EA, Mem, memAccessFlags,
+ amo_op);
}
return fault;
}
}};
-def template AtomicMemOpStoreInitiateAcc {{
- Fault %(class_name)s::%(class_name)sStore::initiateAcc(
+// completeAcc() templates
+
+def template LoadReservedCompleteAcc {{
+ Fault
+ %(class_name)s::%(class_name)sMicro::completeAcc(PacketPtr pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
- Addr EA;
Fault fault = NoFault;
%(op_decl)s;
%(op_rd)s;
- %(ea_code)s;
- if (fault == NoFault) {
- %(code)s;
- }
+ getMem(pkt, Mem, traceData);
if (fault == NoFault) {
- fault = writeMemTiming(xc, traceData, Mem, EA, memAccessFlags,
- nullptr);
+ %(memacc_code)s;
}
if (fault == NoFault) {
}};
def template StoreCondCompleteAcc {{
- Fault %(class_name)s::completeAcc(Packet *pkt, ExecContext *xc,
- Trace::InstRecord *traceData) const
+ Fault %(class_name)s::%(class_name)sMicro::completeAcc(Packet *pkt,
+ ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
}
}};
-def template AtomicMemOpLoadCompleteAcc {{
- Fault %(class_name)s::%(class_name)sLoad::completeAcc(PacketPtr pkt,
+def template AtomicMemOpRMWCompleteAcc {{
+ Fault %(class_name)s::%(class_name)sRMW::completeAcc(Packet *pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
getMem(pkt, Mem, traceData);
if (fault == NoFault) {
- %(code)s;
+ %(memacc_code)s;
}
if (fault == NoFault) {
}
}};
-def template AtomicMemOpStoreCompleteAcc {{
- Fault %(class_name)s::%(class_name)sStore::completeAcc(PacketPtr pkt,
- ExecContext *xc, Trace::InstRecord *traceData) const
- {
- return NoFault;
- }
-}};
+// LR/SC/AMO decode formats
def format LoadReserved(memacc_code, postacc_code={{ }}, ea_code={{EA = Rs1;}},
mem_flags=[], inst_flags=[]) {{
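+ # Build the macroop wrapper first; the memory access itself lives in the
+ # microop constructed below from the same format arguments.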
+ macro_ea_code = ''
+ macro_inst_flags = []
+ macro_iop = InstObjParams(name, Name, 'LoadReserved', macro_ea_code,
+ macro_inst_flags)
+ header_output = LRSCDeclare.subst(macro_iop)
+ decoder_output = LRSCMacroConstructor.subst(macro_iop)
+ decode_block = BasicDecode.subst(macro_iop)
+
+ exec_output = ''
+
mem_flags = makeList(mem_flags)
inst_flags = makeList(inst_flags)
iop = InstObjParams(name, Name, 'LoadReserved',
iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
'|'.join(['Request::%s' % flag for flag in mem_flags]) + ';'
- header_output = LoadStoreDeclare.subst(iop)
- decoder_output = LRSCConstructor.subst(iop)
- decode_block = BasicDecode.subst(iop)
- exec_output = LoadExecute.subst(iop) \
- + LoadInitiateAcc.subst(iop) \
- + LoadCompleteAcc.subst(iop)
+ decoder_output += LRSCMicroConstructor.subst(iop)
+ decode_block += BasicDecode.subst(iop)
+ exec_output += LoadReservedExecute.subst(iop) \
+ + LoadReservedInitiateAcc.subst(iop) \
+ + LoadReservedCompleteAcc.subst(iop)
}};
def format StoreCond(memacc_code, postacc_code={{ }}, ea_code={{EA = Rs1;}},
mem_flags=[], inst_flags=[]) {{
+ macro_ea_code = ''
+ macro_inst_flags = []
+ macro_iop = InstObjParams(name, Name, 'StoreCond', macro_ea_code,
+ macro_inst_flags)
+ header_output = LRSCDeclare.subst(macro_iop)
+ decoder_output = LRSCMacroConstructor.subst(macro_iop)
+ decode_block = BasicDecode.subst(macro_iop)
+
+ exec_output = ''
+
mem_flags = makeList(mem_flags)
inst_flags = makeList(inst_flags)
iop = InstObjParams(name, Name, 'StoreCond',
iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
'|'.join(['Request::%s' % flag for flag in mem_flags]) + ';'
- header_output = LoadStoreDeclare.subst(iop)
- decoder_output = LRSCConstructor.subst(iop)
- decode_block = BasicDecode.subst(iop)
- exec_output = StoreCondExecute.subst(iop) \
- + StoreInitiateAcc.subst(iop) \
+ decoder_output += LRSCMicroConstructor.subst(iop)
+ decode_block += BasicDecode.subst(iop)
+ exec_output += StoreCondExecute.subst(iop) \
+ + StoreCondInitiateAcc.subst(iop) \
+ StoreCondCompleteAcc.subst(iop)
}};
-def format AtomicMemOp(load_code, store_code, ea_code, load_flags=[],
- store_flags=[], inst_flags=[]) {{
- macro_iop = InstObjParams(name, Name, 'AtomicMemOp', ea_code, inst_flags)
+def format AtomicMemOp(memacc_code, amoop_code, postacc_code={{ }},
+ ea_code={{EA = Rs1;}}, mem_flags=[], inst_flags=[]) {{
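+ # memacc_code runs after the atomic access completes (with the old value in
+ # Mem); amoop_code constructs the amo_op functor that is handed to the
+ # memory system along with the request.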
+ macro_ea_code = ''
+ macro_inst_flags = []
+ macro_iop = InstObjParams(name, Name, 'AtomicMemOp', macro_ea_code,
+ macro_inst_flags)
header_output = AtomicMemOpDeclare.subst(macro_iop)
decoder_output = AtomicMemOpMacroConstructor.subst(macro_iop)
decode_block = BasicDecode.subst(macro_iop)
+
exec_output = ''
- load_inst_flags = makeList(inst_flags) + ["IsMemRef", "IsLoad"]
- load_iop = InstObjParams(name, Name, 'AtomicMemOpMicro',
- {'ea_code': ea_code, 'code': load_code, 'op_name': 'Load'},
- load_inst_flags)
- decoder_output += AtomicMemOpLoadConstructor.subst(load_iop)
- exec_output += AtomicMemOpLoadExecute.subst(load_iop) \
- + AtomicMemOpLoadInitiateAcc.subst(load_iop) \
- + AtomicMemOpLoadCompleteAcc.subst(load_iop)
-
- store_inst_flags = makeList(inst_flags) + ["IsMemRef", "IsStore"]
- store_iop = InstObjParams(name, Name, 'AtomicMemOpMicro',
- {'ea_code': ea_code, 'code': store_code, 'op_name': 'Store'},
- store_inst_flags)
- decoder_output += AtomicMemOpStoreConstructor.subst(store_iop)
- exec_output += AtomicMemOpStoreExecute.subst(store_iop) \
- + AtomicMemOpStoreInitiateAcc.subst(store_iop) \
- + AtomicMemOpStoreCompleteAcc.subst(store_iop)
+ rmw_mem_flags = makeList(mem_flags)
+ rmw_inst_flags = makeList(inst_flags)
+ rmw_iop = InstObjParams(name, Name, 'AtomicMemOpMicro',
+ {'ea_code': ea_code,
+ 'memacc_code': memacc_code,
+ 'postacc_code': postacc_code,
+ 'amoop_code': amoop_code},
+ rmw_inst_flags)
+
+ rmw_iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
+ '|'.join(['Request::%s' % flag for flag in rmw_mem_flags]) + ';'
+
+ decoder_output += AtomicMemOpRMWConstructor.subst(rmw_iop)
+ decode_block += BasicDecode.subst(rmw_iop)
+ exec_output += AtomicMemOpRMWExecute.subst(rmw_iop) \
+ + AtomicMemOpRMWInitiateAcc.subst(rmw_iop) \
+ + AtomicMemOpRMWCompleteAcc.subst(rmw_iop)
}};