// Ra and Rb are source registers, Rt is the destintation.
format LoadIndexOp {
87: lbzx({{ Rt = Mem_ub; }});
- 52: lbarx({{ Rt = Mem_ub; Rsv = 1; RsvLen = 1; RsvAddr = EA; }});
+ 52: lbarx({{ Rt = Mem_ub; }}, mem_flags = LLSC);
279: lhzx({{ Rt = Mem_uh; }});
343: lhax({{ Rt = Mem_sh; }});
- 116: lharx({{ Rt = Mem_uh; Rsv = 1; RsvLen = 2; RsvAddr = EA; }});
+ 116: lharx({{ Rt = Mem_uh; }}, mem_flags = LLSC);
790: lhbrx({{ Rt = swap_byte(Mem_uh); }});
23: lwzx({{ Rt = Mem_uw; }});
341: lwax({{ Rt = Mem_sw; }});
- 20: lwarx({{ Rt = Mem_uw; Rsv = 1; RsvLen = 4; RsvAddr = EA; }});
+ 20: lwarx({{ Rt = Mem_uw; }}, mem_flags = LLSC);
534: lwbrx({{ Rt = swap_byte(Mem_uw); }});
21: ldx({{ Rt = Mem; }});
- 84: ldarx({{ Rt = Mem_ud; Rsv = 1; RsvLen = 8; RsvAddr = EA; }});
+ 84: ldarx({{ Rt = Mem_ud; }}, mem_flags = LLSC);
532: ldbrx({{ Rt = swap_byte(Mem); }});
535: lfsx({{ Ft_sf = Mem_sf; }});
599: lfdx({{ Ft = Mem_df; }});
format StoreIndexOp {
215: stbx({{ Mem_ub = Rs_ub; }});
- 694: stbcx({{
- bool store_performed = false;
- Mem_ub = Rs_ub;
- if (Rsv) {
- if (RsvLen == 1) {
- if (RsvAddr == EA) {
- store_performed = true;
- }
- }
- }
- Xer xer = XER;
- Cr cr = CR;
- cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
- CR = cr;
- Rsv = 0;
- }});
+ 694: stbcx({{ Mem_ub = Rs_ub; }}, mem_flags = LLSC);
407: sthx({{ Mem_uh = Rs_uh; }});
- 726: sthcx({{
- bool store_performed = false;
- Mem_uh = Rs_uh;
- if (Rsv) {
- if (RsvLen == 2) {
- if (RsvAddr == EA) {
- store_performed = true;
- }
- }
- }
- Xer xer = XER;
- Cr cr = CR;
- cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
- CR = cr;
- Rsv = 0;
- }});
+ 726: sthcx({{ Mem_uh = Rs_uh; }}, mem_flags = LLSC);
918: sthbrx({{ Mem_uh = swap_byte(Rs_uh); }});
151: stwx({{ Mem_uw = Rs_uw; }});
- 150: stwcx({{
- bool store_performed = false;
- Mem_uw = Rs_uw;
- if (Rsv) {
- if (RsvLen == 4) {
- if (RsvAddr == EA) {
- store_performed = true;
- }
- }
- }
- Xer xer = XER;
- Cr cr = CR;
- cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
- CR = cr;
- Rsv = 0;
- }});
+ 150: stwcx({{ Mem_uw = Rs_uw; }}, mem_flags = LLSC);
662: stwbrx({{ Mem_uw = swap_byte(Rs_uw); }});
149: stdx({{ Mem = Rs }});
- 214: stdcx({{
- bool store_performed = false;
- Mem = Rs;
- if (Rsv) {
- if (RsvLen == 8) {
- if (RsvAddr == EA) {
- store_performed = true;
- }
- }
- }
- Xer xer = XER;
- Cr cr = CR;
- cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
- CR = cr;
- Rsv = 0;
- }});
+ 214: stdcx({{ Mem_ud = Rs; }}, mem_flags = LLSC);
660: stdbrx({{ Mem = swap_byte(Rs); }});
}
* ISA-specific helper functions for locked memory accesses.
*/
-#include "arch/generic/locked_mem.hh"
+#include <cstdlib>
+
+#include "arch/power/miscregs.hh"
+#include "arch/power/registers.hh"
+#include "cpu/thread_context.hh"
+#include "debug/LLSC.hh"
+#include "mem/packet.hh"
+#include "mem/request.hh"
+
+namespace PowerISA
+{
+
+template <class XC>
+inline void
+handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
+{
+ // If we see a snoop come into the CPU and we currently have an LLSC
+ // operation pending we need to clear the lock flag if it is to the same
+ // addr.
+ ThreadContext *tc = xc->getTC();
+ if (!tc->readIntReg(INTREG_RSV))
+ return;
+
+ Addr locked_addr = tc->readIntReg(INTREG_RSV_ADDR);
+ Addr snoop_addr = pkt->getAddr();
+
+ if (locked_addr == snoop_addr)
+ tc->setIntReg(INTREG_RSV, 0);
+}
+
+template <class XC>
+inline void
+handleLockedRead(XC *xc, Request *req)
+{
+ ThreadContext *tc = xc->getTC();
+ tc->setIntReg(INTREG_RSV, 1);
+ tc->setIntReg(INTREG_RSV_LEN, req->getSize());
+ tc->setIntReg(INTREG_RSV_ADDR, req->getPaddr());
+ DPRINTF(LLSC,"%s: Placing addr %#x in monitor\n", xc->getCpuPtr()->name(),
+ req->getPaddr());
+}
+
+template <class XC>
+inline void
+handleLockedSnoopHit(XC *xc)
+{
+}
+
+
+template <class XC>
+inline bool
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+{
+ DPRINTF(LLSC,"%s: handling locked write for address %#x in monitor\n",
+ xc->getCpuPtr()->name(), req->getPaddr());
+
+ ThreadContext *tc = xc->getTC();
+ int lock_flag = tc->readIntReg(INTREG_RSV);
+ Addr lock_addr = tc->readIntReg(INTREG_RSV_ADDR);
+ unsigned size = tc->readIntReg(INTREG_RSV_LEN);
+ bool store_performed = false;
+ bool undefined_case;
+
+ if (lock_flag) {
+ if (req->getSize() == size && req->getPaddr() == lock_addr)
+ {
+ undefined_case = false;
+ store_performed = true;
+ }
+ else {
+ //Taking smallest real page size supported as 64k
+ int z = 64*1024;
+ if (req->getPaddr()/z == lock_addr/z)
+ undefined_case = true;
+ else {
+ undefined_case = false;
+ store_performed = false;
+ }
+ }
+ }
+ else {
+ undefined_case = false;
+ store_performed = false;
+ }
+ Xer xer = tc->readIntReg(INTREG_XER);
+ Cr cr = tc->readIntReg(INTREG_CR);
+ tc->setIntReg(INTREG_RSV, 0);
+
+ if (undefined_case) {
+ bool randombool = rand() % 1;
+ if (randombool){
+ xc->setStCondFailures(0);
+ }
+ bool secondrandombool = rand() % 1;
+ cr.cr0 = ((secondrandombool ? 0x2 : 0x0) | xer.so);
+ tc->setIntReg(INTREG_CR, cr);
+ return randombool;
+ }
+
+ if (store_performed) {
+ xc->setStCondFailures(0);
+ }
+ else {
+ // Lock flag not set or addr mismatch in CPU;
+ // the rest of this code is not architectural;
+ // it's just a debugging aid to help detect
+ // livelock by warning on long sequences of failed
+ // store conditionals
+ int stCondFailures = xc->readStCondFailures();
+ stCondFailures++;
+ xc->setStCondFailures(stCondFailures);
+ if (stCondFailures % 100000 == 0) {
+ warn("%i: context %d: %d consecutive "
+ "store conditional failures\n",
+ curTick(), xc->contextId(), stCondFailures);
+ }
+
+ if (!lock_flag){
+ DPRINTF(LLSC, "[cid:%i]: Lock Flag Set, "
+ "Store Conditional Failed.\n",
+ req->contextId());
+ }
+ else if (req->getPaddr() != lock_addr) {
+ DPRINTF(LLSC, "[cid:%i]: Load-Link Address Mismatch, "
+ "Store Conditional Failed.\n",
+ req->contextId());
+ }
+ }
+ cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
+ tc->setIntReg(INTREG_CR, cr);
+ // store conditional failed already, so don't issue it to mem
+ return store_performed;
+}
+
+template <class XC>
+inline void
+globalClearExclusive(XC *xc)
+{
+}
+
+} // namespace PowerISA
#endif // __ARCH_POWER_LOCKED_MEM_HH__