From e5d027df6c753fd2c891235d5b25bb89b155eb65 Mon Sep 17 00:00:00 2001 From: Kajoljain379 Date: Wed, 10 Apr 2019 05:46:01 +0000 Subject: [PATCH] arch-power: Added support for Atomic Instructions Add support for Load and Reserve and Store Conditional Instructions: * Load Byte And Reserve Indexed. * Store Byte Conditional Indexed. * Load Halfword And Reserve Indexed. * Store Halfword Conditional Indexed. * Load Word And Reserve Indexed. * Store Word Conditional Indexed. * Load Doubleword And Reserve Indexed. * Store Doubleword Conditional Indexed. Change-Id: I1dac94928e7a1bb6f458a4ecea0fca3247b26d37 Signed-off-by: Kajoljain379 --- src/arch/power/isa/decoder.isa | 76 ++------------- src/arch/power/isa/formats/util.isa | 1 + src/arch/power/locked_mem.hh | 142 +++++++++++++++++++++++++++- 3 files changed, 150 insertions(+), 69 deletions(-) diff --git a/src/arch/power/isa/decoder.isa b/src/arch/power/isa/decoder.isa index 667a73223..285487d5a 100644 --- a/src/arch/power/isa/decoder.isa +++ b/src/arch/power/isa/decoder.isa @@ -356,17 +356,17 @@ decode PO default Unknown::unknown() { // Ra and Rb are source registers, Rt is the destintation. 
format LoadIndexOp { 87: lbzx({{ Rt = Mem_ub; }}); - 52: lbarx({{ Rt = Mem_ub; Rsv = 1; RsvLen = 1; RsvAddr = EA; }}); + 52: lbarx({{ Rt = Mem_ub; }}, mem_flags = LLSC); 279: lhzx({{ Rt = Mem_uh; }}); 343: lhax({{ Rt = Mem_sh; }}); - 116: lharx({{ Rt = Mem_uh; Rsv = 1; RsvLen = 2; RsvAddr = EA; }}); + 116: lharx({{ Rt = Mem_uh; }}, mem_flags = LLSC); 790: lhbrx({{ Rt = swap_byte(Mem_uh); }}); 23: lwzx({{ Rt = Mem_uw; }}); 341: lwax({{ Rt = Mem_sw; }}); - 20: lwarx({{ Rt = Mem_uw; Rsv = 1; RsvLen = 4; RsvAddr = EA; }}); + 20: lwarx({{ Rt = Mem_uw; }}, mem_flags = LLSC); 534: lwbrx({{ Rt = swap_byte(Mem_uw); }}); 21: ldx({{ Rt = Mem; }}); - 84: ldarx({{ Rt = Mem_ud; Rsv = 1; RsvLen = 8; RsvAddr = EA; }}); + 84: ldarx({{ Rt = Mem_ud; }}, mem_flags = LLSC); 532: ldbrx({{ Rt = swap_byte(Mem); }}); 535: lfsx({{ Ft_sf = Mem_sf; }}); 599: lfdx({{ Ft = Mem_df; }}); @@ -386,75 +386,15 @@ decode PO default Unknown::unknown() { format StoreIndexOp { 215: stbx({{ Mem_ub = Rs_ub; }}); - 694: stbcx({{ - bool store_performed = false; - Mem_ub = Rs_ub; - if (Rsv) { - if (RsvLen == 1) { - if (RsvAddr == EA) { - store_performed = true; - } - } - } - Xer xer = XER; - Cr cr = CR; - cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so); - CR = cr; - Rsv = 0; - }}); + 694: stbcx({{ Mem_ub = Rs_ub; }}, mem_flags = LLSC); 407: sthx({{ Mem_uh = Rs_uh; }}); - 726: sthcx({{ - bool store_performed = false; - Mem_uh = Rs_uh; - if (Rsv) { - if (RsvLen == 2) { - if (RsvAddr == EA) { - store_performed = true; - } - } - } - Xer xer = XER; - Cr cr = CR; - cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so); - CR = cr; - Rsv = 0; - }}); + 726: sthcx({{ Mem_uh = Rs_uh; }}, mem_flags = LLSC); 918: sthbrx({{ Mem_uh = swap_byte(Rs_uh); }}); 151: stwx({{ Mem_uw = Rs_uw; }}); - 150: stwcx({{ - bool store_performed = false; - Mem_uw = Rs_uw; - if (Rsv) { - if (RsvLen == 4) { - if (RsvAddr == EA) { - store_performed = true; - } - } - } - Xer xer = XER; - Cr cr = CR; - cr.cr0 = ((store_performed ? 
                   0x2 : 0x0) | xer.so);
-            CR = cr;
-            Rsv = 0;
-        }});
+        150: stwcx({{ Mem_uw = Rs_uw; }}, mem_flags = LLSC);
         662: stwbrx({{ Mem_uw = swap_byte(Rs_uw); }});
         149: stdx({{ Mem = Rs }});
-        214: stdcx({{
-            bool store_performed = false;
-            Mem = Rs;
-            if (Rsv) {
-                if (RsvLen == 8) {
-                    if (RsvAddr == EA) {
-                        store_performed = true;
-                    }
-                }
-            }
-            Xer xer = XER;
-            Cr cr = CR;
-            cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
-            CR = cr;
-            Rsv = 0;
-        }});
+        214: stdcx({{ Mem_ud = Rs; }}, mem_flags = LLSC);
         660: stdbrx({{ Mem = swap_byte(Rs); }});
     }
 
diff --git a/src/arch/power/isa/formats/util.isa b/src/arch/power/isa/formats/util.isa
index 8fdadee60..f681944a8 100644
--- a/src/arch/power/isa/formats/util.isa
+++ b/src/arch/power/isa/formats/util.isa
@@ -131,6 +131,7 @@ def LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
                           inst_flags)
 
     if mem_flags:
+        mem_flags = [ 'Request::%s' % flag for flag in mem_flags ]
         s = '\n\tmemAccessFlags = ' + string.join(mem_flags, '|') + ';'
         iop.constructor += s
 
diff --git a/src/arch/power/locked_mem.hh b/src/arch/power/locked_mem.hh
index d7abc64f0..02313d26c 100644
--- a/src/arch/power/locked_mem.hh
+++ b/src/arch/power/locked_mem.hh
@@ -41,6 +41,146 @@
  * ISA-specific helper functions for locked memory accesses.
  */
 
-#include "arch/generic/locked_mem.hh"
+#include <cstdlib>
+
+#include "arch/power/miscregs.hh"
+#include "arch/power/registers.hh"
+#include "cpu/thread_context.hh"
+#include "debug/LLSC.hh"
+#include "mem/packet.hh"
+#include "mem/request.hh"
+
+namespace PowerISA
+{
+
+template <class XC>
+inline void
+handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
+{
+    // If we see a snoop come into the CPU and we currently have an LLSC
+    // operation pending we need to clear the lock flag if it is to the same
+    // addr.
+    ThreadContext *tc = xc->getTC();
+    if (!tc->readIntReg(INTREG_RSV))
+        return;
+
+    // NOTE(review): snoop addresses are cache-block granular; compare at
+    // block granularity or the reservation is effectively never cleared.
+    Addr locked_addr = tc->readIntReg(INTREG_RSV_ADDR) & cacheBlockMask;
+    Addr snoop_addr = pkt->getAddr() & cacheBlockMask;
+
+    if (locked_addr == snoop_addr)
+        tc->setIntReg(INTREG_RSV, 0);
+}
+
+template <class XC>
+inline void
+handleLockedRead(XC *xc, Request *req)
+{
+    ThreadContext *tc = xc->getTC();
+    tc->setIntReg(INTREG_RSV, 1);
+    tc->setIntReg(INTREG_RSV_LEN, req->getSize());
+    tc->setIntReg(INTREG_RSV_ADDR, req->getPaddr());
+    DPRINTF(LLSC,"%s: Placing addr %#x in monitor\n", xc->getCpuPtr()->name(),
+            req->getPaddr());
+}
+
+template <class XC>
+inline void
+handleLockedSnoopHit(XC *xc)
+{
+}
+
+
+template <class XC>
+inline bool
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+{
+    DPRINTF(LLSC,"%s: handling locked write for address %#x in monitor\n",
+            xc->getCpuPtr()->name(), req->getPaddr());
+
+    ThreadContext *tc = xc->getTC();
+    int lock_flag = tc->readIntReg(INTREG_RSV);
+    Addr lock_addr = tc->readIntReg(INTREG_RSV_ADDR);
+    unsigned size = tc->readIntReg(INTREG_RSV_LEN);
+    bool store_performed = false;
+    bool undefined_case;
+
+    if (lock_flag) {
+        if (req->getSize() == size && req->getPaddr() == lock_addr)
+        {
+            undefined_case = false;
+            store_performed = true;
+        }
+        else {
+            //Taking smallest real page size supported as 64k
+            int z = 64*1024;
+            if (req->getPaddr()/z == lock_addr/z)
+                undefined_case = true;
+            else {
+                undefined_case = false;
+                store_performed = false;
+            }
+        }
+    }
+    else {
+        undefined_case = false;
+        store_performed = false;
+    }
+    Xer xer = tc->readIntReg(INTREG_XER);
+    Cr cr = tc->readIntReg(INTREG_CR);
+    tc->setIntReg(INTREG_RSV, 0);
+
+    if (undefined_case) {
+        // rand() % 2, not % 1 (which is always 0 and made this path constant)
+        bool randombool = rand() % 2;
+        if (randombool){
+            xc->setStCondFailures(0);
+        }
+        bool secondrandombool = rand() % 2;
+        cr.cr0 = ((secondrandombool ?
+                   0x2 : 0x0) | xer.so);
+        tc->setIntReg(INTREG_CR, cr);
+        return randombool;
+    }
+
+    if (store_performed) {
+        xc->setStCondFailures(0);
+    }
+    else {
+        // Lock flag not set or addr mismatch in CPU;
+        // the rest of this code is not architectural;
+        // it's just a debugging aid to help detect
+        // livelock by warning on long sequences of failed
+        // store conditionals
+        int stCondFailures = xc->readStCondFailures();
+        stCondFailures++;
+        xc->setStCondFailures(stCondFailures);
+        if (stCondFailures % 100000 == 0) {
+            warn("%i: context %d: %d consecutive "
+                 "store conditional failures\n",
+                 curTick(), xc->contextId(), stCondFailures);
+        }
+
+        if (!lock_flag){
+            DPRINTF(LLSC, "[cid:%i]: Lock Flag Not Set, "
+                    "Store Conditional Failed.\n",
+                    req->contextId());
+        }
+        else if (req->getPaddr() != lock_addr) {
+            DPRINTF(LLSC, "[cid:%i]: Load-Link Address Mismatch, "
+                    "Store Conditional Failed.\n",
+                    req->contextId());
+        }
+    }
+    cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
+    tc->setIntReg(INTREG_CR, cr);
+    // store conditional failed already, so don't issue it to mem
+    return store_performed;
+}
+
+template <class XC>
+inline void
+globalClearExclusive(XC *xc)
+{
+}
+
+} // namespace PowerISA
 
 #endif // __ARCH_POWER_LOCKED_MEM_HH__
-- 
2.30.2