0x0c: ldwu({{ Ra.uq = Mem.uw; }});
0x0b: ldq_u({{ Ra = Mem.uq; }}, ea_code = {{ EA = (Rb + disp) & ~7; }});
0x23: ldt({{ Fa = Mem.df; }});
- 0x2a: ldl_l({{ Ra.sl = Mem.sl; }}, mem_flags = LOCKED);
- 0x2b: ldq_l({{ Ra.uq = Mem.uq; }}, mem_flags = LOCKED);
+ 0x2a: ldl_l({{ Ra.sl = Mem.sl; }}, mem_flags = LLSC);
+ 0x2b: ldq_l({{ Ra.uq = Mem.uq; }}, mem_flags = LLSC);
#ifdef USE_COPY
0x20: MiscPrefetch::copy_load({{ EA = Ra; }},
{{ fault = xc->copySrcTranslate(EA); }},
if (tmp == 1) {
xc->setStCondFailures(0);
}
- }}, mem_flags = LOCKED, inst_flags = IsStoreConditional);
+ }}, mem_flags = LLSC, inst_flags = IsStoreConditional);
0x2f: stq_c({{ Mem.uq = Ra; }},
{{
uint64_t tmp = write_result;
// only.
xc->setStCondFailures(0);
}
- }}, mem_flags = LOCKED, inst_flags = IsStoreConditional);
+ }}, mem_flags = LLSC, inst_flags = IsStoreConditional);
}
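// Illustration only, not part of the patch: the store-conditional blocks
// above read their outcome back through write_result; the memory-system side
// produces that value via Request::setExtraData(), using the convention that
// also appears in the hunks further below. Minimal hedged sketch of that
// convention -- recordScResult is a hypothetical helper name:
//
// void
// recordScResult(Request *req, bool success)
// {
//     if (!req->isLlsc())
//         return;                              // plain stores carry no SC result
//     if (req->isUncacheable())
//         req->setExtraData(2);                // uncacheable SC reported as 2
//     else
//         req->setExtraData(success ? 1 : 0);  // 1 = reservation held, 0 = SC failed
// }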
format IntegerOperate {
if (HW_LDST_PHYS) memAccessFlags.set(Request::PHYSICAL);
if (HW_LDST_ALT) memAccessFlags.set(Request::ALTMODE);
if (HW_LDST_VPTE) memAccessFlags.set(Request::VPTE);
- if (HW_LDST_LOCK) memAccessFlags.set(Request::LOCKED);
+ if (HW_LDST_LOCK) memAccessFlags.set(Request::LLSC);
}
std::string
0x6: decode OPCODE_LO {
format LoadMemory {
- 0x0: ll({{ Rt.uw = Mem.uw; }}, mem_flags=LOCKED);
+ 0x0: ll({{ Rt.uw = Mem.uw; }}, mem_flags=LLSC);
0x1: lwc1({{ Ft.uw = Mem.uw; }});
0x5: ldc1({{ Ft.ud = Mem.ud; }});
}
0x0: StoreCond::sc({{ Mem.uw = Rt.uw;}},
{{ uint64_t tmp = write_result;
Rt.uw = (tmp == 0 || tmp == 1) ? tmp : Rt.uw;
- }}, mem_flags=LOCKED, inst_flags = IsStoreConditional);
+ }}, mem_flags=LLSC, inst_flags = IsStoreConditional);
format StoreMemory {
0x1: swc1({{ Mem.uw = Ft.uw;}});
// verify this data.
if (unverifiedReq &&
!(unverifiedReq->isUncacheable()) &&
- (!(unverifiedReq->isLocked()) ||
- ((unverifiedReq->isLocked()) &&
+ (!(unverifiedReq->isLlsc()) ||
+ ((unverifiedReq->isLlsc()) &&
unverifiedReq->getExtraData() == 1))) {
T inst_data;
/*
Request *memReq = cache_req->dataPkt->req;
- if (cache_req->dataPkt->isWrite() && memReq->isLocked()) {
+ if (cache_req->dataPkt->isWrite() && memReq->isLlsc()) {
assert(cache_req->inst->isStoreConditional());
DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
do_access = TheISA::handleLockedWrite(cpu, memReq);
cacheStatus = cacheWaitResponse;
cacheBlocked = false;
}
- } else if (!do_access && memReq->isLocked()){
+ } else if (!do_access && memReq->isLlsc()){
// Store-Conditional instructions complete even if they "failed"
assert(cache_req->inst->isStoreConditional());
cache_req->setCompleted(true);
if (inst->isLoad()) {
assert(cache_pkt->isRead());
- if (cache_pkt->req->isLocked()) {
+ if (cache_pkt->req->isLlsc()) {
DPRINTF(InOrderCachePort,
"[tid:%u]: Handling Load-Linked for [sn:%u]\n",
tid, inst->seqNum);
"storeHead: %i addr: %#x\n",
load_idx, store_idx, storeHead, req->getPaddr());
- if (req->isLocked()) {
+ if (req->isLlsc()) {
// Disable recording the result temporarily. Writing to misc
// regs normally updates the result, but this is not the
// desired behavior when handling store conditionals.
if (!lsq->cacheBlocked()) {
PacketPtr data_pkt =
new Packet(req,
- (req->isLocked() ?
+ (req->isLlsc() ?
MemCmd::LoadLockedReq : MemCmd::ReadReq),
Packet::Broadcast);
data_pkt->dataStatic(load_inst->memData);
MemCmd command =
req->isSwap() ? MemCmd::SwapReq :
- (req->isLocked() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
+ (req->isLlsc() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
PacketPtr data_pkt = new Packet(req, command,
Packet::Broadcast);
data_pkt->dataStatic(inst->memData);
// ++iewExecStoreInsts;
- if (!(inst->req->isLocked())) {
+ if (!(inst->req->isLlsc())) {
inst->setExecuted();
instToCommit(inst);
}
}
/*
- if (req->isLocked()) {
+ if (req->isLlsc()) {
if (req->isUncacheable()) {
// Don't update result register (see stq_c in isa_desc)
req->result = 2;
MemAccessResult result = dcacheInterface->access(req);
//@todo temp fix for LL/SC (works fine for 1 CPU)
- if (req->isLocked()) {
+ if (req->isLlsc()) {
req->result=1;
panic("LL/SC! oh no no support!!!");
}
Event *wb = NULL;
/*
typename IEW::LdWritebackEvent *wb = NULL;
- if (req->isLocked()) {
+ if (req->isLlsc()) {
// Stx_C does not generate a system port transaction.
req->result=0;
wb = new typename IEW::LdWritebackEvent(storeQueue[storeWBIdx].inst,
// DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
// storeQueue[storeWBIdx].inst->seqNum);
- if (req->isLocked()) {
+ if (req->isLlsc()) {
// Stx_C does not generate a system port transaction.
req->result=1;
typename BackEnd::LdWritebackEvent *wb =
PacketPtr data_pkt =
new Packet(req,
- (req->isLocked() ?
+ (req->isLlsc() ?
MemCmd::LoadLockedReq : MemCmd::ReadReq),
Packet::Broadcast);
data_pkt->dataStatic(inst->memData);
return NoFault;
}
- if (req->isLocked()) {
+ if (req->isLlsc()) {
cpu->lockFlag = true;
}
MemCmd command =
req->isSwap() ? MemCmd::SwapReq :
- (req->isLocked() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
+ (req->isLlsc() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
data_pkt->dataStatic(inst->memData);
inst->seqNum);
// @todo: Remove this SC hack once the memory system handles it.
- if (req->isLocked()) {
+ if (req->isLlsc()) {
if (req->isUncacheable()) {
req->setExtraData(2);
} else {
if (result != MA_HIT && dcacheInterface->doEvents()) {
store_event->miss = true;
typename BackEnd::LdWritebackEvent *wb = NULL;
- if (req->isLocked()) {
+ if (req->isLlsc()) {
wb = new typename BackEnd::LdWritebackEvent(inst,
be);
store_event->wbEvent = wb;
// DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
// inst->seqNum);
- if (req->isLocked()) {
+ if (req->isLlsc()) {
// Stx_C does not generate a system port
// transaction in the 21264, but that might be
// hard to accomplish in this model.
// Now do the access.
if (fault == NoFault) {
Packet pkt = Packet(req,
- req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
+ req->isLlsc() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
Packet::Broadcast);
pkt.dataStatic(dataPtr);
assert(!pkt.isError());
- if (req->isLocked()) {
+ if (req->isLlsc()) {
TheISA::handleLockedRead(thread, req);
}
}
MemCmd cmd = MemCmd::WriteReq; // default
bool do_access = true; // flag to suppress cache access
- if (req->isLocked()) {
+ if (req->isLlsc()) {
cmd = MemCmd::StoreCondReq;
do_access = TheISA::handleLockedWrite(thread, req);
} else if (req->isSwap()) {
} else {
bool do_access = true; // flag to suppress cache access
- if (req->isLocked()) {
+ if (req->isLlsc()) {
do_access = TheISA::handleLockedWrite(thread, req);
} else if (req->isCondSwap()) {
assert(res);
MemCmd cmd;
if (read) {
cmd = MemCmd::ReadReq;
- if (req->isLocked())
+ if (req->isLlsc())
cmd = MemCmd::LoadLockedReq;
} else {
cmd = MemCmd::WriteReq;
- if (req->isLocked()) {
+ if (req->isLlsc()) {
cmd = MemCmd::StoreCondReq;
} else if (req->isSwap()) {
cmd = MemCmd::SwapReq;
_status = DTBWaitResponse;
if (split_addr > addr) {
RequestPtr req1, req2;
- assert(!req->isLocked() && !req->isSwap());
+ assert(!req->isLlsc() && !req->isSwap());
req->splitOnVaddr(split_addr, req1, req2);
typedef SplitDataTranslation::WholeTranslationState WholeState;
_status = DTBWaitResponse;
if (split_addr > addr) {
RequestPtr req1, req2;
- assert(!req->isLocked() && !req->isSwap());
+ assert(!req->isLlsc() && !req->isSwap());
req->splitOnVaddr(split_addr, req1, req2);
typedef SplitDataTranslation::WholeTranslationState WholeState;
// the locked flag may be cleared on the response packet, so check
// pkt->req and not pkt to see if it was a load-locked
- if (pkt->isRead() && pkt->req->isLocked()) {
+ if (pkt->isRead() && pkt->req->isLlsc()) {
TheISA::handleLockedRead(thread, pkt->req);
}
*/
void trackLoadLocked(PacketPtr pkt)
{
- assert(pkt->isLocked());
+ assert(pkt->isLlsc());
lockList.push_front(Lock(pkt->req));
}
bool checkWrite(PacketPtr pkt)
{
Request *req = pkt->req;
- if (pkt->isLocked()) {
+ if (pkt->isLlsc()) {
// it's a store conditional... have to check for matching
// load locked.
bool success = false;
pkt->writeDataToBlock(blk->data, blkSize);
}
} else if (pkt->isRead()) {
- if (pkt->isLocked()) {
+ if (pkt->isLlsc()) {
blk->trackLoadLocked(pkt);
}
pkt->setDataFromBlock(blk->data, blkSize);
incMissCount(pkt);
- if (blk == NULL && pkt->isLocked() && pkt->isWrite()) {
+ if (blk == NULL && pkt->isLlsc() && pkt->isWrite()) {
// complete miss on store conditional... just give up now
pkt->req->setExtraData(0);
return true;
InvalidCmd, "ReadExResp" },
/* LoadLockedReq: note that we use plain ReadResp as response, so that
* we can also use ReadRespWithInvalidate when needed */
- { SET4(IsRead, IsLocked, IsRequest, NeedsResponse),
+ { SET4(IsRead, IsLlsc, IsRequest, NeedsResponse),
ReadResp, "LoadLockedReq" },
/* StoreCondReq */
- { SET6(IsWrite, NeedsExclusive, IsLocked,
+ { SET6(IsWrite, NeedsExclusive, IsLlsc,
IsRequest, NeedsResponse, HasData),
StoreCondResp, "StoreCondReq" },
/* StoreCondResp */
- { SET4(IsWrite, NeedsExclusive, IsLocked, IsResponse),
+ { SET4(IsWrite, NeedsExclusive, IsLlsc, IsResponse),
InvalidCmd, "StoreCondResp" },
/* SwapReq -- for Swap ldstub type operations */
{ SET6(IsRead, IsWrite, NeedsExclusive, IsRequest, HasData, NeedsResponse),
NeedsResponse, //!< Requester needs response from target
IsSWPrefetch,
IsHWPrefetch,
- IsLocked, //!< Alpha/MIPS LL or SC access
+ IsLlsc, //!< Alpha/MIPS LL or SC access
HasData, //!< There is an associated payload
IsError, //!< Error response
IsPrint, //!< Print state matching address (for debugging)
bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
bool hasData() const { return testCmdAttrib(HasData); }
bool isReadWrite() const { return isRead() && isWrite(); }
- bool isLocked() const { return testCmdAttrib(IsLocked); }
+ bool isLlsc() const { return testCmdAttrib(IsLlsc); }
bool isError() const { return testCmdAttrib(IsError); }
bool isPrint() const { return testCmdAttrib(IsPrint); }
bool isInvalidate() const { return cmd.isInvalidate(); }
bool hasData() const { return cmd.hasData(); }
bool isReadWrite() const { return cmd.isReadWrite(); }
- bool isLocked() const { return cmd.isLocked(); }
+ bool isLlsc() const { return cmd.isLlsc(); }
bool isError() const { return cmd.isError(); }
bool isPrint() const { return cmd.isPrint(); }
// Add load-locked to tracking list. Should only be called if the
-// operation is a load and the LOCKED flag is set.
+// operation is a load and the LLSC flag is set.
void
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
{
Request *req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
- bool isLocked = pkt->isLocked();
+ bool isLlsc = pkt->isLlsc();
// Initialize return value. Non-conditional stores always
// succeed. Assume conditional stores will fail until proven
// otherwise.
- bool success = !isLocked;
+ bool success = !isLlsc;
// Iterate over list. Note that there could be multiple matching
// records, as more than one context could have done a load locked
if (i->addr == paddr) {
// we have a matching address
- if (isLocked && i->matchesContext(req)) {
+ if (isLlsc && i->matchesContext(req)) {
// it's a store conditional, and as far as the memory
// system can tell, the requesting context's lock is
// still valid.
}
}
- if (isLocked) {
+ if (isLlsc) {
req->setExtraData(success ? 1 : 0);
}
TRACE_PACKET("Read/Write");
} else if (pkt->isRead()) {
assert(!pkt->isWrite());
- if (pkt->isLocked()) {
+ if (pkt->isLlsc()) {
trackLoadLocked(pkt);
}
if (pmemAddr)
Request *req = pkt->req;
if (lockedAddrList.empty()) {
// no locked addrs: nothing to check, store_conditional fails
- bool isLocked = pkt->isLocked();
- if (isLocked) {
+ bool isLlsc = pkt->isLlsc();
+ if (isLlsc) {
req->setExtraData(0);
}
- return !isLocked; // only do write if not an sc
+ return !isLlsc; // only do write if not an sc
} else {
// iterate over list...
return checkLockedAddrList(pkt);
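// Illustration only, not from this patch: a plausible shape for the records
// kept in lockedAddrList, which checkLockedAddrList() walks above. Field and
// accessor names are assumptions inferred from the uses of LockedAddr::mask()
// and matchesContext() in the hunks above; the reservation is keyed by a
// masked physical address and the context that issued the load-linked.
//
// class LockedAddr
// {
//   public:
//     Addr addr;           // masked physical address of the reservation
//     int contextId;       // hardware context holding the reservation
//
//     // assumed reservation granularity of 16 bytes
//     static Addr mask(Addr paddr) { return paddr & ~(Addr)0xf; }
//
//     bool matchesContext(Request *req) const
//     { return contextId == req->contextId(); }
//
//     LockedAddr(Request *req)
//         : addr(mask(req->getPaddr())), contextId(req->contextId()) { }
// };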
/** ASI information for this request if it exists. */
static const FlagsType ASI_BITS = 0x000000FF;
/** The request is a Load locked/store conditional. */
- static const FlagsType LOCKED = 0x00000100;
+ static const FlagsType LLSC = 0x00000100;
/** The virtual address is also the physical address. */
static const FlagsType PHYSICAL = 0x00000200;
/** The request is an ALPHA VPTE pal access (hw_ld). */
/** Accessor Function to Check Cacheability. */
bool isUncacheable() const { return flags.isSet(UNCACHEABLE); }
bool isInstRead() const { return flags.isSet(INST_READ); }
- bool isLocked() const { return flags.isSet(LOCKED); }
+ bool isLlsc() const { return flags.isSet(LLSC); }
bool isSwap() const { return flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
bool isCondSwap() const { return flags.isSet(MEM_SWAP_COND); }
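// Hypothetical usage after the rename (illustration, not part of the patch):
// a CPU model marking a load-linked access with the renamed flag and picking
// the matching memory command, mirroring the call sites updated above.
// buildLoadLockedPacket is an invented helper; setFlags() is assumed to be
// the usual Request flag setter in this tree.
//
// PacketPtr
// buildLoadLockedPacket(Request *req, uint8_t *loadData)
// {
//     req->setFlags(Request::LLSC);        // mark the access as LL/SC
//     MemCmd cmd = req->isLlsc() ? MemCmd::LoadLockedReq : MemCmd::ReadReq;
//     PacketPtr pkt = new Packet(req, cmd, Packet::Broadcast);
//     pkt->dataStatic(loadData);           // response data lands in loadData
//     return pkt;
// }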