'(StoreCheck << FlagShift)')
defineMicroLoadOp('Ldstl', 'Data = merge(Data, Mem, dataSize);',
'Data = Mem & mask(dataSize * 8);',
- '(StoreCheck << FlagShift) | Request::LOCKED')
+ '(StoreCheck << FlagShift) | Request::LOCKED_RMW')
defineMicroLoadOp('Ldfp', code='FpData_uqw = Mem', big = False)
defineMicroStoreOp('St', 'Mem = pick(Data, 2, dataSize);')
defineMicroStoreOp('Stul', 'Mem = pick(Data, 2, dataSize);',
- mem_flags="Request::LOCKED")
+ mem_flags="Request::LOCKED_RMW")
defineMicroStoreOp('Stfp', code='Mem = FpData_uqw;')
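The Ldstl/Stul definitions above build their memory-request flags by OR-ing an ISA-specific bit, shifted up by FlagShift, with the generic Request::LOCKED_RMW flag. Below is a minimal, self-contained C++ sketch of that composition; the FlagShift and StoreCheck values are invented placeholders rather than x86's actual definitions, and only LOCKED_RMW's value comes from this patch.

#include <cassert>
#include <cstdint>

using FlagsType = uint64_t;

static const FlagsType LOCKED_RMW = 0x00100000; // value from this patch
static const int FlagShift = 4;                 // hypothetical ISA-flag shift
static const FlagsType StoreCheck = 0x4;        // hypothetical ISA-specific bit

int main()
{
    // The microop definitions OR an ISA-specific bit (shifted into the
    // architecture-reserved part of the flags word) with a generic
    // Request flag; both remain independently testable afterwards.
    FlagsType memFlags = (StoreCheck << FlagShift) | LOCKED_RMW;
    assert(memFlags & LOCKED_RMW);
    assert((memFlags >> FlagShift) & StoreCheck);
    return 0;
}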
//If we don't need to access a second cache line, stop now.
if (secondAddr <= addr)
{
- if (req->isLocked() && fault == NoFault) {
+ if (req->isLockedRMW() && fault == NoFault) {
assert(!locked);
locked = true;
}
//If there's a fault or we don't need to access a second cache line,
//stop now.
if (fault != NoFault || secondAddr <= addr)
{
- if (req->isLocked() && fault == NoFault) {
+ if (req->isLockedRMW() && fault == NoFault) {
assert(locked);
locked = false;
}
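The two hunks above bracket a locked RMW sequence in the CPU model: the locked load sets `locked` (asserting it was clear) and the matching locked store clears it (asserting it was set). Here is a minimal sketch of that invariant, assuming illustrative names rather than the CPU model's real interface.

#include <cassert>

// Sketch of the pairing the asserts above enforce: the locked load of an
// RMW pair opens a window that exactly one locked store must later close,
// with no nesting and no store without a preceding load.
struct LockedRMWTracker
{
    bool locked = false;

    void beginRMW() { assert(!locked); locked = true; }  // locked load seen
    void endRMW()   { assert(locked);  locked = false; } // locked store seen
};

int main()
{
    LockedRMWTracker cpu;
    cpu.beginRMW(); // e.g. the ldstl microop's read completes
    // ... intervening microops compute the new value ...
    cpu.endRMW();   // e.g. the stul microop's write completes
    return 0;
}

Because both transitions assert, a stray locked store or a nested locked load trips immediately in debug builds.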
* made up of a locked load, some operation on the data, and then a locked
* store.
*/
- static const FlagsType LOCKED = 0x00100000;
+ static const FlagsType LOCKED_RMW = 0x00100000;
/** The request is a Load locked/store conditional. */
static const FlagsType LLSC = 0x00200000;
/** This request is for a memory swap. */
bool isPrefetch() const { return _flags.isSet(PREFETCH); }
bool isLLSC() const { return _flags.isSet(LLSC); }
bool isPriv() const { return _flags.isSet(PRIVILEGED); }
- bool isLocked() const { return _flags.isSet(LOCKED); }
+ bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
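With the rename, locked read-modify-write and load-linked/store-conditional requests are clearly distinct flag bits queried through distinct accessors. The self-contained sketch below reuses the two flag values declared above; the Flags wrapper is a simplified stand-in for gem5's, not its actual implementation.

#include <cassert>
#include <cstdint>

using FlagsType = uint32_t;

// Flag values copied from the declarations above.
static const FlagsType LOCKED_RMW = 0x00100000;
static const FlagsType LLSC       = 0x00200000;

struct Flags
{
    FlagsType bits = 0;
    void set(FlagsType f) { bits |= f; }
    bool isSet(FlagsType f) const { return (bits & f) != 0; }
};

struct Request
{
    Flags _flags;
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
};

int main()
{
    Request rmw;
    rmw._flags.set(LOCKED_RMW);
    // After the rename, an x86-style locked RMW request can no longer be
    // visually confused with a load-linked/store-conditional request.
    assert(rmw.isLockedRMW() && !rmw.isLLSC());

    Request llsc;
    llsc._flags.set(LLSC);
    assert(llsc.isLLSC() && !llsc.isLockedRMW());
    return 0;
}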
primary_type = RubyRequestType_Load_Linked;
}
secondary_type = RubyRequestType_ATOMIC;
- } else if (pkt->req->isLocked()) {
+ } else if (pkt->req->isLockedRMW()) {
//
// x86 locked instructions are translated to store cache coherence
// requests because these requests should always be treated as read