"IPR memory space not implemented!");
} else {
// mark request as uncacheable
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
// Clear bits 42:35 of the physical address (10-2 in
// Tsunami manual)
if (flags & Request::CLEAR_LL){
// @todo: check implications of security extensions
req->setPaddr(0);
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
req->setFlags(Request::CLEAR_LL);
return NoFault;
}
if ((req->isInstFetch() && (!sctlr.i)) ||
((!req->isInstFetch()) && (!sctlr.c))){
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
}
if (!is_fetch) {
assert(flags & MustBeOne);
// @todo: double check this (ARM ARM issue C B3.2.1)
if (long_desc_format || sctlr.tre == 0) {
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
} else {
if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
}
// Set memory attributes
te->shareable, te->innerAttrs, te->outerAttrs,
static_cast<uint8_t>(te->mtype), isStage2);
setAttr(te->attributes);
- if (te->nonCacheable) {
- req->setFlags(Request::UNCACHEABLE);
- }
+
+ if (te->nonCacheable)
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
Addr pa = te->pAddr(vaddr);
req->setPaddr(pa);
// address or by the TLB entry
if ((req->getVaddr() & VAddrUncacheable) == VAddrUncacheable) {
// mark request as uncacheable
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
}
return NoFault;
}
if ((req->getVaddr() & VAddrUncacheable) == VAddrUncacheable) {
// mark request as uncacheable
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
}
return NoFault;
}
ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
(!write || ce->pte.writable())) {
req->setPaddr(ce->pte.translate(vaddr));
- if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
- req->setFlags(Request::UNCACHEABLE);
+ if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
+ req->setFlags(
+ Request::UNCACHEABLE | Request::STRICT_ORDER);
+ }
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
} // if matched
ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
(!write || ce->pte.writable())) {
req->setPaddr(ce->pte.translate(vaddr));
- if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
- req->setFlags(Request::UNCACHEABLE);
+ if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
+ req->setFlags(
+ Request::UNCACHEABLE | Request::STRICT_ORDER);
+ }
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
} // if matched
}
if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
  // cache translation data for next translation
cacheState = tlbdata;
req->setFlags(Request::MMAPPED_IPR);
req->setPaddr(MISCREG_PCI_CONFIG_ADDRESS * sizeof(MiscReg));
} else if ((IOPort & ~mask(2)) == 0xCFC) {
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
Addr configAddress =
tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS);
if (bits(configAddress, 31, 31)) {
req->setPaddr(PhysAddrPrefixIO | IOPort);
}
} else {
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
req->setPaddr(PhysAddrPrefixIO | IOPort);
}
return NoFault;
return new GeneralProtection(0);
*/
// Force the access to be uncacheable.
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
req->setPaddr(x86LocalAPICAddress(tc->contextId(),
paddr - apicRange.start()));
}
DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr);
req->setPaddr(paddr);
if (entry->uncacheable)
- req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
} else {
//Use the address which already has segmentation applied.
DPRINTF(TLB, "Paging disabled.\n");
* @todo: Consider if this is necessary or not.
*/
EACalcDone,
- IsUncacheable,
+ IsStrictlyOrdered,
ReqMade,
MemOpDone,
MaxFlags
/** Returns whether or not the eff. addr. source registers are ready. */
bool eaSrcsReady();
- /** Is this instruction's memory access uncacheable. */
- bool uncacheable() { return instFlags[IsUncacheable]; }
+ /** Is this instruction's memory access strictly ordered? */
+ bool strictlyOrdered() const { return instFlags[IsStrictlyOrdered]; }
/** Has this instruction generated a memory request. */
bool hasRequest() { return instFlags[ReqMade]; }
{
fault = state->getFault();
- instFlags[IsUncacheable] = state->isUncacheable();
+ instFlags[IsStrictlyOrdered] = state->isStrictlyOrdered();
if (fault == NoFault) {
physEffAddr = state->getPaddr();
bool is_load = request->isLoad;
bool is_llsc = request->request.isLLSC();
bool is_swap = request->request.isSwap();
- bool bufferable = !(request->request.isUncacheable() ||
+ bool bufferable = !(request->request.isStrictlyOrdered() ||
is_llsc || is_swap);
if (is_load) {
  /// Instruction that caused the non-mispredict squash
DynInstPtr squashInst; // *F
- /// Hack for now to send back an uncached access to the IEW stage.
- DynInstPtr uncachedLoad; // *I
+ /// Hack for now to send back a strictly ordered access to the
+ /// IEW stage.
+ DynInstPtr strictlyOrderedLoad; // *I
/// Communication specifically to the IQ to tell the IQ that it can
/// schedule a non-speculative instruction.
/// If the interrupt ended up being cleared before being handled
bool clearInterrupt; // *F
- /// Hack for now to send back an uncached access to the IEW stage.
- bool uncached; // *I
+ /// Hack for now to send back a strictly ordered access to
+ /// the IEW stage.
+ bool strictlyOrdered; // *I
};
// think are possible.
assert(head_inst->isNonSpeculative() || head_inst->isStoreConditional()
|| head_inst->isMemBarrier() || head_inst->isWriteBarrier() ||
- (head_inst->isLoad() && head_inst->uncacheable()));
+ (head_inst->isLoad() && head_inst->strictlyOrdered()));
DPRINTF(Commit, "Encountered a barrier or non-speculative "
"instruction [sn:%lli] at the head of the ROB, PC %s.\n",
// it is executed.
head_inst->clearCanCommit();
- if (head_inst->isLoad() && head_inst->uncacheable()) {
- DPRINTF(Commit, "[sn:%lli]: Uncached load, PC %s.\n",
+ if (head_inst->isLoad() && head_inst->strictlyOrdered()) {
+ DPRINTF(Commit, "[sn:%lli]: Strictly ordered load, PC %s.\n",
head_inst->seqNum, head_inst->pcState());
- toIEW->commitInfo[tid].uncached = true;
- toIEW->commitInfo[tid].uncachedLoad = head_inst;
+ toIEW->commitInfo[tid].strictlyOrdered = true;
+ toIEW->commitInfo[tid].strictlyOrderedLoad = head_inst;
} else {
++commitNonSpecStalls;
}
// Some instructions will be sent to commit without having
// executed because they need commit to handle them.
- // E.g. Uncached loads have not actually executed when they
+ // E.g. Strictly ordered loads have not actually executed when they
// are first sent to commit. Instead commit must tell the LSQ
- // when it's ready to execute the uncached load.
+ // when it's ready to execute the strictly ordered load.
if (!inst->isSquashed() && inst->isExecuted() && inst->getFault() == NoFault) {
int dependents = instQueue.wakeDependents(inst);
if (fromCommit->commitInfo[tid].nonSpecSeqNum != 0) {
//DPRINTF(IEW,"NonspecInst from thread %i",tid);
- if (fromCommit->commitInfo[tid].uncached) {
- instQueue.replayMemInst(fromCommit->commitInfo[tid].uncachedLoad);
- fromCommit->commitInfo[tid].uncachedLoad->setAtCommit();
+ if (fromCommit->commitInfo[tid].strictlyOrdered) {
+ instQueue.replayMemInst(
+ fromCommit->commitInfo[tid].strictlyOrderedLoad);
+ fromCommit->commitInfo[tid].strictlyOrderedLoad->setAtCommit();
} else {
instQueue.scheduleNonSpec(
fromCommit->commitInfo[tid].nonSpecSeqNum);
assert(!load_inst->isExecuted());
- // Make sure this isn't an uncacheable access
- // A bit of a hackish way to get uncached accesses to work only if they're
- // at the head of the LSQ and are ready to commit (at the head of the ROB
- // too).
- if (req->isUncacheable() &&
+ // Make sure this isn't a strictly ordered load
+ // A bit of a hackish way to get strictly ordered accesses to work
+ // only if they're at the head of the LSQ and are ready to commit
+ // (at the head of the ROB too).
+ if (req->isStrictlyOrdered() &&
(load_idx != loadHead || !load_inst->isAtCommit())) {
iewStage->rescheduleMemInst(load_inst);
++lsqRescheduledLoads;
- DPRINTF(LSQUnit, "Uncachable load [sn:%lli] PC %s\n",
+ DPRINTF(LSQUnit, "Strictly ordered load [sn:%lli] PC %s\n",
load_inst->seqNum, load_inst->pcState());
// Must delete request now that it wasn't handed off to
delete sreqHigh;
}
return std::make_shared<GenericISA::M5PanicFault>(
- "Uncachable load [sn:%llx] PC %s\n",
+ "Strictly ordered load [sn:%llx] PC %s\n",
load_inst->seqNum, load_inst->pcState());
}
if (store_size == 0)
continue;
- else if (storeQueue[store_idx].inst->uncacheable())
+ else if (storeQueue[store_idx].inst->strictlyOrdered())
continue;
assert(storeQueue[store_idx].inst->effAddrValid());
while (load_idx != loadTail) {
DynInstPtr ld_inst = loadQueue[load_idx];
- if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
+ if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
incrLdIdx(load_idx);
continue;
}
*/
while (load_idx != loadTail) {
DynInstPtr ld_inst = loadQueue[load_idx];
- if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
+ if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
incrLdIdx(load_idx);
continue;
}
// along to commit without the instruction completing.
if (load_fault != NoFault || !inst->readPredicate()) {
// Send this instruction to commit, also make sure iew stage
- // realizes there is activity.
- // Mark it as executed unless it is an uncached load that
- // needs to hit the head of commit.
+ // realizes there is activity. Mark it as executed unless it
+ // is a strictly ordered load that needs to hit the head of
+ // commit.
if (!inst->readPredicate())
inst->forwardOldRegs();
DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
inst->seqNum,
(load_fault != NoFault ? "fault" : "predication"));
- if (!(inst->hasRequest() && inst->uncacheable()) ||
+ if (!(inst->hasRequest() && inst->strictlyOrdered()) ||
inst->isAtCommit()) {
inst->setExecuted();
}
}
/**
- * Check if this request is uncacheable. We only need to check the main
- * request because the flags will have been copied here on a split
- * translation.
+ * Check if this request is a strictly ordered device access. We
+ * only need to check the main request because the flags will have
+ * been copied here on a split translation.
*/
bool
- isUncacheable() const
+ isStrictlyOrdered() const
{
- return mainReq->isUncacheable();
+ return mainReq->isStrictlyOrdered();
}
/**
static const FlagsType INST_FETCH = 0x00000100;
/** The virtual address is also the physical address. */
static const FlagsType PHYSICAL = 0x00000200;
- /** The request is to an uncacheable address. */
- static const FlagsType UNCACHEABLE = 0x00001000;
+ /**
+ * The request is to an uncacheable address.
+ *
+ * @note Uncacheable accesses may be reordered by CPU models. The
+ * STRICT_ORDER flag should be set if such reordering is
+ * undesirable.
+ */
+ static const FlagsType UNCACHEABLE = 0x00000400;
+ /**
+ * The request is required to be strictly ordered by <i>CPU
+ * models</i> and is non-speculative.
+ *
+ * A strictly ordered request is guaranteed to never be re-ordered
+ * or executed speculatively by a CPU model. The memory system may
+ * still reorder requests in caches unless the UNCACHEABLE flag is
+ * set as well.
+ */
+ static const FlagsType STRICT_ORDER = 0x00000800;
/** This request is to a memory mapped register. */
static const FlagsType MMAPPED_IPR = 0x00002000;
/** This request is a clear exclusive. */
/** Accessor functions for flags. Note that these are for testing
only; setting flags should be done via setFlags(). */
bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
+ bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
bool isPrefetch() const { return _flags.isSet(PREFETCH); }
bool isLLSC() const { return _flags.isSet(LLSC); }
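
The pattern applied throughout this patch is that any access which must not be reordered or speculated by the CPU models now sets STRICT_ORDER alongside UNCACHEABLE. As a minimal sketch of how a translation routine would use the new flag pair (DeviceTLB, devBase, and devMask are hypothetical names for illustration, not part of this patch):

    Fault
    DeviceTLB::translateDevice(RequestPtr req)
    {
        // Device registers must bypass the caches (UNCACHEABLE) and
        // must not be reordered or executed speculatively by the CPU
        // model (STRICT_ORDER).
        req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        req->setPaddr(devBase | (req->getVaddr() & devMask));
        return NoFault;
    }

Consumers on the CPU side then test req->isStrictlyOrdered() rather than req->isUncacheable() when deciding whether an access may be buffered, reordered, or issued speculatively, as the LSQ and write-buffer changes above do.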