using namespace ArmISA;
+// Constructor: allocate and zero the entry table. The nlu (next-line-to-use)
+// cursor is gone; rangeMRU(1) seeds the threshold used by lookup() when
+// deciding whether a hit should be promoted toward the MRU slot (index 0).
TLB::TLB(const Params *p)
- : BaseTLB(p), size(p->size), nlu(0)
+ : BaseTLB(p), size(p->size)
#if FULL_SYSTEM
, tableWalker(p->walker)
#endif
+ , rangeMRU(1)
{
table = new TlbEntry[size];
// Zero-fill so every entry starts out invalid. The table must stay
// allocated for the TLB's lifetime: lookup()/insert() index into it,
// so it must NOT be deleted here.
memset(table, 0, sizeof(TlbEntry[size]));
}
+// Functional (debug/introspection) translation of va to a physical address.
+// Reads the current CONTEXTIDR for the ASID/context and probes the TLB with
+// lookup(..., true); on a hit writes the physical address into pa and
+// returns true, on a miss returns false without starting a table walk.
+// NOTE(review): the third lookup() argument presumably marks the probe as
+// functional (no replacement-state side effects) -- confirm in lookup().
+bool
+TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
+{
+ uint32_t context_id = tc->readMiscReg(MISCREG_CONTEXTIDR);
+ TlbEntry *e = lookup(va, context_id, true);
+ if (!e)
+ return false;
+ pa = e->pAddr(va);
+ return true;
+}
+
+// Look up va for context cid. The table is kept in approximate LRU order:
+// most-recently-used entries live at low indices, the LRU entry at the end.
+// NOTE(review): the new `functional` parameter is not referenced in the
+// visible portion of this hunk -- confirm its use in the rest of lookup().
TlbEntry*
-TLB::lookup(Addr va, uint8_t cid)
+TLB::lookup(Addr va, uint8_t cid, bool functional)
{
- // XXX This should either turn into a TlbMap or add caching
TlbEntry *retval = NULL;
- // Do some kind of caching, fast indexing, anything
+ // Maintain the LRU-ordered array: linear scan from MRU to LRU.
int x = 0;
while (retval == NULL && x < size) {
if (table[x].match(va, cid)) {
+ // Only promote the hit entry to the front when its position is
+ // beyond rangeMRU; hits within [0, rangeMRU] stay where they are,
+ // avoiding churn among the hottest few entries.
+ if (x > rangeMRU) {
+ TlbEntry tmp_entry = table[x];
+ // Shift entries [0, x) down one slot, then place the hit at MRU.
+ for(int i = x; i > 0; i--)
+ table[i] = table[i-1];
+ table[0] = tmp_entry;
+ retval = &table[0];
+ } else {
+ retval = &table[x];
+ }
break;
}
x++;
entry.N, entry.global, entry.valid, entry.nonCacheable, entry.sNp,
entry.xn, entry.ap, entry.domain);
+ // With the LRU-ordered table, the victim is always the last slot.
- if (table[nlu].valid)
+ if (table[size-1].valid)
DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d ppn %#x size: %#x ap:%d\n",
- table[nlu].vpn << table[nlu].N, table[nlu].asid, table[nlu].pfn << table[nlu].N,
- table[nlu].size, table[nlu].ap);
+ table[size-1].vpn << table[size-1].N, table[size-1].asid,
+ table[size-1].pfn << table[size-1].N, table[size-1].size,
+ table[size-1].ap);
- // XXX Update caching, lookup table etc
- table[nlu] = entry;
+ // Insert at the MRU position (index 0); the shift below overwrites
+ // table[size-1], evicting the LRU entry.
- // XXX Figure out how entries are generally inserted in ARM
- nextnlu();
+ for(int i = size-1; i > 0; i--)
+ table[i] = table[i-1];
+ table[0] = entry;
}
void
}
memset(table, 0, sizeof(TlbEntry[size]));
- nlu = 0;
}
.desc("DTB misses")
;
- invalids
- .name(name() + ".invalids")
- .desc("DTB access violations")
- ;
-
accesses
.name(name() + ".accesses")
.desc("DTB accesses")
DPRINTF(TLBVerbose, "CPSR is user:%d UserMode:%d\n", cpsr.mode == MODE_USER, flags
& UserMode);
+ // If this is a clrex instruction, provide a PA of 0 with no fault.
+ // This will force the monitor to set the tracked address to 0 --
+ // a bit of a hack, but this effectively clears this processor's monitor.
+ if (flags & Request::CLEAR_LL){
+ req->setPaddr(0);
+ req->setFlags(Request::UNCACHEABLE);
+ req->setFlags(Request::CLEAR_LL);
+ return NoFault;
+ }
+ if ((req->isInstFetch() && (!sctlr.i)) ||
+ ((!req->isInstFetch()) && (!sctlr.c))){
+ req->setFlags(Request::UNCACHEABLE);
+ }
if (!is_fetch) {
assert(flags & MustBeOne);
if (sctlr.a || !(flags & AllowUnaligned)) {
// Set memory attributes
TlbEntry temp_te;
- tableWalker->memAttrs(temp_te, 0, 1);
+ tableWalker->memAttrs(tc, temp_te, sctlr, 0, 1);
temp_te.shareable = true;
DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable:\
%d, innerAttrs: %d, outerAttrs: %d\n", temp_te.shareable,
TlbEntry *te = lookup(vaddr, context_id);
if (te == NULL) {
+ if (req->isPrefetch()){
+ //if the request is a prefetch don't attempt to fill the TLB
+ //or go any further with the memory access
+ return new PrefetchAbort(vaddr, ArmFault::PrefetchTLBMiss);
+ }
// start translation table walk, pass variables rather than
// re-retrieving in table walker for speed
DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d)\n",
outerAttrs: %d\n",
te->shareable, te->innerAttrs, te->outerAttrs);
setAttr(te->attributes);
-
+ if (te->nonCacheable)
+ req->setFlags(Request::UNCACHEABLE);
uint32_t dacr = tc->readMiscReg(MISCREG_DACR);
switch ( (dacr >> (te->domain * 2)) & 0x3) {
case 0: