From: Ali Saidi
Date: Fri, 1 Oct 2010 21:04:04 +0000 (-0500)
Subject: ARM: Make the TLB a little bit faster by moving most recently used items to front...
X-Git-Tag: stable_2012_02_02~798
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=dcaa0668ae2c243158fbc886e81e6a6b0e451c83;p=gem5.git

ARM: Make the TLB a little bit faster by moving most recently used items to front of list
---

diff --git a/src/arch/arm/tlb.cc b/src/arch/arm/tlb.cc
index a8d78308f..4e98aaf7b 100644
--- a/src/arch/arm/tlb.cc
+++ b/src/arch/arm/tlb.cc
@@ -65,10 +65,11 @@ using namespace std;
 using namespace ArmISA;
 TLB::TLB(const Params *p)
-    : BaseTLB(p), size(p->size), nlu(0)
+    : BaseTLB(p), size(p->size)
 #if FULL_SYSTEM
     , tableWalker(p->walker)
 #endif
+    , rangeMRU(1)
 {
     table = new TlbEntry[size];
     memset(table, 0, sizeof(TlbEntry[size]));
 }
@@ -98,19 +99,25 @@ TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
 TlbEntry*
 TLB::lookup(Addr va, uint8_t cid, bool functional)
 {
-    // XXX This should either turn into a TlbMap or add caching
 
     TlbEntry *retval = NULL;
 
-    // Do some kind of caching, fast indexing, anything
+    // Maintaining LRU array
     int x = 0;
     while (retval == NULL && x < size) {
         if (table[x].match(va, cid)) {
-            retval = &table[x];
-            if (x == nlu && !functional)
-                nextnlu();
+            // We only move the hit entry ahead when the position is higher than rangeMRU
+            if (x > rangeMRU) {
+                TlbEntry tmp_entry = table[x];
+                for(int i = x; i > 0; i--)
+                    table[i] = table[i-1];
+                table[0] = tmp_entry;
+                retval = &table[0];
+            } else {
+                retval = &table[x];
+            }
             break;
         }
         x++;
     }
@@ -134,16 +141,17 @@ TLB::insert(Addr addr, TlbEntry &entry)
             entry.N, entry.global, entry.valid, entry.nonCacheable,
             entry.sNp, entry.xn, entry.ap, entry.domain);
 
-    if (table[nlu].valid)
+    if (table[size-1].valid)
         DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d ppn %#x size: %#x ap:%d\n",
-            table[nlu].vpn << table[nlu].N, table[nlu].asid, table[nlu].pfn << table[nlu].N,
-            table[nlu].size, table[nlu].ap);
+            table[size-1].vpn << table[size-1].N, table[size-1].asid,
+            table[size-1].pfn << table[size-1].N, table[size-1].size,
+            table[size-1].ap);
 
-    // XXX Update caching, lookup table etc
-    table[nlu] = entry;
+    //inserting to MRU position and evicting the LRU one
 
-    // XXX Figure out how entries are generally inserted in ARM
-    nextnlu();
+    for(int i = size-1; i > 0; i--)
+        table[i] = table[i-1];
+    table[0] = entry;
 }
 
 void
@@ -177,7 +185,6 @@ TLB::flushAll()
     }
 
     memset(table, 0, sizeof(TlbEntry[size]));
-    nlu = 0;
 }
 
 

diff --git a/src/arch/arm/tlb.hh b/src/arch/arm/tlb.hh
index caccad873..2d3661f7d 100644
--- a/src/arch/arm/tlb.hh
+++ b/src/arch/arm/tlb.hh
@@ -88,7 +88,6 @@ class TLB : public BaseTLB
 
     TlbEntry *table; // the Page Table
     int size; // TLB Size
-    int nlu; // not last used entry (for replacement)
 
     uint32_t _attr; // Memory attributes for last accessed TLB entry
 
@@ -96,7 +95,6 @@ class TLB : public BaseTLB
     TableWalker *tableWalker;
 #endif
 
-    void nextnlu() { if (++nlu >= size) nlu = 0; }
     /** Lookup an entry in the TLB
      * @param vpn virtual address
      * @param asn context id/address space id to use
@@ -118,6 +116,7 @@ class TLB : public BaseTLB
     Stats::Formula misses;
     Stats::Formula accesses;
 
+    int rangeMRU; //On lookup, only move entries ahead when outside rangeMRU
   public:
     typedef ArmTLBParams Params;
 