{
TLB::TLB(const std::string &name, int s)
- : SimObject(name), size(s), usedEntries(0), cacheValid(false)
+ : SimObject(name), size(s), usedEntries(0), lastReplaced(0),
+ cacheValid(false)
{
// To make this work you'll have to change the hypervisor and OS
if (size > 64)
    fatal("SPARC T1 TLB registers don't support more than 64 TLB entries\n");

tlb = new TlbEntry[size];
memset(tlb, 0, sizeof(TlbEntry) * size);
+
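+ // Every entry starts out unused, so seed the free list with all of them.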
+ for (int x = 0; x < size; x++)
+ freeList.push_back(&tlb[x]);
}
void
TLB::clearUsedBits()
{
MapIter i;
- for (i = lookupTable.begin(); i != lookupTable.end();) {
+ for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
TlbEntry *t = i->second;
if (!t->pte.locked()) {
t->used = false;
MapIter i;
TlbEntry *new_entry = NULL;
+ TlbRange tr;
int x;
cacheValid = false;
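+ // Build the range key for this mapping; it is used below to find and demap
+ // any entry that would conflict with the one being inserted.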
+ tr.va = va;
+ tr.size = PTE.size() - 1;
+ tr.contextId = context_id;
+ tr.partitionId = partition_id;
+ tr.real = real;
+
+
+ DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
+ va, PTE.paddr(), partition_id, context_id, (int)real, entry);
+
+ // Demap any entry that conflicts
+ i = lookupTable.find(tr);
+ if (i != lookupTable.end()) {
+ i->second->valid = false;
+ if (i->second->used) {
+ i->second->used = false;
+ usedEntries--;
+ }
+ freeList.push_front(i->second);
+ DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
+ i->second);
+ lookupTable.erase(i);
+ }
- DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d\n",
- va, PTE.paddr(), partition_id, context_id, (int)real);
if (entry != -1) {
assert(entry < size && entry >= 0);
new_entry = &tlb[entry];
} else {
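+ // No entry index was supplied: prefer a free entry, otherwise pick a
+ // victim round-robin from lastReplaced, skipping locked entries.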
+ if (!freeList.empty()) {
+ new_entry = freeList.front();
+ } else {
+ x = lastReplaced;
+ do {
+ ++x;
+ if (x == size)
+ x = 0;
+ if (x == lastReplaced)
+ goto insertAllLocked;
+ } while (tlb[x].pte.locked());
+ lastReplaced = x;
+ new_entry = &tlb[x];
+ lookupTable.erase(new_entry->range);
+ }
+ /*
for (x = 0; x < size; x++) {
if (!tlb[x].valid || !tlb[x].used) {
new_entry = &tlb[x];
break;
}
- }
+ }*/
}
+insertAllLocked:
// Use the last entry if they're all locked
- if (!new_entry)
+ if (!new_entry) {
new_entry = &tlb[size-1];
+ lookupTable.erase(new_entry->range);
+ }
+
+ freeList.remove(new_entry);
+ DPRINTF(TLB, "Using entry: %#X\n", new_entry);
assert(PTE.valid());
new_entry->range.va = va;
- new_entry->range.size = PTE.size();
+ new_entry->range.size = PTE.size() - 1;
new_entry->range.partitionId = partition_id;
new_entry->range.contextId = context_id;
new_entry->range.real = real;
usedEntries++;
- // Demap any entry that conflicts
- i = lookupTable.find(new_entry->range);
- if (i != lookupTable.end()) {
- i->second->valid = false;
- if (i->second->used) {
- i->second->used = false;
- usedEntries--;
- }
- DPRINTF(TLB, "TLB: Found conflicting entry, deleting it\n");
- lookupTable.erase(i);
- }
i = lookupTable.insert(new_entry->range, new_entry);
assert(i != lookupTable.end());
i->second->used = false;
usedEntries--;
}
+ freeList.push_front(i->second);
+ DPRINTF(TLB, "Freeing TLB entry : %#X\n", i->second);
lookupTable.erase(i);
}
}
for (x = 0; x < size; x++) {
if (tlb[x].range.contextId == context_id &&
tlb[x].range.partitionId == partition_id) {
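+ // Only entries that were actually valid go back on the free list.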
+ if (tlb[x].valid == true) {
+ freeList.push_front(&tlb[x]);
+ DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
+ }
tlb[x].valid = false;
if (tlb[x].used) {
tlb[x].used = false;
cacheValid = false;
for (x = 0; x < size; x++) {
if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
+ if (tlb[x].valid == true){
+ freeList.push_front(&tlb[x]);
+ DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
+ }
tlb[x].valid = false;
if (tlb[x].used) {
tlb[x].used = false;
int x;
cacheValid = false;
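+ // Flush everything: rebuild the free list from the entries that were valid.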
+ freeList.clear();
for (x = 0; x < size; x++) {
+ if (tlb[x].valid == true)
+ freeList.push_back(&tlb[x]);
tlb[x].valid = false;
}
usedEntries = 0;
uint64_t
TLB::TteRead(int entry) {
+ if (entry >= size)
+ panic("entry: %d\n", entry);
+
assert(entry < size);
- return tlb[entry].pte();
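+ // An invalid entry reads back as all ones rather than stale PTE data.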
+ if (tlb[entry].valid)
+ return tlb[entry].pte();
+ else
+ return (uint64_t)-1ll;
}
uint64_t
TLB::TagRead(int entry) {
assert(entry < size);
uint64_t tag;
+ if (!tlb[entry].valid)
+ return (uint64_t)-1ll;
- tag = tlb[entry].range.contextId | tlb[entry].range.va |
- (uint64_t)tlb[entry].range.partitionId << 61;
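+ // Rebuild the tag piece by piece: context id and VA in the low bits,
+ // partition id at bit 61, the real bit at bit 60, and the inverted
+ // page size field starting at bit 56.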
+ tag = tlb[entry].range.contextId;
+ tag |= tlb[entry].range.va;
+ tag |= (uint64_t)tlb[entry].range.partitionId << 61;
tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
tag |= (uint64_t)~tlb[entry].pte._size() << 56;
return tag;
// Be fast if we can!
if (cacheValid && cacheState == tlbdata) {
if (cacheEntry[0] && cacheAsi[0] == asi && cacheEntry[0]->range.va < vaddr + size &&
- cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
+ cacheEntry[0]->range.va + cacheEntry[0]->range.size > vaddr) {
req->setPaddr(cacheEntry[0]->pte.paddr() & ~(cacheEntry[0]->pte.size()-1) |
vaddr & cacheEntry[0]->pte.size()-1 );
return NoFault;
}
if (cacheEntry[1] && cacheAsi[1] == asi && cacheEntry[1]->range.va < vaddr + size &&
- cacheEntry[1]->range.va + cacheEntry[1]->range.size >= vaddr) {
+ cacheEntry[1]->range.va + cacheEntry[1]->range.size > vaddr) {
req->setPaddr(cacheEntry[1]->pte.paddr() & ~(cacheEntry[1]->pte.size()-1) |
vaddr & cacheEntry[1]->pte.size()-1 );
return NoFault;
}
// Cache the translation data for the next translation
- cacheValid = true;
cacheState = tlbdata;
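+ // If the cache was not valid, clear any stale entry pointers before refilling.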
+ if (!cacheValid) {
+ cacheEntry[1] = NULL;
+ cacheEntry[0] = NULL;
+ }
+
if (cacheEntry[0] != e && cacheEntry[1] != e) {
cacheEntry[1] = cacheEntry[0];
cacheEntry[0] = e;
if (implicit)
cacheAsi[0] = (ASI)0;
}
-
+ cacheValid = true;
req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
vaddr & e->pte.size()-1);
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
return new PrivilegedAction;
}
- if (priv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
+ if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
return new DataAccessException;
}