MapIter i;
TlbEntry *new_entry = NULL;
- TlbRange tr;
+// TlbRange tr;
int x;
cacheValid = false;
- tr.va = va;
+ /* tr.va = va;
tr.size = PTE.size() - 1;
tr.contextId = context_id;
tr.partitionId = partition_id;
tr.real = real;
-
+*/
DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
va, PTE.paddr(), partition_id, context_id, (int)real, entry);
// Demap any entry that conflicts
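// An entry conflicts if it has the same real bit and partition, overlaps the
// new virtual range, and (unless this is a real-address mapping) shares the context.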
+ for (x = 0; x < size; x++) {
+ if (tlb[x].range.real == real &&
+ tlb[x].range.partitionId == partition_id &&
+ tlb[x].range.va < va + PTE.size() - 1 &&
+ tlb[x].range.va + tlb[x].range.size >= va &&
+ (real || tlb[x].range.contextId == context_id ))
+ {
+ if (tlb[x].valid) {
+ freeList.push_front(&tlb[x]);
+ DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
+
+ tlb[x].valid = false;
+ if (tlb[x].used) {
+ tlb[x].used = false;
+ usedEntries--;
+ }
+ lookupTable.erase(tlb[x].range);
+ }
+ }
+ }
+
+
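// The old single-entry conflict check below is disabled in favor of the overlap scan above.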
+/*
i = lookupTable.find(tr);
if (i != lookupTable.end()) {
i->second->valid = false;
freeList.push_front(i->second);
lookupTable.erase(i);
}
-
+*/
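// Pick the slot to fill: use the caller-specified index if one was given,
// otherwise choose a replacement victim round-robin, skipping locked entries.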
if (entry != -1) {
    assert(entry < size && entry >= 0);
    new_entry = &tlb[entry];
} else {
    x = lastReplaced;
    do {
        ++x;
        if (x == size)
            x = 0;
        if (x == lastReplaced)
            goto insertAllLocked;
    } while (tlb[x].pte.locked());
    lastReplaced = x;
    new_entry = &tlb[x];
-    lookupTable.erase(new_entry->range);
}
/*
for (x = 0; x < size; x++) {
    if (!tlb[x].valid && !tlb[x].pte.locked()) {
        new_entry = &tlb[x];
        break;
    }
}*/
insertAllLocked:
// Update the last entry if they're all locked
if (!new_entry) {
    new_entry = &tlb[size-1];
-    lookupTable.erase(new_entry->range);
}
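// Claim the chosen slot: take it off the free list, account for a used entry
// being evicted, and drop its old range from the lookup table.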
freeList.remove(new_entry);
+ if (new_entry->valid && new_entry->used)
+ usedEntries--;
+
+ lookupTable.erase(new_entry->range);
+
+
DPRINTF(TLB, "Using entry: %#X\n", new_entry);
assert(PTE.valid());
cacheValid = false;
freeList.clear();
+ lookupTable.clear();
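// Put every previously valid entry back on the free list and mark all entries
// invalid and unused.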
for (x = 0; x < size; x++) {
if (tlb[x].valid == true)
freeList.push_back(&tlb[x]);
tlb[x].valid = false;
+ tlb[x].used = false;
}
usedEntries = 0;
}