class Checkpoint;
-namespace SparcISA
-{
+namespace SparcISA {
+
struct VAddr
{
VAddr(Addr a) { panic("not implemented yet."); }
public:
TteTag() : entry(0), populated(false) {}
TteTag(uint64_t e) : entry(e), populated(true) {}
- const TteTag &operator=(uint64_t e) { populated = true;
- entry = e; return *this; }
+
+ const TteTag &
+ operator=(uint64_t e)
+ {
+ populated = true;
+ entry = e;
+ return *this;
+ }
+
bool valid() const {assert(populated); return !bits(entry,62,62); }
Addr va() const {assert(populated); return bits(entry,41,0); }
};
uint64_t entry4u;
bool populated;
-
public:
- PageTableEntry() : entry(0), type(invalid), populated(false) {}
+ PageTableEntry()
+ : entry(0), type(invalid), populated(false)
+ {}
PageTableEntry(uint64_t e, EntryType t = sun4u)
: entry(e), type(t), populated(true)
-
{
populate(entry, type);
}
}
}
- void clear()
+ void
+ clear()
{
populated = false;
}
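+ // Page size in bytes for each of the six encodable page-size codes.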
static int pageSizes[6];
-
uint64_t operator()() const { assert(populated); return entry4u; }
- const PageTableEntry &operator=(uint64_t e) { populated = true;
- entry4u = e; return *this; }
-
- const PageTableEntry &operator=(const PageTableEntry &e)
- { populated = true; entry4u = e.entry4u; type = e.type; return *this; }
-
- bool valid() const { return bits(entry4u,63,63) && populated; }
- uint8_t _size() const { assert(populated);
- return bits(entry4u, 62,61) |
- bits(entry4u, 48,48) << 2; }
- Addr size() const { assert(_size() < 6); return pageSizes[_size()]; }
- Addr sizeMask() const { assert(_size() < 6); return pageSizes[_size()]-1;}
- bool ie() const { return bits(entry4u, 59,59); }
- Addr pfn() const { assert(populated); return bits(entry4u,39,13); }
- Addr paddr() const { assert(populated); return mbits(entry4u, 39,13);}
- bool locked() const { assert(populated); return bits(entry4u,6,6); }
- bool cv() const { assert(populated); return bits(entry4u,4,4); }
- bool cp() const { assert(populated); return bits(entry4u,5,5); }
- bool priv() const { assert(populated); return bits(entry4u,2,2); }
- bool writable() const { assert(populated); return bits(entry4u,1,1); }
- bool nofault() const { assert(populated); return bits(entry4u,60,60); }
- bool sideffect() const { assert(populated); return bits(entry4u,3,3); }
- Addr paddrMask() const { assert(populated);
- return mbits(entry4u, 39,13) & ~sizeMask(); }
+ const PageTableEntry &
+ operator=(uint64_t e)
+ {
+ populated = true;
+ entry4u = e;
+ return *this;
+ }
+
+ const PageTableEntry &
+ operator=(const PageTableEntry &e)
+ {
+ populated = true;
+ entry4u = e.entry4u;
+ type = e.type;
+ return *this;
+ }
+
+ bool valid() const { return bits(entry4u,63,63) && populated; }
+
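+ // The 3-bit page-size code is split across the TTE: bits <62:61>
+ // hold the low two bits and bit <48> supplies the third.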
+ uint8_t
+ _size() const
+ {
+ assert(populated);
+ return bits(entry4u, 62,61) | bits(entry4u, 48,48) << 2;
+ }
+
+ Addr size() const { assert(_size() < 6); return pageSizes[_size()]; }
+ Addr sizeMask() const { return size() - 1; }
+ bool ie() const { return bits(entry4u, 59,59); }
+ Addr pfn() const { assert(populated); return bits(entry4u,39,13); }
+ Addr paddr() const { assert(populated); return mbits(entry4u, 39,13);}
+ bool locked() const { assert(populated); return bits(entry4u,6,6); }
+ bool cv() const { assert(populated); return bits(entry4u,4,4); }
+ bool cp() const { assert(populated); return bits(entry4u,5,5); }
+ bool priv() const { assert(populated); return bits(entry4u,2,2); }
+ bool writable() const { assert(populated); return bits(entry4u,1,1); }
+ bool nofault() const { assert(populated); return bits(entry4u,60,60); }
+ bool sideffect() const { assert(populated); return bits(entry4u,3,3); }
+ Addr paddrMask() const { assert(populated); return paddr() & ~sizeMask(); }
+
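+ // Combine this entry's physical frame with the page offset of vaddr.
+ // Callers below use this in place of the open-coded
+ // "paddr() & ~(size()-1) | vaddr & (size()-1)" expression.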
+ Addr
+ translate(Addr vaddr) const
+ {
+ assert(populated);
+ Addr mask = sizeMask();
+ return (paddr() & ~mask) | (vaddr & mask);
+ }
};
-struct TlbRange {
+struct TlbRange
+{
Addr va;
Addr size;
int contextId;
int partitionId;
bool real;
- inline bool operator<(const TlbRange &r2) const
+ inline bool
+ operator<(const TlbRange &r2) const
{
if (real && !r2.real)
return false;
if (!real && r2.real)
return true;
return false;
}
- inline bool operator==(const TlbRange &r2) const
+
+ inline bool
+ operator==(const TlbRange &r2) const
{
return va == r2.va && size == r2.size &&
contextId == r2.contextId && partitionId == r2.partitionId &&
real == r2.real;
}
};
-struct TlbEntry {
+struct TlbEntry
+{
+ TlbEntry()
+ {}
+
TlbEntry(Addr asn, Addr vaddr, Addr paddr)
{
uint64_t entry = 0;
valid = true;
}
- TlbEntry()
- {}
+
TlbRange range;
PageTableEntry pte;
bool used;
void serialize(std::ostream &os);
void unserialize(Checkpoint *cp, const std::string &section);
-
};
-
-}; // namespace SparcISA
+} // namespace SparcISA
#endif // __ARCH_SPARC_PAGE_TABLE_HH__
{
// To make this work you'll have to change the hypervisor and OS
if (size > 64)
- fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
+ fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
tlb = new TlbEntry[size];
std::memset(tlb, 0, sizeof(TlbEntry) * size);
TLB::insert(Addr va, int partition_id, int context_id, bool real,
const PageTableEntry& PTE, int entry)
{
-
-
MapIter i;
TlbEntry *new_entry = NULL;
// TlbRange tr;
tr.real = real;
*/
- DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
- va, PTE.paddr(), partition_id, context_id, (int)real, entry);
+ DPRINTF(TLB,
+ "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
+ va, PTE.paddr(), partition_id, context_id, (int)real, entry);
// Demap any entry that conflicts
for (x = 0; x < size; x++) {
}
}
-
/*
i = lookupTable.find(tr);
if (i != lookupTable.end()) {
new_entry->valid = true;
usedEntries++;
-
-
i = lookupTable.insert(new_entry->range, new_entry);
assert(i != lookupTable.end());
- // If all entries have there used bit set, clear it on them all, but the
- // one we just inserted
+ // If all entries have their used bit set, clear it on them all,
+ // but the one we just inserted
if (usedEntries == size) {
clearUsedBits();
new_entry->used = true;
usedEntries++;
}
-
}
TlbEntry*
-TLB::lookup(Addr va, int partition_id, bool real, int context_id, bool
- update_used)
+TLB::lookup(Addr va, int partition_id, bool real, int context_id,
+ bool update_used)
{
MapIter i;
TlbRange tr;
DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
t->pte.size());
- // Update the used bits only if this is a real access (not a fake one from
- // virttophys()
+ // Update the used bits only if this is a real access (not a fake
+ // one from virttophys()).
if (!t->used && update_used) {
t->used = true;
usedEntries++;
void
TLB::demapContext(int partition_id, int context_id)
{
- int x;
DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
partition_id, context_id);
cacheValid = false;
- for (x = 0; x < size; x++) {
+ for (int x = 0; x < size; x++) {
if (tlb[x].range.contextId == context_id &&
tlb[x].range.partitionId == partition_id) {
if (tlb[x].valid == true) {
void
TLB::demapAll(int partition_id)
{
- int x;
DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
cacheValid = false;
- for (x = 0; x < size; x++) {
+ for (int x = 0; x < size; x++) {
if (tlb[x].valid && !tlb[x].pte.locked() &&
tlb[x].range.partitionId == partition_id) {
freeList.push_front(&tlb[x]);
void
TLB::invalidateAll()
{
- int x;
cacheValid = false;
-
lookupTable.clear();
- for (x = 0; x < size; x++) {
+
+ for (int x = 0; x < size; x++) {
if (tlb[x].valid == true)
freeList.push_back(&tlb[x]);
tlb[x].valid = false;
}
uint64_t
-TLB::TteRead(int entry) {
+TLB::TteRead(int entry)
+{
if (entry >= size)
panic("entry: %d\n", entry);
}
uint64_t
-TLB::TagRead(int entry) {
+TLB::TagRead(int entry)
+{
assert(entry < size);
uint64_t tag;
if (!tlb[entry].valid)
if (cacheEntry) {
if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
- req->setPaddr(cacheEntry->pte.paddr() & ~(cacheEntry->pte.size()-1) |
- vaddr & cacheEntry->pte.size()-1 );
- return NoFault;
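+ // Fast path: the cached instruction translation still covers this
+ // fetch address, so reuse it rather than searching the TLB.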
+ req->setPaddr(cacheEntry->pte.translate(vaddr));
+ return NoFault;
}
} else {
req->setPaddr(vaddr & PAddrImplMask);
cacheState = tlbdata;
cacheEntry = e;
- req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
- vaddr & e->pte.size()-1 );
+ req->setPaddr(e->pte.translate(vaddr));
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
}
-
-
Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
- /* @todo this could really use some profiling and fixing to make it faster! */
+ /*
+ * @todo this could really use some profiling and fixing to make
+ * it faster!
+ */
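+ // MISCREG_TLB_DATA is a packed pseudo-register holding the privilege,
+ // RED and LSU-enable state plus the partition and context ids that the
+ // checks below extract from it.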
uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
Addr vaddr = req->getVaddr();
Addr size = req->getSize();
if (cacheAsi[0] == asi &&
ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
(!write || ce->pte.writable())) {
- req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
- if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
- req->setFlags(req->getFlags() | UNCACHEABLE);
- DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
- return NoFault;
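+ // Cached data translation still matches this access: reuse it, and
+ // mark pages with the side-effect bit or physical address bit <39>
+ // set as uncacheable.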
+ req->setPaddr(ce->pte.translate(vaddr));
+ if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
+ req->setFlags(req->getFlags() | UNCACHEABLE);
+ DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
+ return NoFault;
} // if matched
} // if cache entry valid
if (cacheEntry[1]) {
if (cacheAsi[1] == asi &&
ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
(!write || ce->pte.writable())) {
- req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
- if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
- req->setFlags(req->getFlags() | UNCACHEABLE);
- DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
- return NoFault;
+ req->setPaddr(ce->pte.translate(vaddr));
+ if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
+ req->setFlags(req->getFlags() | UNCACHEABLE);
+ DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
+ return NoFault;
} // if matched
} // if cache entry valid
}
TlbEntry *e;
DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
- priv, hpriv, red, lsu_dm, part_id);
+ priv, hpriv, red, lsu_dm, part_id);
if (implicit) {
if (tl > 0) {
return new DataAccessException;
}
-
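+ // With the data MMU disabled (outside hyperprivileged/RED state) or a
+ // *_REAL ASI, treat the access as a real address in context 0.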
if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
real = true;
context = 0;
- };
+ }
if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
req->setPaddr(vaddr & PAddrImplMask);
return new DataAccessException;
}
-
if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
req->setFlags(req->getFlags() | UNCACHEABLE);
cacheAsi[0] = (ASI)0;
}
cacheValid = true;
- req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
- vaddr & e->pte.size()-1);
+ req->setPaddr(e->pte.translate(vaddr));
DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
return NoFault;
DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
(uint32_t)pkt->req->getAsi(), pkt->getAddr());
- ITB * itb = tc->getITBPtr();
+ ITB *itb = tc->getITBPtr();
switch (asi) {
case ASI_LSU_CONTROL_REG:
DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
(uint32_t)asi, va, data);
- ITB * itb = tc->getITBPtr();
+ ITB *itb = tc->getITBPtr();
switch (asi) {
case ASI_LSU_CONTROL_REG:
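+ // VA bit <9> requests a real-address mapping; bit <10> selects the
+ // sun4v TTE format over sun4u.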
real_insert = bits(va, 9,9);
pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
PageTableEntry::sun4u);
- insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
+ insert(va_insert, part_insert, ct_insert, real_insert, pte,
+ entry_insert);
break;
case ASI_IMMU_DEMAP:
ignore = false;
tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
post_interrupt(bits(data,5,0),0);
break;
- default:
+ default:
doMmuWriteError:
panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
(uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
itb->cx_config);
}
-
-
-
-
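+ // Form a TSB pointer from the tag access register and the TSB base and
+ // config registers, choosing the context-0 or non-zero-context pair.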
uint64_t
DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
return ptr;
}
-
void
TLB::serialize(std::ostream &os)
{