case AlphaISA::IPR_DTB_PTE:
{
- AlphaISA::PTE &pte = tc->getDTBPtr()->index(!tc->misspeculating());
-
- retval |= ((uint64_t)pte.ppn & ULL(0x7ffffff)) << 32;
- retval |= ((uint64_t)pte.xre & ULL(0xf)) << 8;
- retval |= ((uint64_t)pte.xwe & ULL(0xf)) << 12;
- retval |= ((uint64_t)pte.fonr & ULL(0x1)) << 1;
- retval |= ((uint64_t)pte.fonw & ULL(0x1))<< 2;
- retval |= ((uint64_t)pte.asma & ULL(0x1)) << 4;
- retval |= ((uint64_t)pte.asn & ULL(0x7f)) << 57;
+ AlphaISA::TlbEntry &entry
+ = tc->getDTBPtr()->index(!tc->misspeculating());
+
+ retval |= ((uint64_t)entry.ppn & ULL(0x7ffffff)) << 32;
+ retval |= ((uint64_t)entry.xre & ULL(0xf)) << 8;
+ retval |= ((uint64_t)entry.xwe & ULL(0xf)) << 12;
+ retval |= ((uint64_t)entry.fonr & ULL(0x1)) << 1;
+ retval |= ((uint64_t)entry.fonw & ULL(0x1)) << 2;
+ retval |= ((uint64_t)entry.asma & ULL(0x1)) << 4;
+ retval |= ((uint64_t)entry.asn & ULL(0x7f)) << 57;
}
break;
case AlphaISA::IPR_DTB_TAG: {
- struct AlphaISA::PTE pte;
+ struct AlphaISA::TlbEntry entry;
// FIXME: granularity hints NYI...
if (EV5::DTB_PTE_GH(ipr[AlphaISA::IPR_DTB_PTE]) != 0)
ipr[idx] = val;
// construct PTE for new entry
- pte.ppn = EV5::DTB_PTE_PPN(ipr[AlphaISA::IPR_DTB_PTE]);
- pte.xre = EV5::DTB_PTE_XRE(ipr[AlphaISA::IPR_DTB_PTE]);
- pte.xwe = EV5::DTB_PTE_XWE(ipr[AlphaISA::IPR_DTB_PTE]);
- pte.fonr = EV5::DTB_PTE_FONR(ipr[AlphaISA::IPR_DTB_PTE]);
- pte.fonw = EV5::DTB_PTE_FONW(ipr[AlphaISA::IPR_DTB_PTE]);
- pte.asma = EV5::DTB_PTE_ASMA(ipr[AlphaISA::IPR_DTB_PTE]);
- pte.asn = EV5::DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]);
+ entry.ppn = EV5::DTB_PTE_PPN(ipr[AlphaISA::IPR_DTB_PTE]);
+ entry.xre = EV5::DTB_PTE_XRE(ipr[AlphaISA::IPR_DTB_PTE]);
+ entry.xwe = EV5::DTB_PTE_XWE(ipr[AlphaISA::IPR_DTB_PTE]);
+ entry.fonr = EV5::DTB_PTE_FONR(ipr[AlphaISA::IPR_DTB_PTE]);
+ entry.fonw = EV5::DTB_PTE_FONW(ipr[AlphaISA::IPR_DTB_PTE]);
+ entry.asma = EV5::DTB_PTE_ASMA(ipr[AlphaISA::IPR_DTB_PTE]);
+ entry.asn = EV5::DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]);
// insert new TAG/PTE value into data TLB
- tc->getDTBPtr()->insert(val, pte);
+ tc->getDTBPtr()->insert(val, entry);
}
break;
case AlphaISA::IPR_ITB_PTE: {
- struct AlphaISA::PTE pte;
+ struct AlphaISA::TlbEntry entry;
// FIXME: granularity hints NYI...
if (EV5::ITB_PTE_GH(val) != 0)
ipr[idx] = val;
// construct PTE for new entry
- pte.ppn = EV5::ITB_PTE_PPN(val);
- pte.xre = EV5::ITB_PTE_XRE(val);
- pte.xwe = 0;
- pte.fonr = EV5::ITB_PTE_FONR(val);
- pte.fonw = EV5::ITB_PTE_FONW(val);
- pte.asma = EV5::ITB_PTE_ASMA(val);
- pte.asn = EV5::ITB_ASN_ASN(ipr[AlphaISA::IPR_ITB_ASN]);
+ entry.ppn = EV5::ITB_PTE_PPN(val);
+ entry.xre = EV5::ITB_PTE_XRE(val);
+ entry.xwe = 0;
+ entry.fonr = EV5::ITB_PTE_FONR(val);
+ entry.fonw = EV5::ITB_PTE_FONW(val);
+ entry.asma = EV5::ITB_PTE_ASMA(val);
+ entry.asn = EV5::ITB_ASN_ASN(ipr[AlphaISA::IPR_ITB_ASN]);
// insert new TAG/PTE value into instruction TLB
- tc->getITBPtr()->insert(ipr[AlphaISA::IPR_ITB_TAG], pte);
+ tc->getITBPtr()->insert(ipr[AlphaISA::IPR_ITB_TAG], entry);
}
break;
VAddr vaddr(pc);
VAddr paddr(physaddr);
- PTE pte;
- pte.tag = vaddr.vpn();
- pte.ppn = paddr.vpn();
- pte.xre = 15; //This can be read in all modes.
- pte.xwe = 1; //This can be written only in kernel mode.
- pte.asn = p->M5_pid; //Address space number.
- pte.asma = false; //Only match on this ASN.
- pte.fonr = false; //Don't fault on read.
- pte.fonw = false; //Don't fault on write.
- pte.valid = true; //This entry is valid.
-
- tc->getITBPtr()->insert(vaddr.page(), pte);
+ TlbEntry entry;
+ entry.tag = vaddr.vpn();
+ entry.ppn = paddr.vpn();
+ entry.xre = 15; //This can be read in all modes.
+ entry.xwe = 1; //This can be written only in kernel mode.
+ entry.asn = p->M5_pid; //Address space number.
+ entry.asma = false; //Only match on this ASN.
+ entry.fonr = false; //Don't fault on read.
+ entry.fonw = false; //Don't fault on write.
+ entry.valid = true; //This entry is valid.
+
+ tc->getITBPtr()->insert(vaddr.page(), entry);
}
}
} else {
VAddr paddr(physaddr);
- PTE pte;
- pte.tag = vaddr.vpn();
- pte.ppn = paddr.vpn();
- pte.xre = 15; //This can be read in all modes.
- pte.xwe = 15; //This can be written in all modes.
- pte.asn = p->M5_pid; //Address space number.
- pte.asma = false; //Only match on this ASN.
- pte.fonr = false; //Don't fault on read.
- pte.fonw = false; //Don't fault on write.
- pte.valid = true; //This entry is valid.
-
- tc->getDTBPtr()->insert(vaddr.page(), pte);
+ TlbEntry entry;
+ entry.tag = vaddr.vpn();
+ entry.ppn = paddr.vpn();
+ entry.xre = 15; //This can be read in all modes.
+ entry.xwe = 15; //This can be written in all modes.
+ entry.asn = p->M5_pid; //Address space number.
+ entry.asma = false; //Only match on this ASN.
+ entry.fonr = false; //Don't fault on read.
+ entry.fonw = false; //Don't fault on write.
+ entry.valid = true; //This entry is valid.
+
+ tc->getDTBPtr()->insert(vaddr.page(), entry);
}
}
return new MachineCheckFault;
}
-static inline Fault genAlignmentFault()
-{
- return new AlignmentFault;
-}
-
class ResetFault : public AlphaFault
{
private:
/*
- * Copyright (c) 2006 The Regents of The University of Michigan
+ * Copyright (c) 2006-2007 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
namespace AlphaISA
{
void
- PTE::serialize(std::ostream &os)
+ TlbEntry::serialize(std::ostream &os)
{
SERIALIZE_SCALAR(tag);
SERIALIZE_SCALAR(ppn);
}
void
- PTE::unserialize(Checkpoint *cp, const std::string &section)
+ TlbEntry::unserialize(Checkpoint *cp, const std::string &section)
{
UNSERIALIZE_SCALAR(tag);
UNSERIALIZE_SCALAR(ppn);
Addr paddr() const { return _pfn() << PageShift; }
};
- // ITB/DTB page table entry
- struct PTE
+ // ITB/DTB table entry
+ struct TlbEntry
{
+ //Construct an entry that maps to physical address addr.
+ TlbEntry(Addr addr)
+ {
+ }
+
Addr tag; // virtual page number tag
Addr ppn; // physical page number
uint8_t xre; // read permissions - VMEM_PERM_* mask
TLB::TLB(const string &name, int s)
: SimObject(name), size(s), nlu(0)
{
- table = new PTE[size];
- memset(table, 0, sizeof(PTE[size]));
+ table = new TlbEntry[size];
+ memset(table, 0, sizeof(TlbEntry[size]));
flushCache();
}
}
// look up an entry in the TLB
-PTE *
+TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
// assume not found...
- PTE *retval = NULL;
-
- if (PTECache[0]) {
- if (vpn == PTECache[0]->tag &&
- (PTECache[0]->asma || PTECache[0]->asn == asn))
- retval = PTECache[0];
- else if (PTECache[1]) {
- if (vpn == PTECache[1]->tag &&
- (PTECache[1]->asma || PTECache[1]->asn == asn))
- retval = PTECache[1];
- else if (PTECache[2] && vpn == PTECache[2]->tag &&
- (PTECache[2]->asma || PTECache[2]->asn == asn))
- retval = PTECache[2];
+ TlbEntry *retval = NULL;
+
+ if (EntryCache[0]) {
+ if (vpn == EntryCache[0]->tag &&
+ (EntryCache[0]->asma || EntryCache[0]->asn == asn))
+ retval = EntryCache[0];
+ else if (EntryCache[1]) {
+ if (vpn == EntryCache[1]->tag &&
+ (EntryCache[1]->asma || EntryCache[1]->asn == asn))
+ retval = EntryCache[1];
+ else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
+ (EntryCache[2]->asma || EntryCache[2]->asn == asn))
+ retval = EntryCache[2];
}
}
if (i != lookupTable.end()) {
while (i->first == vpn) {
int index = i->second;
- PTE *pte = &table[index];
- assert(pte->valid);
- if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
- retval = updateCache(pte);
+ TlbEntry *entry = &table[index];
+ assert(entry->valid);
+ if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
+ retval = updateCache(entry);
break;
}
// insert a new TLB entry
void
-TLB::insert(Addr addr, PTE &pte)
+TLB::insert(Addr addr, TlbEntry &entry)
{
flushCache();
VAddr vaddr = addr;
lookupTable.erase(i);
}
- DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);
+ DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);
- table[nlu] = pte;
+ table[nlu] = entry;
table[nlu].tag = vaddr.vpn();
table[nlu].valid = true;
TLB::flushAll()
{
DPRINTF(TLB, "flushAll\n");
- memset(table, 0, sizeof(PTE[size]));
+ memset(table, 0, sizeof(TlbEntry[size]));
flushCache();
lookupTable.clear();
nlu = 0;
PageTable::iterator end = lookupTable.end();
while (i != end) {
int index = i->second;
- PTE *pte = &table[index];
- assert(pte->valid);
+ TlbEntry *entry = &table[index];
+ assert(entry->valid);
// we can't increment i after we erase it, so save a copy and
// increment it to get the next entry now
PageTable::iterator cur = i;
++i;
- if (!pte->asma) {
- DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
- pte->valid = false;
+ if (!entry->asma) {
+ DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, entry->tag, entry->ppn);
+ entry->valid = false;
lookupTable.erase(cur);
}
}
while (i != lookupTable.end() && i->first == vaddr.vpn()) {
int index = i->second;
- PTE *pte = &table[index];
- assert(pte->valid);
+ TlbEntry *entry = &table[index];
+ assert(entry->valid);
- if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
+ if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
- pte->ppn);
+ entry->ppn);
// invalidate this entry
- pte->valid = false;
+ entry->valid = false;
lookupTable.erase(i++);
} else {
SERIALIZE_SCALAR(nlu);
for (int i = 0; i < size; i++) {
- nameOut(os, csprintf("%s.PTE%d", name(), i));
+ nameOut(os, csprintf("%s.Entry%d", name(), i));
table[i].serialize(os);
}
}
UNSERIALIZE_SCALAR(nlu);
for (int i = 0; i < size; i++) {
- table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
+ table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
if (table[i].valid) {
lookupTable.insert(make_pair(table[i].tag, i));
}
} else {
// not a physical address: need to look up pte
int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
- PTE *pte = lookup(VAddr(req->getVaddr()).vpn(),
+ TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
asn);
- if (!pte) {
+ if (!entry) {
misses++;
return new ItbPageFault(req->getVaddr());
}
- req->setPaddr((pte->ppn << PageShift) +
+ req->setPaddr((entry->ppn << PageShift) +
(VAddr(req->getVaddr()).offset()
& ~3));
// check permissions for this access
- if (!(pte->xre &
+ if (!(entry->xre &
(1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
// instruction access fault
acv++;
int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
// not a physical address: need to look up pte
- PTE *pte = lookup(VAddr(req->getVaddr()).vpn(),
- asn);
+ TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);
- if (!pte) {
+ if (!entry) {
// page fault
if (write) { write_misses++; } else { read_misses++; }
uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
flags));
}
- req->setPaddr((pte->ppn << PageShift) +
+ req->setPaddr((entry->ppn << PageShift) +
VAddr(req->getVaddr()).offset());
if (write) {
- if (!(pte->xwe & MODE2MASK(mode))) {
+ if (!(entry->xwe & MODE2MASK(mode))) {
// declare the instruction access fault
write_acv++;
uint64_t flags = MM_STAT_WR_MASK |
MM_STAT_ACV_MASK |
- (pte->fonw ? MM_STAT_FONW_MASK : 0);
+ (entry->fonw ? MM_STAT_FONW_MASK : 0);
return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
}
- if (pte->fonw) {
+ if (entry->fonw) {
write_acv++;
uint64_t flags = MM_STAT_WR_MASK |
MM_STAT_FONW_MASK;
return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
}
} else {
- if (!(pte->xre & MODE2MASK(mode))) {
+ if (!(entry->xre & MODE2MASK(mode))) {
read_acv++;
uint64_t flags = MM_STAT_ACV_MASK |
- (pte->fonr ? MM_STAT_FONR_MASK : 0);
+ (entry->fonr ? MM_STAT_FONR_MASK : 0);
return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
}
- if (pte->fonr) {
+ if (entry->fonr) {
read_acv++;
uint64_t flags = MM_STAT_FONR_MASK;
return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
return checkCacheability(req);
}
-PTE &
+TlbEntry &
TLB::index(bool advance)
{
- PTE *pte = &table[nlu];
+ TlbEntry *entry = &table[nlu];
if (advance)
nextnlu();
- return *pte;
+ return *entry;
}
/* end namespace AlphaISA */ }
namespace AlphaISA
{
- class PTE;
+ class TlbEntry;
class TLB : public SimObject
{
protected:
typedef std::multimap<Addr, int> PageTable;
- PageTable lookupTable; // Quick lookup into page table
+ PageTable lookupTable; // Quick lookup into page table
- PTE *table; // the Page Table
- int size; // TLB Size
- int nlu; // not last used entry (for replacement)
+ TlbEntry *table; // the Page Table
+ int size; // TLB Size
+ int nlu; // not last used entry (for replacement)
void nextnlu() { if (++nlu >= size) nlu = 0; }
- PTE *lookup(Addr vpn, uint8_t asn);
+ TlbEntry *lookup(Addr vpn, uint8_t asn);
public:
TLB(const std::string &name, int size);
int getsize() const { return size; }
- PTE &index(bool advance = true);
- void insert(Addr vaddr, PTE &pte);
+ TlbEntry &index(bool advance = true);
+ void insert(Addr vaddr, TlbEntry &entry);
void flushAll();
void flushProcesses();
virtual void unserialize(Checkpoint *cp, const std::string &section);
// Most recently used page table entries
- PTE *PTECache[3];
- inline void flushCache() { memset(PTECache, 0, 3 * sizeof(PTE*)); }
- inline PTE* updateCache(PTE *pte) {
- PTECache[2] = PTECache[1];
- PTECache[1] = PTECache[0];
- PTECache[0] = pte;
- return pte;
+ TlbEntry *EntryCache[3];
+ inline void flushCache()
+ {
+ memset(EntryCache, 0, 3 * sizeof(TlbEntry*));
+ }
+
+ inline TlbEntry* updateCache(TlbEntry *entry) {
+ EntryCache[2] = EntryCache[1];
+ EntryCache[1] = EntryCache[0];
+ EntryCache[0] = entry;
+ return entry;
}
};
FaultVect UnimplementedOpcodeFault::_vect = 0x0481;
FaultStat UnimplementedOpcodeFault::_count;
-#if !FULL_SYSTEM
-//FaultName PageTableFault::_name = "page_table_fault";
-//FaultVect PageTableFault::_vect = 0x0000;
-//FaultStat PageTableFault::_count;
-#endif
-
FaultName InterruptFault::_name = "interrupt";
FaultVect InterruptFault::_vect = 0x0101;
FaultStat InterruptFault::_count;
FaultVect DspStateDisabledFault::_vect = 0x001a;
FaultStat DspStateDisabledFault::_count;
-
-/*void PageTableFault::invoke(ThreadContext *tc)
-{
- Process *p = tc->getProcessPtr();
-
- Addr page_addr = p->pTable->pageAlign(vaddr);
-
- warn("%i: [tid:%i]: %s encountered @ addr %x. Allocating new page for address range %x - %x.\n",
- curTick, tc->getThreadNum(), name(), vaddr, page_addr, page_addr+VMPageSize);
-
- p->pTable->allocate(page_addr, VMPageSize);
-
- return;
-}
-*/
- /* address is higher than the stack region or in the current stack region
- if (vaddr > p->stack_base || vaddr > p->stack_min)
- FaultBase::invoke(tc);
-
- // We've accessed the next page
- if (vaddr > p->stack_min - PageBytes) {
- p->stack_min -= PageBytes;
- if (p->stack_base - p->stack_min > 8*1024*1024) {
- warn("Already allocated Over max stack size for one thread\n");
- }
- warn("%i: Allocating page for range %x - %x",
- curTick, p->stack_min, p->stack_min-PageBytes);
-
- p->pTable->allocate(p->stack_min, PageBytes);
- warn("Increasing stack size by one page.");
- } else {
- FaultBase::invoke(tc);
- }*/
-
void ResetFault::invoke(ThreadContext *tc)
{
warn("[tid:%i]: %s encountered.\n", tc->getThreadNum(), name());
FaultStat & countStat() {return _count;}
};
-#if !FULL_SYSTEM
-//class PageTableFault : public MipsFault
-//{
-//private:
-// Addr vaddr;
-// static FaultName _name;
-// static FaultVect _vect;
-// static FaultStat _count;
-//public:
-// PageTableFault(Addr va)
-// : vaddr(va) {}
-// FaultName name() {return _name;}
-// FaultVect vect() {return _vect;}
-// FaultStat & countStat() {return _count;}
-// void invoke(ThreadContext * tc);
-//};
-
-static inline Fault genPageTableFault(Addr va)
-{
- return new PageTableFault(va);
-}
-#endif
-
-
static inline Fault genMachineCheckFault()
{
return new MachineCheckFault;
}
-static inline Fault genAlignmentFault()
-{
- return new AlignmentFault;
-}
-
class ResetFault : public MipsFault
{
private:
#ifndef __ARCH_MIPS_TYPES_HH__
#define __ARCH_MIPS_TYPES_HH__
+#include "mem/types.hh"
#include "sim/host.hh"
namespace MipsISA
RND_DOWN,
RND_UP,
RND_NEAREST
- };
+ };
+
+ typedef ::PageTable<> PageTable;
} // namespace MipsISA
return new InternalProcessorError;
}
-static inline Fault genAlignmentFault()
-{
- return new MemAddressNotAligned;
-}
-
} // SparcISA namespace
#include <inttypes.h>
#include "base/bigint.hh"
+#include "mem/page_table.hh"
namespace SparcISA
{
typedef int RegContextVal;
typedef uint16_t RegIndex;
+
+ typedef ::PageTable<> PageTable;
}
#endif
}
};
- static inline Fault genPageTableFault(Addr va)
- {
- panic("Page table fault not implemented in x86!\n");
- }
-
static inline Fault genMachineCheckFault()
{
panic("Machine check fault not implemented in x86!\n");
}
-
- static inline Fault genAlignmentFault()
- {
- panic("Alignment fault not implemented (or for the most part existant) in x86!\n");
- }
};
#endif // __ARCH_X86_FAULTS_HH__
#include "base/loader/object_file.hh"
#include "base/loader/elf_object.hh"
#include "base/misc.hh"
+#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "mem/page_table.hh"
#include "mem/translating_port.hh"
#include "arch/x86/tlb.hh"
#include "params/X86DTB.hh"
#include "params/X86ITB.hh"
+#include "sim/serialize.hh"
namespace X86ISA {
+ void
+ TlbEntry::serialize(std::ostream &os)
+ {
+ SERIALIZE_SCALAR(pageStart);
+ }
+
+ void
+ TlbEntry::unserialize(Checkpoint *cp, const std::string &section)
+ {
+ UNSERIALIZE_SCALAR(pageStart);
+ }
};
X86ISA::ITB *
#ifndef __ARCH_X86_TLB_HH__
#define __ARCH_X86_TLB_HH__
+#include <iostream>
+#include <string>
+
+#include "sim/host.hh"
#include "sim/tlb.hh"
+class Checkpoint;
+
namespace X86ISA
{
- class ITB : public GenericITB
+ struct TlbEntry
+ {
+ Addr pageStart;
+ TlbEntry() {}
+ TlbEntry(Addr paddr) : pageStart(paddr) {}
+
+ void serialize(std::ostream &os);
+ void unserialize(Checkpoint *cp, const std::string &section);
+ };
+
+ class ITB : public GenericITB<false, false>
{
public:
- ITB(const std::string &name) : GenericITB(name)
+ ITB(const std::string &name) : GenericITB<false, false>(name)
{}
};
- class DTB : public GenericDTB
+ class DTB : public GenericDTB<false, false>
{
public:
- DTB(const std::string &name) : GenericDTB(name)
+ DTB(const std::string &name) : GenericDTB<false, false>(name)
{}
};
};
{
}
-Fault
-PageTable::page_check(Addr addr, int64_t size) const
-{
- if (size < sizeof(uint64_t)) {
- if (!isPowerOf2(size)) {
- panic("Invalid request size!\n");
- return genMachineCheckFault();
- }
-
- if ((size - 1) & addr)
- return genAlignmentFault();
- }
- else {
- if ((addr & (VMPageSize - 1)) + size > VMPageSize) {
- panic("Invalid request size!\n");
- return genMachineCheckFault();
- }
-
- if ((sizeof(uint64_t) - 1) & addr)
- return genAlignmentFault();
- }
-
- return NoFault;
-}
-
-
void
PageTable::allocate(Addr vaddr, int64_t size)
{
DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr+ size);
for (; size > 0; size -= pageSize, vaddr += pageSize) {
- m5::hash_map<Addr,Addr>::iterator iter = pTable.find(vaddr);
+ PTableItr iter = pTable.find(vaddr);
if (iter != pTable.end()) {
// already mapped
- fatal("PageTable::allocate: address 0x%x already mapped", vaddr);
+ fatal("PageTable::allocate: address 0x%x already mapped",
+ vaddr);
}
- pTable[vaddr] = system->new_page();
+ pTable[vaddr] = TheISA::TlbEntry(system->new_page());
updateCache(vaddr, pTable[vaddr]);
}
}
-
-
bool
-PageTable::translate(Addr vaddr, Addr &paddr)
+PageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
{
Addr page_addr = pageAlign(vaddr);
- paddr = 0;
if (pTableCache[0].vaddr == page_addr) {
- paddr = pTableCache[0].paddr + pageOffset(vaddr);
+ entry = pTableCache[0].entry;
return true;
}
if (pTableCache[1].vaddr == page_addr) {
- paddr = pTableCache[1].paddr + pageOffset(vaddr);
+ entry = pTableCache[1].entry;
return true;
}
if (pTableCache[2].vaddr == page_addr) {
- paddr = pTableCache[2].paddr + pageOffset(vaddr);
+ entry = pTableCache[2].entry;
return true;
}
- m5::hash_map<Addr,Addr>::iterator iter = pTable.find(page_addr);
+ PTableItr iter = pTable.find(page_addr);
if (iter == pTable.end()) {
return false;
}
updateCache(page_addr, iter->second);
- paddr = iter->second + pageOffset(vaddr);
+ entry = iter->second;
return true;
}
+bool
+PageTable::translate(Addr vaddr, Addr &paddr)
+{
+ TheISA::TlbEntry entry;
+ if (!lookup(vaddr, entry))
+ return false;
+ paddr = pageOffset(vaddr) + entry.pageStart;
+ return true;
+}
Fault
-PageTable::translate(RequestPtr &req)
+PageTable::translate(RequestPtr req)
{
Addr paddr;
assert(pageAlign(req->getVaddr() + req->getSize() - 1)
== pageAlign(req->getVaddr()));
if (!translate(req->getVaddr(), paddr)) {
- return Fault(new PageTableFault(req->getVaddr()));
+ return Fault(new GenericPageTableFault(req->getVaddr()));
}
req->setPaddr(paddr);
- return page_check(req->getPaddr(), req->getSize());
+ if ((paddr & (pageSize - 1)) + req->getSize() > pageSize) {
+ panic("Request spans page boundaries!\n");
+ return NoFault;
+ }
+ return NoFault;
}
void
int count = 0;
- m5::hash_map<Addr,Addr>::iterator iter = pTable.begin();
- m5::hash_map<Addr,Addr>::iterator end = pTable.end();
+ PTableItr iter = pTable.begin();
+ PTableItr end = pTable.end();
while (iter != end) {
paramOut(os, csprintf("ptable.entry%dvaddr", count), iter->first);
- paramOut(os, csprintf("ptable.entry%dpaddr", count), iter->second);
+ iter->second.serialize(os);
++iter;
++count;
{
int i = 0, count;
paramIn(cp, section, "ptable.size", count);
- Addr vaddr, paddr;
+ Addr vaddr;
+ TheISA::TlbEntry entry;
pTable.clear();
while(i < count) {
paramIn(cp, section, csprintf("ptable.entry%dvaddr", i), vaddr);
- paramIn(cp, section, csprintf("ptable.entry%dpaddr", i), paddr);
- pTable[vaddr] = paddr;
+ entry.unserialize(cp, section);
+ pTable[vaddr] = entry;
++i;
}
-
}
#include "sim/faults.hh"
#include "arch/isa_traits.hh"
+#include "arch/tlb.hh"
#include "base/hashmap.hh"
-#include "base/trace.hh"
#include "mem/request.hh"
-#include "mem/packet.hh"
-#include "sim/sim_object.hh"
+#include "sim/host.hh"
+#include "sim/serialize.hh"
class System;
class PageTable
{
protected:
- m5::hash_map<Addr,Addr> pTable;
+ typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable;
+ typedef PTable::iterator PTableItr;
+ PTable pTable;
struct cacheElement {
- Addr paddr;
Addr vaddr;
- } ;
+ TheISA::TlbEntry entry;
+ };
struct cacheElement pTableCache[3];
Addr pageAlign(Addr a) { return (a & ~offsetMask); }
Addr pageOffset(Addr a) { return (a & offsetMask); }
- Fault page_check(Addr addr, int64_t size) const;
-
void allocate(Addr vaddr, int64_t size);
+ /**
+ * Lookup function
+ * @param vaddr The virtual address.
+ * @param entry The matching page table entry, filled in on success.
+ * @return True if a translation for vaddr exists.
+ */
+ bool lookup(Addr vaddr, TheISA::TlbEntry &entry);
+
/**
* Translate function
* @param vaddr The virtual address.
/**
* Perform a translation on the memory request, fills in paddr
- * field of mem_req.
+ * field of req.
* @param req The memory request.
*/
- Fault translate(RequestPtr &req);
+ Fault translate(RequestPtr req);
/**
* Update the page table cache.
* @param vaddr virtual address (page aligned) to check
- * @param paddr physical address (page aligned) to return
+ * @param entry page table entry to return
*/
- inline void updateCache(Addr vaddr, Addr paddr)
+ inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
{
- pTableCache[2].paddr = pTableCache[1].paddr;
+ pTableCache[2].entry = pTableCache[1].entry;
pTableCache[2].vaddr = pTableCache[1].vaddr;
- pTableCache[1].paddr = pTableCache[0].paddr;
+ pTableCache[1].entry = pTableCache[0].entry;
pTableCache[1].vaddr = pTableCache[0].vaddr;
- pTableCache[0].paddr = paddr;
+ pTableCache[0].entry = entry;
pTableCache[0].vaddr = vaddr;
}
void serialize(std::ostream &os);
+
void unserialize(Checkpoint *cp, const std::string &section);
};
{
panic("Unimpfault: %s\n", panicStr.c_str());
}
+
#if !FULL_SYSTEM
-void PageTableFault::invoke(ThreadContext *tc)
+void GenericPageTableFault::invoke(ThreadContext *tc)
{
Process *p = tc->getProcessPtr();
panic("Page table fault when accessing virtual address %#x\n", vaddr);
}
+
+void GenericAlignmentFault::invoke(ThreadContext *tc)
+{
+ panic("Alignment fault when accessing virtual address %#x\n", vaddr);
+}
#endif
};
#if !FULL_SYSTEM
-class PageTableFault : public FaultBase
+class GenericPageTableFault : public FaultBase
{
private:
Addr vaddr;
public:
- FaultName name() const {return "M5 page table fault";}
- PageTableFault(Addr va) : vaddr(va) {}
+ FaultName name() const {return "Generic page table fault";}
+ GenericPageTableFault(Addr va) : vaddr(va) {}
+ void invoke(ThreadContext * tc);
+};
+
+class GenericAlignmentFault : public FaultBase
+{
+ private:
+ Addr vaddr;
+ public:
+ FaultName name() const {return "Generic alignment fault";}
+ GenericAlignmentFault(Addr va) : vaddr(va) {}
void invoke(ThreadContext * tc);
};
#endif
#include "sim/tlb.hh"
Fault
-GenericITB::translate(RequestPtr &req, ThreadContext *tc)
+GenericTLBBase::translate(RequestPtr req, ThreadContext *tc)
{
#if FULL_SYSTEM
- panic("Generic ITB translation shouldn't be used in full system mode.\n");
+ panic("Generic translation shouldn't be used in full system mode.\n");
#else
- return tc->getProcessPtr()->pTable->translate(req);
-#endif
-}
+ Process *p = tc->getProcessPtr();
-Fault
-GenericDTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
-{
-#if FULL_SYSTEM
- panic("Generic DTB translation shouldn't be used in full system mode.\n");
-#else
- return tc->getProcessPtr()->pTable->translate(req);
+ Fault fault = p->pTable->translate(req);
+ if (fault != NoFault)
+ return fault;
+
+ return NoFault;
#endif
-};
+}
#ifndef __SIM_TLB_HH__
#define __SIM_TLB_HH__
+#include "base/misc.hh"
#include "mem/request.hh"
-#include "sim/sim_object.hh"
#include "sim/faults.hh"
+#include "sim/sim_object.hh"
class ThreadContext;
class Packet;
-class GenericTLB : public SimObject
+class GenericTLBBase : public SimObject
{
- public:
- GenericTLB(const std::string &name) : SimObject(name)
+ protected:
+ GenericTLBBase(const std::string &name) : SimObject(name)
{}
+
+ Fault translate(RequestPtr req, ThreadContext *tc);
};
-class GenericITB : public GenericTLB
+template <bool doSizeCheck=true, bool doAlignmentCheck=true>
+class GenericTLB : public GenericTLBBase
{
public:
- GenericITB(const std::string &name) : GenericTLB(name)
+ GenericTLB(const std::string &name) : GenericTLBBase(name)
{}
- Fault translate(RequestPtr &req, ThreadContext *tc);
+ Fault translate(RequestPtr req, ThreadContext *tc, bool=false)
+ {
+ Fault fault = GenericTLBBase::translate(req, tc);
+ if (fault != NoFault)
+ return fault;
+
+ typeof(req->getSize()) size = req->getSize();
+ Addr paddr = req->getPaddr();
+
+ if (doSizeCheck && !isPowerOf2(size))
+ panic("Invalid request size!\n");
+ if (doAlignmentCheck && ((size - 1) & paddr))
+ return Fault(new GenericAlignmentFault(paddr));
+
+ return NoFault;
+ }
};
-class GenericDTB : public GenericTLB
+template <bool doSizeCheck=true, bool doAlignmentCheck=true>
+class GenericITB : public GenericTLB<doSizeCheck, doAlignmentCheck>
{
public:
- GenericDTB(const std::string &name) : GenericTLB(name)
+ GenericITB(const std::string &name) :
+ GenericTLB<doSizeCheck, doAlignmentCheck>(name)
{}
+};
- Fault translate(RequestPtr &req, ThreadContext *tc, bool write);
+template <bool doSizeCheck=true, bool doAlignmentCheck=true>
+class GenericDTB : public GenericTLB<doSizeCheck, doAlignmentCheck>
+{
+ public:
+ GenericDTB(const std::string &name) :
+ GenericTLB<doSizeCheck, doAlignmentCheck>(name)
+ {}
};
#endif // __SIM_TLB_HH__