// Construct an entry that maps to physical address addr.
- TlbEntry(Addr _asn, Addr _vaddr, Addr _paddr)
+ TlbEntry(Addr _asn, Addr _vaddr, Addr _paddr,
+ bool uncacheable, bool read_only)
{
VAddr vaddr(_vaddr);
VAddr paddr(_paddr);
fonr = false;
fonw = false;
valid = true;
+ if (uncacheable || read_only)
+ warn("Alpha TlbEntry does not support uncacheable"
+ " or read-only mappings\n");
}
TlbEntry()
bool pxn; // Privileged Execute Never (LPAE only)
// Construct an entry that maps to physical address addr for SE mode.
- TlbEntry(Addr _asn, Addr _vaddr, Addr _paddr) :
+ TlbEntry(Addr _asn, Addr _vaddr, Addr _paddr,
+ bool uncacheable, bool read_only) :
pfn(_paddr >> PageShift), size(PageBytes - 1), vpn(_vaddr >> PageShift),
attributes(0), lookupLevel(L1), asid(_asn), vmid(0), N(0),
- innerAttrs(0), outerAttrs(0), ap(0), hap(0x3),
+ innerAttrs(0), outerAttrs(0), ap(read_only ? 0x3 : 0), hap(0x3),
domain(DomainType::Client), mtype(MemoryType::StronglyOrdered),
longDescFormat(false), isHyp(false), global(false), valid(true),
- ns(true), nstid(true), el(0), nonCacheable(false), shareable(false),
- outerShareable(false), xn(0), pxn(0)
+ ns(true), nstid(true), el(0), nonCacheable(uncacheable),
+ shareable(false), outerShareable(false), xn(0), pxn(0)
{
// no restrictions by default, hap = 0x3
// @todo Check the memory type
+ if (read_only)
+ warn("ARM TlbEntry does not support read-only mappings\n");
}
TlbEntry() :
{
Addr _pageStart;
TlbEntry() {}
- TlbEntry(Addr asn, Addr vaddr, Addr paddr) : _pageStart(paddr) {}
+ TlbEntry(Addr asn, Addr vaddr, Addr paddr,
+ bool uncacheable, bool read_only)
+ : _pageStart(paddr)
+ {
+ if (uncacheable || read_only)
+ warn("MIPS TlbEntry does not support uncacheable"
+ " or read-only mappings\n");
+ }
Addr pageStart()
{
{
}
- TlbEntry(Addr asn, Addr vaddr, Addr paddr)
+ TlbEntry(Addr asn, Addr vaddr, Addr paddr,
+ bool uncacheable, bool read_only)
: _pageStart(paddr)
{
+ if (uncacheable || read_only)
+ warn("Power TlbEntry does not support uncacheable"
+ " or read-only mappings\n");
}
void
TlbEntry()
{}
- TlbEntry(Addr asn, Addr vaddr, Addr paddr)
+ TlbEntry(Addr asn, Addr vaddr, Addr paddr,
+ bool uncacheable, bool read_only)
{
uint64_t entry = 0;
- entry |= 1ULL << 1; // Writable
+ if (!read_only)
+ entry |= 1ULL << 1; // Writable
entry |= 0ULL << 2; // Available in nonprivileged mode
entry |= 0ULL << 3; // No side effects
- entry |= 1ULL << 4; // Virtually cachable
- entry |= 1ULL << 5; // Physically cachable
+ if (!uncacheable) {
+ entry |= 1ULL << 4; // Virtually cachable
+ entry |= 1ULL << 5; // Physically cachable
+ }
entry |= 0ULL << 6; // Not locked
entry |= mbits(paddr, 39, 13); // Physical address
entry |= 0ULL << 48; // size = 8k
namespace X86ISA
{
-TlbEntry::TlbEntry(Addr asn, Addr _vaddr, Addr _paddr) :
- paddr(_paddr), vaddr(_vaddr), logBytes(PageShift), writable(true),
- user(true), uncacheable(false), global(false), patBit(0), noExec(false)
+TlbEntry::TlbEntry(Addr asn, Addr _vaddr, Addr _paddr,
+ bool uncacheable, bool read_only) :
+ paddr(_paddr), vaddr(_vaddr), logBytes(PageShift), writable(!read_only),
+ user(true), uncacheable(uncacheable), global(false), patBit(0),
+ noExec(false)
{}
void
TlbEntryTrie::Handle trieHandle;
- TlbEntry(Addr asn, Addr _vaddr, Addr _paddr);
+ TlbEntry(Addr asn, Addr _vaddr, Addr _paddr,
+ bool uncacheable, bool read_only);
TlbEntry() {}
void
*/
const std::vector<uint8_t> PageTableLayout = {9, 9, 9, 9};
+ /* x86-specific PTE flags */
enum PTEField{
- PTE_NotPresent = 0,
- PTE_Present,
- PTE_ReadOnly = 0,
- PTE_ReadWrite,
- PTE_Supervisor = 0,
- PTE_UserSupervisor,
+ PTE_NotPresent = 1,
+ PTE_Supervisor = 2,
+ PTE_ReadOnly = 4,
+ PTE_Uncacheable = 8,
};
/** Page table operations specific to x86 ISA.
class PageTableOps
{
public:
- void setPTEFields(PageTableEntry& PTE,
- uint64_t present = PTE_Present,
- uint64_t read_write = PTE_ReadWrite,
- uint64_t user_supervisor = PTE_UserSupervisor)
+ void setPTEFields(PageTableEntry& PTE, uint64_t flags = 0)
{
- PTE.p = present;
- PTE.w = read_write;
- PTE.u = user_supervisor;// both user and supervisor access allowed
+ PTE.p = flags & PTE_NotPresent ? 0 : 1;
+ PTE.pcd = flags & PTE_Uncacheable ? 1 : 0;
+ PTE.w = flags & PTE_ReadOnly ? 0 : 1;
+ PTE.u = flags & PTE_Supervisor ? 0 : 1;
}
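+
+ // Illustrative sketch (not part of the original change): since the
+ // PTEField values above are disjoint bits, callers can combine them, e.g.
+ //
+ //     PageTableEntry PTE;
+ //     setPTEFields(PTE, PTE_Uncacheable | PTE_ReadOnly);
+ //
+ // which should leave PTE.p = 1, PTE.pcd = 1, PTE.w = 0 and PTE.u = 1,
+ // i.e. a present, uncacheable, read-only, user-accessible entry.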
/** returns the physical memory address of the page table */
return PTE.base;
}
+ bool isUncacheable(const PageTableEntry PTE)
+ {
+ return PTE.pcd;
+ }
+
+ bool isReadOnly(const PageTableEntry PTE)
+ {
+ return !PTE.w;
+ }
+
/** sets the page number in a page table entry */
void setPnum(PageTableEntry& PTE, Addr paddr)
{
void initState(ThreadContext* tc);
- void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false);
+ void map(Addr vaddr, Addr paddr, int64_t size,
+ uint64_t flags = 0);
void remap(Addr vaddr, int64_t size, Addr new_vaddr);
void unmap(Addr vaddr, int64_t size);
bool isUnmapped(Addr vaddr, int64_t size);
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
- int64_t size, bool clobber)
+ int64_t size, uint64_t flags)
{
+ bool clobber = flags & Clobber;
// starting address must be page aligned
assert(pageOffset(vaddr) == 0);
fatal("addr 0x%x already mapped to %x", vaddr, entry_paddr);
}
pTableISAOps.setPnum(PTE, paddr >> PageShift);
- pTableISAOps.setPTEFields(PTE);
+ uint64_t PTE_flags = 0;
+ if (flags & NotPresent)
+ PTE_flags |= TheISA::PTE_NotPresent;
+ if (flags & Uncacheable)
+ PTE_flags |= TheISA::PTE_Uncacheable;
+ if (flags & ReadOnly)
+ PTE_flags |= TheISA::PTE_ReadOnly;
+ pTableISAOps.setPTEFields(PTE, PTE_flags);
p.write<PageTableEntry>(PTE_addr, PTE);
DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
eraseCacheEntry(vaddr);
- updateCache(vaddr, TlbEntry(pid, vaddr, paddr));
+ updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
+ flags & Uncacheable,
+ flags & ReadOnly));
}
}
}
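+
+ // Rough usage sketch (vaddr/paddr are placeholders): a caller holding a
+ // MultiLevelPageTable could request an uncacheable, read-only page with
+ //
+ //     pTable->map(vaddr, paddr, PageBytes,
+ //                 PageTableBase::Uncacheable | PageTableBase::ReadOnly);
+ //
+ // which the translation above turns into TheISA::PTE_Uncacheable |
+ // TheISA::PTE_ReadOnly before the PTE is written back.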
eraseCacheEntry(vaddr);
- updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr));
+ updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr,
+ pTableISAOps.isUncacheable(PTE),
+ pTableISAOps.isReadOnly(PTE)));
} else {
fatal("Page fault while remapping");
}
if (pnum == 0)
return false;
- entry = TlbEntry(pid, vaddr, pnum << PageShift);
+ entry = TlbEntry(pid, vaddr, pnum << PageShift,
+ pTableISAOps.isUncacheable(PTE),
+ pTableISAOps.isReadOnly(PTE));
updateCache(page_addr, entry);
} else {
return false;
}
void
-FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber)
+FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
{
+ bool clobber = flags & Clobber;
// starting address must be page aligned
assert(pageOffset(vaddr) == 0);
fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
}
- pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr);
+ pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
+ flags & Uncacheable,
+ flags & ReadOnly);
eraseCacheEntry(vaddr);
updateCache(vaddr, pTable[vaddr]);
}
virtual ~PageTableBase() {};
+ /* generic page table mapping flags
+  *              unset | set
+  * bit 0 - no-clobber | clobber
+  * bit 1 - present    | not-present
+  * bit 2 - cacheable  | uncacheable
+  * bit 3 - read-write | read-only
+  */
+ enum MappingFlags : uint32_t {
+ Clobber = 1,
+ NotPresent = 2,
+ Uncacheable = 4,
+ ReadOnly = 8,
+ };
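+
+ // Example (sketch): the flags are or-ed together, so
+ //
+ //     map(vaddr, paddr, size, Clobber | Uncacheable);
+ //
+ // requests an overwriting, uncacheable, present, read-write mapping,
+ // while the default (flags = 0) keeps the old no-clobber behaviour.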
+
virtual void initState(ThreadContext* tc) = 0;
// for DPRINTF compatibility
Addr pageAlign(Addr a) { return (a & ~offsetMask); }
Addr pageOffset(Addr a) { return (a & offsetMask); }
+ /**
+ * Maps a virtual memory region to a physical memory region.
+ * @param vaddr The starting virtual address of the region.
+ * @param paddr The starting physical address where the region is mapped.
+ * @param size The length of the region.
+ * @param flags Generic mapping flags that can be set by OR-ing values
+ * from the MappingFlags enum.
+ */
virtual void map(Addr vaddr, Addr paddr, int64_t size,
- bool clobber = false) = 0;
+ uint64_t flags = 0) = 0;
virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
virtual void unmap(Addr vaddr, int64_t size) = 0;
{
}
- void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false);
+ void map(Addr vaddr, Addr paddr, int64_t size,
+ uint64_t flags = 0);
void remap(Addr vaddr, int64_t size, Addr new_vaddr);
void unmap(Addr vaddr, int64_t size);
@classmethod
def export_methods(cls, code):
- code('bool map(Addr vaddr, Addr paddr, int size);')
+ code('bool map(Addr vaddr, Addr paddr, int size, bool cacheable=true);')
class EmulatedDriver(SimObject):
type = 'EmulatedDriver'
{
int npages = divCeil(size, (int64_t)PageBytes);
Addr paddr = system->allocPhysPages(npages);
- pTable->map(vaddr, paddr, size, clobber);
+ pTable->map(vaddr, paddr, size, clobber ? PageTableBase::Clobber : 0);
}
bool
bool
-Process::map(Addr vaddr, Addr paddr, int size)
+Process::map(Addr vaddr, Addr paddr, int size, bool cacheable)
{
- pTable->map(vaddr, paddr, size);
+ pTable->map(vaddr, paddr, size,
+ cacheable ? 0 : PageTableBase::Uncacheable);
return true;
}
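+
+ // Illustrative call (vaddr is a placeholder value): a configuration hook
+ // could establish an uncacheable identity mapping with
+ //
+ //     process->map(vaddr, vaddr, PageBytes, false /* cacheable */);
+ //
+ // which forwards to pTable->map() with PageTableBase::Uncacheable set.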
bool fixupStackFault(Addr vaddr);
/**
- * Map a contiguous range of virtual addresses in this process's
+ * Maps a contiguous range of virtual addresses in this process's
* address space to a contiguous range of physical addresses.
- * This function exists primarily to enable exposing the map
- * operation to python, so that configuration scripts can set up
- * mappings in SE mode.
+ * This function exists primarily to expose the map operation to
+ * python, so that configuration scripts can set up mappings in SE mode.
*
* @param vaddr The starting virtual address of the range.
* @param paddr The starting physical address of the range.
* @param size The length of the range in bytes.
+ * @param cacheable Specifies whether accesses are cacheable.
* @return True if the map operation was successful. (At this
* point in time, the map operation always succeeds.)
*/
- bool map(Addr vaddr, Addr paddr, int size);
+ bool map(Addr vaddr, Addr paddr, int size, bool cacheable = true);
void serialize(std::ostream &os);
void unserialize(Checkpoint *cp, const std::string &section);