namespace X86ISA
{
- BitUnion64(VAddr)
- Bitfield<20, 12> longl1;
- Bitfield<29, 21> longl2;
- Bitfield<38, 30> longl3;
- Bitfield<47, 39> longl4;
-
- Bitfield<20, 12> pael1;
- Bitfield<29, 21> pael2;
- Bitfield<31, 30> pael3;
-
- Bitfield<21, 12> norml1;
- Bitfield<31, 22> norml2;
- EndBitUnion(VAddr)
-
- // Unfortunately, the placement of the base field in a page table entry is
- // very erratic and would make a mess here. It might be moved here at some
- // point in the future.
- BitUnion64(PageTableEntry)
- Bitfield<63> nx;
- Bitfield<51, 12> base;
- Bitfield<11, 9> avl;
- Bitfield<8> g;
- Bitfield<7> ps;
- Bitfield<6> d;
- Bitfield<5> a;
- Bitfield<4> pcd;
- Bitfield<3> pwt;
- Bitfield<2> u;
- Bitfield<1> w;
- Bitfield<0> p;
- EndBitUnion(PageTableEntry)
-
-
struct TlbEntry : public Serializable
{
// The base of the physical page.
void unserialize(CheckpointIn &cp) override;
};
- /** The size of each level of the page table expressed in base 2
- * logarithmic values
- */
- const std::vector<uint8_t> PageTableLayout = {9, 9, 9, 9};
-
- /* x86 specific PTE flags */
- enum PTEField{
- PTE_NotPresent = 1,
- PTE_Supervisor = 2,
- PTE_ReadOnly = 4,
- PTE_Uncacheable = 8,
- };
- /** Page table operations specific to x86 ISA.
- * Indended to be used as parameter of MultiLevelPageTable.
- */
- class PageTableOps
+ BitUnion64(VAddr)
+ Bitfield<20, 12> longl1;
+ Bitfield<29, 21> longl2;
+ Bitfield<38, 30> longl3;
+ Bitfield<47, 39> longl4;
+
+ Bitfield<20, 12> pael1;
+ Bitfield<29, 21> pael2;
+ Bitfield<31, 30> pael3;
+
+ Bitfield<21, 12> norml1;
+ Bitfield<31, 22> norml2;
+ EndBitUnion(VAddr)
+
+ // Unfortunately, the placement of the base field in a page table entry is
+ // very erratic and would make a mess here. It might be moved here at some
+ // point in the future.
+ BitUnion64(PageTableEntry)
+ Bitfield<63> nx;
+ Bitfield<51, 12> base;
+ Bitfield<11, 9> avl;
+ Bitfield<8> g;
+ Bitfield<7> ps;
+ Bitfield<6> d;
+ Bitfield<5> a;
+ Bitfield<4> pcd;
+ Bitfield<3> pwt;
+ Bitfield<2> u;
+ Bitfield<1> w;
+ Bitfield<0> p;
+ EndBitUnion(PageTableEntry)
+
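+ // One entry of a long mode page table, plus the walker state for that
+ // level. The template parameters give the virtual address bit range that
+ // indexes this level's table, e.g. <47, 39> for the top level.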
+ template <int first, int last>
+ class LongModePTE
{
public:
- void setPTEFields(PageTableEntry& PTE, uint64_t flags = 0)
- {
- PTE.p = flags & PTE_NotPresent ? 0 : 1;
- PTE.pcd = flags & PTE_Uncacheable ? 1 : 0;
- PTE.w = flags & PTE_ReadOnly ? 0 : 1;
- PTE.u = flags & PTE_Supervisor ? 0 : 1;
- }
+ Addr paddr() { return pte.base << PageShift; }
+ void paddr(Addr addr) { pte.base = addr >> PageShift; }
- /** returns the page number out of a page table entry */
- Addr getPnum(PageTableEntry PTE)
- {
- return PTE.base;
- }
+ bool present() { return pte.p; }
+ void present(bool p) { pte.p = p ? 1 : 0; }
- bool isUncacheable(const PageTableEntry PTE)
- {
- return PTE.pcd;
- }
+ bool uncacheable() { return pte.pcd; }
+ void uncacheable(bool u) { pte.pcd = u ? 1 : 0; }
- bool isReadOnly(PageTableEntry PTE)
- {
- return !PTE.w;
- }
+ bool readonly() { return !pte.w; }
+ void readonly(bool r) { pte.w = r ? 0 : 1; }
- /** sets the page number in a page table entry */
- void setPnum(PageTableEntry& PTE, Addr paddr)
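+ // Compute where this level's entry for vaddr lives within the table at
+ // physical address 'table' and read it, remembering the entry's address
+ // so write() can put it back later.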
+ void
+ read(PortProxy &p, Addr table, Addr vaddr)
{
- PTE.base = paddr;
+ entryAddr = table;
+ entryAddr += bits(vaddr, first, last) * sizeof(PageTableEntry);
+ pte = p.read<PageTableEntry>(entryAddr);
}
- /** returns the offsets to index in every level of a page
- * table, contained in a virtual address
- */
- std::vector<uint64_t> getOffsets(Addr vaddr)
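+ // Build a fresh, user-accessible entry pointing at _paddr; used both for
+ // new leaf mappings and for newly allocated intermediate tables.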
+ void
+ reset(Addr _paddr, bool _present=true,
+ bool _uncacheable=false, bool _readonly=false)
+ {
+ pte = 0;
+ pte.u = 1;
+ paddr(_paddr);
+ present(_present);
+ uncacheable(_uncacheable);
+ readonly(_readonly);
+ }
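+
+ // Size of this level's table in whole pages: 2^(first - last + 1)
+ // entries of sizeof(PageTableEntry) == 8 bytes each, i.e.
+ // 1 << (first - last + 4 - PageShift) pages.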
+
+ void write(PortProxy &p) { p.write(entryAddr, pte); }
+
+ static int
+ tableSize()
{
- X86ISA::VAddr addr(vaddr);
- return {addr.longl1, addr.longl2, addr.longl3, addr.longl4};
+ return 1 << ((first - last) + 4 - PageShift);
}
- };
+ protected:
+ PageTableEntry pte;
+ Addr entryAddr;
+ };
}
#endif
static const int NumArgumentRegs32 M5_VAR_USED =
sizeof(ArgumentReg) / sizeof(const int);
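+// With 4 KiB pages (PageShift == 12), each level of the long mode page
+// table indexes 9 bits of the 48-bit virtual address: vaddr[47:39] selects
+// the PML4 entry, vaddr[38:30] the PDPT entry, vaddr[29:21] the PD entry,
+// and vaddr[20:12] the PT entry; vaddr[11:0] is the page offset.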
+template class MultiLevelPageTable<LongModePTE<47, 39>,
+ LongModePTE<38, 30>,
+ LongModePTE<29, 21>,
+ LongModePTE<20, 12> >;
+typedef MultiLevelPageTable<LongModePTE<47, 39>,
+ LongModePTE<38, 30>,
+ LongModePTE<29, 21>,
+ LongModePTE<20, 12> > ArchPageTable;
+
X86Process::X86Process(ProcessParams *params, ObjectFile *objFile,
SyscallDesc *_syscallDescs, int _numSyscallDescs)
: Process(params, params->useArchPT ?
static_cast<EmulationPageTable *>(
- new ArchPageTable(
- params->name, params->pid,
- params->system, PageBytes,
- PageTableLayout)) :
+ new ArchPageTable(params->name, params->pid,
+ params->system, PageBytes)) :
new EmulationPageTable(params->name, params->pid,
PageBytes),
objFile),
physProxy.writeBlob(pfHandlerPhysAddr, faultBlob, sizeof(faultBlob));
- MultiLevelPageTable<PageTableOps> *pt =
- dynamic_cast<MultiLevelPageTable<PageTableOps> *>(pTable);
-
/* Syscall handler */
- pt->map(syscallCodeVirtAddr, syscallCodePhysAddr, PageBytes, false);
+ pTable->map(syscallCodeVirtAddr, syscallCodePhysAddr,
+ PageBytes, false);
/* GDT */
- pt->map(GDTVirtAddr, gdtPhysAddr, PageBytes, false);
+ pTable->map(GDTVirtAddr, gdtPhysAddr, PageBytes, false);
/* IDT */
- pt->map(IDTVirtAddr, idtPhysAddr, PageBytes, false);
+ pTable->map(IDTVirtAddr, idtPhysAddr, PageBytes, false);
/* TSS */
- pt->map(TSSVirtAddr, tssPhysAddr, PageBytes, false);
+ pTable->map(TSSVirtAddr, tssPhysAddr, PageBytes, false);
/* IST */
- pt->map(ISTVirtAddr, istPhysAddr, PageBytes, false);
+ pTable->map(ISTVirtAddr, istPhysAddr, PageBytes, false);
/* PF handler */
- pt->map(PFHandlerVirtAddr, pfHandlerPhysAddr, PageBytes, false);
+ pTable->map(PFHandlerVirtAddr, pfHandlerPhysAddr, PageBytes, false);
/* MMIO region for m5ops */
- pt->map(MMIORegionVirtAddr, MMIORegionPhysAddr, 16*PageBytes, false);
+ pTable->map(MMIORegionVirtAddr, MMIORegionPhysAddr,
+ 16 * PageBytes, false);
} else {
for (int i = 0; i < contextIds.size(); i++) {
ThreadContext * tc = system->getThreadContext(contextIds[i]);
#include <string>
#include <vector>
+#include "arch/x86/pagetable.hh"
#include "mem/multi_level_page_table.hh"
#include "sim/aux_vector.hh"
#include "sim/process.hh"
* These page tables are stored in system memory and respect x86
* specification.
*/
- typedef MultiLevelPageTable<PageTableOps> ArchPageTable;
Addr _gdtStart;
Addr _gdtSize;
Source('fs_translating_port_proxy.cc')
Source('se_translating_port_proxy.cc')
Source('page_table.cc')
-if env['TARGET_ISA'] == 'x86':
- Source('multi_level_page_table.cc')
if env['HAVE_DRAMSIM']:
SimObject('DRAMSim2.py')
+++ /dev/null
-/*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Alexandru Dutu
- */
-
-#include "mem/multi_level_page_table_impl.hh"
-
-template class MultiLevelPageTable<TheISA::PageTableOps>;
#include <string>
#include "base/types.hh"
-#include "config/the_isa.hh"
#include "mem/page_table.hh"
class System;
*
* @see MultiLevelPageTable
*/
-template <class ISAOps>
+
+namespace {
+
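+// Allocate and zero the physical pages holding the table for level First and
+// return its physical address. Despite the name, the walker also uses this to
+// allocate tables for the lower levels on demand.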
+template <class First, class ...Rest>
+Addr
+prepTopTable(System *system, Addr pageSize)
+{
+ Addr addr = system->allocPhysPages(First::tableSize());
+ PortProxy &p = system->physProxy;
+ p.memsetBlob(addr, 0, First::tableSize() * pageSize);
+ return addr;
+}
+
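+// LastType peels types off the front of a parameter pack until only one is
+// left; its ::type member is therefore the final (leaf) entry type.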
+template <class ...Types>
+struct LastType;
+
+template <class First, class Second, class ...Rest>
+struct LastType<First, Second, Rest...>
+{
+ typedef typename LastType<Second, Rest...>::type type;
+};
+
+template <class Only>
+struct LastType<Only>
+{
+ typedef Only type;
+};
+
+
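+// WalkWrapper recurses through the level types one at a time, reading each
+// level's entry and allocating the next level's table on demand (when
+// 'allocate' is set), until only the leaf type remains; the leaf entry is
+// then read into *entry.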
+template <class ...Types>
+struct WalkWrapper;
+
+template <class Final, class Only>
+struct WalkWrapper<Final, Only>
+{
+ static void
+ walk(System *system, Addr pageSize, Addr table, Addr vaddr,
+ bool allocate, Final *entry)
+ {
+ entry->read(system->physProxy, table, vaddr);
+ }
+};
+
+template <class Final, class First, class Second, class ...Rest>
+struct WalkWrapper<Final, First, Second, Rest...>
+{
+ static void
+ walk(System *system, Addr pageSize, Addr table, Addr vaddr,
+ bool allocate, Final *entry)
+ {
+ First first;
+ first.read(system->physProxy, table, vaddr);
+
+ Addr next;
+ if (!first.present()) {
+ fatal_if(!allocate,
+ "Page fault while walking the page table.");
+ next = prepTopTable<Second>(system, pageSize);
+ first.reset(next);
+ first.write(system->physProxy);
+ } else {
+ next = first.paddr();
+ }
+ WalkWrapper<Final, Second, Rest...>::walk(
+ system, pageSize, next, vaddr, allocate, entry);
+ }
+};
+
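+// Entry point for a page table walk; the last type in EntryTypes determines
+// the kind of entry returned through 'entry'.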
+template <class ...EntryTypes>
+void
+walk(System *system, Addr pageSize, Addr table, Addr vaddr,
+ bool allocate, typename LastType<EntryTypes...>::type *entry)
+{
+ WalkWrapper<typename LastType<EntryTypes...>::type, EntryTypes...>::walk(
+ system, pageSize, table, vaddr, allocate, entry);
+}
+
+}
+
+
+template <class ...EntryTypes>
class MultiLevelPageTable : public EmulationPageTable
{
- /**
- * ISA specific operations
- */
- ISAOps pTableISAOps;
+ typedef typename LastType<EntryTypes...>::type Final;
/**
* Pointer to System object
*/
Addr _basePtr;
- /**
- * Vector with sizes of all levels in base 2 logarithmic
- */
- const std::vector<uint8_t> logLevelSize;
-
- /**
- * Number of levels contained by the page table
- */
- const uint64_t numLevels;
-
- /**
- * Method for walking the page table
- *
- * @param vaddr Virtual address that is being looked-up
- * @param allocate Specifies whether memory should be allocated while
- * walking the page table
- * @return PTE_addr The address of the found PTE
- */
- void walk(Addr vaddr, bool allocate, Addr &PTE_addr);
-
public:
MultiLevelPageTable(const std::string &__name, uint64_t _pid,
- System *_sys, Addr pageSize,
- const std::vector<uint8_t> &layout);
- ~MultiLevelPageTable();
+ System *_sys, Addr pageSize) :
+ EmulationPageTable(__name, _pid, pageSize), system(_sys)
+ {}
+
+ ~MultiLevelPageTable() {}
- void initState(ThreadContext* tc) override;
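+    // Allocate and zero the top level table once thread contexts exist; its
+    // physical address is what basePtr() returns.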
+ void
+ initState(ThreadContext* tc) override
+ {
+ _basePtr = prepTopTable<EntryTypes...>(system, pageSize);
+ }
Addr basePtr() { return _basePtr; }
- void map(Addr vaddr, Addr paddr, int64_t size,
- uint64_t flags = 0) override;
- void remap(Addr vaddr, int64_t size, Addr new_vaddr) override;
- void unmap(Addr vaddr, int64_t size) override;
- void serialize(CheckpointOut &cp) const override;
- void unserialize(CheckpointIn &cp) override;
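+    // Each mapping operation keeps the functional EmulationPageTable in sync
+    // and additionally writes the corresponding entries into the in-memory
+    // page tables via the walker above.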
+ void
+ map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags = 0) override
+ {
+ EmulationPageTable::map(vaddr, paddr, size, flags);
+
+ Final entry;
+
+ for (int64_t offset = 0; offset < size; offset += pageSize) {
+ walk<EntryTypes...>(system, pageSize, _basePtr,
+ vaddr + offset, true, &entry);
+
+ entry.reset(paddr + offset, true, flags & Uncacheable,
+ flags & ReadOnly);
+ entry.write(system->physProxy);
+
+ DPRINTF(MMU, "New mapping: %#x-%#x\n",
+ vaddr + offset, paddr + offset);
+ }
+ }
+
+ void
+ remap(Addr vaddr, int64_t size, Addr new_vaddr) override
+ {
+ EmulationPageTable::remap(vaddr, size, new_vaddr);
+
+ Final old_entry, new_entry;
+
+ for (int64_t offset = 0; offset < size; offset += pageSize) {
+ // Unmap the original mapping.
+ walk<EntryTypes...>(system, pageSize, _basePtr, vaddr + offset,
+ false, &old_entry);
+ old_entry.present(false);
+ old_entry.write(system->physProxy);
+
+ // Map the new one.
+ walk<EntryTypes...>(system, pageSize, _basePtr, new_vaddr + offset,
+ true, &new_entry);
+ new_entry.reset(old_entry.paddr(), true, old_entry.uncacheable(),
+ old_entry.readonly());
+ new_entry.write(system->physProxy);
+ }
+ }
+
+ void
+ unmap(Addr vaddr, int64_t size) override
+ {
+ EmulationPageTable::unmap(vaddr, size);
+
+ Final entry;
+
+ for (int64_t offset = 0; offset < size; offset += pageSize) {
+ walk<EntryTypes...>(system, pageSize, _basePtr,
+ vaddr + offset, false, &entry);
+ fatal_if(!entry.present(),
+ "PageTable::unmap: Address %#x not mapped.", vaddr);
+ entry.present(false);
+ entry.write(system->physProxy);
+ DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
+ }
+ }
+
+ void
+ serialize(CheckpointOut &cp) const override
+ {
+ EmulationPageTable::serialize(cp);
+        /** Since the page table is stored in system memory,
+         * which is serialized separately, it suffices to
+         * serialize just the base pointer here.
+         */
+ paramOut(cp, "ptable.pointer", _basePtr);
+ }
+
+ void
+ unserialize(CheckpointIn &cp) override
+ {
+ EmulationPageTable::unserialize(cp);
+ paramIn(cp, "ptable.pointer", _basePtr);
+ }
};
#endif // __MEM_MULTI_LEVEL_PAGE_TABLE_HH__
+++ /dev/null
-/*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Alexandru Dutu
- */
-
-/**
- * @file
- * Definitions of page table
- */
-#include <string>
-
-#include "arch/isa_traits.hh"
-#include "base/trace.hh"
-#include "config/the_isa.hh"
-#include "debug/MMU.hh"
-#include "mem/multi_level_page_table.hh"
-#include "mem/page_table.hh"
-
-using namespace std;
-using namespace TheISA;
-
-template <class ISAOps>
-MultiLevelPageTable<ISAOps>::MultiLevelPageTable(
- const std::string &__name, uint64_t _pid, System *_sys,
- Addr pageSize, const std::vector<uint8_t> &layout)
- : EmulationPageTable(__name, _pid, pageSize), system(_sys),
- logLevelSize(layout), numLevels(logLevelSize.size())
-{
-}
-
-template <class ISAOps>
-MultiLevelPageTable<ISAOps>::~MultiLevelPageTable()
-{
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::initState(ThreadContext* tc)
-{
- /* setting first level of the page table */
- uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
- logLevelSize[numLevels - 1];
- assert(log_req_size >= PageShift);
- uint64_t npages = 1 << (log_req_size - PageShift);
-
- Addr _basePtr = system->allocPhysPages(npages);
-
- PortProxy &p = system->physProxy;
- p.memsetBlob(_basePtr, 0, npages << PageShift);
-}
-
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
-{
- std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr);
-
- Addr level_base = _basePtr;
- for (int i = numLevels - 1; i > 0; i--) {
-
- Addr entry_addr = (level_base<<PageShift) +
- offsets[i] * sizeof(PageTableEntry);
-
- PortProxy &p = system->physProxy;
- PageTableEntry entry = p.read<PageTableEntry>(entry_addr);
-
- Addr next_entry_pnum = pTableISAOps.getPnum(entry);
- if (next_entry_pnum == 0) {
-
- fatal_if(!allocate, "Page fault while walking the page table.");
-
- uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
- logLevelSize[i - 1];
- assert(log_req_size >= PageShift);
- uint64_t npages = 1 << (log_req_size - PageShift);
-
- DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
- npages, i - 1);
-
- /* allocate new entry */
- Addr next_entry_paddr = system->allocPhysPages(npages);
- p.memsetBlob(next_entry_paddr, 0, npages << PageShift);
-
- next_entry_pnum = next_entry_paddr >> PageShift;
- pTableISAOps.setPnum(entry, next_entry_pnum);
- pTableISAOps.setPTEFields(entry);
- p.write<PageTableEntry>(entry_addr, entry);
-
- }
- DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
- i, level_base, offsets[i], next_entry_pnum);
- level_base = next_entry_pnum;
-
- }
- PTE_addr = (level_base << PageShift) +
- offsets[0] * sizeof(PageTableEntry);
- DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
- int64_t size, uint64_t flags)
-{
- EmulationPageTable::map(vaddr, paddr, size, flags);
-
- PortProxy &p = system->physProxy;
-
- while (size > 0) {
- Addr PTE_addr;
- walk(vaddr, true, PTE_addr);
- PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
- pTableISAOps.setPnum(PTE, paddr >> PageShift);
- uint64_t PTE_flags = 0;
- if (flags & NotPresent)
- PTE_flags |= TheISA::PTE_NotPresent;
- if (flags & Uncacheable)
- PTE_flags |= TheISA::PTE_Uncacheable;
- if (flags & ReadOnly)
- PTE_flags |= TheISA::PTE_ReadOnly;
- pTableISAOps.setPTEFields(PTE, PTE_flags);
- p.write<PageTableEntry>(PTE_addr, PTE);
- DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
- size -= pageSize;
- vaddr += pageSize;
- paddr += pageSize;
- }
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
-{
- EmulationPageTable::remap(vaddr, size, new_vaddr);
-
- PortProxy &p = system->physProxy;
-
- while (size > 0) {
- Addr PTE_addr;
- walk(vaddr, false, PTE_addr);
- PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
- Addr paddr = pTableISAOps.getPnum(PTE);
-
- fatal_if(paddr == 0, "Page fault while remapping");
- /* unmapping vaddr */
- pTableISAOps.setPnum(PTE, 0);
- p.write<PageTableEntry>(PTE_addr, PTE);
-
- /* maping new_vaddr */
- Addr new_PTE_addr;
- walk(new_vaddr, true, new_PTE_addr);
- PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);
-
- pTableISAOps.setPnum(new_PTE, paddr >> PageShift);
- pTableISAOps.setPTEFields(new_PTE);
- p.write<PageTableEntry>(new_PTE_addr, new_PTE);
- DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr);
- size -= pageSize;
- vaddr += pageSize;
- new_vaddr += pageSize;
- }
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
-{
- EmulationPageTable::unmap(vaddr, size);
-
- PortProxy &p = system->physProxy;
-
- while (size > 0) {
- Addr PTE_addr;
- walk(vaddr, false, PTE_addr);
- PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
- Addr paddr = pTableISAOps.getPnum(PTE);
- fatal_if(paddr == 0,
- "PageTable::allocate: address %#x not mapped", vaddr);
- pTableISAOps.setPnum(PTE, 0);
- p.write<PageTableEntry>(PTE_addr, PTE);
- DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
- size -= pageSize;
- vaddr += pageSize;
- }
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::serialize(CheckpointOut &cp) const
-{
- EmulationPageTable::serialize(cp);
- /** Since, the page table is stored in system memory
- * which is serialized separately, we will serialize
- * just the base pointer
- */
- paramOut(cp, "ptable.pointer", _basePtr);
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::unserialize(CheckpointIn &cp)
-{
- EmulationPageTable::unserialize(cp);
- paramIn(cp, "ptable.pointer", _basePtr);
-}
delete it->second;
} else {
// already mapped
- fatal("EmulationPageTable::allocate: addr %#x already mapped",
+ panic("EmulationPageTable::allocate: addr %#x already mapped",
vaddr);
}
} else {
/* generic page table mapping flags
* unset | set
* bit 0 - no-clobber | clobber
- * bit 1 - present | not-present
* bit 2 - cacheable | uncacheable
* bit 3 - read-write | read-only
*/
enum MappingFlags : uint32_t {
- Zero = 0,
Clobber = 1,
- NotPresent = 2,
Uncacheable = 4,
ReadOnly = 8,
};
Addr paddr = system->allocPhysPages(npages);
pTable->map(vaddr, paddr, size,
clobber ? EmulationPageTable::Clobber :
- EmulationPageTable::Zero);
+ EmulationPageTable::MappingFlags(0));
}
void
Process::map(Addr vaddr, Addr paddr, int size, bool cacheable)
{
pTable->map(vaddr, paddr, size,
- cacheable ? EmulationPageTable::Zero :
+ cacheable ? EmulationPageTable::MappingFlags(0) :
EmulationPageTable::Uncacheable);
return true;
}