/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Alexandru Dutu
 */

/**
 * @file
 * Definitions of page table
 */
37 #include "arch/isa_traits.hh"
38 #include "arch/tlb.hh"
39 #include "base/trace.hh"
40 #include "config/the_isa.hh"
41 #include "debug/MMU.hh"
42 #include "mem/multi_level_page_table.hh"
43 #include "mem/page_table.hh"
46 using namespace TheISA;
48 template <class ISAOps>
49 MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name,
50 uint64_t _pid, System *_sys)
51 : PageTableBase(__name, _pid), system(_sys),
52 logLevelSize(PageTableLayout),
53 numLevels(logLevelSize.size())
57 template <class ISAOps>
58 MultiLevelPageTable<ISAOps>::~MultiLevelPageTable()
62 template <class ISAOps>
64 MultiLevelPageTable<ISAOps>::initState(ThreadContext* tc)
66 basePtr = pTableISAOps.getBasePtr(tc);
67 if (basePtr == 0) basePtr++;
68 DPRINTF(MMU, "basePtr: %d\n", basePtr);
70 system->pagePtr = basePtr;
72 /* setting first level of the page table */
73 uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
74 logLevelSize[numLevels-1];
75 assert(log_req_size >= PageShift);
76 uint64_t npages = 1 << (log_req_size - PageShift);
78 Addr paddr = system->allocPhysPages(npages);
80 PortProxy &p = system->physProxy;
81 p.memsetBlob(paddr, 0, npages << PageShift);
85 template <class ISAOps>
87 MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
89 std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr);
91 Addr level_base = basePtr;
92 for (int i = numLevels - 1; i > 0; i--) {
94 Addr entry_addr = (level_base<<PageShift) +
95 offsets[i] * sizeof(PageTableEntry);
97 PortProxy &p = system->physProxy;
98 PageTableEntry entry = p.read<PageTableEntry>(entry_addr);
100 Addr next_entry_pnum = pTableISAOps.getPnum(entry);
101 if (next_entry_pnum == 0) {
103 if (!allocate) return false;
105 uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
107 assert(log_req_size >= PageShift);
108 uint64_t npages = 1 << (log_req_size - PageShift);
110 DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
113 /* allocate new entry */
114 Addr next_entry_paddr = system->allocPhysPages(npages);
115 p.memsetBlob(next_entry_paddr, 0, npages << PageShift);
117 next_entry_pnum = next_entry_paddr >> PageShift;
118 pTableISAOps.setPnum(entry, next_entry_pnum);
119 pTableISAOps.setPTEFields(entry);
120 p.write<PageTableEntry>(entry_addr, entry);
123 DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
124 i, level_base, offsets[i], next_entry_pnum);
125 level_base = next_entry_pnum;
128 PTE_addr = (level_base<<PageShift) +
129 offsets[0] * sizeof(PageTableEntry);
130 DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
134 template <class ISAOps>
136 MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
137 int64_t size, uint64_t flags)
139 bool clobber = flags & Clobber;
140 // starting address must be page aligned
141 assert(pageOffset(vaddr) == 0);
143 DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);
145 PortProxy &p = system->physProxy;
147 for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
149 if (walk(vaddr, true, PTE_addr)) {
150 PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
151 Addr entry_paddr = pTableISAOps.getPnum(PTE);
152 if (!clobber && entry_paddr != 0) {
153 fatal("addr 0x%x already mapped to %x", vaddr, entry_paddr);
155 pTableISAOps.setPnum(PTE, paddr >> PageShift);
156 uint64_t PTE_flags = 0;
157 if (flags & NotPresent)
158 PTE_flags |= TheISA::PTE_NotPresent;
159 if (flags & Uncacheable)
160 PTE_flags |= TheISA::PTE_Uncacheable;
161 if (flags & ReadOnly)
162 PTE_flags |= TheISA::PTE_ReadOnly;
163 pTableISAOps.setPTEFields(PTE, PTE_flags);
164 p.write<PageTableEntry>(PTE_addr, PTE);
165 DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
167 eraseCacheEntry(vaddr);
168 updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
176 template <class ISAOps>
178 MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
180 assert(pageOffset(vaddr) == 0);
181 assert(pageOffset(new_vaddr) == 0);
183 DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
186 PortProxy &p = system->physProxy;
189 size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
192 if (walk(vaddr, false, PTE_addr)) {
193 PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
194 Addr paddr = pTableISAOps.getPnum(PTE);
197 fatal("Page fault while remapping");
199 /* unmapping vaddr */
200 pTableISAOps.setPnum(PTE, 0);
201 p.write<PageTableEntry>(PTE_addr, PTE);
203 /* maping new_vaddr */
205 walk(new_vaddr, true, new_PTE_addr);
206 PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);
208 pTableISAOps.setPnum(new_PTE, paddr>>PageShift);
209 pTableISAOps.setPTEFields(new_PTE);
210 p.write<PageTableEntry>(new_PTE_addr, new_PTE);
211 DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr);
214 eraseCacheEntry(vaddr);
215 updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr,
216 pTableISAOps.isUncacheable(PTE),
217 pTableISAOps.isReadOnly(PTE)));
219 fatal("Page fault while remapping");
224 template <class ISAOps>
226 MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
228 assert(pageOffset(vaddr) == 0);
230 DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr+ size);
232 PortProxy &p = system->physProxy;
234 for (; size > 0; size -= pageSize, vaddr += pageSize) {
236 if (walk(vaddr, false, PTE_addr)) {
237 PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
238 Addr paddr = pTableISAOps.getPnum(PTE);
240 fatal("PageTable::allocate: address 0x%x not mapped", vaddr);
242 pTableISAOps.setPnum(PTE, 0);
243 p.write<PageTableEntry>(PTE_addr, PTE);
244 DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
246 eraseCacheEntry(vaddr);
248 fatal("Page fault while unmapping");
254 template <class ISAOps>
256 MultiLevelPageTable<ISAOps>::isUnmapped(Addr vaddr, int64_t size)
258 // starting address must be page aligned
259 assert(pageOffset(vaddr) == 0);
260 PortProxy &p = system->physProxy;
262 for (; size > 0; size -= pageSize, vaddr += pageSize) {
264 if (walk(vaddr, false, PTE_addr)) {
265 PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
266 if (pTableISAOps.getPnum(PTE) != 0)
274 template <class ISAOps>
276 MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
278 Addr page_addr = pageAlign(vaddr);
280 if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
281 entry = pTableCache[0].entry;
284 if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
285 entry = pTableCache[1].entry;
288 if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
289 entry = pTableCache[2].entry;
293 DPRINTF(MMU, "lookup page_addr: %#x\n", page_addr);
295 if (walk(page_addr, false, PTE_addr)) {
296 PortProxy &p = system->physProxy;
297 PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
298 Addr pnum = pTableISAOps.getPnum(PTE);
302 entry = TlbEntry(pid, vaddr, pnum << PageShift,
303 pTableISAOps.isUncacheable(PTE),
304 pTableISAOps.isReadOnly(PTE));
305 updateCache(page_addr, entry);
312 template <class ISAOps>
314 MultiLevelPageTable<ISAOps>::serialize(CheckpointOut &cp) const
316 /** Since, the page table is stored in system memory
317 * which is serialized separately, we will serialize
318 * just the base pointer
320 paramOut(cp, "ptable.pointer", basePtr);
323 template <class ISAOps>
325 MultiLevelPageTable<ISAOps>::unserialize(CheckpointIn &cp)
327 paramIn(cp, "ptable.pointer", basePtr);