/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Alexandru Dutu
 */

/**
 * @file
 * Definitions of a multi-level page table.
 */
#include <fstream>
#include <map>
#include <string>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/trace.hh"
#include "config/the_isa.hh"
#include "debug/MMU.hh"
#include "mem/multi_level_page_table.hh"
#include "sim/faults.hh"
#include "sim/sim_object.hh"

using namespace std;
using namespace TheISA;

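/**
 * The table layout comes from the ISA: PageTableLayout supplies the log2
 * size of each level, and the number of levels follows from its length.
 */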
template <class ISAOps>
MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name,
                                                 uint64_t _pid, System *_sys)
    : PageTableBase(__name, _pid), system(_sys),
      logLevelSize(PageTableLayout),
      numLevels(logLevelSize.size())
{
}

template <class ISAOps>
MultiLevelPageTable<ISAOps>::~MultiLevelPageTable()
{
}

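/**
 * Fetch the table base pointer from the ISA, seed the system's physical
 * page allocator with it, and allocate and zero the pages that back the
 * top level of the table.
 */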
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::initState(ThreadContext *tc)
{
    basePtr = pTableISAOps.getBasePtr(tc);
    // Page number 0 is treated as "unmapped" by the walker, so never use
    // it as the table base.
    if (basePtr == 0)
        basePtr++;
    DPRINTF(MMU, "basePtr: %d\n", basePtr);

    system->pagePtr = basePtr;

    /* setting first level of the page table */
    uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                            logLevelSize[numLevels - 1];
    assert(log_req_size >= PageShift);
    uint64_t npages = 1ULL << (log_req_size - PageShift);

    Addr paddr = system->allocPhysPages(npages);

    PortProxy &p = system->physProxy;
    p.memsetBlob(paddr, 0, npages << PageShift);
}

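/**
 * Walk the page table for vaddr from the top level down to level 0. If
 * allocate is set, missing intermediate tables are allocated and zeroed
 * on the way down; otherwise the walk fails on the first missing entry.
 * On success, PTE_addr holds the physical address of the level-0 PTE.
 */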
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
{
    std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr);

    PortProxy &p = system->physProxy;
    Addr level_base = basePtr;
    for (int i = numLevels - 1; i > 0; i--) {
        Addr entry_addr = (level_base << PageShift) +
                          offsets[i] * sizeof(PageTableEntry);

        PageTableEntry entry = p.read<PageTableEntry>(entry_addr);

        Addr next_entry_pnum = pTableISAOps.getPnum(entry);
        if (next_entry_pnum == 0) {
            if (!allocate)
                return false;

            uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                                    logLevelSize[i - 1];
            assert(log_req_size >= PageShift);
            uint64_t npages = 1ULL << (log_req_size - PageShift);

            DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
                    npages, i - 1);

            /* allocate new entry */
            Addr next_entry_paddr = system->allocPhysPages(npages);
            p.memsetBlob(next_entry_paddr, 0, npages << PageShift);

            next_entry_pnum = next_entry_paddr >> PageShift;
            pTableISAOps.setPnum(entry, next_entry_pnum);
            pTableISAOps.setPTEFields(entry);
            p.write<PageTableEntry>(entry_addr, entry);
        }
        DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
                i, level_base, offsets[i], next_entry_pnum);
        level_base = next_entry_pnum;
    }
    PTE_addr = (level_base << PageShift) +
               offsets[0] * sizeof(PageTableEntry);
    DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
    return true;
}

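/**
 * Map [vaddr, vaddr + size) to [paddr, paddr + size) one page at a time,
 * allocating any missing table levels along the way. Unless clobber is
 * set, remapping an already-mapped page is fatal.
 */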
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr, int64_t size,
                                 bool clobber)
{
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, true, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr entry_paddr = pTableISAOps.getPnum(PTE);
            // Only an unmapped entry may be written unless clobber is set.
            if (!clobber && entry_paddr != 0) {
                fatal("address 0x%x already mapped to %x", vaddr,
                      entry_paddr);
            }
            pTableISAOps.setPnum(PTE, paddr >> PageShift);
            pTableISAOps.setPTEFields(PTE);
            p.write<PageTableEntry>(PTE_addr, PTE);
            DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);

            eraseCacheEntry(vaddr);
            updateCache(vaddr, TlbEntry(pid, vaddr, paddr));
        }
    }
}

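/**
 * Move the mappings for [vaddr, vaddr + size) to [new_vaddr,
 * new_vaddr + size): each source PTE is cleared and its physical page
 * number is written into the (possibly newly allocated) destination PTE.
 */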
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
{
    assert(pageOffset(vaddr) == 0);
    assert(pageOffset(new_vaddr) == 0);

    DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
            new_vaddr, size);

    PortProxy &p = system->physProxy;

    for (; size > 0;
         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr paddr = pTableISAOps.getPnum(PTE);

            if (paddr == 0) {
                fatal("Page fault while remapping");
            } else {
                /* unmapping vaddr */
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);

                /* mapping new_vaddr; the walk always succeeds when
                 * allocate is set */
                Addr new_PTE_addr;
                walk(new_vaddr, true, new_PTE_addr);
                PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);

                pTableISAOps.setPnum(new_PTE, paddr >> PageShift);
                pTableISAOps.setPTEFields(new_PTE);
                p.write<PageTableEntry>(new_PTE_addr, new_PTE);
                DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_vaddr);
            }

            eraseCacheEntry(vaddr);
            updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr));
        } else {
            fatal("Page fault while remapping");
        }
    }
}

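/**
 * Clear the mappings for [vaddr, vaddr + size). Unmapping a page that is
 * not currently mapped is fatal.
 */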
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
{
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr paddr = pTableISAOps.getPnum(PTE);
            if (paddr == 0) {
                fatal("PageTable::unmap: address 0x%x not mapped", vaddr);
            } else {
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);
                DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
            }
            eraseCacheEntry(vaddr);
        } else {
            fatal("Page fault while unmapping");
        }
    }
}

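/**
 * Check that no page in [vaddr, vaddr + size) currently has a valid
 * mapping.
 */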
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::isUnmapped(Addr vaddr, int64_t size)
{
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);
    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            if (pTableISAOps.getPnum(PTE) != 0)
                return false;
        }
    }

    return true;
}

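/**
 * Translate vaddr to a TLB entry. The three-entry software cache is
 * consulted first; on a miss the table is walked and the result is
 * cached.
 */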
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
{
    Addr page_addr = pageAlign(vaddr);

    if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
        entry = pTableCache[0].entry;
        return true;
    }
    if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
        entry = pTableCache[1].entry;
        return true;
    }
    if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
        entry = pTableCache[2].entry;
        return true;
    }

    DPRINTF(MMU, "lookup page_addr: %#x\n", page_addr);
    Addr PTE_addr;
    if (walk(page_addr, false, PTE_addr)) {
        PortProxy &p = system->physProxy;
        PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
        Addr pnum = pTableISAOps.getPnum(PTE);
        if (pnum == 0)
            return false;

        entry = TlbEntry(pid, vaddr, pnum << PageShift);
        updateCache(page_addr, entry);
    } else {
        return false;
    }
    return true;
}

template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::serialize(std::ostream &os)
{
    /** Since the page table is stored in system memory, which is
     * serialized separately, we only serialize the base pointer.
     */
    paramOut(os, "ptable.pointer", basePtr);
}

template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unserialize(Checkpoint *cp,
                                         const std::string &section)
{
    paramIn(cp, section, "ptable.pointer", basePtr);
}