src/mem/multi_level_page_table_impl.hh
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Alexandru Dutu
 */

/**
 * @file
 * Definitions of a multi-level page table.
 */

#include <string>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "base/intmath.hh"
#include "base/trace.hh"
#include "config/the_isa.hh"
#include "debug/MMU.hh"
#include "mem/multi_level_page_table.hh"
#include "mem/page_table.hh"

using namespace TheISA;

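/*
 * A MultiLevelPageTable keeps the process page table in simulated
 * physical memory, like an OS-managed radix table: each level is a
 * contiguous array of PageTableEntry records, and logLevelSize (taken
 * from the ISA's PageTableLayout) gives the log2 number of entries at
 * each level. All ISA-specific entry handling is delegated to the
 * ISAOps policy object, pTableISAOps.
 *
 * A minimal usage sketch (hypothetical; assumes a suitable ISAOps
 * policy class for the target ISA and an initialized System):
 *
 *     MultiLevelPageTable<SomeISAOps> pt("ptable", pid, system);
 *     pt.initState(tc);                 // allocate the top-level table
 *     pt.map(vaddr, paddr, 0x1000, 0);  // map one page (4 KB here)
 */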
template <class ISAOps>
MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name,
                                                 uint64_t _pid, System *_sys)
    : PageTableBase(__name, _pid), system(_sys),
      logLevelSize(PageTableLayout),
      numLevels(logLevelSize.size())
{
}

template <class ISAOps>
MultiLevelPageTable<ISAOps>::~MultiLevelPageTable()
{
}

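/*
 * initState() records the table's base page number from the ISA and
 * allocates (and zeroes) enough physical pages to hold the top level,
 * which has 2^logLevelSize[numLevels-1] entries.
 */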
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::initState(ThreadContext* tc)
{
    basePtr = pTableISAOps.getBasePtr(tc);
    if (basePtr == 0) basePtr++;
    DPRINTF(MMU, "basePtr: %d\n", basePtr);

    system->pagePtr = basePtr;

    /* set up the first level of the page table */
    uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                            logLevelSize[numLevels - 1];
    assert(log_req_size >= PageShift);
    uint64_t npages = 1ULL << (log_req_size - PageShift);

    Addr paddr = system->allocPhysPages(npages);

    PortProxy &p = system->physProxy;
    p.memsetBlob(paddr, 0, npages << PageShift);
}

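/*
 * walk() descends the radix tree from basePtr toward level 0, reading
 * one PageTableEntry per level from simulated physical memory. A zero
 * page number marks a missing next-level table; with 'allocate' set, a
 * zero-filled table is created on demand, otherwise the walk fails. On
 * success, PTE_addr holds the physical address of the level-0 entry
 * for vaddr.
 *
 * As a hypothetical example, for an x86-64-style layout with four
 * 9-bit levels and 4 KB pages, getOffsets(vaddr) would return the four
 * 9-bit index fields of vaddr (bits 12-20 for level 0 up through bits
 * 39-47 for level 3).
 */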
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
{
    std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr);

    Addr level_base = basePtr;
    for (int i = numLevels - 1; i > 0; i--) {
        Addr entry_addr = (level_base << PageShift) +
                          offsets[i] * sizeof(PageTableEntry);

        PortProxy &p = system->physProxy;
        PageTableEntry entry = p.read<PageTableEntry>(entry_addr);

        Addr next_entry_pnum = pTableISAOps.getPnum(entry);
        if (next_entry_pnum == 0) {
            if (!allocate)
                return false;

            uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                                    logLevelSize[i - 1];
            assert(log_req_size >= PageShift);
            uint64_t npages = 1ULL << (log_req_size - PageShift);

            DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
                    npages, i - 1);

            /* allocate and zero a table for the next level */
            Addr next_entry_paddr = system->allocPhysPages(npages);
            p.memsetBlob(next_entry_paddr, 0, npages << PageShift);

            next_entry_pnum = next_entry_paddr >> PageShift;
            pTableISAOps.setPnum(entry, next_entry_pnum);
            pTableISAOps.setPTEFields(entry);
            p.write<PageTableEntry>(entry_addr, entry);
        }
        DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
                i, level_base, offsets[i], next_entry_pnum);
        level_base = next_entry_pnum;
    }
    PTE_addr = (level_base << PageShift) +
               offsets[0] * sizeof(PageTableEntry);
    DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
    return true;
}

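/*
 * map() installs one PTE per page in [vaddr, vaddr + size), allocating
 * intermediate tables as needed, and translates the generic mapping
 * flags (NotPresent, Uncacheable, ReadOnly) into the ISA's PTE field
 * bits. Unless Clobber is set, remapping an already-mapped page is a
 * fatal error.
 */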
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
                                 int64_t size, uint64_t flags)
{
    bool clobber = flags & Clobber;
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, true, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr entry_paddr = pTableISAOps.getPnum(PTE);
            if (!clobber && entry_paddr != 0) {
                fatal("addr %#x already mapped to %#x", vaddr, entry_paddr);
            }
            pTableISAOps.setPnum(PTE, paddr >> PageShift);
            uint64_t PTE_flags = 0;
            if (flags & NotPresent)
                PTE_flags |= TheISA::PTE_NotPresent;
            if (flags & Uncacheable)
                PTE_flags |= TheISA::PTE_Uncacheable;
            if (flags & ReadOnly)
                PTE_flags |= TheISA::PTE_ReadOnly;
            pTableISAOps.setPTEFields(PTE, PTE_flags);
            p.write<PageTableEntry>(PTE_addr, PTE);
            DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);

            eraseCacheEntry(vaddr);
            updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
                                        flags & Uncacheable,
                                        flags & ReadOnly));
        }
    }
}

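/*
 * remap() moves a mapped region page by page: each source PTE is
 * cleared and its physical page number is written into a (possibly
 * newly allocated) PTE for the destination address.
 */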
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
{
    assert(pageOffset(vaddr) == 0);
    assert(pageOffset(new_vaddr) == 0);

    DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
            new_vaddr, size);

    PortProxy &p = system->physProxy;

    for (; size > 0;
         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
    {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr paddr = pTableISAOps.getPnum(PTE);

            if (paddr == 0) {
                fatal("Page fault while remapping");
            } else {
                /* unmapping vaddr */
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);

                /* mapping new_vaddr; walk with allocate set cannot fail */
                Addr new_PTE_addr;
                walk(new_vaddr, true, new_PTE_addr);
                PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);

                pTableISAOps.setPnum(new_PTE, paddr >> PageShift);
                pTableISAOps.setPTEFields(new_PTE);
                p.write<PageTableEntry>(new_PTE_addr, new_PTE);
                DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr);
            }

            eraseCacheEntry(vaddr);
            updateCache(new_vaddr, TlbEntry(pid, new_vaddr,
                                            paddr << PageShift,
                                            pTableISAOps.isUncacheable(PTE),
                                            pTableISAOps.isReadOnly(PTE)));
        } else {
            fatal("Page fault while remapping");
        }
    }
}

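/*
 * unmap() clears the level-0 PTE for every page in the region;
 * intermediate tables are left in place rather than reclaimed.
 */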
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
{
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr paddr = pTableISAOps.getPnum(PTE);
            if (paddr == 0) {
                fatal("MultiLevelPageTable::unmap: address %#x not mapped",
                      vaddr);
            } else {
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);
                DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
            }
            eraseCacheEntry(vaddr);
        } else {
            fatal("Page fault while unmapping");
        }
    }
}

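/*
 * isUnmapped() returns true only if no page in the region has a valid
 * level-0 PTE; pages whose walk fails part-way count as unmapped.
 */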
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::isUnmapped(Addr vaddr, int64_t size)
{
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);
    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            if (pTableISAOps.getPnum(PTE) != 0)
                return false;
        }
    }

    return true;
}

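/*
 * lookup() first probes the three-entry pTableCache maintained by
 * updateCache()/eraseCacheEntry() and falls back to a full table walk
 * on a miss, caching the translation it finds.
 */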
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
{
    Addr page_addr = pageAlign(vaddr);

    if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
        entry = pTableCache[0].entry;
        return true;
    }
    if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
        entry = pTableCache[1].entry;
        return true;
    }
    if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
        entry = pTableCache[2].entry;
        return true;
    }

    DPRINTF(MMU, "lookup page_addr: %#x\n", page_addr);
    Addr PTE_addr;
    if (walk(page_addr, false, PTE_addr)) {
        PortProxy &p = system->physProxy;
        PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
        Addr pnum = pTableISAOps.getPnum(PTE);
        if (pnum == 0)
            return false;

        entry = TlbEntry(pid, vaddr, pnum << PageShift,
                         pTableISAOps.isUncacheable(PTE),
                         pTableISAOps.isReadOnly(PTE));
        updateCache(page_addr, entry);
    } else {
        return false;
    }
    return true;
}

template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::serialize(CheckpointOut &cp) const
{
    /** Since the page table is stored in system memory,
     * which is serialized separately, we serialize
     * just the base pointer here.
     */
    paramOut(cp, "ptable.pointer", basePtr);
}

template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unserialize(CheckpointIn &cp)
{
    paramIn(cp, "ptable.pointer", basePtr);
}