src/mem/multi_level_page_table_impl.hh
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Alexandru Dutu
 */

/**
 * @file
 * Definitions of a multi-level page table.
 */
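/*
 * The table itself lives in simulated physical memory and is read and
 * written through the system's physical port proxy. Its shape (the log2
 * number of entries at each level) comes from the ISA's PageTableLayout
 * vector, and the per-entry encoding is delegated to the ISAOps policy
 * class.
 */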
#include <fstream>
#include <map>
#include <string>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/trace.hh"
#include "config/the_isa.hh"
#include "debug/MMU.hh"
#include "mem/multi_level_page_table.hh"
#include "sim/faults.hh"
#include "sim/sim_object.hh"

using namespace std;
using namespace TheISA;

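/**
 * The layout vector holds, for each level of the table, the log2 of the
 * number of entries at that level; the number of levels follows from
 * its size.
 */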
template <class ISAOps>
MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name,
                                                 uint64_t _pid, System *_sys)
    : PageTableBase(__name, _pid), system(_sys),
      logLevelSize(PageTableLayout),
      numLevels(logLevelSize.size())
{
}

template <class ISAOps>
MultiLevelPageTable<ISAOps>::~MultiLevelPageTable()
{
}

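/**
 * Grab the root of the page table from the initial thread context and
 * allocate (and zero) the top-level table in simulated physical memory.
 */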
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::initState(ThreadContext* tc)
{
    basePtr = pTableISAOps.getBasePtr(tc);
    // Page number 0 is used as the "invalid" sentinel throughout this
    // table, so bump a zero base pointer to the next page.
    if (basePtr == 0) basePtr++;
    DPRINTF(MMU, "basePtr: %d\n", basePtr);

    system->pagePtr = basePtr;

    /* setting first level of the page table */
    uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                            logLevelSize[numLevels - 1];
    assert(log_req_size >= PageShift);
    uint64_t npages = 1ULL << (log_req_size - PageShift);

    Addr paddr = system->allocPhysPages(npages);

    PortProxy &p = system->physProxy;
    p.memsetBlob(paddr, 0, npages << PageShift);
}

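/**
 * Walk the table from the top level down for @p vaddr. When
 * @p allocate is set, missing intermediate levels are allocated and
 * zeroed on the way down; otherwise the walk fails on the first
 * missing entry. On success, the physical address of the level-0
 * (leaf) PTE is returned through @p PTE_addr.
 */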
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
{
    std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr);

    Addr level_base = basePtr;
    for (int i = numLevels - 1; i > 0; i--) {
        Addr entry_addr = (level_base << PageShift) +
                          offsets[i] * sizeof(PageTableEntry);

        PortProxy &p = system->physProxy;
        PageTableEntry entry = p.read<PageTableEntry>(entry_addr);

        Addr next_entry_pnum = pTableISAOps.getPnum(entry);
        if (next_entry_pnum == 0) {
            if (!allocate) return false;

            uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                                    logLevelSize[i - 1];
            assert(log_req_size >= PageShift);
            uint64_t npages = 1ULL << (log_req_size - PageShift);

            DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
                    npages, i - 1);

            /* allocate new entry */
            Addr next_entry_paddr = system->allocPhysPages(npages);
            p.memsetBlob(next_entry_paddr, 0, npages << PageShift);

            next_entry_pnum = next_entry_paddr >> PageShift;
            pTableISAOps.setPnum(entry, next_entry_pnum);
            pTableISAOps.setPTEFields(entry);
            p.write<PageTableEntry>(entry_addr, entry);
        }
        DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
                i, level_base, offsets[i], next_entry_pnum);
        level_base = next_entry_pnum;
    }
    PTE_addr = (level_base << PageShift) +
               offsets[0] * sizeof(PageTableEntry);
    DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
    return true;
}

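/**
 * Install a mapping from [vaddr, vaddr + size) to [paddr, paddr + size),
 * one page at a time, allocating intermediate levels as needed. Unless
 * Clobber is set in @p flags, mapping an already-mapped page is fatal.
 */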
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
                                 int64_t size, uint64_t flags)
{
    bool clobber = flags & Clobber;
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, true, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr entry_pnum = pTableISAOps.getPnum(PTE);
            if (!clobber && entry_pnum != 0) {
                fatal("addr %#x already mapped to pnum %#x",
                      vaddr, entry_pnum);
            }
            pTableISAOps.setPnum(PTE, paddr >> PageShift);
            uint64_t PTE_flags = 0;
            if (flags & NotPresent)
                PTE_flags |= TheISA::PTE_NotPresent;
            if (flags & Uncacheable)
                PTE_flags |= TheISA::PTE_Uncacheable;
            if (flags & ReadOnly)
                PTE_flags |= TheISA::PTE_ReadOnly;
            pTableISAOps.setPTEFields(PTE, PTE_flags);
            p.write<PageTableEntry>(PTE_addr, PTE);
            DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);

            eraseCacheEntry(vaddr);
            updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
                                        flags & Uncacheable,
                                        flags & ReadOnly));
        }
    }
}

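/**
 * Move the mappings for [vaddr, vaddr + size) so they are reachable at
 * [new_vaddr, new_vaddr + size) instead, page by page: each source PTE
 * is cleared and its physical page number is written into the newly
 * walked destination PTE.
 */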
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
{
    assert(pageOffset(vaddr) == 0);
    assert(pageOffset(new_vaddr) == 0);

    DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
            new_vaddr, size);

    PortProxy &p = system->physProxy;

    for (; size > 0;
         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
    {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            // getPnum() returns a page number, not a full physical address
            Addr pnum = pTableISAOps.getPnum(PTE);

            if (pnum == 0) {
                fatal("Page fault while remapping");
            } else {
                /* unmapping vaddr */
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);

                /* mapping new_vaddr */
                Addr new_PTE_addr;
                walk(new_vaddr, true, new_PTE_addr);
                PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);

                pTableISAOps.setPnum(new_PTE, pnum);
                pTableISAOps.setPTEFields(new_PTE);
                p.write<PageTableEntry>(new_PTE_addr, new_PTE);
                DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_vaddr);
            }

            eraseCacheEntry(vaddr);
            updateCache(new_vaddr, TlbEntry(pid, new_vaddr,
                                            pnum << PageShift,
                                            pTableISAOps.isUncacheable(PTE),
                                            pTableISAOps.isReadOnly(PTE)));
        } else {
            fatal("Page fault while remapping");
        }
    }
}

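/**
 * Tear down the mappings for [vaddr, vaddr + size), clearing each leaf
 * PTE and evicting the corresponding lookup-cache entries. Unmapping a
 * page that is not mapped is fatal.
 */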
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
{
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr pnum = pTableISAOps.getPnum(PTE);
            if (pnum == 0) {
                fatal("PageTable::unmap: address %#x not mapped", vaddr);
            } else {
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);
                DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
            }
            eraseCacheEntry(vaddr);
        } else {
            fatal("Page fault while unmapping");
        }
    }
}

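/**
 * Check that no page in [vaddr, vaddr + size) currently has a valid
 * translation; pages whose walk fails are trivially unmapped.
 */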
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::isUnmapped(Addr vaddr, int64_t size)
{
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);
    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            if (pTableISAOps.getPnum(PTE) != 0)
                return false;
        }
    }

    return true;
}

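/**
 * Translate @p vaddr to a TlbEntry, first probing the small software
 * lookup cache and falling back to a full table walk on a miss; a
 * successful walk refills the cache.
 */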
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
{
    Addr page_addr = pageAlign(vaddr);

    if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
        entry = pTableCache[0].entry;
        return true;
    }
    if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
        entry = pTableCache[1].entry;
        return true;
    }
    if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
        entry = pTableCache[2].entry;
        return true;
    }

    DPRINTF(MMU, "lookup page_addr: %#x\n", page_addr);
    Addr PTE_addr;
    if (walk(page_addr, false, PTE_addr)) {
        PortProxy &p = system->physProxy;
        PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
        Addr pnum = pTableISAOps.getPnum(PTE);
        if (pnum == 0)
            return false;

        entry = TlbEntry(pid, vaddr, pnum << PageShift,
                         pTableISAOps.isUncacheable(PTE),
                         pTableISAOps.isReadOnly(PTE));
        updateCache(page_addr, entry);
    } else {
        return false;
    }
    return true;
}

template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::serialize(CheckpointOut &cp) const
{
    /** Since the page table is stored in system memory, which is
     * serialized separately, we only serialize the base pointer here.
     */
    paramOut(cp, "ptable.pointer", basePtr);
}

template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unserialize(CheckpointIn &cp)
{
    paramIn(cp, "ptable.pointer", basePtr);
}