mem: Consistently use ISO prefixes
[gem5.git] / src / mem / multi_level_page_table.hh
1 /*
2 * Copyright (c) 2014 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /**
30 * @file
31 * Declaration of a multi-level page table.
32 */
33
34 #ifndef __MEM_MULTI_LEVEL_PAGE_TABLE_HH__
35 #define __MEM_MULTI_LEVEL_PAGE_TABLE_HH__
36
37 #include <string>
38
39 #include "base/types.hh"
40 #include "debug/MMU.hh"
41 #include "mem/page_table.hh"
42 #include "sim/system.hh"
43
44 /**
45 * This class implements an in-memory multi-level page table that can be
46 * configured to follow ISA specifications. It can be used instead of the
47 * PageTable class in SE mode to allow CPU models (e.g. X86KvmCPU)
48 * to do a normal page table walk.
49 *
50 * To reduce memory required to store the page table, a multi-level page
51 * table stores its translations similarly to a radix tree. Let n be
52 * the number of levels and {Ln, Ln-1, ..., L1, L0} a set that specifies
53 * the number of entries for each level as base 2 logarithm values. A
54 * multi-level page table will store its translations at level 0 (the
55 * leaves of the tree) and it will be laid out in memory in the
56 * following way:
57 *
58 * +------------------------------+
59 * level n |Ln-1_E0|Ln-1_E1|...|Ln-1_E2^Ln|
60 * +------------------------------+
61 * / \
62 * +------------------------+ +------------------------+
63 * level n-1 |Ln-2_E0|...|Ln-2_E2^Ln-1| |Ln-2_E0|...|Ln-2_E2^Ln-1|
64 * +------------------------+ +------------------------+
65 * / \ / \
66 * .
67 * .
68 * .
69 * / / \
70 * +------------------+ +------------+ +------------+
71 * level 1 |L0_E1|...|L0_E2^L1| |...|L0_E2^L1| ... |...|L0_E2^L1|
72 * +------------------+ +------------+ +------------+
73 * , where
74 * +------------------------------+
75 * |Lk-1_E0|Lk-1_E1|...|Lk-1_E2^Lk|
76 * +------------------------------+
77 * is a level k entry that holds 2^Lk entries in Lk-1 level.
78 *
79 * Essentially, a level n entry will contain 2^Ln level n-1 entries,
80 * a level n-1 entry will hold 2^Ln-1 level n-2 entries etc.
81 *
82 * The virtual address is split into offsets that index into the
83 * different levels of the page table.
84 *
85 * +--------------------------------+
86 * |LnOffset|...|L1Offset|PageOffset|
87 * +--------------------------------+
88 *
89 * For example L0Offset will be formed by the bits in range
90 * [log2(PageOffset), log2(PageOffset)+L0].
91 *
92 * For every level of the page table, from n to 1, the base address
93 * of the entry is loaded, the offset in the virtual address for
94 * that particular level is used to index into the entry which
95 * will reveal the memory address of the entry in the next level.
96 *
97 * @see MultiLevelPageTable
98 */
99
100 namespace {
101
102 template <class First, class ...Rest>
103 Addr
104 prepTopTable(System *system, Addr pageSize)
105 {
106 Addr addr = system->allocPhysPages(First::tableSize());
107 PortProxy &p = system->physProxy;
108 p.memsetBlob(addr, 0, First::tableSize() * pageSize);
109 return addr;
110 }
111
/**
 * Metafunction yielding the last type of a non-empty parameter pack.
 * LastType<A, B, C>::type is C.
 */
template <class ...Types>
struct LastType;

/** Base case: a single type is its own last type. */
template <class Only>
struct LastType<Only>
{
    using type = Only;
};

/** Recursive case: drop the head and recurse on the tail. */
template <class First, class ...Rest>
struct LastType<First, Rest...> : LastType<Rest...>
{};
126
127
128 template <class ...Types>
129 struct WalkWrapper;
130
131 template <class Final, class Only>
132 struct WalkWrapper<Final, Only>
133 {
134 static void
135 walk(System *system, Addr pageSize, Addr table, Addr vaddr,
136 bool allocate, Final *entry)
137 {
138 entry->read(system->physProxy, table, vaddr);
139 }
140 };
141
142 template <class Final, class First, class Second, class ...Rest>
143 struct WalkWrapper<Final, First, Second, Rest...>
144 {
145 static void
146 walk(System *system, Addr pageSize, Addr table, Addr vaddr,
147 bool allocate, Final *entry)
148 {
149 First first;
150 first.read(system->physProxy, table, vaddr);
151
152 Addr next;
153 if (!first.present()) {
154 fatal_if(!allocate,
155 "Page fault while walking the page table.");
156 next = prepTopTable<Second>(system, pageSize);
157 first.reset(next);
158 first.write(system->physProxy);
159 } else {
160 next = first.paddr();
161 }
162 WalkWrapper<Final, Second, Rest...>::walk(
163 system, pageSize, next, vaddr, allocate, entry);
164 }
165 };
166
167 template <class ...EntryTypes>
168 void
169 walk(System *system, Addr pageSize, Addr table, Addr vaddr,
170 bool allocate, typename LastType<EntryTypes...>::type *entry)
171 {
172 WalkWrapper<typename LastType<EntryTypes...>::type, EntryTypes...>::walk(
173 system, pageSize, table, vaddr, allocate, entry);
174 }
175
176 }
177
178
179 template <class ...EntryTypes>
180 class MultiLevelPageTable : public EmulationPageTable
181 {
182 typedef typename LastType<EntryTypes...>::type Final;
183
184 /**
185 * Pointer to System object
186 */
187 System *system;
188
189 /**
190 * Physical address to the last level of the page table
191 */
192 Addr _basePtr;
193
194 public:
195 MultiLevelPageTable(const std::string &__name, uint64_t _pid,
196 System *_sys, Addr _pageSize) :
197 EmulationPageTable(__name, _pid, _pageSize), system(_sys)
198 {}
199
200 ~MultiLevelPageTable() {}
201
202 void
203 initState() override
204 {
205 if (shared)
206 return;
207
208 _basePtr = prepTopTable<EntryTypes...>(system, _pageSize);
209 }
210
211 Addr basePtr() { return _basePtr; }
212
213 void
214 map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags = 0) override
215 {
216 EmulationPageTable::map(vaddr, paddr, size, flags);
217
218 Final entry;
219
220 for (int64_t offset = 0; offset < size; offset += _pageSize) {
221 walk<EntryTypes...>(system, _pageSize, _basePtr,
222 vaddr + offset, true, &entry);
223
224 entry.reset(paddr + offset, true, flags & Uncacheable,
225 flags & ReadOnly);
226 entry.write(system->physProxy);
227
228 DPRINTF(MMU, "New mapping: %#x-%#x\n",
229 vaddr + offset, paddr + offset);
230 }
231 }
232
233 void
234 remap(Addr vaddr, int64_t size, Addr new_vaddr) override
235 {
236 EmulationPageTable::remap(vaddr, size, new_vaddr);
237
238 Final old_entry, new_entry;
239
240 for (int64_t offset = 0; offset < size; offset += _pageSize) {
241 // Unmap the original mapping.
242 walk<EntryTypes...>(system, _pageSize, _basePtr, vaddr + offset,
243 false, &old_entry);
244 old_entry.present(false);
245 old_entry.write(system->physProxy);
246
247 // Map the new one.
248 walk<EntryTypes...>(system, _pageSize, _basePtr,
249 new_vaddr + offset, true, &new_entry);
250 new_entry.reset(old_entry.paddr(), true, old_entry.uncacheable(),
251 old_entry.readonly());
252 new_entry.write(system->physProxy);
253 }
254 }
255
256 void
257 unmap(Addr vaddr, int64_t size) override
258 {
259 EmulationPageTable::unmap(vaddr, size);
260
261 Final entry;
262
263 for (int64_t offset = 0; offset < size; offset += _pageSize) {
264 walk<EntryTypes...>(system, _pageSize, _basePtr,
265 vaddr + offset, false, &entry);
266 fatal_if(!entry.present(),
267 "PageTable::unmap: Address %#x not mapped.", vaddr);
268 entry.present(false);
269 entry.write(system->physProxy);
270 DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
271 }
272 }
273
274 void
275 serialize(CheckpointOut &cp) const override
276 {
277 EmulationPageTable::serialize(cp);
278 /** Since, the page table is stored in system memory
279 * which is serialized separately, we will serialize
280 * just the base pointer
281 */
282 paramOut(cp, "ptable.pointer", _basePtr);
283 }
284
285 void
286 unserialize(CheckpointIn &cp) override
287 {
288 EmulationPageTable::unserialize(cp);
289 paramIn(cp, "ptable.pointer", _basePtr);
290 }
291 };
292 #endif // __MEM_MULTI_LEVEL_PAGE_TABLE_HH__