/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Declaration of a multi-level page table.
 */

#ifndef __MEM_MULTI_LEVEL_PAGE_TABLE_HH__
#define __MEM_MULTI_LEVEL_PAGE_TABLE_HH__

#include <string>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/MMU.hh"
#include "mem/page_table.hh"
#include "mem/port_proxy.hh"
#include "sim/serialize.hh"
#include "sim/system.hh"

/**
 * This class implements an in-memory multi-level page table that can be
 * configured to follow ISA specifications. It can be used instead of the
 * PageTable class in SE mode to allow CPU models (e.g. X86KvmCPU)
 * to do a normal page table walk.
 *
 * To reduce the memory required to store the page table, a multi-level
 * page table stores its translations similarly to a radix tree. Let n be
 * the number of levels and {Ln, Ln-1, ..., L1, L0} a set that specifies
 * the number of entries for each level as base 2 logarithm values. A
 * multi-level page table stores its translations at level 0 (the leaves
 * of the tree) and is laid out in memory in the following way:
 *
 *              +------------------------------+
 *  level n     |Ln-1_E0|Ln-1_E1|...|Ln-1_E2^Ln|
 *              +------------------------------+
 *                  /                        \
 *              +------------------------+     +------------------------+
 *  level n-1   |Ln-2_E0|...|Ln-2_E2^Ln-1|     |Ln-2_E0|...|Ln-2_E2^Ln-1|
 *              +------------------------+     +------------------------+
 *                 /            \                  /            \
 *                               .
 *                               .
 *                               .
 *                  /                  /                      \
 *              +------------------+   +------------+        +------------+
 *  level 1     |L0_E0|...|L0_E2^L1|   |...|L0_E2^L1|  ...    |...|L0_E2^L1|
 *              +------------------+   +------------+        +------------+
 *
 * where
 *              +------------------------------+
 *              |Lk-1_E0|Lk-1_E1|...|Lk-1_E2^Lk|
 *              +------------------------------+
 * is a level k table that holds 2^Lk entries, each pointing to a table
 * (or, at level 1, to a translation) in level k-1.
 *
 * Essentially, a level n table contains 2^Ln level n-1 entries, a level
 * n-1 table holds 2^Ln-1 level n-2 entries, and so on.
 *
 * The virtual address is split into offsets that index into the
 * different levels of the page table.
 *
 * +--------------------------------+
 * |LnOffset|...|L1Offset|PageOffset|
 * +--------------------------------+
 *
 * For example, L1Offset (the lowest table index) is formed by the L1 bits
 * directly above the page offset, i.e. the bits in the range
 * [log2(page size), log2(page size) + L1).
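 *
 * As a concrete sketch (purely illustrative numbers, not a value this
 * class prescribes): with 4 KiB pages (a 12 bit page offset) and 9 bits
 * per level, each table holds 2^9 = 512 entries and the offsets fall out
 * as
 *
 *   L1Offset = vaddr[20:12]  selects an entry in a level 1 table
 *   L2Offset = vaddr[29:21]  selects an entry in a level 2 table
 *   L3Offset = vaddr[38:30]  and so on, up to level n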
 *
 * For every level of the page table, from n down to 1, the base address
 * of that level's table is combined with the corresponding offset from
 * the virtual address to select an entry; that entry yields the physical
 * address of the next level's table, and the entry found at the last
 * level holds the translation itself.
 *
 * @see MultiLevelPageTable
 */

namespace {

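/**
 * Allocate First::tableSize() physical pages for a table of entry type
 * First and zero them, returning the table's base physical address. This
 * is used both for the root table (from initState()) and for intermediate
 * tables allocated on demand during a walk.
 */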
template <class First, class ...Rest>
Addr
prepTopTable(System *system, Addr pageSize)
{
    Addr addr = system->allocPhysPages(First::tableSize());
    PortProxy &p = system->physProxy;
    p.memsetBlob(addr, 0, First::tableSize() * pageSize);
    return addr;
}

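/**
 * Metafunction that recursively peels types off a parameter pack to
 * expose the last one; the last entry type is the leaf (translation)
 * type that a full walk produces.
 */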
template <class ...Types>
struct LastType;

template <class First, class Second, class ...Rest>
struct LastType<First, Second, Rest...>
{
    typedef typename LastType<Second, Rest...>::type type;
};

template <class Only>
struct LastType<Only>
{
    typedef Only type;
};


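/**
 * Recursive walker, one table level per specialization. The
 * two-or-more-types case reads the current level's entry, allocates and
 * installs the next level's table if the entry is not present (and
 * allocation is allowed), and then recurses; the single-type base case
 * simply reads the leaf entry.
 */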
template <class ...Types>
struct WalkWrapper;

template <class Final, class Only>
struct WalkWrapper<Final, Only>
{
    static void
    walk(System *system, Addr pageSize, Addr table, Addr vaddr,
         bool allocate, Final *entry)
    {
        entry->read(system->physProxy, table, vaddr);
    }
};

template <class Final, class First, class Second, class ...Rest>
struct WalkWrapper<Final, First, Second, Rest...>
{
    static void
    walk(System *system, Addr pageSize, Addr table, Addr vaddr,
         bool allocate, Final *entry)
    {
        First first;
        first.read(system->physProxy, table, vaddr);

        Addr next;
        if (!first.present()) {
            fatal_if(!allocate,
                     "Page fault while walking the page table.");
            next = prepTopTable<Second>(system, pageSize);
            first.reset(next);
            first.write(system->physProxy);
        } else {
            next = first.paddr();
        }
        WalkWrapper<Final, Second, Rest...>::walk(
            system, pageSize, next, vaddr, allocate, entry);
    }
};

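/**
 * Entry point for a full table walk: starting from the table at 'table',
 * follow one level per entry type in EntryTypes and fill in the leaf
 * entry for 'vaddr'. With allocate == false, a non-present intermediate
 * entry is a fatal error.
 */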
template <class ...EntryTypes>
void
walk(System *system, Addr pageSize, Addr table, Addr vaddr,
     bool allocate, typename LastType<EntryTypes...>::type *entry)
{
    WalkWrapper<typename LastType<EntryTypes...>::type, EntryTypes...>::walk(
        system, pageSize, table, vaddr, allocate, entry);
}

}


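/**
 * Minimal usage sketch (hypothetical: Level2Entry and Level1Entry below
 * are illustrative placeholders, not real gem5 classes). Entry types are
 * listed from the root level down; each must provide the tableSize(),
 * read(), write(), reset(), present(), paddr(), uncacheable() and
 * readonly() operations used by the walk helpers above, and the last
 * type is the leaf entry that holds the actual translation.
 *
 * @code
 * typedef MultiLevelPageTable<Level2Entry, Level1Entry> ExamplePageTable;
 *
 * // pid, system, vaddr and paddr are supplied by the caller.
 * ExamplePageTable ptable("ptable", pid, system, 0x1000);
 * ptable.initState();                // allocate and zero the root table
 * ptable.map(vaddr, paddr, 0x1000);  // write the in-memory walk path
 * @endcode
 */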
template <class ...EntryTypes>
class MultiLevelPageTable : public EmulationPageTable
{
    typedef typename LastType<EntryTypes...>::type Final;

    /**
     * Pointer to System object
     */
    System *system;

    /**
     * Physical address of the top-level (root) table of the page table
     */
    Addr _basePtr;

  public:
    MultiLevelPageTable(const std::string &__name, uint64_t _pid,
                        System *_sys, Addr _pageSize) :
        EmulationPageTable(__name, _pid, _pageSize), system(_sys)
    {}

    ~MultiLevelPageTable() {}

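    /**
     * Allocate and zero the root table. A shared page table skips this
     * step and relies on the table it is shared with.
     */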
    void
    initState() override
    {
        if (shared)
            return;

        _basePtr = prepTopTable<EntryTypes...>(system, _pageSize);
    }

    Addr basePtr() { return _basePtr; }

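    /**
     * In addition to recording the mapping functionally in
     * EmulationPageTable, write a leaf entry into the in-memory table for
     * every page in the range, allocating intermediate tables as needed.
     */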
    void
    map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags = 0) override
    {
        EmulationPageTable::map(vaddr, paddr, size, flags);

        Final entry;

        for (int64_t offset = 0; offset < size; offset += _pageSize) {
            walk<EntryTypes...>(system, _pageSize, _basePtr,
                                vaddr + offset, true, &entry);

            entry.reset(paddr + offset, true, flags & Uncacheable,
                        flags & ReadOnly);
            entry.write(system->physProxy);

            DPRINTF(MMU, "New mapping: %#x-%#x\n",
                    vaddr + offset, paddr + offset);
        }
    }

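    /**
     * Move a range of mappings to a new virtual address: the old leaf
     * entries are marked not present and equivalent entries are created
     * at the new location, preserving the physical address and flags.
     */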
    void
    remap(Addr vaddr, int64_t size, Addr new_vaddr) override
    {
        EmulationPageTable::remap(vaddr, size, new_vaddr);

        Final old_entry, new_entry;

        for (int64_t offset = 0; offset < size; offset += _pageSize) {
            // Unmap the original mapping.
            walk<EntryTypes...>(system, _pageSize, _basePtr, vaddr + offset,
                                false, &old_entry);
            old_entry.present(false);
            old_entry.write(system->physProxy);

            // Map the new one.
            walk<EntryTypes...>(system, _pageSize, _basePtr,
                                new_vaddr + offset, true, &new_entry);
            new_entry.reset(old_entry.paddr(), true, old_entry.uncacheable(),
                            old_entry.readonly());
            new_entry.write(system->physProxy);
        }
    }

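    /**
     * Clear the present bit of the leaf entry for every page in the
     * range. Intermediate tables are left in place and not reclaimed.
     */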
    void
    unmap(Addr vaddr, int64_t size) override
    {
        EmulationPageTable::unmap(vaddr, size);

        Final entry;

        for (int64_t offset = 0; offset < size; offset += _pageSize) {
            walk<EntryTypes...>(system, _pageSize, _basePtr,
                                vaddr + offset, false, &entry);
            fatal_if(!entry.present(),
                     "PageTable::unmap: Address %#x not mapped.", vaddr);
            entry.present(false);
            entry.write(system->physProxy);
            DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
        }
    }

    void
    serialize(CheckpointOut &cp) const override
    {
        EmulationPageTable::serialize(cp);
        /**
         * Since the page table is stored in system memory, which is
         * serialized separately, only the base pointer needs to be
         * serialized here.
         */
        paramOut(cp, "ptable.pointer", _basePtr);
    }

    void
    unserialize(CheckpointIn &cp) override
    {
        EmulationPageTable::unserialize(cp);
        paramIn(cp, "ptable.pointer", _basePtr);
    }
};
#endif // __MEM_MULTI_LEVEL_PAGE_TABLE_HH__