gem5.git / src/mem/multi_level_page_table.hh
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Alexandru Dutu
 */

/**
 * @file
 * Declaration of a multi-level page table.
 */

#ifndef __MEM_MULTI_LEVEL_PAGE_TABLE_HH__
#define __MEM_MULTI_LEVEL_PAGE_TABLE_HH__

#include <string>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/MMU.hh"
#include "mem/page_table.hh"
#include "sim/system.hh"

/**
 * This class implements an in-memory multi-level page table that can be
 * configured to follow ISA specifications. It can be used instead of the
 * PageTable class in SE mode to allow CPU models (e.g. X86KvmCPU)
 * to do a normal page table walk.
 *
 * To reduce the memory required to store the page table, a multi-level
 * page table stores its translations similarly to a radix tree. Let n be
 * the number of levels and {Ln, Ln-1, ..., L1, L0} a set that specifies
 * the number of entries for each level as base 2 logarithm values. A
 * multi-level page table stores its translations at level 0 (the leaves
 * of the tree) and is laid out in memory in the following way:
 *
 *                           +------------------------------+
 * level n                   |Ln-1_E0|Ln-1_E1|...|Ln-1_E2^Ln|
 *                           +------------------------------+
 *                              /                        \
 *             +------------------------+       +------------------------+
 * level n-1   |Ln-2_E0|...|Ln-2_E2^Ln-1|       |Ln-2_E0|...|Ln-2_E2^Ln-1|
 *             +------------------------+       +------------------------+
 *                /            \                   /            \
 *                                  .
 *                                  .
 *                                  .
 *               /                     /                 \
 *             +------------------+    +------------+     +------------+
 * level 1     |L0_E1|...|L0_E2^L1|    |...|L0_E2^L1| ... |...|L0_E2^L1|
 *             +------------------+    +------------+     +------------+
 *
 * where
 *   +------------------------------+
 *   |Lk-1_E0|Lk-1_E1|...|Lk-1_E2^Lk|
 *   +------------------------------+
 * is a level k table that holds 2^Lk level k-1 entries.
 *
 * Essentially, a level n table contains 2^Ln level n-1 entries, a
 * level n-1 table holds 2^Ln-1 level n-2 entries, and so on.
 *
 * The virtual address is split into offsets that index into the
 * different levels of the page table.
 *
 * +--------------------------------+
 * |LnOffset|...|L1Offset|PageOffset|
 * +--------------------------------+
 *
 * For example, L1Offset is formed by the bits in the range
 * [log2(page size), log2(page size) + L1), i.e. the bits just above
 * the page offset.
 *
 * For every level of the page table, from n down to 1, the base address
 * of that level's table is loaded, and the corresponding offset from the
 * virtual address is used to index into it, which reveals the memory
 * address of the table (or, at the last step, the translation entry) for
 * the next level down.
 *
 * @see MultiLevelPageTable
 */

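/*
 * As a concrete illustration (not taken from the gem5 sources): assume a
 * hypothetical four-level configuration with 4 KB pages (12 page-offset
 * bits) and 2^9 entries per table, so L4 = L3 = L2 = L1 = 9. The level
 * offsets could then be extracted from a virtual address like this:
 *
 *     Addr l1Offset = (vaddr >> 12) & 0x1ff;  // bits [12, 21)
 *     Addr l2Offset = (vaddr >> 21) & 0x1ff;  // bits [21, 30)
 *     Addr l3Offset = (vaddr >> 30) & 0x1ff;  // bits [30, 39)
 *     Addr l4Offset = (vaddr >> 39) & 0x1ff;  // bits [39, 48), indexes
 *                                             // the root (level 4) table
 *
 * The entry classes supplied as EntryTypes below are expected to encode
 * this per-level bit slicing for a real ISA.
 */
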
namespace {

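/*
 * The EntryTypes classes are expected to provide (roughly) the interface
 * below; this sketch is inferred from how the entries are used in this
 * file and is not a class that exists in gem5:
 *
 *     struct ExamplePTE
 *     {
 *         // Number of physical pages one table at this level occupies.
 *         static int tableSize();
 *
 *         // Load/store the entry that translates vaddr, given the
 *         // physical address of the table for this level.
 *         void read(PortProxy &proxy, Addr table, Addr vaddr);
 *         void write(PortProxy &proxy);
 *
 *         // Query and update the entry's contents.
 *         bool present();
 *         void present(bool p);
 *         Addr paddr();
 *         bool uncacheable();
 *         bool readonly();
 *         void reset(Addr paddr, bool present = true,
 *                    bool uncacheable = false, bool readonly = false);
 *     };
 *
 * prepTopTable() below allocates tableSize() physical pages for the
 * top-level table of such a hierarchy and zero-fills them.
 */
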
template <class First, class ...Rest>
Addr
prepTopTable(System *system, Addr pageSize)
{
    Addr addr = system->allocPhysPages(First::tableSize());
    PortProxy &p = system->physProxy;
    p.memsetBlob(addr, 0, First::tableSize() * pageSize);
    return addr;
}

template <class ...Types>
struct LastType;

template <class First, class Second, class ...Rest>
struct LastType<First, Second, Rest...>
{
    typedef typename LastType<Second, Rest...>::type type;
};

template <class Only>
struct LastType<Only>
{
    typedef Only type;
};
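
/*
 * LastType peels types off the front of the parameter pack until only one
 * remains, so LastType<Types...>::type is the last type in the pack. For
 * example (illustrative only, std::is_same is from <type_traits>):
 *
 *     static_assert(
 *         std::is_same<LastType<int, char, bool>::type, bool>::value,
 *         "the last type of <int, char, bool> is bool");
 *
 * In this file, the last EntryTypes parameter is the leaf entry type that
 * map(), remap() and unmap() ultimately operate on.
 */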


template <class ...Types>
struct WalkWrapper;

template <class Final, class Only>
struct WalkWrapper<Final, Only>
{
    static void
    walk(System *system, Addr pageSize, Addr table, Addr vaddr,
         bool allocate, Final *entry)
    {
        entry->read(system->physProxy, table, vaddr);
    }
};

template <class Final, class First, class Second, class ...Rest>
struct WalkWrapper<Final, First, Second, Rest...>
{
    static void
    walk(System *system, Addr pageSize, Addr table, Addr vaddr,
         bool allocate, Final *entry)
    {
        First first;
        first.read(system->physProxy, table, vaddr);

        Addr next;
        if (!first.present()) {
            fatal_if(!allocate,
                     "Page fault while walking the page table.");
            next = prepTopTable<Second>(system, pageSize);
            first.reset(next);
            first.write(system->physProxy);
        } else {
            next = first.paddr();
        }
        WalkWrapper<Final, Second, Rest...>::walk(
                system, pageSize, next, vaddr, allocate, entry);
    }
};
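
/*
 * An illustrative expansion (the PTE4..PTE1 names are hypothetical): for
 * EntryTypes = <PTE4, PTE3, PTE2, PTE1>, listed from the root level down
 * to the leaf, the walk() helper below instantiates
 *
 *     WalkWrapper<PTE1, PTE4, PTE3, PTE2, PTE1>::walk(...)
 *
 * which reads a PTE4 entry from the root table, follows it (allocating a
 * zeroed PTE3 table first if the entry is not present and allocation is
 * allowed), and then recurses through
 *
 *     WalkWrapper<PTE1, PTE3, PTE2, PTE1>::walk(...)
 *     WalkWrapper<PTE1, PTE2, PTE1>::walk(...)
 *
 * until the two-parameter specialization above reads the leaf PTE1 entry
 * into *entry. If an intermediate entry is missing and allocate is false,
 * the walk aborts with the page fault fatal error.
 */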

template <class ...EntryTypes>
void
walk(System *system, Addr pageSize, Addr table, Addr vaddr,
     bool allocate, typename LastType<EntryTypes...>::type *entry)
{
    WalkWrapper<typename LastType<EntryTypes...>::type, EntryTypes...>::walk(
            system, pageSize, table, vaddr, allocate, entry);
}

}


template <class ...EntryTypes>
class MultiLevelPageTable : public EmulationPageTable
{
    typedef typename LastType<EntryTypes...>::type Final;

    /**
     * Pointer to the System object this page table belongs to.
     */
    System *system;

    /**
     * Physical address of the top-level (root) table of the page table.
     */
    Addr _basePtr;

  public:
    MultiLevelPageTable(const std::string &__name, uint64_t _pid,
                        System *_sys, Addr pageSize) :
        EmulationPageTable(__name, _pid, pageSize), system(_sys)
    {}

    ~MultiLevelPageTable() {}

    void
    initState(ThreadContext* tc) override
    {
        _basePtr = prepTopTable<EntryTypes...>(system, pageSize);
    }

    Addr basePtr() { return _basePtr; }

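    /**
     * Map the range [vaddr, vaddr + size) to [paddr, paddr + size). The
     * mapping is recorded in the EmulationPageTable and, for every page
     * in the range, the in-memory table is walked (allocating any missing
     * intermediate tables) and a leaf entry is written with the page's
     * physical address and the Uncacheable/ReadOnly flag bits.
     *
     * For illustration only, assuming 4 KB pages and an instance named pt:
     *
     *     pt.map(0x400000, 0x800000, 0x2000);
     *
     * would create two leaf entries covering two consecutive pages.
     */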
    void
    map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags = 0) override
    {
        EmulationPageTable::map(vaddr, paddr, size, flags);

        Final entry;

        for (int64_t offset = 0; offset < size; offset += pageSize) {
            walk<EntryTypes...>(system, pageSize, _basePtr,
                                vaddr + offset, true, &entry);

            entry.reset(paddr + offset, true, flags & Uncacheable,
                        flags & ReadOnly);
            entry.write(system->physProxy);

            DPRINTF(MMU, "New mapping: %#x-%#x\n",
                    vaddr + offset, paddr + offset);
        }
    }

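    /**
     * Move the mapping for [vaddr, vaddr + size) to new_vaddr. For each
     * page, the existing leaf entry is looked up and marked not present,
     * and a new leaf entry is written for the corresponding page at
     * new_vaddr, preserving the physical address and the uncacheable and
     * read-only bits of the old entry.
     */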
    void
    remap(Addr vaddr, int64_t size, Addr new_vaddr) override
    {
        EmulationPageTable::remap(vaddr, size, new_vaddr);

        Final old_entry, new_entry;

        for (int64_t offset = 0; offset < size; offset += pageSize) {
            // Unmap the original mapping.
            walk<EntryTypes...>(system, pageSize, _basePtr, vaddr + offset,
                                false, &old_entry);
            old_entry.present(false);
            old_entry.write(system->physProxy);

            // Map the new one.
            walk<EntryTypes...>(system, pageSize, _basePtr,
                                new_vaddr + offset, true, &new_entry);
            new_entry.reset(old_entry.paddr(), true, old_entry.uncacheable(),
                            old_entry.readonly());
            new_entry.write(system->physProxy);
        }
    }

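    /**
     * Remove the mapping for [vaddr, vaddr + size). Each page in the
     * range must currently be mapped; otherwise the simulation aborts
     * with a fatal error. The leaf entries are marked not present, but
     * the intermediate tables are not freed.
     */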
    void
    unmap(Addr vaddr, int64_t size) override
    {
        EmulationPageTable::unmap(vaddr, size);

        Final entry;

        for (int64_t offset = 0; offset < size; offset += pageSize) {
            walk<EntryTypes...>(system, pageSize, _basePtr,
                                vaddr + offset, false, &entry);
            fatal_if(!entry.present(),
                     "PageTable::unmap: Address %#x not mapped.",
                     vaddr + offset);
            entry.present(false);
            entry.write(system->physProxy);
            DPRINTF(MMU, "Unmapping: %#x\n", vaddr + offset);
        }
    }

    void
    serialize(CheckpointOut &cp) const override
    {
        EmulationPageTable::serialize(cp);
        /**
         * Since the page table is stored in system memory, which is
         * serialized separately, we only serialize the base pointer.
         */
        paramOut(cp, "ptable.pointer", _basePtr);
    }

    void
    unserialize(CheckpointIn &cp) override
    {
        EmulationPageTable::unserialize(cp);
        paramIn(cp, "ptable.pointer", _basePtr);
    }
};
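
/*
 * A minimal sketch of how this template might be instantiated, assuming
 * hypothetical entry types PTE4..PTE1 (root level first, leaf level last)
 * that provide the interface described near the top of this file:
 *
 *     typedef MultiLevelPageTable<PTE4, PTE3, PTE2, PTE1> ExamplePageTable;
 *
 *     ExamplePageTable pt("page_table", pid, system, 0x1000);
 *     pt.initState(tc);              // allocate and zero the root table
 *     pt.map(vaddr, paddr, 0x1000);  // walk and write one leaf entry
 *     Addr root = pt.basePtr();      // e.g. to load into a simulated CR3
 *
 * In gem5, ISA-specific code is expected to supply the real entry types,
 * and the resulting in-memory table can then be walked normally by
 * hardware-assisted CPU models such as X86KvmCPU.
 */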
#endif // __MEM_MULTI_LEVEL_PAGE_TABLE_HH__