/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * Copyright (c) 2003 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Ron Dreslinski
 *          Ali Saidi
 */

/**
 * @file
 * Definitions of functional page table.
 */
#include "mem/page_table.hh"

#include <fstream>
#include <map>
#include <string>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/trace.hh"
#include "config/the_isa.hh"
#include "debug/MMU.hh"
#include "sim/faults.hh"
#include "sim/sim_object.hh"

using namespace std;
using namespace TheISA;

FuncPageTable::FuncPageTable(const std::string &__name,
                             uint64_t _pid, Addr _pageSize)
    : PageTableBase(__name, _pid, _pageSize)
{
}

FuncPageTable::~FuncPageTable()
{
}

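/**
 * Install translations for a contiguous range of virtual pages, backed
 * by a contiguous range of physical pages.  Unless the Clobber flag is
 * set, mapping a page that already has a translation is a fatal error;
 * the Uncacheable and ReadOnly flags are passed through to each new
 * TLB entry.
 */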
void
FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
{
    bool clobber = flags & Clobber;
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);

    for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
        if (!clobber && (pTable.find(vaddr) != pTable.end())) {
            // already mapped
            fatal("FuncPageTable::map: addr %#x already mapped", vaddr);
        }

        pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
                                         flags & Uncacheable,
                                         flags & ReadOnly);
        eraseCacheEntry(vaddr);
        updateCache(vaddr, pTable[vaddr]);
    }
}

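/**
 * Move an existing mapping to a new virtual address range.  The
 * physical pages backing the range are untouched; only the virtual
 * side of each entry is rewritten.  Every page in the source range
 * must already be mapped.
 */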
void
FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
{
    assert(pageOffset(vaddr) == 0);
    assert(pageOffset(new_vaddr) == 0);

    DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
            new_vaddr, size);

    for (; size > 0;
         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
    {
        assert(pTable.find(vaddr) != pTable.end());

        pTable[new_vaddr] = pTable[vaddr];
        pTable.erase(vaddr);
        eraseCacheEntry(vaddr);
        pTable[new_vaddr].updateVaddr(new_vaddr);
        updateCache(new_vaddr, pTable[new_vaddr]);
    }
}

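/**
 * Remove the translations for a range of virtual addresses.  Every
 * page in the range must currently be mapped.
 */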
void
FuncPageTable::unmap(Addr vaddr, int64_t size)
{
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr + size);

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        assert(pTable.find(vaddr) != pTable.end());
        pTable.erase(vaddr);
        eraseCacheEntry(vaddr);
    }
}

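/**
 * Return true only if no page in the given virtual address range
 * currently has a translation.
 */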
bool
FuncPageTable::isUnmapped(Addr vaddr, int64_t size)
{
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        if (pTable.find(vaddr) != pTable.end()) {
            return false;
        }
    }

    return true;
}

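/**
 * Look up the entry for a virtual address.  The three-entry
 * pTableCache of recent translations is probed first so that hot
 * pages avoid the cost of a full map lookup; on a cache miss the
 * backing map is searched and, if the page is mapped, the cache is
 * refreshed with the result.
 */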
bool
FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
{
    Addr page_addr = pageAlign(vaddr);

    if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
        entry = pTableCache[0].entry;
        return true;
    }
    if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
        entry = pTableCache[1].entry;
        return true;
    }
    if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
        entry = pTableCache[2].entry;
        return true;
    }

    PTableItr iter = pTable.find(page_addr);

    if (iter == pTable.end()) {
        return false;
    }

    updateCache(page_addr, iter->second);
    entry = iter->second;
    return true;
}

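/**
 * Translate a virtual address to a physical address.  Returns false
 * if no mapping exists; otherwise paddr is set to the page's physical
 * base plus the offset of vaddr within the page.
 */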
bool
PageTableBase::translate(Addr vaddr, Addr &paddr)
{
    TheISA::TlbEntry entry;
    if (!lookup(vaddr, entry)) {
        DPRINTF(MMU, "Couldn't Translate: %#x\n", vaddr);
        return false;
    }
    paddr = pageOffset(vaddr) + entry.pageStart();
    DPRINTF(MMU, "Translating: %#x->%#x\n", vaddr, paddr);
    return true;
}

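/**
 * Translate the virtual address of a memory request and set its
 * physical address.  The request must fit within a single page; a
 * missing translation produces a GenericPageTableFault rather than
 * ending the simulation.
 */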
Fault
PageTableBase::translate(RequestPtr req)
{
    Addr paddr;
    assert(pageAlign(req->getVaddr() + req->getSize() - 1)
           == pageAlign(req->getVaddr()));
    if (!translate(req->getVaddr(), paddr)) {
        return Fault(new GenericPageTableFault(req->getVaddr()));
    }
    req->setPaddr(paddr);
    if ((paddr & (pageSize - 1)) + req->getSize() > pageSize) {
        panic("Request spans page boundaries!\n");
    }
    return NoFault;
}

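/**
 * Write the page table to a checkpoint: the entry count is stored as
 * "ptable.size", and each mapping goes in its own "EntryN" section
 * holding the virtual address and the serialized TLB entry.
 */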
void
FuncPageTable::serialize(CheckpointOut &cp) const
{
    paramOut(cp, "ptable.size", pTable.size());

    PTable::size_type count = 0;
    for (auto &pte : pTable) {
        ScopedCheckpointSection sec(cp, csprintf("Entry%d", count++));

        paramOut(cp, "vaddr", pte.first);
        pte.second.serialize(cp);
    }
    assert(count == pTable.size());
}

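/**
 * Rebuild the page table from a checkpoint written by serialize(),
 * reading back one "EntryN" section per mapping.
 */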
void
FuncPageTable::unserialize(CheckpointIn &cp)
{
    int count;
    paramIn(cp, "ptable.size", count);

    for (int i = 0; i < count; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("Entry%d", i));

        // A stack-local entry suffices here; it is default-constructed,
        // filled in by unserialize, and copied into the table.
        TheISA::TlbEntry entry;
        Addr vaddr;

        paramIn(cp, "vaddr", vaddr);
        entry.unserialize(cp);

        pTable[vaddr] = entry;
    }
}

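// Usage sketch (not part of gem5; hypothetical values, assuming a
// 4 KiB page size and the flag defaults declared in page_table.hh):
//
//     FuncPageTable pt("test.ptable", /* pid */ 0, /* pageSize */ 0x1000);
//     pt.map(0x1000, 0x200000, 0x2000, 0);    // two pages, no flags
//
//     Addr paddr;
//     if (pt.translate(0x1800, paddr)) {
//         // paddr == 0x200800: page base 0x200000 plus offset 0x800
//     }
//
//     pt.remap(0x1000, 0x2000, 0x8000);       // move both pages
//     pt.unmap(0x8000, 0x2000);               // and drop them again
//     assert(pt.isUnmapped(0x1000, 0x2000));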