scons: Try to handle problems with gcc, lto and partial linking.
[gem5.git] / src / mem / page_table.cc
1 /*
2 * Copyright (c) 2014 Advanced Micro Devices, Inc.
3 * Copyright (c) 2003 The Regents of The University of Michigan
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Authors: Steve Reinhardt
30 * Ron Dreslinski
31 * Ali Saidi
32 */
33
34 /**
35 * @file
36 * Definitions of functional page table.
37 */
38 #include "mem/page_table.hh"
39
40 #include <string>
41
42 #include "base/trace.hh"
43 #include "config/the_isa.hh"
44 #include "debug/MMU.hh"
45 #include "sim/faults.hh"
46 #include "sim/serialize.hh"
47
48 using namespace std;
49 using namespace TheISA;
50
// Construct a functional (process-level) page table; all state is
// forwarded to PageTableBase. Mappings are added later via map().
FuncPageTable::FuncPageTable(const std::string &__name,
                             uint64_t _pid, Addr _pageSize)
    : PageTableBase(__name, _pid, _pageSize)
{
}
56
// Nothing to release explicitly: the pTable container cleans up itself.
FuncPageTable::~FuncPageTable()
{
}
60
61 void
62 FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
63 {
64 bool clobber = flags & Clobber;
65 // starting address must be page aligned
66 assert(pageOffset(vaddr) == 0);
67
68 DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr+ size);
69
70 for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
71 if (!clobber && (pTable.find(vaddr) != pTable.end())) {
72 // already mapped
73 fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
74 }
75
76 pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
77 flags & Uncacheable,
78 flags & ReadOnly);
79 eraseCacheEntry(vaddr);
80 updateCache(vaddr, pTable[vaddr]);
81 }
82 }
83
84 void
85 FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
86 {
87 assert(pageOffset(vaddr) == 0);
88 assert(pageOffset(new_vaddr) == 0);
89
90 DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
91 new_vaddr, size);
92
93 for (; size > 0;
94 size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
95 {
96 assert(pTable.find(vaddr) != pTable.end());
97
98 pTable[new_vaddr] = pTable[vaddr];
99 pTable.erase(vaddr);
100 eraseCacheEntry(vaddr);
101 pTable[new_vaddr].updateVaddr(new_vaddr);
102 updateCache(new_vaddr, pTable[new_vaddr]);
103 }
104 }
105
106 void
107 FuncPageTable::getMappings(std::vector<std::pair<Addr, Addr>> *addr_maps)
108 {
109 for (auto &iter : pTable)
110 addr_maps->push_back(make_pair(iter.first, iter.second.pageStart()));
111 }
112
113 void
114 FuncPageTable::unmap(Addr vaddr, int64_t size)
115 {
116 assert(pageOffset(vaddr) == 0);
117
118 DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr+ size);
119
120 for (; size > 0; size -= pageSize, vaddr += pageSize) {
121 assert(pTable.find(vaddr) != pTable.end());
122 pTable.erase(vaddr);
123 eraseCacheEntry(vaddr);
124 }
125
126 }
127
128 bool
129 FuncPageTable::isUnmapped(Addr vaddr, int64_t size)
130 {
131 // starting address must be page aligned
132 assert(pageOffset(vaddr) == 0);
133
134 for (; size > 0; size -= pageSize, vaddr += pageSize) {
135 if (pTable.find(vaddr) != pTable.end()) {
136 return false;
137 }
138 }
139
140 return true;
141 }
142
143 bool
144 FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
145 {
146 Addr page_addr = pageAlign(vaddr);
147
148 if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
149 entry = pTableCache[0].entry;
150 return true;
151 }
152 if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
153 entry = pTableCache[1].entry;
154 return true;
155 }
156 if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
157 entry = pTableCache[2].entry;
158 return true;
159 }
160
161 PTableItr iter = pTable.find(page_addr);
162
163 if (iter == pTable.end()) {
164 return false;
165 }
166
167 updateCache(page_addr, iter->second);
168 entry = iter->second;
169 return true;
170 }
171
172 bool
173 PageTableBase::translate(Addr vaddr, Addr &paddr)
174 {
175 TheISA::TlbEntry entry;
176 if (!lookup(vaddr, entry)) {
177 DPRINTF(MMU, "Couldn't Translate: %#x\n", vaddr);
178 return false;
179 }
180 paddr = pageOffset(vaddr) + entry.pageStart();
181 DPRINTF(MMU, "Translating: %#x->%#x\n", vaddr, paddr);
182 return true;
183 }
184
185 Fault
186 PageTableBase::translate(RequestPtr req)
187 {
188 Addr paddr;
189 assert(pageAlign(req->getVaddr() + req->getSize() - 1)
190 == pageAlign(req->getVaddr()));
191 if (!translate(req->getVaddr(), paddr)) {
192 return Fault(new GenericPageTableFault(req->getVaddr()));
193 }
194 req->setPaddr(paddr);
195 if ((paddr & (pageSize - 1)) + req->getSize() > pageSize) {
196 panic("Request spans page boundaries!\n");
197 return NoFault;
198 }
199 return NoFault;
200 }
201
202 void
203 FuncPageTable::serialize(CheckpointOut &cp) const
204 {
205 paramOut(cp, "ptable.size", pTable.size());
206
207 PTable::size_type count = 0;
208 for (auto &pte : pTable) {
209 ScopedCheckpointSection sec(cp, csprintf("Entry%d", count++));
210
211 paramOut(cp, "vaddr", pte.first);
212 pte.second.serialize(cp);
213 }
214 assert(count == pTable.size());
215 }
216
217 void
218 FuncPageTable::unserialize(CheckpointIn &cp)
219 {
220 int count;
221 paramIn(cp, "ptable.size", count);
222
223 for (int i = 0; i < count; ++i) {
224 ScopedCheckpointSection sec(cp, csprintf("Entry%d", i));
225
226 std::unique_ptr<TheISA::TlbEntry> entry;
227 Addr vaddr;
228
229 paramIn(cp, "vaddr", vaddr);
230 entry.reset(new TheISA::TlbEntry());
231 entry->unserialize(cp);
232
233 pTable[vaddr] = *entry;
234 }
235 }
236