/*
 * Copyright (c) 2007-2008 The Hewlett-Packard Development Company
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cstring>
#include <memory>

#include "arch/generic/mmapped_ipr.hh"
#include "arch/x86/insts/microldstop.hh"
#include "arch/x86/regs/misc.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/faults.hh"
#include "arch/x86/pagetable.hh"
#include "arch/x86/pagetable_walker.hh"
#include "arch/x86/tlb.hh"
#include "arch/x86/x86_traits.hh"
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "mem/packet_access.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

namespace X86ISA {

TLB::TLB(const Params *p)
    : BaseTLB(p), configAddress(0), size(p->size),
      lruSeq(0)
{
    if (!size)
        fatal("TLBs must have a non-zero size.\n");

    tlb = new TlbEntry[size];
    std::memset(tlb, 0, sizeof(TlbEntry) * size);

    for (int x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        freeList.push_back(&tlb[x]);
    }

    walker = p->walker;
    walker->setTLB(this);
}
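
// Note on organization: all entries live in one fixed-size array. Unused
// slots sit on freeList, while in-use slots are indexed by a radix trie
// keyed on the virtual address; lruSeq records when an entry was last
// touched so the stalest one can be evicted once the free list runs dry.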

void
TLB::evictLRU()
{
    // Find the entry with the lowest (and hence least recently updated)
    // sequence number.

    unsigned lru = 0;
    for (unsigned i = 1; i < size; i++) {
        if (tlb[i].lruSeq < tlb[lru].lruSeq)
            lru = i;
    }

    assert(tlb[lru].trieHandle);
    trie.remove(tlb[lru].trieHandle);
    tlb[lru].trieHandle = NULL;
    freeList.push_back(&tlb[lru]);
}

TlbEntry *
TLB::insert(Addr vpn, TlbEntry &entry)
{
    // If somebody beat us to it, just use that existing entry.
    TlbEntry *newEntry = trie.lookup(vpn);
    if (newEntry) {
        assert(newEntry->vaddr == vpn);
        return newEntry;
    }

    if (freeList.empty())
        evictLRU();

    newEntry = freeList.front();
    freeList.pop_front();

    *newEntry = entry;
    newEntry->lruSeq = nextSeq();
    newEntry->vaddr = vpn;
    newEntry->trieHandle =
    trie.insert(vpn, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
    return newEntry;
}
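
// Worked example (illustrative, assuming a 4 KB page, i.e. logBytes == 12):
// the trie key uses only the top (TlbEntryTrie::MaxBits - 12) bits of the
// virtual address, so every address inside the page reaches the same trie
// node, and a 2 MB page (logBytes == 21) occupies a correspondingly
// shallower position that matches a wider range of addresses.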

TlbEntry *
TLB::lookup(Addr va, bool update_lru)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry && update_lru)
        entry->lruSeq = nextSeq();
    return entry;
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "Invalidating all entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::setConfigAddress(uint32_t addr)
{
    configAddress = addr;
}

void
TLB::flushNonGlobal()
{
    DPRINTF(TLB, "Invalidating all non global entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle && !tlb[i].global) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::demapPage(Addr va, uint64_t asn)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry) {
        trie.remove(entry->trieHandle);
        entry->trieHandle = NULL;
        freeList.push_back(entry);
    }
}
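
// Note: the address space number argument (asn) is accepted for interface
// compatibility but is not consulted here; entries are demapped purely by
// virtual address.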

Fault
TLB::translateInt(RequestPtr req, ThreadContext *tc)
{
    DPRINTF(TLB, "Address references internal memory.\n");
    Addr vaddr = req->getVaddr();
    Addr prefix = (vaddr >> 3) & IntAddrPrefixMask;
    if (prefix == IntAddrPrefixCPUID) {
        panic("CPUID memory space not yet implemented!\n");
    } else if (prefix == IntAddrPrefixMSR) {
        vaddr = (vaddr >> 3) & ~IntAddrPrefixMask;
        req->setFlags(Request::MMAPPED_IPR);

        MiscRegIndex regNum;
        if (!msrAddrToIndex(regNum, vaddr))
            return std::make_shared<GeneralProtection>(0);

        // The index is multiplied by the size of a MiscReg so that
        // any memory dependence calculations will not see these as
        // overlapping.
        req->setPaddr((Addr)regNum * sizeof(MiscReg));
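
        // Illustrative example: MiscReg is a 64-bit type, so MSR index N is
        // assigned paddr N * 8; adjacent indices land 8 bytes apart and
        // their accesses can never appear to overlap.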
        return NoFault;
    } else if (prefix == IntAddrPrefixIO) {
        // TODO If CPL > IOPL or in virtual mode, check the I/O permission
        // bitmap in the TSS.

        Addr IOPort = vaddr & ~IntAddrPrefixMask;
        // Make sure the address fits in the expected 16 bit IO address
        // space.
        assert(!(IOPort & ~0xFFFF));
        if (IOPort == 0xCF8 && req->getSize() == 4) {
            req->setFlags(Request::MMAPPED_IPR);
            req->setPaddr(MISCREG_PCI_CONFIG_ADDRESS * sizeof(MiscReg));
        } else if ((IOPort & ~mask(2)) == 0xCFC) {
            req->setFlags(Request::UNCACHEABLE);

            Addr configAddress =
                tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS);
            if (bits(configAddress, 31, 31)) {
                req->setPaddr(PhysAddrPrefixPciConfig |
                        mbits(configAddress, 30, 2) |
                        (IOPort & mask(2)));
            } else {
                req->setPaddr(PhysAddrPrefixIO | IOPort);
            }
        } else {
            req->setFlags(Request::UNCACHEABLE);
            req->setPaddr(PhysAddrPrefixIO | IOPort);
        }
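
        // Worked example (illustrative only): with the config-address
        // register holding 0x80001008 (enable bit set, bus 0, device 2,
        // function 0, register offset 0x08), a 4-byte read of port 0xCFC is
        // steered to paddr PhysAddrPrefixPciConfig | 0x1008; the enable bit
        // is stripped, bits 30..2 select the config dword, and the low two
        // bits of the port pick the byte within it.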
        return NoFault;
    } else {
        panic("Access to unrecognized internal address space %#x.\n",
                prefix);
    }
    return NoFault;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    Addr paddr = req->getPaddr();

    // Check for an access to the local APIC
    if (FullSystem) {
        LocalApicBase localApicBase =
            tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
        AddrRange apicRange(localApicBase.base * PageBytes,
                            (localApicBase.base + 1) * PageBytes - 1);

        AddrRange m5opRange(0xFFFF0000, 0xFFFFFFFF);

        if (apicRange.contains(paddr)) {
            // The Intel developer's manuals say the below restrictions apply,
            // but the linux kernel, because of a compiler optimization, breaks
            // them.
            /*
            // Check alignment
            if (paddr & ((32/8) - 1))
                return new GeneralProtection(0);
            // Check access size
            if (req->getSize() != (32/8))
                return new GeneralProtection(0);
            */
            // Force the access to be uncacheable.
            req->setFlags(Request::UNCACHEABLE);
            req->setPaddr(x86LocalAPICAddress(tc->contextId(),
                                              paddr - apicRange.start()));
        } else if (m5opRange.contains(paddr)) {
            req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
            req->setPaddr(GenericISA::iprAddressPseudoInst(
                            (paddr >> 8) & 0xFF,
                            paddr & 0xFF));
        }
    }

    return NoFault;
}
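
// Note (illustrative): the 0xFFFF0000 to 0xFFFFFFFF window maps gem5
// pseudo-op ("m5op") accesses; finalizePhysical() above redirects physical
// addresses in that range to the generic memory-mapped IPR interface so a
// plain load or store can trigger a pseudo-instruction.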

Fault
TLB::translate(RequestPtr req, ThreadContext *tc, Translation *translation,
        Mode mode, bool &delayedResponse, bool timing)
{
    uint32_t flags = req->getFlags();
    int seg = flags & SegmentFlagMask;
    bool storeCheck = flags & (StoreCheck << FlagShift);

    delayedResponse = false;

    // If this is true, we're dealing with a request to a non-memory address
    // space.
    if (seg == SEGMENT_REG_MS) {
        return translateInt(req, tc);
    }

    Addr vaddr = req->getVaddr();
    DPRINTF(TLB, "Translating vaddr %#x.\n", vaddr);

    HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

    // If protected mode has been enabled...
    if (m5Reg.prot) {
        DPRINTF(TLB, "In protected mode.\n");
        // If we're not in 64-bit mode, do protection/limit checks
        if (m5Reg.mode != LongMode) {
            DPRINTF(TLB, "Not in long mode. Checking segment protection.\n");
            // Check for a NULL segment selector.
            if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
                        seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
                    && !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
                return std::make_shared<GeneralProtection>(0);
            bool expandDown = false;
            SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                if (!attr.writable && (mode == Write || storeCheck))
                    return std::make_shared<GeneralProtection>(0);
                if (!attr.readable && mode == Read)
                    return std::make_shared<GeneralProtection>(0);
                expandDown = attr.expandDown;
            }
            Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
            Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
            bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
            unsigned logSize = sizeOverride ? (unsigned)m5Reg.altAddr
                                            : (unsigned)m5Reg.defAddr;
            int size = (1 << logSize) * 8;
            Addr offset = bits(vaddr - base, size - 1, 0);
            Addr endOffset = offset + req->getSize() - 1;
            if (expandDown) {
                DPRINTF(TLB, "Checking an expand down segment.\n");
                warn_once("Expand down segments are untested.\n");
                if (offset <= limit || endOffset <= limit)
                    return std::make_shared<GeneralProtection>(0);
            } else {
                if (offset > limit || endOffset > limit)
                    return std::make_shared<GeneralProtection>(0);
            }
        }
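
        // Worked example (illustrative only): for a normal, non-expand-down
        // data segment with base 0x1000 and limit 0xFFF, a 4-byte access at
        // vaddr 0x1FFE gives offset 0xFFE and endOffset 0x1001; endOffset
        // exceeds the limit, so the access raises a general protection fault.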
        if (m5Reg.submode != SixtyFourBitMode ||
                (flags & (AddrSizeFlagBit << FlagShift)))
            vaddr &= mask(32);
        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(TLB, "Paging enabled.\n");
            // The vaddr already has the segment base applied.
            TlbEntry *entry = lookup(vaddr);
            if (!entry) {
                if (FullSystem) {
                    Fault fault = walker->start(tc, translation, req, mode);
                    if (timing || fault != NoFault) {
                        // This gets ignored in atomic mode.
                        delayedResponse = true;
                        return fault;
                    }
                    entry = lookup(vaddr);
                    assert(entry);
                } else {
                    DPRINTF(TLB, "Handling a TLB miss for "
                            "address %#x at pc %#x.\n",
                            vaddr, tc->instAddr());

                    Process *p = tc->getProcessPtr();
                    TlbEntry newEntry;
                    bool success = p->pTable->lookup(vaddr, newEntry);
                    if (!success && mode != Execute) {
                        // Check if we just need to grow the stack.
                        if (p->fixupStackFault(vaddr)) {
                            // If we did, lookup the entry for the new page.
                            success = p->pTable->lookup(vaddr, newEntry);
                        }
                    }
                    if (!success) {
                        return std::make_shared<PageFault>(vaddr, true, mode,
                                                           true, false);
                    } else {
                        Addr alignedVaddr = p->pTable->pageAlign(vaddr);
                        DPRINTF(TLB, "Mapping %#x to %#x\n", alignedVaddr,
                                newEntry.pageStart());
                        entry = insert(alignedVaddr, newEntry);
                    }
                    DPRINTF(TLB, "Miss was serviced.\n");
                }
            }

            DPRINTF(TLB, "Entry found with paddr %#x, "
                    "doing protection checks.\n", entry->paddr);
            // Do paging protection checks.
            bool inUser = (m5Reg.cpl == 3 &&
                    !(flags & (CPL0FlagBit << FlagShift)));
            CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);
            bool badWrite = (!entry->writable && (inUser || cr0.wp));
            if ((inUser && !entry->user) || (mode == Write && badWrite)) {
                // The page must have been present to get into the TLB in
                // the first place. We'll assume the reserved bits are
                // fine even though we're not checking them.
                return std::make_shared<PageFault>(vaddr, true, mode, inUser,
                                                   false);
            }
            if (storeCheck && badWrite) {
                // This would fault if this were a write, so return a page
                // fault that reflects that happening.
                return std::make_shared<PageFault>(vaddr, true, Write, inUser,
                                                   false);
            }
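
            // Permission-check note: badWrite means the entry is read-only
            // and we are either at user privilege or CR0.WP is set, so a
            // kernel-mode write to a read-only page is tolerated only while
            // CR0.WP is clear, matching the x86 write-protect rule.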

            Addr paddr = entry->paddr | (vaddr & mask(entry->logBytes));
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr);
            req->setPaddr(paddr);
            if (entry->uncacheable)
                req->setFlags(Request::UNCACHEABLE);
        } else {
            // Use the address which already has segmentation applied.
            DPRINTF(TLB, "Paging disabled.\n");
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
            req->setPaddr(vaddr);
        }
    } else {
        // Real mode
        DPRINTF(TLB, "In real mode.\n");
        DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
        req->setPaddr(vaddr);
    }

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    bool delayedResponse;
    return TLB::translate(req, tc, NULL, mode, delayedResponse, false);
}

void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    bool delayedResponse;
    assert(translation);
    Fault fault =
        TLB::translate(req, tc, translation, mode, delayedResponse, true);
    if (!delayedResponse)
        translation->finish(fault, req, tc, mode);
}
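
// Timing-mode note: when the lookup misses and the page walker is started,
// translate() sets delayedResponse, and the walker is expected to deliver
// the result later; finish() is only called here for translations that
// completed immediately.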

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
{
    panic("Not implemented\n");
    return NoFault;
}

void
TLB::serialize(std::ostream &os)
{
    // Only store the entries in use.
    uint32_t _size = size - freeList.size();
    SERIALIZE_SCALAR(_size);
    SERIALIZE_SCALAR(lruSeq);

    uint32_t _count = 0;

    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL) {
            os << "\n[" << csprintf("%s.Entry%d", name(), _count) << "]\n";
            tlb[x].serialize(os);
            _count++;
        }
    }
}

void
TLB::unserialize(Checkpoint *cp, const std::string &section)
{
    // Do not allow to restore with a smaller tlb.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserialize(cp, csprintf("%s.Entry%d", name(), x));
        newEntry->trieHandle = trie.insert(newEntry->vaddr,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}

BaseMasterPort *
TLB::getMasterPort()
{
    return &walker->getMasterPort("port");
}

} // namespace X86ISA

X86ISA::TLB *
X86TLBParams::create()
{
    return new X86ISA::TLB(this);
}