2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Nathan Binkert
37 #include "arch/alpha/faults.hh"
38 #include "arch/alpha/pagetable.hh"
39 #include "arch/alpha/tlb.hh"
40 #include "arch/generic/debugfaults.hh"
41 #include "base/inifile.hh"
42 #include "base/str.hh"
43 #include "base/trace.hh"
44 #include "cpu/thread_context.hh"
45 #include "debug/TLB.hh"
46 #include "sim/full_system.hh"
52 ///////////////////////////////////////////////////////////////////////
// Debug flags recording which physical-address bit has been observed
// marking uncacheable space.  Per the comment in checkCacheability(),
// the bit may live in position 39 (Turbolaser/EV5) or 40 (Tsunami/EV6);
// these flags exist to catch the odd case where both appear.
bool uncacheBit39{false};
bool uncacheBit40{false};
62 #define MODE2MASK(X) (1 << (X))
64 TLB::TLB(const Params
*p
)
65 : BaseTLB(p
), size(p
->size
), nlu(0)
67 table
= new TlbEntry
[size
];
68 memset(table
, 0, sizeof(TlbEntry
) * size
);
// Register simulator statistics for this TLB.
//
// NOTE(review): this chunk is truncated -- the regStats() signature,
// the Stats variable name preceding each .name() chain, and most of
// the .desc()/.flags() calls are missing from the visible text.  Only
// the registration fragments and the derived-formula assignments
// survive; compare against the upstream file before editing.

    // ITB (instruction-side) statistics: hits, misses, access
    // violations, and total accesses.
        .name(name() + ".fetch_hits")
        .name(name() + ".fetch_misses")
        .name(name() + ".fetch_acv")
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    // Total ITB accesses is a formula stat derived from hits + misses.
    fetch_accesses = fetch_hits + fetch_misses;

    // DTB read-side statistics.
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")

    // DTB write-side statistics.
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")

    // Combined DTB statistics (registration fragments).
        .name(name() + ".data_hits")
        .name(name() + ".data_misses")
        .name(name() + ".data_acv")
        .desc("DTB access violations")
        .name(name() + ".data_accesses")
        .desc("DTB accesses")

    // The combined DTB stats are formulas over the read/write parts.
    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
// look up an entry in the TLB
//
// Search for an entry matching virtual page number `vpn` under
// address-space number `asn`.  The three-slot EntryCache is consulted
// first; on a miss, the vpn -> table-index multimap (lookupTable) is
// scanned.  An entry matches when its tag equals the vpn and either
// its `asma` flag is set (presumably "address-space match all" --
// it bypasses the ASN comparison) or its recorded ASN equals `asn`.
//
// NOTE(review): truncated chunk -- the return type (TlbEntry *), a
// guard around EntryCache[0], the scan loop's break/increment,
// several closing braces, and the final `return retval;` are missing
// from the visible text.
TLB::lookup(Addr vpn, uint8_t asn)
    // assume not found...
    TlbEntry *retval = NULL;

    // Fast path: check the small recently-used entry cache, slot by
    // slot, most recent first.
    if (vpn == EntryCache[0]->tag &&
        (EntryCache[0]->asma || EntryCache[0]->asn == asn))
        retval = EntryCache[0];
    else if (EntryCache[1]) {
        if (vpn == EntryCache[1]->tag &&
            (EntryCache[1]->asma || EntryCache[1]->asn == asn))
            retval = EntryCache[1];
        else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                 (EntryCache[2]->asma || EntryCache[2]->asn == asn))
            retval = EntryCache[2];

    // Slow path: walk every lookupTable mapping that shares this vpn
    // (it is a multimap keyed by vpn, valued by table index).
    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    // Promote the winning entry into the EntryCache.
                    retval = updateCache(entry);

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
// Decide cacheability of a request from its physical address: flag
// uncached-space accesses, rewrite their address, and reject IPR
// space and uncached instruction fetches.
//
// NOTE(review): truncated chunk -- the return type (Fault), the
// else-branch structure around the IPR check, closing braces, and the
// final return are missing from the visible text.
TLB::checkCacheability(RequestPtr &req, bool itb)
    // in Alpha, cacheability is controlled by upper-level bits of the
    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40. The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40. So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */

    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return std::make_shared<UnimpFault>(
                "IPR memory space not implemented!");

        // mark request as uncacheable
        req->setFlags(Request::UNCACHEABLE);

        // Clear bits 42:35 of the physical address (10-2 in
        req->setPaddr(req->getPaddr() & PAddrUncachedMask);

    // We shouldn't be able to read from an uncachable address in Alpha as
    // we don't have a ROM and we don't want to try to fetch from a device
    // register as we destroy any data that is clear-on-read.
    if (req->isUncacheable() && itb)
        return std::make_shared<UnimpFault>(
            "CPU trying to fetch from uncached I/O");
// insert a new TLB entry
//
// Install `entry` at slot `nlu` (next to replace).  If that slot
// currently holds a valid entry, its vpn -> index mapping is first
// removed from lookupTable; the new mapping is then recorded.
//
// NOTE(review): truncated chunk -- the return type, the declaration
// of `vaddr` (presumably built from `addr`), the declaration of
// `index`, the scan loop's iterator advance, the copy of `entry`
// into table[nlu], the advance of nlu, and closing braces are
// missing from the visible text.
TLB::insert(Addr addr, TlbEntry &entry)
    if (table[nlu].valid) {
        // Evicting a valid entry: locate its mapping among all
        // lookupTable entries that share the old vpn.
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        // Scan duplicate-vpn mappings until the one pointing at nlu.
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    // Tag and validate the slot, then record its vpn -> index mapping.
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
// Invalidate every TLB entry by wiping the backing array (this clears
// every entry's valid bit, matching the constructor's initialization).
//
// NOTE(review): truncated chunk -- the flushAll() signature and any
// accompanying lookupTable/nlu reset are missing from the visible
// text; only the trace call and the table wipe survive.
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(TlbEntry) * size);
// Invalidate process-specific TLB entries, dropping their lookup
// mappings as well.
//
// NOTE(review): truncated chunk -- the return type, the loop header
// iterating `i` up to `end`, the test that presumably skips global
// (asma) entries, the iterator advance, and closing braces are
// missing from the visible text.
TLB::flushProcesses()
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();

        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;

        DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                entry->tag, entry->ppn);
        // Invalidate the table entry and remove its lookup mapping.
        entry->valid = false;
        lookupTable.erase(cur);
// Invalidate any entry whose tag matches the page of virtual address
// `addr` under ASN `asn` (or marked asma, which bypasses the ASN
// comparison).
//
// NOTE(review): truncated chunk -- the return type, the declaration
// of `vaddr` (presumably built from `addr`), the early return when
// the vpn is absent, the tail of the DPRINTF argument list, the
// non-matching-branch iterator advance, and closing braces are
// missing from the visible text.
TLB::flushAddr(Addr addr, uint8_t asn)
    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())

    // Walk every table slot recorded for this vpn.
    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),

            // invalidate this entry
            entry->valid = false;

            // Post-increment keeps the iterator valid across the erase.
            lookupTable.erase(i++);
348 TLB::serialize(ostream
&os
)
350 SERIALIZE_SCALAR(size
);
351 SERIALIZE_SCALAR(nlu
);
353 for (int i
= 0; i
< size
; i
++) {
354 nameOut(os
, csprintf("%s.Entry%d", name(), i
));
355 table
[i
].serialize(os
);
360 TLB::unserialize(Checkpoint
*cp
, const string
§ion
)
362 UNSERIALIZE_SCALAR(size
);
363 UNSERIALIZE_SCALAR(nlu
);
365 for (int i
= 0; i
< size
; i
++) {
366 table
[i
].unserialize(cp
, csprintf("%s.Entry%d", section
, i
));
367 if (table
[i
].valid
) {
368 lookupTable
.insert(make_pair(table
[i
].tag
, i
));
// Translate an instruction-fetch request: handle PAL-mode and
// explicitly-physical addresses directly, try the EV6 superpage
// mapping, otherwise consult the TLB and check permissions.
//
// NOTE(review): truncated chunk -- the return type (Fault), several
// if/else frames (including the miss test after lookup() and the
// start of the permission check), fault-counter updates, closing
// braces and parentheses are missing from the visible text.
TLB::translateInst(RequestPtr req, ThreadContext *tc)
    //If this is a pal pc, then set PHYSICAL
    if (FullSystem && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);

    // Physical requests translate trivially: paddr := vaddr.
    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());

        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            return std::make_shared<ItbAcvFault>(req->getVaddr());

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                return std::make_shared<ItbAcvFault>(req->getVaddr());

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));

            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                return std::make_shared<ItbPageFault>(req->getVaddr());

            // Compose the physical address from the entry's ppn and
            // the page offset of the virtual address.
            req->setPaddr((entry->ppn << PageShift) +
                (VAddr(req->getVaddr()).offset()

    // check permissions for this access
        (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
        // instruction access fault
        return std::make_shared<ItbAcvFault>(req->getVaddr());

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();

    // Fetches are flagged itb = true so uncached fetches fault.
    return checkCacheability(req, true);
// Translate a data access (read when !write, write when write):
// check alignment, resolve the effective processor mode (including
// PAL ALT_MODE), handle physical and EV6 superpage addresses, look up
// the TLB, and enforce read/write permissions and fault-on bits.
//
// NOTE(review): truncated chunk -- the return type (Fault), the
// declaration of `mode` (only its initializer survives), most if/else
// frames, fault-argument tails, the TLB-miss test, the write/read
// branch around the xwe/xre checks, stats updates for hits, and many
// closing braces are missing from the visible text.
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return std::make_shared<DtbAlignmentFault>(req->getVaddr(),

    // In PAL mode, an ALTMODE-flagged access uses the alternate mode
    // register instead of the current DTB mode.
    if (PcPAL(req->getPC())) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))

    // Physical requests translate trivially: paddr := vaddr.
    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());

        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
            return std::make_shared<DtbPageFault>(req->getVaddr(),

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                return std::make_shared<DtbAcvFault>(req->getVaddr(),

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

                // Miss: VPTE-flagged requests raise the PAL-assisted
                // page-table-entry miss fault; others the normal one.
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(std::make_shared<PDtbMissFault>(req->getVaddr(),
                    (Fault)(std::make_shared<NDtbMissFault>(req->getVaddr(),

            // Compose physical address from ppn and the page offset.
            req->setPaddr((entry->ppn << PageShift) +
                VAddr(req->getVaddr()).offset());

            // Write path: fail without write-enable for this mode, or
            // with fault-on-write set.
            if (!(entry->xwe & MODE2MASK(mode))) {
                // declare the instruction access fault
                uint64_t flags = MM_STAT_WR_MASK |
                    (entry->fonw ? MM_STAT_FONW_MASK : 0);
                return std::make_shared<DtbPageFault>(req->getVaddr(),

                uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                return std::make_shared<DtbPageFault>(req->getVaddr(),

            // Read path: fail without read-enable for this mode, or
            // with fault-on-read set.
            if (!(entry->xre & MODE2MASK(mode))) {
                uint64_t flags = MM_STAT_ACV_MASK |
                    (entry->fonr ? MM_STAT_FONR_MASK : 0);
                return std::make_shared<DtbAcvFault>(req->getVaddr(),

                uint64_t flags = MM_STAT_FONR_MASK;
                return std::make_shared<DtbPageFault>(req->getVaddr(),

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();

    return checkCacheability(req);
// Return the entry at the next-to-replace slot (nlu).
//
// NOTE(review): truncated chunk -- the return type, the conditional
// advance of nlu when `advance` is set, and the return statement are
// missing from the visible text.
TLB::index(bool advance)
    TlbEntry *entry = &table[nlu];
// Atomic-mode translation entry point: dispatch to the instruction-
// or data-side translation routine.
//
// NOTE(review): truncated chunk -- the return type (Fault) and the
// mode test separating the two returns are missing from the visible
// text; presumably instruction fetches take the first return and all
// other accesses the second (write-ness derived from mode == Write).
// Confirm against the upstream file.
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
        return translateInst(req, tc);
        return translateData(req, tc, mode == Write);
610 TLB::translateTiming(RequestPtr req
, ThreadContext
*tc
,
611 Translation
*translation
, Mode mode
)
614 translation
->finish(translateAtomic(req
, tc
, mode
), req
, tc
, mode
);
// Functional translation is not supported by this TLB model; calling
// it aborts the simulation via panic().
//
// NOTE(review): truncated chunk -- the return type and any statement
// following the panic are missing from the visible text.
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
    panic("Not implemented\n");
// Post-translation hook for finalizing a request's physical address.
//
// NOTE(review): only the signature is visible in this chunk; the
// return type and the entire body are missing from the visible text,
// so no behavior can be documented here.
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
630 } // namespace AlphaISA
633 AlphaTLBParams::create()
635 return new AlphaISA::TLB(this);