2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Nathan Binkert
36 #include "arch/alpha/pagetable.hh"
37 #include "arch/alpha/tlb.hh"
38 #include "arch/alpha/faults.hh"
39 #include "base/inifile.hh"
40 #include "base/str.hh"
41 #include "base/trace.hh"
42 #include "config/alpha_tlaser.hh"
43 #include "cpu/thread_context.hh"
49 ///////////////////////////////////////////////////////////////////////
// Flags recording whether an uncacheable access has been seen with the
// uncache bit in position 39 vs. 40 -- see the comment block in
// checkCacheability below: both placements are supported, and seeing
// BOTH used is a weird case these debug flags are meant to catch.
// NOTE(review): this listing wraps each statement across lines; the
// originals are single-line definitions.
55 bool uncacheBit39
= false;
56 bool uncacheBit40
= false;
// Turn a processor-mode number into a one-hot mask.  Used in
// DTB::translate to test the per-mode enable bit vectors
// (entry->xre / entry->xwe) of a TLB entry.
59 #define MODE2MASK(X) (1 << (X))
// TLB constructor: 'size' (number of entries) comes from the config
// Params; 'nlu' is the replacement index used by insert(), starting at 0.
// NOTE(review): the opening brace and possibly other init lines are
// elided in this listing (orig line 63 missing).
61 TLB::TLB(const Params
*p
)
62 : BaseTLB(p
), size(p
->size
), nlu(0)
// Allocate the entry array and zero it so every entry starts with
// valid == false (relied on by lookup/insert below).
64 table
= new TlbEntry
[size
];
65 memset(table
, 0, sizeof(TlbEntry
[size
]));
75 // look up an entry in the TLB
// Returns the matching TlbEntry for (vpn, asn), or NULL on a miss.
// A hit requires the tags (VPNs) to match AND either the entry's asma
// bit set (presumably the Alpha ASM "address space match / global"
// bit -- confirm against TlbEntry) or a matching ASN.
// NOTE(review): return type, braces, and the function's final return
// are elided in this listing (orig lines 76, 105-112 missing).
77 TLB::lookup(Addr vpn
, uint8_t asn
)
79 // assume not found...
80 TlbEntry
*retval
= NULL
;
// Fast path: probe the small EntryCache of most-recently-used entries
// (slots 0..2) before touching the full lookup structure.
83 if (vpn
== EntryCache
[0]->tag
&&
84 (EntryCache
[0]->asma
|| EntryCache
[0]->asn
== asn
))
85 retval
= EntryCache
[0];
86 else if (EntryCache
[1]) {
87 if (vpn
== EntryCache
[1]->tag
&&
88 (EntryCache
[1]->asma
|| EntryCache
[1]->asn
== asn
))
89 retval
= EntryCache
[1];
90 else if (EntryCache
[2] && vpn
== EntryCache
[2]->tag
&&
91 (EntryCache
[2]->asma
|| EntryCache
[2]->asn
== asn
))
92 retval
= EntryCache
[2];
// Slow path: consult lookupTable, which maps vpn -> table index.  The
// while loop over i->first == vpn implies multiple entries may share a
// VPN (multimap-like behavior) differing only by ASN/asma.
97 PageTable::const_iterator i
= lookupTable
.find(vpn
);
98 if (i
!= lookupTable
.end()) {
99 while (i
->first
== vpn
) {
100 int index
= i
->second
;
101 TlbEntry
*entry
= &table
[index
];
// Entries reachable from lookupTable must always be valid.
102 assert(entry
->valid
);
103 if (vpn
== entry
->tag
&& (entry
->asma
|| entry
->asn
== asn
)) {
// Hit: promote this entry into the EntryCache.  NOTE(review): the
// loop-exit / iterator-advance lines are elided here (orig 105-112).
104 retval
= updateCache(entry
);
// Trace the outcome; ppn is reported as 0 on a miss.
113 DPRINTF(TLB
, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn
, (int)asn
,
114 retval
? "hit" : "miss", retval
? retval
->ppn
: 0);
// Decide cacheability of a translated request from high physical
// address bits, mark it UNCACHEABLE when appropriate, and reject
// unimplemented cases with an UnimpFault.  'itb' is true when called
// from instruction translation.  Return type (Fault, presumably) and
// several lines are elided in this listing.
119 TLB::checkCacheability(RequestPtr
&req
, bool itb
)
121 // in Alpha, cacheability is controlled by upper-level bits of the
125 * We support having the uncacheable bit in either bit 39 or bit
126 * 40. The Turbolaser platform (and EV5) support having the bit
127 * in 39, but Tsunami (which Linux assumes uses an EV6) generates
128 * accesses with the bit in 40. So we must check for both, but we
129 * have debug flags to catch a weird case where both are used,
130 * which shouldn't happen.
// Test both candidate uncache bits.  NOTE(review): this listing shows
// PAddrUncachedBit43 on the second test while the comment above talks
// about bit 40 -- confirm against the real pagetable.hh constants; the
// bodies of both ifs (orig 136, 138-139) are elided here.
135 if (req
->getPaddr() & PAddrUncachedBit39
)
137 if (req
->getPaddr() & PAddrUncachedBit43
)
140 // IPR memory space not implemented
141 if (PAddrIprSpace(req
->getPaddr())) {
142 return new UnimpFault("IPR memory space not implemented!");
144 // mark request as uncacheable
145 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
148 // Clear bits 42:35 of the physical address (10-2 in
150 req
->setPaddr(req
->getPaddr() & PAddrUncachedMask
);
153 // We shouldn't be able to read from an uncachable address in Alpha as
154 // we don't have a ROM and we don't want to try to fetch from a device
155 // register as we destroy any data that is clear-on-read.
156 if (req
->isUncacheable() && itb
)
157 return new UnimpFault("CPU trying to fetch from uncached I/O");
164 // insert a new TLB entry
// Install 'entry' for virtual address 'addr' at replacement slot nlu,
// evicting whatever valid entry lives there and keeping lookupTable
// consistent.  NOTE(review): several lines are elided in this listing,
// including the construction of 'vaddr' from 'addr' (presumably
// VAddr vaddr = addr), the copy of 'entry' into table[nlu], the
// EntryCache flush, and the nlu advance -- confirm against original.
166 TLB::insert(Addr addr
, TlbEntry
&entry
)
// If the victim slot holds a valid entry, find its lookupTable record
// so it can be removed before being overwritten.
170 if (table
[nlu
].valid
) {
171 Addr oldvpn
= table
[nlu
].tag
;
172 PageTable::iterator i
= lookupTable
.find(oldvpn
);
174 if (i
== lookupTable
.end())
175 panic("TLB entry not found in lookupTable");
// Multiple table slots can share a VPN; scan forward until the record
// pointing at slot nlu is found (iterator advance elided, orig 181-183).
178 while ((index
= i
->second
) != nlu
) {
179 if (table
[index
].tag
!= oldvpn
)
180 panic("TLB entry not found in lookupTable");
185 DPRINTF(TLB
, "remove @%d: %#x -> %#x\n", nlu
, oldvpn
, table
[nlu
].ppn
);
187 lookupTable
.erase(i
);
190 DPRINTF(TLB
, "insert @%d: %#x -> %#x\n", nlu
, vaddr
.vpn(), entry
.ppn
);
// Install the new tag and mark the slot valid, then publish the
// vpn -> slot mapping.
193 table
[nlu
].tag
= vaddr
.vpn();
194 table
[nlu
].valid
= true;
196 lookupTable
.insert(make_pair(vaddr
.vpn(), nlu
));
// flushAll body fragment (the function header, orig ~200-202, is elided
// in this listing): invalidate the entire TLB by zeroing the entry
// array.  The original presumably also clears lookupTable / EntryCache
// and resets nlu on elided lines (orig 205+) -- confirm.
203 DPRINTF(TLB
, "flushAll\n");
204 memset(table
, 0, sizeof(TlbEntry
[size
]));
// Flush process-specific (per-ASN) mappings by walking lookupTable and
// invalidating entries.  NOTE(review): the loop header and the lines
// that advance the iterator are elided (orig 212-213, 216, 224-226);
// the original presumably skips entries with asma set (global
// mappings) on one of those elided lines -- confirm.
211 TLB::flushProcesses()
214 PageTable::iterator i
= lookupTable
.begin();
215 PageTable::iterator end
= lookupTable
.end();
217 int index
= i
->second
;
218 TlbEntry
*entry
= &table
[index
];
// lookupTable must only reference valid entries.
219 assert(entry
->valid
);
221 // we can't increment i after we erase it, so save a copy and
222 // increment it to get the next entry now
223 PageTable::iterator cur
= i
;
227 DPRINTF(TLB
, "flush @%d: %#x -> %#x\n", index
,
228 entry
->tag
, entry
->ppn
);
// Invalidate the slot first, then drop its lookupTable record via the
// saved iterator so traversal can continue safely.
229 entry
->valid
= false;
230 lookupTable
.erase(cur
);
// Flush every TLB entry matching virtual address 'addr' under address
// space 'asn' (global/asma entries match any ASN).  NOTE(review): the
// construction of 'vaddr' from 'addr' (presumably VAddr vaddr = addr),
// the early-return body at orig 243, and the closing braces are elided
// in this listing.
236 TLB::flushAddr(Addr addr
, uint8_t asn
)
241 PageTable::iterator i
= lookupTable
.find(vaddr
.vpn());
// Nothing mapped at this VPN: nothing to flush (return elided).
242 if (i
== lookupTable
.end())
// Several entries may share the VPN; examine each record for this key.
245 while (i
!= lookupTable
.end() && i
->first
== vaddr
.vpn()) {
246 int index
= i
->second
;
247 TlbEntry
*entry
= &table
[index
];
248 assert(entry
->valid
);
// Same match rule as lookup(): tag match plus asma-or-ASN match.
250 if (vaddr
.vpn() == entry
->tag
&& (entry
->asma
|| entry
->asn
== asn
)) {
251 DPRINTF(TLB
, "flushaddr @%d: %#x -> %#x\n", index
, vaddr
.vpn(),
254 // invalidate this entry
255 entry
->valid
= false;
// Post-increment erase idiom: advance i before the record disappears.
257 lookupTable
.erase(i
++);
// Checkpoint the TLB: scalar state (size, nlu) followed by each entry
// serialized into its own named subsection "<name>.Entry<i>".
// NOTE(review): return type, braces, and any trailing lines are elided
// in this listing.
266 TLB::serialize(ostream
&os
)
268 SERIALIZE_SCALAR(size
);
269 SERIALIZE_SCALAR(nlu
);
271 for (int i
= 0; i
< size
; i
++) {
272 nameOut(os
, csprintf("%s.Entry%d", name(), i
));
273 table
[i
].serialize(os
);
// Restore the TLB from a checkpoint: scalars first, then each entry
// from its "<section>.Entry<i>" subsection, rebuilding lookupTable
// from whichever restored entries are valid (lookupTable itself is not
// checkpointed).  NOTE(review): the parameter shown as '§ion' is a
// mis-rendered '&section' (HTML entity damage in this listing);
// return type and braces are also elided.
278 TLB::unserialize(Checkpoint
*cp
, const string
§ion
)
280 UNSERIALIZE_SCALAR(size
);
281 UNSERIALIZE_SCALAR(nlu
);
283 for (int i
= 0; i
< size
; i
++) {
284 table
[i
].unserialize(cp
, csprintf("%s.Entry%d", section
, i
));
// Only valid entries get a vpn -> slot record.
285 if (table
[i
].valid
) {
286 lookupTable
.insert(make_pair(table
[i
].tag
, i
));
291 ///////////////////////////////////////////////////////////////////////
// ITB (instruction TLB) constructor -- body elided in this listing.
295 ITB::ITB(const Params
*p
)
// Statistics registration fragment (the regStats signature and the
// stat declarations/descs between these lines are elided): per-ITB
// counters for hits, misses, access violations, plus the derived
// formula accesses = hits + misses.
304 .name(name() + ".hits")
307 .name(name() + ".misses")
310 .name(name() + ".acv")
313 .name(name() + ".accesses")
314 .desc("ITB accesses");
316 accesses
= hits
+ misses
;
// Instruction-fetch address translation.  Handles PAL-mode physical
// addresses, explicit PHYSICAL requests (including EV5/EV6 superpage
// regions), and normal TLB-backed translation with permission checks.
// Returns a Fault (NoFault on success, presumably) -- return type,
// braces, and several interior lines are elided in this listing.
320 ITB::translate(RequestPtr
&req
, ThreadContext
*tc
)
322 //If this is a pal pc, then set PHYSICAL
323 if (FULL_SYSTEM
&& PcPAL(req
->getPC()))
324 req
->setFlags(req
->getFlags() | PHYSICAL
);
326 if (PcPAL(req
->getPC())) {
327 // strip off PAL PC marker (lsb is 1)
328 req
->setPaddr((req
->getVaddr() & ~3) & PAddrImplMask
);
// Explicit physical request: virtual address IS the physical address.
333 if (req
->getFlags() & PHYSICAL
) {
334 req
->setPaddr(req
->getVaddr());
336 // verify that this is a good virtual address
337 if (!validVirtualAddress(req
->getVaddr())) {
339 return new ItbAcvFault(req
->getVaddr());
343 // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
344 // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
// Superpage detection: EV5 form is gated on the MCSR SP field; EV6
// form on the VA space bits.  The bodies between these tests are
// partially elided in this listing.
346 if ((MCSR_SP(tc
->readMiscRegNoEffect(IPR_MCSR
)) & 2) &&
347 VAddrSpaceEV5(req
->getVaddr()) == 2)
349 if (VAddrSpaceEV6(req
->getVaddr()) == 0x7e)
352 // only valid in kernel mode
// Superpage access outside kernel mode is an access violation.  The
// kernel-mode constant compared against is on an elided line.
353 if (ICM_CM(tc
->readMiscRegNoEffect(IPR_ICM
)) !=
356 return new ItbAcvFault(req
->getVaddr());
359 req
->setPaddr(req
->getVaddr() & PAddrImplMask
);
362 // sign extend the physical address properly
363 if (req
->getPaddr() & PAddrUncachedBit40
)
364 req
->setPaddr(req
->getPaddr() | ULL(0xf0000000000));
// else-branch of the sign extension: truncate to 40 bits.
366 req
->setPaddr(req
->getPaddr() & ULL(0xffffffffff));
370 // not a physical address: need to look up pte
// NOTE(review): the ASN is read from the DTB ASN IPR even though this
// is the ITB -- matches the original source, but worth confirming.
371 int asn
= DTB_ASN_ASN(tc
->readMiscRegNoEffect(IPR_DTB_ASN
));
372 TlbEntry
*entry
= lookup(VAddr(req
->getVaddr()).vpn(),
// Miss path (condition elided): raise an ITB page fault.
377 return new ItbPageFault(req
->getVaddr());
// Hit: physical address = (PPN << PageShift) + page offset.
380 req
->setPaddr((entry
->ppn
<< PageShift
) +
381 (VAddr(req
->getVaddr()).offset()
384 // check permissions for this access
// Permission test against the current mode (ICM CM field); the
// entry-field being tested (presumably entry->xre) is on an elided line.
386 (1 << ICM_CM(tc
->readMiscRegNoEffect(IPR_ICM
))))) {
387 // instruction access fault
389 return new ItbAcvFault(req
->getVaddr());
396 // check that the physical address is ok (catch bad physical addresses)
397 if (req
->getPaddr() & ~PAddrImplMask
)
398 return genMachineCheckFault();
// Final cacheability filtering; itb=true rejects uncached fetches.
400 return checkCacheability(req
, true);
404 ///////////////////////////////////////////////////////////////////////
// DTB (data TLB) constructor -- body elided in this listing.
408 DTB::DTB(const Params
*p
)
// Statistics registration fragment (regStats signature and the stat
// object declarations between the .name/.desc chains are elided):
// separate read/write counters for hits, misses, and access
// violations, plus combined formulas computed at the bottom.
416 .name(name() + ".read_hits")
417 .desc("DTB read hits")
421 .name(name() + ".read_misses")
422 .desc("DTB read misses")
426 .name(name() + ".read_acv")
427 .desc("DTB read access violations")
431 .name(name() + ".read_accesses")
432 .desc("DTB read accesses")
436 .name(name() + ".write_hits")
437 .desc("DTB write hits")
441 .name(name() + ".write_misses")
442 .desc("DTB write misses")
446 .name(name() + ".write_acv")
447 .desc("DTB write access violations")
451 .name(name() + ".write_accesses")
452 .desc("DTB write accesses")
456 .name(name() + ".hits")
461 .name(name() + ".misses")
466 .name(name() + ".acv")
467 .desc("DTB access violations")
471 .name(name() + ".accesses")
472 .desc("DTB accesses")
// Combined stats are formulas over the read/write counters.
475 hits
= read_hits
+ write_hits
;
476 misses
= read_misses
+ write_misses
;
477 acv
= read_acv
+ write_acv
;
478 accesses
= read_accesses
+ write_accesses
;
// Data-access address translation.  Checks alignment, handles ALT_MODE
// overrides, explicit PHYSICAL requests (with EV5/EV6 superpages),
// then normal TLB-backed translation with per-mode read/write
// permission and fault-on-read/write (fonr/fonw) checks.  Updates the
// read_*/write_* stats along each path.  Return type, braces, and many
// interior lines (fault constructor tails, stat bumps, MM_STAT flag
// terms) are elided in this listing.
482 DTB::translate(RequestPtr
&req
, ThreadContext
*tc
, bool write
)
484 Addr pc
= tc
->readPC();
// Current mode from the DTB_CM IPR (the 'mode_type mode =' head of
// this statement is elided).
487 (mode_type
)DTB_CM_CM(tc
->readMiscRegNoEffect(IPR_DTB_CM
));
490 * Check for alignment faults
// Natural-alignment check: address must be a multiple of the access
// size (size assumed to be a power of two for this mask to work).
492 if (req
->getVaddr() & (req
->getSize() - 1)) {
493 DPRINTF(TLB
, "Alignment Fault on %#x, size = %d", req
->getVaddr(),
495 uint64_t flags
= write
? MM_STAT_WR_MASK
: 0;
496 return new DtbAlignmentFault(req
->getVaddr(), req
->getFlags(), flags
);
// ALT_MODE requests translate using the mode from the ALT_MODE IPR
// instead of the current mode (else-branch elided).
500 mode
= (req
->getFlags() & ALTMODE
) ?
501 (mode_type
)ALT_MODE_AM(
502 tc
->readMiscRegNoEffect(IPR_ALT_MODE
))
506 if (req
->getFlags() & PHYSICAL
) {
507 req
->setPaddr(req
->getVaddr());
509 // verify that this is a good virtual address
510 if (!validVirtualAddress(req
->getVaddr())) {
511 if (write
) { write_acv
++; } else { read_acv
++; }
// Bad-VA page fault; one more flag term follows on an elided line.
512 uint64_t flags
= (write
? MM_STAT_WR_MASK
: 0) |
513 MM_STAT_BAD_VA_MASK
|
515 return new DtbPageFault(req
->getVaddr(), req
->getFlags(), flags
);
518 // Check for "superpage" mapping
// Same EV5/EV6 superpage detection as ITB::translate.
520 if ((MCSR_SP(tc
->readMiscRegNoEffect(IPR_MCSR
)) & 2) &&
521 VAddrSpaceEV5(req
->getVaddr()) == 2)
523 if (VAddrSpaceEV6(req
->getVaddr()) == 0x7e)
526 // only valid in kernel mode
// Superpage access outside kernel mode: access violation (the mode
// constant compared against and extra flag terms are elided).
527 if (DTB_CM_CM(tc
->readMiscRegNoEffect(IPR_DTB_CM
)) !=
529 if (write
) { write_acv
++; } else { read_acv
++; }
530 uint64_t flags
= ((write
? MM_STAT_WR_MASK
: 0) |
533 return new DtbAcvFault(req
->getVaddr(), req
->getFlags(),
537 req
->setPaddr(req
->getVaddr() & PAddrImplMask
);
540 // sign extend the physical address properly
541 if (req
->getPaddr() & PAddrUncachedBit40
)
542 req
->setPaddr(req
->getPaddr() | ULL(0xf0000000000));
// else-branch of the sign extension: truncate to 40 bits.
544 req
->setPaddr(req
->getPaddr() & ULL(0xffffffffff));
553 int asn
= DTB_ASN_ASN(tc
->readMiscRegNoEffect(IPR_DTB_ASN
));
555 // not a physical address: need to look up pte
556 TlbEntry
*entry
= lookup(VAddr(req
->getVaddr()).vpn(), asn
);
// Miss path (condition elided): choose PDtbMiss vs NDtbMiss depending
// on whether this is a virtual-PTE (VPTE) access.
560 if (write
) { write_misses
++; } else { read_misses
++; }
561 uint64_t flags
= (write
? MM_STAT_WR_MASK
: 0) |
562 MM_STAT_DTB_MISS_MASK
;
563 return (req
->getFlags() & VPTE
) ?
564 (Fault
)(new PDtbMissFault(req
->getVaddr(), req
->getFlags(),
566 (Fault
)(new NDtbMissFault(req
->getVaddr(), req
->getFlags(),
// Hit: physical address = (PPN << PageShift) + page offset.
570 req
->setPaddr((entry
->ppn
<< PageShift
) +
571 VAddr(req
->getVaddr()).offset());
// Write-permission check: one-hot mode mask against the write-enable
// vector; fonw adds a fault-on-write flag.  (The surrounding
// 'if (write)' split, stat bumps, and fault tails are elided.)
574 if (!(entry
->xwe
& MODE2MASK(mode
))) {
575 // declare the instruction access fault
577 uint64_t flags
= MM_STAT_WR_MASK
|
579 (entry
->fonw
? MM_STAT_FONW_MASK
: 0);
580 return new DtbPageFault(req
->getVaddr(), req
->getFlags(),
// Fault-on-write set on a writable page (condition elided).
585 uint64_t flags
= MM_STAT_WR_MASK
| MM_STAT_FONW_MASK
;
586 return new DtbPageFault(req
->getVaddr(), req
->getFlags(),
// Read-permission check, mirroring the write path with xre/fonr.
590 if (!(entry
->xre
& MODE2MASK(mode
))) {
592 uint64_t flags
= MM_STAT_ACV_MASK
|
593 (entry
->fonr
? MM_STAT_FONR_MASK
: 0);
594 return new DtbAcvFault(req
->getVaddr(), req
->getFlags(),
// Fault-on-read set on a readable page (condition elided).
599 uint64_t flags
= MM_STAT_FONR_MASK
;
600 return new DtbPageFault(req
->getVaddr(), req
->getFlags(),
612 // check that the physical address is ok (catch bad physical addresses)
613 if (req
->getPaddr() & ~PAddrImplMask
)
614 return genMachineCheckFault();
// Final cacheability filtering (itb defaults to false for data).
616 return checkCacheability(req
);
// index(): access the entry at the replacement slot nlu; 'advance'
// presumably controls whether nlu moves forward afterwards.  The
// return type, the rest of the body, and the return statement are
// elided in this listing (orig 623-629 missing) -- confirm against
// the original before relying on semantics.
620 TLB::index(bool advance
)
622 TlbEntry
*entry
= &table
[nlu
];
630 /* end namespace AlphaISA */ }
// Config-system factory hooks (return types and braces elided in this
// listing): build the ISA-specific ITB/DTB objects from their param
// structs.
633 AlphaITBParams::create()
635 return new AlphaISA::ITB(this);
639 AlphaDTBParams::create()
641 return new AlphaISA::DTB(this);