2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Nathan Binkert
36 #include "arch/alpha/pagetable.hh"
37 #include "arch/alpha/tlb.hh"
38 #include "arch/alpha/faults.hh"
39 #include "base/inifile.hh"
40 #include "base/str.hh"
41 #include "base/trace.hh"
42 #include "config/alpha_tlaser.hh"
43 #include "cpu/thread_context.hh"
44 #include "sim/builder.hh"
50 ///////////////////////////////////////////////////////////////////////
// Debug flags: record that an uncacheable access was seen with the
// uncache bit in position 39 vs. 40, so the "weird case where both are
// used" (see the comment in checkCacheability) can be detected.
55 bool uncacheBit39
= false;
56 bool uncacheBit40
= false;
59 #define MODE2MASK(X) (1 << (X))
// Construct a TLB with 's' entries: allocate the PTE array and zero
// it so every entry starts out invalid.  'nlu' — the index insert()
// uses as the next entry to (re)fill — starts at entry 0.
61 TLB::TLB(const string
&name
, int s
)
62 : SimObject(name
), size(s
), nlu(0)
64 table
= new PTE
[size
];
65 memset(table
, 0, sizeof(PTE
[size
]));
74 // look up an entry in the TLB
// Look up (vpn, asn) in the reverse map.  lookupTable maps vpn ->
// table index; since it can hold several entries for the same vpn,
// all candidates are scanned until one matches.
76 TLB::lookup(Addr vpn
, uint8_t asn
) const
78 // assume not found...
81 PageTable::const_iterator i
= lookupTable
.find(vpn
);
82 if (i
!= lookupTable
.end()) {
// Walk every lookupTable entry that shares this vpn key.
83 while (i
->first
== vpn
) {
84 int index
= i
->second
;
85 PTE
*pte
= &table
[index
];
// A hit requires a matching tag, and either a matching ASN or the
// asma bit set (asma bypasses the ASN comparison entirely — it
// presumably marks an address-space-match/global mapping).
87 if (vpn
== pte
->tag
&& (pte
->asma
|| pte
->asn
== asn
)) {
// Trace the outcome; retval is the matched PTE or null on a miss.
96 DPRINTF(TLB
, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn
, (int)asn
,
97 retval
? "hit" : "miss", retval
? retval
->ppn
: 0);
// Decide cacheability of a request from its physical address and
// rewrite the request accordingly (mark uncacheable, strip bits).
103 TLB::checkCacheability(RequestPtr
&req
)
105 // in Alpha, cacheability is controlled by upper-level bits of the
109 * We support having the uncacheable bit in either bit 39 or bit 40.
110 * The Turbolaser platform (and EV5) support having the bit in 39, but
111 * Tsunami (which Linux assumes uses an EV6) generates accesses with
112 * the bit in 40. So we must check for both, but we have debug flags
113 * to catch a weird case where both are used, which shouldn't happen.
// NOTE(review): the two tests below look like alternative
// #if ALPHA_TLASER branches (bit 39 for Turbolaser, bit 43 mask for
// Tsunami) — the preprocessor lines are not visible in this chunk;
// confirm against the full source.
118 if (req
->getPaddr() & PAddrUncachedBit39
)
120 if (req
->getPaddr() & PAddrUncachedBit43
)
123 // IPR memory space not implemented
124 if (PAddrIprSpace(req
->getPaddr())) {
125 return new UnimpFault("IPR memory space not implemented!");
127 // mark request as uncacheable
128 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
131 // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
132 req
->setPaddr(req
->getPaddr() & PAddrUncachedMask
);
140 // insert a new TLB entry
// Install 'pte' for 'addr' at slot nlu.  If that slot currently holds
// a valid entry, its lookupTable record is located and erased first
// so the reverse map stays consistent with the table.
142 TLB::insert(Addr addr
, PTE
&pte
)
145 if (table
[nlu
].valid
) {
146 Addr oldvpn
= table
[nlu
].tag
;
147 PageTable::iterator i
= lookupTable
.find(oldvpn
);
// The evicted entry must be present in the reverse map; anything
// else means the two structures are out of sync.
149 if (i
== lookupTable
.end())
150 panic("TLB entry not found in lookupTable");
// Several entries can share oldvpn as a key: advance until the one
// whose index is nlu is found, panicking if we run off the matches.
153 while ((index
= i
->second
) != nlu
) {
154 if (table
[index
].tag
!= oldvpn
)
155 panic("TLB entry not found in lookupTable");
160 DPRINTF(TLB
, "remove @%d: %#x -> %#x\n", nlu
, oldvpn
, table
[nlu
].ppn
);
162 lookupTable
.erase(i
);
165 DPRINTF(TLB
, "insert @%d: %#x -> %#x\n", nlu
, vaddr
.vpn(), pte
.ppn
);
// Fill slot nlu with the new mapping and register it in the
// reverse map under its vpn.
168 table
[nlu
].tag
= vaddr
.vpn();
169 table
[nlu
].valid
= true;
171 lookupTable
.insert(make_pair(vaddr
.vpn(), nlu
));
// flushAll body: invalidate the whole TLB by zeroing every PTE
// (valid bits included), exactly as the constructor does.
178 DPRINTF(TLB
, "flushAll\n");
179 memset(table
, 0, sizeof(PTE
[size
]));
// Walk the reverse map and flush per-process entries (the selection
// condition is not visible in this chunk — presumably entries whose
// asma/global bit is clear; confirm against the full source).
185 TLB::flushProcesses()
187 PageTable::iterator i
= lookupTable
.begin();
188 PageTable::iterator end
= lookupTable
.end();
190 int index
= i
->second
;
191 PTE
*pte
= &table
[index
];
194 // we can't increment i after we erase it, so save a copy and
195 // increment it to get the next entry now
196 PageTable::iterator cur
= i
;
200 DPRINTF(TLB
, "flush @%d: %#x -> %#x\n", index
, pte
->tag
, pte
->ppn
);
202 lookupTable
.erase(cur
);
// Flush any entry mapping 'addr' that is visible to address space
// 'asn' (i.e. tag matches and either asma is set or the ASN matches),
// removing it from the reverse map as well.
208 TLB::flushAddr(Addr addr
, uint8_t asn
)
212 PageTable::iterator i
= lookupTable
.find(vaddr
.vpn());
// Nothing mapped at this vpn: nothing to flush.
213 if (i
== lookupTable
.end())
// Check every reverse-map entry sharing this vpn key.
216 while (i
->first
== vaddr
.vpn()) {
217 int index
= i
->second
;
218 PTE
*pte
= &table
[index
];
// Same match rule as lookup(): tag equal, and asma or ASN equal.
221 if (vaddr
.vpn() == pte
->tag
&& (pte
->asma
|| pte
->asn
== asn
)) {
222 DPRINTF(TLB
, "flushaddr @%d: %#x -> %#x\n", index
, vaddr
.vpn(),
225 // invalidate this entry
228 lookupTable
.erase(i
);
// Checkpoint the TLB: scalar state (size, nlu) plus each PTE in its
// own named subsection ("<name>.PTE<i>").  lookupTable is not saved;
// it is rebuilt from the valid PTEs in unserialize().
237 TLB::serialize(ostream
&os
)
239 SERIALIZE_SCALAR(size
);
240 SERIALIZE_SCALAR(nlu
);
242 for (int i
= 0; i
< size
; i
++) {
243 nameOut(os
, csprintf("%s.PTE%d", name(), i
));
244 table
[i
].serialize(os
);
// Restore the TLB from a checkpoint: scalar state (size, nlu), then
// each PTE from its "<section>.PTE<i>" subsection.  The lookupTable
// reverse map is not checkpointed, so it is reconstructed here from
// every PTE that came back valid.
// Fix: the second parameter was mojibake ("§ion", an HTML-decoded
// "&sect;ion"); it must be "&section" — the body reads 'section'.
249 TLB::unserialize(Checkpoint
*cp
, const string
&section
)
251 UNSERIALIZE_SCALAR(size
);
252 UNSERIALIZE_SCALAR(nlu
);
254 for (int i
= 0; i
< size
; i
++) {
255 table
[i
].unserialize(cp
, csprintf("%s.PTE%d", section
, i
));
// Rebuild the vpn -> index reverse map for valid entries only.
256 if (table
[i
].valid
) {
257 lookupTable
.insert(make_pair(table
[i
].tag
, i
));
263 ///////////////////////////////////////////////////////////////////////
// ITB constructor: register the instruction-TLB statistics
// (hits, misses, access violations) and define the derived formula
// accesses = hits + misses.
267 ITB::ITB(const std::string
&name
, int size
)
276 .name(name() + ".hits")
279 .name(name() + ".misses")
282 .name(name() + ".acv")
285 .name(name() + ".accesses")
286 .desc("ITB accesses");
288 accesses
= hits
+ misses
;
// Translate an instruction fetch: handle PAL-mode and physical
// requests directly, recognize superpage regions (kernel-mode only),
// and otherwise consult the TLB, faulting on misses and permission
// violations.
293 ITB::translate(RequestPtr
&req
, ThreadContext
*tc
) const
// PAL-mode PCs are physically addressed: drop the PAL marker bits
// and use the address directly (masked to the implemented PA width).
295 if (PcPAL(req
->getPC())) {
296 // strip off PAL PC marker (lsb is 1)
297 req
->setPaddr((req
->getVaddr() & ~3) & PAddrImplMask
);
// Explicitly physical request: the vaddr already is the paddr.
302 if (req
->getFlags() & PHYSICAL
) {
303 req
->setPaddr(req
->getVaddr());
305 // verify that this is a good virtual address
306 if (!validVirtualAddress(req
->getVaddr())) {
308 return new ItbAcvFault(req
->getVaddr());
312 // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
313 // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
// Superpage detection: EV5 form requires MCSR_SP bit 1 set; the EV6
// form is recognized by the 0x7e address-space tag.
315 if ((MCSR_SP(tc
->readMiscReg(IPR_MCSR
)) & 2) &&
316 VAddrSpaceEV5(req
->getVaddr()) == 2)
318 if (VAddrSpaceEV6(req
->getVaddr()) == 0x7e)
321 // only valid in kernel mode
// Superpage access outside kernel mode is an access violation.
322 if (ICM_CM(tc
->readMiscReg(IPR_ICM
)) !=
325 return new ItbAcvFault(req
->getVaddr());
328 req
->setPaddr(req
->getVaddr() & PAddrImplMask
);
331 // sign extend the physical address properly
332 if (req
->getPaddr() & PAddrUncachedBit40
)
333 req
->setPaddr(req
->getPaddr() | ULL(0xf0000000000));
335 req
->setPaddr(req
->getPaddr() & ULL(0xffffffffff));
339 // not a physical address: need to look up pte
// The current ASN comes from the DTB_ASN IPR; lookup() matches it
// against the entry's ASN (or its asma bit).
340 int asn
= DTB_ASN_ASN(tc
->readMiscReg(IPR_DTB_ASN
));
341 PTE
*pte
= lookup(VAddr(req
->getVaddr()).vpn(),
// TLB miss on instruction fetch -> ITB page fault.
346 return new ItbPageFault(req
->getVaddr());
// Hit: paddr = (ppn << PageShift) + page offset of the vaddr.
349 req
->setPaddr((pte
->ppn
<< PageShift
) +
350 (VAddr(req
->getVaddr()).offset()
353 // check permissions for this access
// Test the entry's execute permission against the current mode
// (from the ICM IPR), via the MODE2MASK one-bit encoding.
355 (1 << ICM_CM(tc
->readMiscReg(IPR_ICM
))))) {
356 // instruction access fault
358 return new ItbAcvFault(req
->getVaddr());
365 // check that the physical address is ok (catch bad physical addresses)
366 if (req
->getPaddr() & ~PAddrImplMask
)
367 return genMachineCheckFault();
// Finally, classify cacheability of the resulting paddr.
369 return checkCacheability(req
);
373 ///////////////////////////////////////////////////////////////////////
// DTB constructor: register the data-TLB statistics, split by read
// vs. write (hits, misses, access violations, accesses), then define
// the combined totals as derived formulas over the read/write pairs.
377 DTB::DTB(const std::string
&name
, int size
)
385 .name(name() + ".read_hits")
386 .desc("DTB read hits")
390 .name(name() + ".read_misses")
391 .desc("DTB read misses")
395 .name(name() + ".read_acv")
396 .desc("DTB read access violations")
400 .name(name() + ".read_accesses")
401 .desc("DTB read accesses")
405 .name(name() + ".write_hits")
406 .desc("DTB write hits")
410 .name(name() + ".write_misses")
411 .desc("DTB write misses")
415 .name(name() + ".write_acv")
416 .desc("DTB write access violations")
420 .name(name() + ".write_accesses")
421 .desc("DTB write accesses")
425 .name(name() + ".hits")
430 .name(name() + ".misses")
435 .name(name() + ".acv")
436 .desc("DTB access violations")
440 .name(name() + ".accesses")
441 .desc("DTB accesses")
// Totals are formulas: each combined stat is the sum of its
// read- and write-side counterparts.
444 hits
= read_hits
+ write_hits
;
445 misses
= read_misses
+ write_misses
;
446 acv
= read_acv
+ write_acv
;
447 accesses
= read_accesses
+ write_accesses
;
// Translate a data access (read when !write, write otherwise):
// check alignment, handle physical and superpage addresses, then
// consult the TLB, raising the appropriate DTB fault (with MM_STAT
// flag bits describing the failure) on any violation.
451 DTB::translate(RequestPtr
&req
, ThreadContext
*tc
, bool write
) const
453 Addr pc
= tc
->readPC();
// Default access mode comes from the DTB_CM IPR (current mode).
456 (mode_type
)DTB_CM_CM(tc
->readMiscReg(IPR_DTB_CM
));
460 * Check for alignment faults
// A vaddr not aligned to the access size faults immediately; the
// MM_STAT write bit is set when this was a store.
462 if (req
->getVaddr() & (req
->getSize() - 1)) {
463 DPRINTF(TLB
, "Alignment Fault on %#x, size = %d", req
->getVaddr(),
465 uint64_t flags
= write
? MM_STAT_WR_MASK
: 0;
466 return new DtbAlignmentFault(req
->getVaddr(), req
->getFlags(), flags
);
// ALTMODE requests override the mode with the ALT_MODE IPR value.
470 mode
= (req
->getFlags() & ALTMODE
) ?
471 (mode_type
)ALT_MODE_AM(
472 tc
->readMiscReg(IPR_ALT_MODE
))
// Explicitly physical request: vaddr is already the paddr.
476 if (req
->getFlags() & PHYSICAL
) {
477 req
->setPaddr(req
->getVaddr());
479 // verify that this is a good virtual address
// Bad vaddr: bump the read/write ACV counter and fault with the
// bad-VA MM_STAT bit (plus the write bit for stores).
480 if (!validVirtualAddress(req
->getVaddr())) {
481 if (write
) { write_acv
++; } else { read_acv
++; }
482 uint64_t flags
= (write
? MM_STAT_WR_MASK
: 0) |
483 MM_STAT_BAD_VA_MASK
|
485 return new DtbPageFault(req
->getVaddr(), req
->getFlags(), flags
);
488 // Check for "superpage" mapping
// EV5 form needs MCSR_SP bit 1; EV6 form uses the 0x7e space tag
// (same scheme as ITB::translate).
490 if ((MCSR_SP(tc
->readMiscReg(IPR_MCSR
)) & 2) &&
491 VAddrSpaceEV5(req
->getVaddr()) == 2)
493 if (VAddrSpaceEV6(req
->getVaddr()) == 0x7e)
497 // only valid in kernel mode
// Superpage access outside kernel mode: count the ACV and fault.
498 if (DTB_CM_CM(tc
->readMiscReg(IPR_DTB_CM
)) !=
500 if (write
) { write_acv
++; } else { read_acv
++; }
501 uint64_t flags
= ((write
? MM_STAT_WR_MASK
: 0) |
503 return new DtbAcvFault(req
->getVaddr(), req
->getFlags(), flags
);
506 req
->setPaddr(req
->getVaddr() & PAddrImplMask
);
509 // sign extend the physical address properly
510 if (req
->getPaddr() & PAddrUncachedBit40
)
511 req
->setPaddr(req
->getPaddr() | ULL(0xf0000000000));
513 req
->setPaddr(req
->getPaddr() & ULL(0xffffffffff));
// Current ASN from the DTB_ASN IPR for the TLB lookup.
522 int asn
= DTB_ASN_ASN(tc
->readMiscReg(IPR_DTB_ASN
));
524 // not a physical address: need to look up pte
525 PTE
*pte
= lookup(VAddr(req
->getVaddr()).vpn(),
// Miss: count it, set the DTB-miss MM_STAT bit, and raise the
// PAL-flow (VPTE) or normal miss fault depending on the request.
530 if (write
) { write_misses
++; } else { read_misses
++; }
531 uint64_t flags
= (write
? MM_STAT_WR_MASK
: 0) |
532 MM_STAT_DTB_MISS_MASK
;
533 return (req
->getFlags() & VPTE
) ?
534 (Fault
)(new PDtbMissFault(req
->getVaddr(), req
->getFlags(),
536 (Fault
)(new NDtbMissFault(req
->getVaddr(), req
->getFlags(),
// Hit: paddr = (ppn << PageShift) + page offset of the vaddr.
540 req
->setPaddr((pte
->ppn
<< PageShift
) +
541 VAddr(req
->getVaddr()).offset());
// Write path: check the write-enable bits for the current mode;
// fonw (fault-on-write) adds its MM_STAT bit when set.
544 if (!(pte
->xwe
& MODE2MASK(mode
))) {
545 // declare the instruction access fault
547 uint64_t flags
= MM_STAT_WR_MASK
|
549 (pte
->fonw
? MM_STAT_FONW_MASK
: 0);
550 return new DtbPageFault(req
->getVaddr(), req
->getFlags(), flags
);
554 uint64_t flags
= MM_STAT_WR_MASK
|
556 return new DtbPageFault(req
->getVaddr(), req
->getFlags(), flags
);
// Read path: check the read-enable bits for the current mode;
// fonr (fault-on-read) adds its MM_STAT bit when set.
559 if (!(pte
->xre
& MODE2MASK(mode
))) {
561 uint64_t flags
= MM_STAT_ACV_MASK
|
562 (pte
->fonr
? MM_STAT_FONR_MASK
: 0);
563 return new DtbAcvFault(req
->getVaddr(), req
->getFlags(), flags
);
567 uint64_t flags
= MM_STAT_FONR_MASK
;
568 return new DtbPageFault(req
->getVaddr(), req
->getFlags(), flags
);
579 // check that the physical address is ok (catch bad physical addresses)
580 if (req
->getPaddr() & ~PAddrImplMask
)
581 return genMachineCheckFault();
// Finally, classify cacheability of the resulting paddr.
583 return checkCacheability(req
);
// Return a reference to the entry at slot nlu ('advance' presumably
// controls whether nlu is bumped afterwards — the rest of the body
// is not visible in this chunk; confirm against the full source).
587 TLB::index(bool advance
)
589 PTE
*pte
= &table
[nlu
];
597 /* end namespace AlphaISA */ }
// gem5 SimObject boilerplate: name the TLB base class, then declare,
// initialize, create, and register the ITB and DTB SimObjects so
// they can be instantiated from configuration scripts.
599 DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", TLB
)
601 BEGIN_DECLARE_SIM_OBJECT_PARAMS(ITB
)
605 END_DECLARE_SIM_OBJECT_PARAMS(ITB
)
607 BEGIN_INIT_SIM_OBJECT_PARAMS(ITB
)
// Instruction TLB defaults to 48 entries.
609 INIT_PARAM_DFLT(size
, "TLB size", 48)
611 END_INIT_SIM_OBJECT_PARAMS(ITB
)
614 CREATE_SIM_OBJECT(ITB
)
616 return new ITB(getInstanceName(), size
);
619 REGISTER_SIM_OBJECT("AlphaITB", ITB
)
621 BEGIN_DECLARE_SIM_OBJECT_PARAMS(DTB
)
625 END_DECLARE_SIM_OBJECT_PARAMS(DTB
)
627 BEGIN_INIT_SIM_OBJECT_PARAMS(DTB
)
// Data TLB defaults to 64 entries.
629 INIT_PARAM_DFLT(size
, "TLB size", 64)
631 END_INIT_SIM_OBJECT_PARAMS(DTB
)
634 CREATE_SIM_OBJECT(DTB
)
636 return new DTB(getInstanceName(), size
);
639 REGISTER_SIM_OBJECT("AlphaDTB", DTB
)