2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include "arch/alpha/tlb.hh"
34 #include "base/inifile.hh"
35 #include "base/str.hh"
36 #include "base/trace.hh"
37 #include "config/alpha_tlaser.hh"
38 #include "cpu/exec_context.hh"
39 #include "sim/builder.hh"
44 ///////////////////////////////////////////////////////////////////////
// Debug flags: set when an access is seen using the respective
// uncacheable-address-bit encoding, so the "weird case where both are
// used" (see checkCacheability) can be caught.
bool uncacheBit39 = false;
bool uncacheBit40 = false;

// Convert a processor mode number to a one-bit mask for testing against
// the per-mode permission bit vectors (xre/xwe) in a PTE.
#define MODE2MASK(X) (1 << (X))
55 AlphaTLB::AlphaTLB(const string
&name
, int s
)
56 : SimObject(name
), size(s
), nlu(0)
58 table
= new AlphaISA::PTE
[size
];
59 memset(table
, 0, sizeof(AlphaISA::PTE
[size
]));
68 // look up an entry in the TLB
70 AlphaTLB::lookup(Addr vpn
, uint8_t asn
) const
72 // assume not found...
73 AlphaISA::PTE
*retval
= NULL
;
75 PageTable::const_iterator i
= lookupTable
.find(vpn
);
76 if (i
!= lookupTable
.end()) {
77 while (i
->first
== vpn
) {
78 int index
= i
->second
;
79 AlphaISA::PTE
*pte
= &table
[index
];
81 if (vpn
== pte
->tag
&& (pte
->asma
|| pte
->asn
== asn
)) {
90 DPRINTF(TLB
, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn
, (int)asn
,
91 retval
? "hit" : "miss", retval
? retval
->ppn
: 0);
97 AlphaTLB::checkCacheability(MemReqPtr
&req
)
99 // in Alpha, cacheability is controlled by upper-level bits of the
103 * We support having the uncacheable bit in either bit 39 or bit 40.
104 * The Turbolaser platform (and EV5) support having the bit in 39, but
105 * Tsunami (which Linux assumes uses an EV6) generates accesses with
106 * the bit in 40. So we must check for both, but we have debug flags
107 * to catch a weird case where both are used, which shouldn't happen.
112 if (req
->paddr
& PAddrUncachedBit39
) {
114 if (req
->paddr
& PAddrUncachedBit43
) {
116 // IPR memory space not implemented
117 if (PAddrIprSpace(req
->paddr
)) {
118 if (!req
->xc
->misspeculating()) {
119 switch (req
->paddr
) {
120 case ULL(0xFFFFF00188):
125 panic("IPR memory space not implemented! PA=%x\n",
130 // mark request as uncacheable
131 req
->flags
|= UNCACHEABLE
;
134 // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
135 req
->paddr
&= PAddrUncachedMask
;
142 // insert a new TLB entry
144 AlphaTLB::insert(Addr addr
, AlphaISA::PTE
&pte
)
146 AlphaISA::VAddr vaddr
= addr
;
147 if (table
[nlu
].valid
) {
148 Addr oldvpn
= table
[nlu
].tag
;
149 PageTable::iterator i
= lookupTable
.find(oldvpn
);
151 if (i
== lookupTable
.end())
152 panic("TLB entry not found in lookupTable");
155 while ((index
= i
->second
) != nlu
) {
156 if (table
[index
].tag
!= oldvpn
)
157 panic("TLB entry not found in lookupTable");
162 DPRINTF(TLB
, "remove @%d: %#x -> %#x\n", nlu
, oldvpn
, table
[nlu
].ppn
);
164 lookupTable
.erase(i
);
167 DPRINTF(TLB
, "insert @%d: %#x -> %#x\n", nlu
, vaddr
.vpn(), pte
.ppn
);
170 table
[nlu
].tag
= vaddr
.vpn();
171 table
[nlu
].valid
= true;
173 lookupTable
.insert(make_pair(vaddr
.vpn(), nlu
));
180 DPRINTF(TLB
, "flushAll\n");
181 memset(table
, 0, sizeof(AlphaISA::PTE
[size
]));
187 AlphaTLB::flushProcesses()
189 PageTable::iterator i
= lookupTable
.begin();
190 PageTable::iterator end
= lookupTable
.end();
192 int index
= i
->second
;
193 AlphaISA::PTE
*pte
= &table
[index
];
196 // we can't increment i after we erase it, so save a copy and
197 // increment it to get the next entry now
198 PageTable::iterator cur
= i
;
202 DPRINTF(TLB
, "flush @%d: %#x -> %#x\n", index
, pte
->tag
, pte
->ppn
);
204 lookupTable
.erase(cur
);
210 AlphaTLB::flushAddr(Addr addr
, uint8_t asn
)
212 AlphaISA::VAddr vaddr
= addr
;
214 PageTable::iterator i
= lookupTable
.find(vaddr
.vpn());
215 if (i
== lookupTable
.end())
218 while (i
->first
== vaddr
.vpn()) {
219 int index
= i
->second
;
220 AlphaISA::PTE
*pte
= &table
[index
];
223 if (vaddr
.vpn() == pte
->tag
&& (pte
->asma
|| pte
->asn
== asn
)) {
224 DPRINTF(TLB
, "flushaddr @%d: %#x -> %#x\n", index
, vaddr
.vpn(),
227 // invalidate this entry
230 lookupTable
.erase(i
);
239 AlphaTLB::serialize(ostream
&os
)
241 SERIALIZE_SCALAR(size
);
242 SERIALIZE_SCALAR(nlu
);
244 for (int i
= 0; i
< size
; i
++) {
245 nameOut(os
, csprintf("%s.PTE%d", name(), i
));
246 table
[i
].serialize(os
);
251 AlphaTLB::unserialize(Checkpoint
*cp
, const string
§ion
)
253 UNSERIALIZE_SCALAR(size
);
254 UNSERIALIZE_SCALAR(nlu
);
256 for (int i
= 0; i
< size
; i
++) {
257 table
[i
].unserialize(cp
, csprintf("%s.PTE%d", section
, i
));
258 if (table
[i
].valid
) {
259 lookupTable
.insert(make_pair(table
[i
].tag
, i
));
265 ///////////////////////////////////////////////////////////////////////
269 AlphaITB::AlphaITB(const std::string
&name
, int size
)
270 : AlphaTLB(name
, size
)
278 .name(name() + ".hits")
281 .name(name() + ".misses")
284 .name(name() + ".acv")
287 .name(name() + ".accesses")
288 .desc("ITB accesses");
290 accesses
= hits
+ misses
;
294 AlphaITB::fault(Addr pc
, ExecContext
*xc
) const
296 if (!xc
->misspeculating()) {
297 xc
->setMiscReg(AlphaISA::IPR_ITB_TAG
, pc
);
298 xc
->setMiscReg(AlphaISA::IPR_IFAULT_VA_FORM
,
299 xc
->readMiscReg(AlphaISA::IPR_IVPTBR
) |
300 (AlphaISA::VAddr(pc
).vpn() << 3));
306 AlphaITB::translate(MemReqPtr
&req
) const
308 ExecContext
*xc
= req
->xc
;
310 if (AlphaISA::PcPAL(req
->vaddr
)) {
311 // strip off PAL PC marker (lsb is 1)
312 req
->paddr
= (req
->vaddr
& ~3) & PAddrImplMask
;
317 if (req
->flags
& PHYSICAL
) {
318 req
->paddr
= req
->vaddr
;
320 // verify that this is a good virtual address
321 if (!validVirtualAddress(req
->vaddr
)) {
322 fault(req
->vaddr
, req
->xc
);
324 return new ItbAcvFault
;
328 // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
329 // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
331 if ((MCSR_SP(xc
->readMiscReg(AlphaISA::IPR_MCSR
)) & 2) &&
332 VAddrSpaceEV5(req
->vaddr
) == 2) {
334 if (VAddrSpaceEV6(req
->vaddr
) == 0x7e) {
336 // only valid in kernel mode
337 if (ICM_CM(xc
->readMiscReg(AlphaISA::IPR_ICM
)) !=
338 AlphaISA::mode_kernel
) {
339 fault(req
->vaddr
, req
->xc
);
341 return new ItbAcvFault
;
344 req
->paddr
= req
->vaddr
& PAddrImplMask
;
347 // sign extend the physical address properly
348 if (req
->paddr
& PAddrUncachedBit40
)
349 req
->paddr
|= ULL(0xf0000000000);
351 req
->paddr
&= ULL(0xffffffffff);
355 // not a physical address: need to look up pte
356 int asn
= DTB_ASN_ASN(xc
->readMiscReg(AlphaISA::IPR_DTB_ASN
));
357 AlphaISA::PTE
*pte
= lookup(AlphaISA::VAddr(req
->vaddr
).vpn(),
361 fault(req
->vaddr
, req
->xc
);
363 return new ItbPageFault
;
366 req
->paddr
= (pte
->ppn
<< AlphaISA::PageShift
) +
367 (AlphaISA::VAddr(req
->vaddr
).offset() & ~3);
369 // check permissions for this access
371 (1 << ICM_CM(xc
->readMiscReg(AlphaISA::IPR_ICM
))))) {
372 // instruction access fault
373 fault(req
->vaddr
, req
->xc
);
375 return new ItbAcvFault
;
382 // check that the physical address is ok (catch bad physical addresses)
383 if (req
->paddr
& ~PAddrImplMask
)
384 return genMachineCheckFault();
386 checkCacheability(req
);
391 ///////////////////////////////////////////////////////////////////////
395 AlphaDTB::AlphaDTB(const std::string
&name
, int size
)
396 : AlphaTLB(name
, size
)
403 .name(name() + ".read_hits")
404 .desc("DTB read hits")
408 .name(name() + ".read_misses")
409 .desc("DTB read misses")
413 .name(name() + ".read_acv")
414 .desc("DTB read access violations")
418 .name(name() + ".read_accesses")
419 .desc("DTB read accesses")
423 .name(name() + ".write_hits")
424 .desc("DTB write hits")
428 .name(name() + ".write_misses")
429 .desc("DTB write misses")
433 .name(name() + ".write_acv")
434 .desc("DTB write access violations")
438 .name(name() + ".write_accesses")
439 .desc("DTB write accesses")
443 .name(name() + ".hits")
448 .name(name() + ".misses")
453 .name(name() + ".acv")
454 .desc("DTB access violations")
458 .name(name() + ".accesses")
459 .desc("DTB accesses")
462 hits
= read_hits
+ write_hits
;
463 misses
= read_misses
+ write_misses
;
464 acv
= read_acv
+ write_acv
;
465 accesses
= read_accesses
+ write_accesses
;
469 AlphaDTB::fault(MemReqPtr
&req
, uint64_t flags
) const
471 ExecContext
*xc
= req
->xc
;
472 AlphaISA::VAddr vaddr
= req
->vaddr
;
474 // Set fault address and flags. Even though we're modeling an
475 // EV5, we use the EV6 technique of not latching fault registers
476 // on VPTE loads (instead of locking the registers until IPR_VA is
477 // read, like the EV5). The EV6 approach is cleaner and seems to
478 // work with EV5 PAL code, but not the other way around.
479 if (!xc
->misspeculating()
480 && !(req
->flags
& VPTE
) && !(req
->flags
& NO_FAULT
)) {
481 // set VA register with faulting address
482 xc
->setMiscReg(AlphaISA::IPR_VA
, req
->vaddr
);
484 // set MM_STAT register flags
485 xc
->setMiscReg(AlphaISA::IPR_MM_STAT
,
486 (((Opcode(xc
->getInst()) & 0x3f) << 11)
487 | ((Ra(xc
->getInst()) & 0x1f) << 6)
490 // set VA_FORM register with faulting formatted address
491 xc
->setMiscReg(AlphaISA::IPR_VA_FORM
,
492 xc
->readMiscReg(AlphaISA::IPR_MVPTBR
) | (vaddr
.vpn() << 3));
497 AlphaDTB::translate(MemReqPtr
&req
, bool write
) const
499 RegFile
*regs
= &req
->xc
->regs
;
500 ExecContext
*xc
= req
->xc
;
503 AlphaISA::mode_type mode
=
504 (AlphaISA::mode_type
)DTB_CM_CM(xc
->readMiscReg(AlphaISA::IPR_DTB_CM
));
508 * Check for alignment faults
510 if (req
->vaddr
& (req
->size
- 1)) {
511 fault(req
, write
? MM_STAT_WR_MASK
: 0);
512 DPRINTF(TLB
, "Alignment Fault on %#x, size = %d", req
->vaddr
,
514 return genAlignmentFault();
518 mode
= (req
->flags
& ALTMODE
) ?
519 (AlphaISA::mode_type
)ALT_MODE_AM(
520 xc
->readMiscReg(AlphaISA::IPR_ALT_MODE
))
521 : AlphaISA::mode_kernel
;
524 if (req
->flags
& PHYSICAL
) {
525 req
->paddr
= req
->vaddr
;
527 // verify that this is a good virtual address
528 if (!validVirtualAddress(req
->vaddr
)) {
529 fault(req
, (write
? MM_STAT_WR_MASK
: 0) |
530 MM_STAT_BAD_VA_MASK
|
533 if (write
) { write_acv
++; } else { read_acv
++; }
534 return new DtbPageFault
;
537 // Check for "superpage" mapping
539 if ((MCSR_SP(xc
->readMiscReg(AlphaISA::IPR_MCSR
)) & 2) &&
540 VAddrSpaceEV5(req
->vaddr
) == 2) {
542 if (VAddrSpaceEV6(req
->vaddr
) == 0x7e) {
545 // only valid in kernel mode
546 if (DTB_CM_CM(xc
->readMiscReg(AlphaISA::IPR_DTB_CM
)) !=
547 AlphaISA::mode_kernel
) {
548 fault(req
, ((write
? MM_STAT_WR_MASK
: 0) |
550 if (write
) { write_acv
++; } else { read_acv
++; }
551 return new DtbAcvFault
;
554 req
->paddr
= req
->vaddr
& PAddrImplMask
;
557 // sign extend the physical address properly
558 if (req
->paddr
& PAddrUncachedBit40
)
559 req
->paddr
|= ULL(0xf0000000000);
561 req
->paddr
&= ULL(0xffffffffff);
570 int asn
= DTB_ASN_ASN(xc
->readMiscReg(AlphaISA::IPR_DTB_ASN
));
572 // not a physical address: need to look up pte
573 AlphaISA::PTE
*pte
= lookup(AlphaISA::VAddr(req
->vaddr
).vpn(),
578 fault(req
, (write
? MM_STAT_WR_MASK
: 0) |
579 MM_STAT_DTB_MISS_MASK
);
580 if (write
) { write_misses
++; } else { read_misses
++; }
581 return (req
->flags
& VPTE
) ?
582 (Fault
)(new PDtbMissFault
) :
583 (Fault
)(new NDtbMissFault
);
586 req
->paddr
= (pte
->ppn
<< AlphaISA::PageShift
) +
587 AlphaISA::VAddr(req
->vaddr
).offset();
590 if (!(pte
->xwe
& MODE2MASK(mode
))) {
591 // declare the instruction access fault
592 fault(req
, MM_STAT_WR_MASK
|
594 (pte
->fonw
? MM_STAT_FONW_MASK
: 0));
596 return new DtbPageFault
;
599 fault(req
, MM_STAT_WR_MASK
|
602 return new DtbPageFault
;
605 if (!(pte
->xre
& MODE2MASK(mode
))) {
606 fault(req
, MM_STAT_ACV_MASK
|
607 (pte
->fonr
? MM_STAT_FONR_MASK
: 0));
609 return new DtbAcvFault
;
612 fault(req
, MM_STAT_FONR_MASK
);
614 return new DtbPageFault
;
625 // check that the physical address is ok (catch bad physical addresses)
626 if (req
->paddr
& ~PAddrImplMask
)
627 return genMachineCheckFault();
629 checkCacheability(req
);
635 AlphaTLB::index(bool advance
)
637 AlphaISA::PTE
*pte
= &table
[nlu
];
645 DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", AlphaTLB
)
647 BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaITB
)
651 END_DECLARE_SIM_OBJECT_PARAMS(AlphaITB
)
653 BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaITB
)
655 INIT_PARAM_DFLT(size
, "TLB size", 48)
657 END_INIT_SIM_OBJECT_PARAMS(AlphaITB
)
660 CREATE_SIM_OBJECT(AlphaITB
)
662 return new AlphaITB(getInstanceName(), size
);
665 REGISTER_SIM_OBJECT("AlphaITB", AlphaITB
)
667 BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB
)
671 END_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB
)
673 BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaDTB
)
675 INIT_PARAM_DFLT(size
, "TLB size", 64)
677 END_INIT_SIM_OBJECT_PARAMS(AlphaDTB
)
680 CREATE_SIM_OBJECT(AlphaDTB
)
682 return new AlphaDTB(getInstanceName(), size
);
685 REGISTER_SIM_OBJECT("AlphaDTB", AlphaDTB
)