/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sstream>
#include <string>
#include <vector>

#include "arch/alpha/tlb.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "config/alpha_tlaser.hh"
#include "cpu/exec_context.hh"
#include "sim/builder.hh"

using namespace std;
using namespace EV5;

///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

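// convert a privilege mode (kernel = 0, executive, supervisor, user)
// into a one-bit mask for testing a PTE's xre/xwe protection fields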
#define MODE2MASK(X) (1 << (X))

AlphaTLB::AlphaTLB(const string &name, int s)
    : SimObject(name), size(s), nlu(0)
{
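    // allocate the PTE array up front; nlu indexes the next slot to be
    // replaced and is advanced round-robin by nextnlu()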
    table = new AlphaISA::PTE[size];
    memset(table, 0, size * sizeof(AlphaISA::PTE));
}

AlphaTLB::~AlphaTLB()
{
    if (table)
        delete [] table;
}

// look up an entry in the TLB
AlphaISA::PTE *
AlphaTLB::lookup(Addr vpn, uint8_t asn) const
{
    // assume not found...
    AlphaISA::PTE *retval = NULL;

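    // lookupTable is a multimap from VPN to table index; the same VPN
    // may be present once per address space, so scan every entry
    // sharing this VPN for one whose ASN matches or that is marked
    // address-space-match (asma)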
    PageTable::const_iterator i = lookupTable.find(vpn);
    while (i != lookupTable.end() && i->first == vpn) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);
        if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
            retval = pte;
            break;
        }

        ++i;
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}


void
AlphaTLB::checkCacheability(MemReqPtr &req)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit 40.
     * The Turbolaser platform (and EV5) puts the bit in 39, while
     * Tsunami (which Linux assumes is an EV6) generates accesses with
     * the bit in 40; the translate routines sign-extend PA<40> into
     * PA<43:40>, so the non-Turbolaser build tests bit 43 here. The
     * debug flags above catch the weird case where both conventions
     * appear at once, which shouldn't happen.
     */

#if ALPHA_TLASER
    if (req->paddr & PAddrUncachedBit39) {
#else
    if (req->paddr & PAddrUncachedBit43) {
#endif
        // IPR memory space not implemented
        if (PAddrIprSpace(req->paddr)) {
            if (!req->xc->misspeculating()) {
                switch (req->paddr) {
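                  // this one IPR location is tolerated: accesses simply
                  // return zero instead of triggering the panic below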
                  case ULL(0xFFFFF00188):
                    req->data = 0;
                    break;

                  default:
                    panic("IPR memory space not implemented! PA=%x\n",
                          req->paddr);
                }
            }
        } else {
            // mark request as uncacheable
            req->flags |= UNCACHEABLE;

#if !ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->paddr &= PAddrUncachedMask;
#endif
        }
    }
}


// insert a new TLB entry
void
AlphaTLB::insert(Addr addr, AlphaISA::PTE &pte)
{
    AlphaISA::VAddr vaddr = addr;
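    // nlu points at the entry to be replaced; if it currently holds a
    // valid mapping, unlink that mapping from lookupTable first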
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
            // don't walk off the end of the multimap
            if (i == lookupTable.end())
                panic("TLB entry not found in lookupTable");
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);

    table[nlu] = pte;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
AlphaTLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, size * sizeof(AlphaISA::PTE));
    lookupTable.clear();
    nlu = 0;
}

void
AlphaTLB::flushProcesses()
{
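    // invalidate every entry that isn't marked address-space-match
    // (ASM); ASM pages are shared by all processes and survive the flush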
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!pte->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
            pte->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
AlphaTLB::flushAddr(Addr addr, uint8_t asn)
{
    AlphaISA::VAddr vaddr = addr;

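    // invalidate any entry whose tag matches this VPN and whose ASN
    // matches (or which is marked address-space-match)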
    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        // as in flushProcesses(): we can't increment i after erasing
        // it, so grab the next iterator now
        PageTable::iterator cur = i;
        ++i;

        if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    pte->ppn);

            // invalidate this entry
            pte->valid = false;

            lookupTable.erase(cur);
        }
    }
}


void
AlphaTLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.PTE%d", name(), i));
        table[i].serialize(os);
    }
}

void
AlphaTLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}


///////////////////////////////////////////////////////////////////////
//
// Alpha ITB
//
AlphaITB::AlphaITB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}


void
AlphaITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}

void
AlphaITB::fault(Addr pc, ExecContext *xc) const
{
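    // update the fault IPRs only on the correct path; a misspeculated
    // fetch must not clobber them. IFAULT_VA_FORM holds the faulting
    // VPN formatted as an offset into the virtual page table
    // (IVPTBR | vpn << 3).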
    if (!xc->misspeculating()) {
        xc->setMiscReg(AlphaISA::IPR_ITB_TAG, pc);
        xc->setMiscReg(AlphaISA::IPR_IFAULT_VA_FORM,
                       xc->readMiscReg(AlphaISA::IPR_IVPTBR) |
                       (AlphaISA::VAddr(pc).vpn() << 3));
    }
}


Fault
AlphaITB::translate(MemReqPtr &req) const
{
    ExecContext *xc = req->xc;

    if (AlphaISA::PcPAL(req->vaddr)) {
        // strip off PAL PC marker (lsb is 1)
        req->paddr = (req->vaddr & ~3) & PAddrImplMask;
        hits++;
        return NoFault;
    }

    if (req->flags & PHYSICAL) {
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req->vaddr, req->xc);
            acv++;
            return new ItbAcvFault;
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
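        // (selector 0x7e is the EV6 "kseg" superpage, reached by kernel
        // virtual addresses such as 0xfffffc0000000000)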
#if ALPHA_TLASER
        if ((MCSR_SP(xc->readMiscReg(AlphaISA::IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->vaddr) == 2) {
#else
        if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif
            // only valid in kernel mode
            if (ICM_CM(xc->readMiscReg(AlphaISA::IPR_ICM)) !=
                AlphaISA::mode_kernel) {
                fault(req->vaddr, req->xc);
                acv++;
                return new ItbAcvFault;
            }

            req->paddr = req->vaddr & PAddrImplMask;

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PAddrUncachedBit40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(xc->readMiscReg(AlphaISA::IPR_DTB_ASN));
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
                                        asn);

            if (!pte) {
                fault(req->vaddr, req->xc);
                misses++;
                return new ItbPageFault;
            }

            req->paddr = (pte->ppn << AlphaISA::PageShift) +
                (AlphaISA::VAddr(req->vaddr).offset() & ~3);

            // check permissions for this access
            if (!(pte->xre &
                  (1 << ICM_CM(xc->readMiscReg(AlphaISA::IPR_ICM))))) {
                // instruction access fault
                fault(req->vaddr, req->xc);
                acv++;
                return new ItbAcvFault;
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PAddrImplMask)
        return genMachineCheckFault();

    checkCacheability(req);

    return NoFault;
}

///////////////////////////////////////////////////////////////////////
//
// Alpha DTB
//
AlphaDTB::AlphaDTB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}

void
AlphaDTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

void
AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
{
    ExecContext *xc = req->xc;
    AlphaISA::VAddr vaddr = req->vaddr;

    // Set fault address and flags. Even though we're modeling an
    // EV5, we use the EV6 technique of not latching fault registers
    // on VPTE loads (instead of locking the registers until IPR_VA is
    // read, like the EV5). The EV6 approach is cleaner and seems to
    // work with EV5 PAL code, but not the other way around.
    if (!xc->misspeculating()
        && !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
        // set VA register with faulting address
        xc->setMiscReg(AlphaISA::IPR_VA, req->vaddr);

        // set MM_STAT register flags
        xc->setMiscReg(AlphaISA::IPR_MM_STAT,
            (((Opcode(xc->getInst()) & 0x3f) << 11)
             | ((Ra(xc->getInst()) & 0x1f) << 6)
             | (flags & 0x3f)));

        // set VA_FORM register with faulting formatted address
        xc->setMiscReg(AlphaISA::IPR_VA_FORM,
            xc->readMiscReg(AlphaISA::IPR_MVPTBR) | (vaddr.vpn() << 3));
    }
}

Fault
AlphaDTB::translate(MemReqPtr &req, bool write) const
{
    RegFile *regs = &req->xc->regs;
    ExecContext *xc = req->xc;
    Addr pc = regs->pc;

    AlphaISA::mode_type mode =
        (AlphaISA::mode_type)DTB_CM_CM(xc->readMiscReg(AlphaISA::IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
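    // (req->size is assumed to be a power of two, so vaddr & (size - 1)
    // is nonzero exactly when the access is misaligned)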
    if (req->vaddr & (req->size - 1)) {
        fault(req, write ? MM_STAT_WR_MASK : 0);
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->vaddr,
                req->size);
        return genAlignmentFault();
    }

    if (pc & 0x1) {
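        // the PC's low bit is set, so we're in PAL mode: use the
        // requested alternate mode for this access, or default to
        // kernel mode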
        mode = (req->flags & ALTMODE) ?
            (AlphaISA::mode_type)ALT_MODE_AM(
                xc->readMiscReg(AlphaISA::IPR_ALT_MODE))
            : AlphaISA::mode_kernel;
    }

    if (req->flags & PHYSICAL) {
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req, (write ? MM_STAT_WR_MASK : 0) |
                  MM_STAT_BAD_VA_MASK |
                  MM_STAT_ACV_MASK);

            if (write) { write_acv++; } else { read_acv++; }
            return new DtbPageFault;
        }

        // Check for "superpage" mapping
#if ALPHA_TLASER
        if ((MCSR_SP(xc->readMiscReg(AlphaISA::IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->vaddr) == 2) {
#else
        if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif

            // only valid in kernel mode
            if (DTB_CM_CM(xc->readMiscReg(AlphaISA::IPR_DTB_CM)) !=
                AlphaISA::mode_kernel) {
                fault(req, ((write ? MM_STAT_WR_MASK : 0) |
                            MM_STAT_ACV_MASK));
                if (write) { write_acv++; } else { read_acv++; }
                return new DtbAcvFault;
            }

            req->paddr = req->vaddr & PAddrImplMask;

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PAddrUncachedBit40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(xc->readMiscReg(AlphaISA::IPR_DTB_ASN));

            // not a physical address: need to look up pte
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
                                        asn);

            if (!pte) {
                // page fault
                fault(req, (write ? MM_STAT_WR_MASK : 0) |
                      MM_STAT_DTB_MISS_MASK);
                if (write) { write_misses++; } else { read_misses++; }
                return (req->flags & VPTE) ?
                    (Fault)(new PDtbMissFault) :
                    (Fault)(new NDtbMissFault);
            }

            req->paddr = (pte->ppn << AlphaISA::PageShift) +
                AlphaISA::VAddr(req->vaddr).offset();

            if (write) {
                if (!(pte->xwe & MODE2MASK(mode))) {
                    // write access violation: this mode has no write
                    // permission on the page
                    fault(req, MM_STAT_WR_MASK |
                          MM_STAT_ACV_MASK |
                          (pte->fonw ? MM_STAT_FONW_MASK : 0));
                    write_acv++;
                    return new DtbPageFault;
                }
                if (pte->fonw) {
                    fault(req, MM_STAT_WR_MASK |
                          MM_STAT_FONW_MASK);
                    write_acv++;
                    return new DtbPageFault;
                }
            } else {
                if (!(pte->xre & MODE2MASK(mode))) {
                    fault(req, MM_STAT_ACV_MASK |
                          (pte->fonr ? MM_STAT_FONR_MASK : 0));
                    read_acv++;
                    return new DtbAcvFault;
                }
                if (pte->fonr) {
                    fault(req, MM_STAT_FONR_MASK);
                    read_acv++;
                    return new DtbPageFault;
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PAddrImplMask)
        return genMachineCheckFault();

    checkCacheability(req);

    return NoFault;
}

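// return a reference to the PTE in the next-to-be-replaced (nlu) slot,
// optionally advancing the replacement pointer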
AlphaISA::PTE &
AlphaTLB::index(bool advance)
{
    AlphaISA::PTE *pte = &table[nlu];

    if (advance)
        nextnlu();

    return *pte;
}

DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", AlphaTLB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaITB)

    INIT_PARAM_DFLT(size, "TLB size", 48)

END_INIT_SIM_OBJECT_PARAMS(AlphaITB)


CREATE_SIM_OBJECT(AlphaITB)
{
    return new AlphaITB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaITB", AlphaITB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaDTB)

    INIT_PARAM_DFLT(size, "TLB size", 64)

END_INIT_SIM_OBJECT_PARAMS(AlphaDTB)


CREATE_SIM_OBJECT(AlphaDTB)
{
    return new AlphaDTB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaDTB", AlphaDTB)