ifdefed ev5 vs. ev6 differences so Tlaser can work in the linux tree
[gem5.git] / arch / alpha / alpha_memory.cc
1 /*
2 * Copyright (c) 2001-2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sstream>
30 #include <string>
31 #include <vector>
32
33 #include "base/inifile.hh"
34 #include "base/str.hh"
35 #include "base/trace.hh"
36 #include "cpu/exec_context.hh"
37 #include "sim/builder.hh"
38 #include "targetarch/alpha_memory.hh"
39 #include "targetarch/ev5.hh"
40
41 using namespace std;
42
43 ///////////////////////////////////////////////////////////////////////
44 //
45 // Alpha TLB
46 //
#ifdef DEBUG
// Debug-only flags used to detect the "weird case" (see the comment in
// checkCacheability) where a workload sets the uncacheable indicator in
// both bit 39 (Turbolaser/EV5) and bit 40 (Tsunami/EV6) — which
// shouldn't happen.
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif
51
52 AlphaTLB::AlphaTLB(const string &name, int s)
53 : SimObject(name), size(s), nlu(0)
54 {
55 table = new AlphaISA::PTE[size];
56 memset(table, 0, sizeof(AlphaISA::PTE[size]));
57 }
58
59 AlphaTLB::~AlphaTLB()
60 {
61 if (table)
62 delete [] table;
63 }
64
65 // look up an entry in the TLB
66 AlphaISA::PTE *
67 AlphaTLB::lookup(Addr vpn, uint8_t asn) const
68 {
69 DPRINTF(TLB, "lookup %#x\n", vpn);
70
71 PageTable::const_iterator i = lookupTable.find(vpn);
72 if (i == lookupTable.end())
73 return NULL;
74
75 while (i->first == vpn) {
76 int index = i->second;
77 AlphaISA::PTE *pte = &table[index];
78 assert(pte->valid);
79 if (vpn == pte->tag && (pte->asma || pte->asn == asn))
80 return pte;
81
82 ++i;
83 }
84
85 // not found...
86 return NULL;
87 }
88
89
// Inspect the translated physical address and mark the request
// uncacheable when the platform's uncacheable bit is set; also traps
// accesses to the (unimplemented) IPR memory space.
void
AlphaTLB::checkCacheability(MemReqPtr &req)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit 40.
     * The Turbolaser platform (and EV5) support having the bit in 39, but
     * Tsunami (which Linux assumes uses an EV6) generates accesses with
     * the bit in 40. So we must check for both, but we have debug flags
     * to catch a weird case where both are used, which shouldn't happen.
     */
    // NOTE(review): the comment above talks about bits 39/40, but the
    // non-Tlaser branch below tests PA_UNCACHED_BIT_43 — presumably the
    // post-translation position of the EV6 uncacheable bit; confirm
    // against the definitions in ev5.hh.


#ifdef ALPHA_TLASER
    if (req->paddr & PA_UNCACHED_BIT_39) {
#else
    if (req->paddr & PA_UNCACHED_BIT_43) {
#endif
        // IPR memory space not implemented
        if (PA_IPR_SPACE(req->paddr)) {
            if (!req->xc->misspeculating()) {
                switch (req->paddr) {
                  // this one IPR address is tolerated and reads as zero
                  case ULL(0xFFFFF00188):
                    req->data = 0;
                    break;

                  default:
                    panic("IPR memory space not implemented! PA=%x\n",
                          req->paddr);
                }
            }
        } else {
            // mark request as uncacheable
            req->flags |= UNCACHEABLE;

#ifndef ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->paddr &= PA_UNCACHED_MASK;
#endif
        }
    }
}
134
135
136 // insert a new TLB entry
137 void
138 AlphaTLB::insert(Addr vaddr, AlphaISA::PTE &pte)
139 {
140 if (table[nlu].valid) {
141 Addr oldvpn = table[nlu].tag;
142 PageTable::iterator i = lookupTable.find(oldvpn);
143
144 if (i == lookupTable.end())
145 panic("TLB entry not found in lookupTable");
146
147 int index;
148 while ((index = i->second) != nlu) {
149 if (table[index].tag != oldvpn)
150 panic("TLB entry not found in lookupTable");
151
152 ++i;
153 }
154
155 DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);
156
157 lookupTable.erase(i);
158 }
159
160 Addr vpn = VA_VPN(vaddr);
161 DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vpn, pte.ppn);
162
163 table[nlu] = pte;
164 table[nlu].tag = vpn;
165 table[nlu].valid = true;
166
167 lookupTable.insert(make_pair(vpn, nlu));
168 nextnlu();
169 }
170
171 void
172 AlphaTLB::flushAll()
173 {
174 memset(table, 0, sizeof(AlphaISA::PTE[size]));
175 lookupTable.clear();
176 nlu = 0;
177 }
178
179 void
180 AlphaTLB::flushProcesses()
181 {
182 PageTable::iterator i = lookupTable.begin();
183 PageTable::iterator end = lookupTable.end();
184 while (i != end) {
185 int index = i->second;
186 AlphaISA::PTE *pte = &table[index];
187 assert(pte->valid);
188
189 if (!pte->asma) {
190 DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
191 pte->valid = false;
192 lookupTable.erase(i);
193 }
194
195 ++i;
196 }
197 }
198
199 void
200 AlphaTLB::flushAddr(Addr vaddr, uint8_t asn)
201 {
202 Addr vpn = VA_VPN(vaddr);
203
204 PageTable::iterator i = lookupTable.find(vpn);
205 if (i == lookupTable.end())
206 return;
207
208 while (i->first == vpn) {
209 int index = i->second;
210 AlphaISA::PTE *pte = &table[index];
211 assert(pte->valid);
212
213 if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
214 DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vpn, pte->ppn);
215
216 // invalidate this entry
217 pte->valid = false;
218
219 lookupTable.erase(i);
220 }
221
222 ++i;
223 }
224 }
225
226
227 void
228 AlphaTLB::serialize(ostream &os)
229 {
230 SERIALIZE_SCALAR(size);
231 SERIALIZE_SCALAR(nlu);
232
233 for (int i = 0; i < size; i++) {
234 nameOut(os, csprintf("%s.PTE%d", name(), i));
235 table[i].serialize(os);
236 }
237 }
238
239 void
240 AlphaTLB::unserialize(Checkpoint *cp, const string &section)
241 {
242 UNSERIALIZE_SCALAR(size);
243 UNSERIALIZE_SCALAR(nlu);
244
245 for (int i = 0; i < size; i++) {
246 table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
247 if (table[i].valid) {
248 lookupTable.insert(make_pair(table[i].tag, i));
249 }
250 }
251 }
252
253
254 ///////////////////////////////////////////////////////////////////////
255 //
256 // Alpha ITB
257 //
// Construct an instruction TLB; all state lives in the AlphaTLB base.
AlphaITB::AlphaITB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}
261
262
263 void
264 AlphaITB::regStats()
265 {
266 hits
267 .name(name() + ".hits")
268 .desc("ITB hits");
269 misses
270 .name(name() + ".misses")
271 .desc("ITB misses");
272 acv
273 .name(name() + ".acv")
274 .desc("ITB acv");
275 accesses
276 .name(name() + ".accesses")
277 .desc("ITB accesses");
278
279 accesses = hits + misses;
280 }
281
282 void
283 AlphaITB::fault(Addr pc, ExecContext *xc) const
284 {
285 uint64_t *ipr = xc->regs.ipr;
286
287 if (!xc->misspeculating()) {
288 ipr[AlphaISA::IPR_ITB_TAG] = pc;
289 ipr[AlphaISA::IPR_IFAULT_VA_FORM] =
290 ipr[AlphaISA::IPR_IVPTBR] | (VA_VPN(pc) << 3);
291 }
292 }
293
294
// Translate an instruction fetch.  Handles, in order: PAL-mode PCs
// (identity-mapped after stripping the PAL marker bit), explicitly
// physical requests, the kernel-only "superpage" region (selected per
// platform by the ALPHA_TLASER ifdefs), and finally a normal PTE
// lookup with execute-permission checking.  Updates hits/misses/acv
// stats and latches fault IPRs via fault() on the failure paths.
Fault
AlphaITB::translate(MemReqPtr &req) const
{
    InternalProcReg *ipr = req->xc->regs.ipr;

    if (PC_PAL(req->vaddr)) {
        // strip off PAL PC marker (lsb is 1)
        req->paddr = (req->vaddr & ~3) & PA_IMPL_MASK;
        hits++;
        return No_Fault;
    }

    if (req->flags & PHYSICAL) {
        // caller already supplied a physical address; use it as-is
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req->vaddr, req->xc);
            acv++;
            return ITB_Acv_Fault;
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#ifdef ALPHA_TLASER
        if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
            VA_SPACE_EV5(req->vaddr) == 2) {
#else
        if (VA_SPACE_EV6(req->vaddr) == 0x7e) {
#endif


            // superpage access is only valid in kernel mode
            if (ICM_CM(ipr[AlphaISA::IPR_ICM]) != AlphaISA::mode_kernel) {
                fault(req->vaddr, req->xc);
                acv++;
                return ITB_Acv_Fault;
            }

            req->paddr = req->vaddr & PA_IMPL_MASK;

#ifndef ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PA_UNCACHED_BIT_40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            // not a physical address: need to look up pte
            // NOTE(review): the ITB lookup uses the DTB's ASN register
            // (IPR_DTB_ASN) rather than an ITB-specific one — presumably
            // intentional; confirm against the EV5/EV6 IPR definitions.
            AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
                                        DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));

            if (!pte) {
                fault(req->vaddr, req->xc);
                misses++;
                return ITB_Fault_Fault;
            }

            req->paddr = PA_PFN2PA(pte->ppn) + VA_POFS(req->vaddr & ~3);

            // check permissions for this access
            if (!(pte->xre & (1 << ICM_CM(ipr[AlphaISA::IPR_ICM])))) {
                // instruction access fault
                fault(req->vaddr, req->xc);
                acv++;
                return ITB_Acv_Fault;
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PA_IMPL_MASK)
        return Machine_Check_Fault;

    checkCacheability(req);

    return No_Fault;
}
378
379 ///////////////////////////////////////////////////////////////////////
380 //
381 // Alpha DTB
382 //
// Construct a data TLB; all state lives in the AlphaTLB base.
AlphaDTB::AlphaDTB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}
386
387 void
388 AlphaDTB::regStats()
389 {
390 read_hits
391 .name(name() + ".read_hits")
392 .desc("DTB read hits")
393 ;
394
395 read_misses
396 .name(name() + ".read_misses")
397 .desc("DTB read misses")
398 ;
399
400 read_acv
401 .name(name() + ".read_acv")
402 .desc("DTB read access violations")
403 ;
404
405 read_accesses
406 .name(name() + ".read_accesses")
407 .desc("DTB read accesses")
408 ;
409
410 write_hits
411 .name(name() + ".write_hits")
412 .desc("DTB write hits")
413 ;
414
415 write_misses
416 .name(name() + ".write_misses")
417 .desc("DTB write misses")
418 ;
419
420 write_acv
421 .name(name() + ".write_acv")
422 .desc("DTB write access violations")
423 ;
424
425 write_accesses
426 .name(name() + ".write_accesses")
427 .desc("DTB write accesses")
428 ;
429
430 hits
431 .name(name() + ".hits")
432 .desc("DTB hits")
433 ;
434
435 misses
436 .name(name() + ".misses")
437 .desc("DTB misses")
438 ;
439
440 acv
441 .name(name() + ".acv")
442 .desc("DTB access violations")
443 ;
444
445 accesses
446 .name(name() + ".accesses")
447 .desc("DTB accesses")
448 ;
449
450 hits = read_hits + write_hits;
451 misses = read_misses + write_misses;
452 acv = read_acv + write_acv;
453 accesses = read_accesses + write_accesses;
454 }
455
// Latch DTB fault state into the IPRs: the faulting VA, an MM_STAT
// word encoding opcode/destination-register/fault flags, and the
// formatted VPTE address.
void
AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
{
    ExecContext *xc = req->xc;
    Addr vaddr = req->vaddr;
    uint64_t *ipr = xc->regs.ipr;

    // Set fault address and flags.  Even though we're modeling an
    // EV5, we use the EV6 technique of not latching fault registers
    // on VPTE loads (instead of locking the registers until IPR_VA is
    // read, like the EV5).  The EV6 approach is cleaner and seems to
    // work with EV5 PAL code, but not the other way around.
    // Also skip latching on misspeculated or NO_FAULT accesses so
    // wrong-path or prefetch-style requests don't clobber the IPRs.
    if (!xc->misspeculating()
        && !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
        // set VA register with faulting address
        ipr[AlphaISA::IPR_VA] = vaddr;

        // set MM_STAT register flags: opcode<16:11>, RA<10:6>, flags<5:0>
        ipr[AlphaISA::IPR_MM_STAT] = (((OPCODE(xc->getInst()) & 0x3f) << 11)
                                      | ((RA(xc->getInst()) & 0x1f) << 6)
                                      | (flags & 0x3f));

        // set VA_FORM register with faulting formatted address
        ipr[AlphaISA::IPR_VA_FORM] =
            ipr[AlphaISA::IPR_MVPTBR] | (VA_VPN(vaddr) << 3);
    }
}
483
// Translate a data access (write == true for stores).  Determines the
// effective privilege mode (honoring PAL mode and ALT_MODE overrides),
// then handles explicitly physical requests, the kernel-only
// "superpage" region (platform-selected by the ALPHA_TLASER ifdefs),
// and finally a normal PTE lookup with read/write permission and
// fault-on-read/write checks.  Updates the per-direction stats and
// latches fault IPRs via fault() on failure paths.
Fault
AlphaDTB::translate(MemReqPtr &req, bool write) const
{
    RegFile *regs = &req->xc->regs;
    Addr pc = regs->pc;
    InternalProcReg *ipr = regs->ipr;

    AlphaISA::mode_type mode =
        (AlphaISA::mode_type)DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]);

    // In PAL mode, accesses run as kernel unless the ALTMODE flag
    // redirects them to the mode in the ALT_MODE IPR.
    if (PC_PAL(pc)) {
        mode = (req->flags & ALTMODE) ?
            (AlphaISA::mode_type)ALT_MODE_AM(ipr[AlphaISA::IPR_ALT_MODE])
            : AlphaISA::mode_kernel;
    }

    if (req->flags & PHYSICAL) {
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
                        MM_STAT_ACV_MASK));

            if (write) { write_acv++; } else { read_acv++; }
            return DTB_Fault_Fault;
        }

        // Check for "superpage" mapping
#ifdef ALPHA_TLASER
        if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
            VA_SPACE_EV5(req->vaddr) == 2) {
#else
        if (VA_SPACE_EV6(req->vaddr) == 0x7e) {
#endif

            // only valid in kernel mode; note this checks the raw DTB_CM
            // mode, not the possibly-ALT_MODE-overridden 'mode' above
            if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
                AlphaISA::mode_kernel) {
                fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK));
                if (write) { write_acv++; } else { read_acv++; }
                return DTB_Acv_Fault;
            }

            req->paddr = req->vaddr & PA_IMPL_MASK;

#ifndef ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PA_UNCACHED_BIT_40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            // not a physical address: need to look up pte
            AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
                                        DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));

            if (!pte) {
                // page fault; VPTE loads get the "double miss" fault type
                fault(req,
                      (write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK);
                if (write) { write_misses++; } else { read_misses++; }
                return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
            }

            req->paddr = PA_PFN2PA(pte->ppn) | VA_POFS(req->vaddr);

            if (write) {
                // write-enable bit for this mode, then fault-on-write
                if (!(pte->xwe & MODE2MASK(mode))) {
                    // declare the instruction access fault
                    fault(req, (MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
                                (pte->fonw ? MM_STAT_FONW_MASK : 0)));
                    write_acv++;
                    return DTB_Fault_Fault;
                }
                if (pte->fonw) {
                    fault(req, MM_STAT_WR_MASK | MM_STAT_FONW_MASK);
                    write_acv++;
                    return DTB_Fault_Fault;
                }
            } else {
                // read-enable bit for this mode, then fault-on-read
                if (!(pte->xre & MODE2MASK(mode))) {
                    fault(req, (MM_STAT_ACV_MASK |
                                (pte->fonr ? MM_STAT_FONR_MASK : 0)));
                    read_acv++;
                    return DTB_Acv_Fault;
                }
                if (pte->fonr) {
                    fault(req, MM_STAT_FONR_MASK);
                    read_acv++;
                    return DTB_Fault_Fault;
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PA_IMPL_MASK)
        return Machine_Check_Fault;

    checkCacheability(req);

    return No_Fault;
}
600
601 AlphaISA::PTE &
602 AlphaTLB::index(bool advance)
603 {
604 AlphaISA::PTE *pte = &table[nlu];
605
606 if (advance)
607 nextnlu();
608
609 return *pte;
610 }
611
DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", AlphaTLB)

// SimObject parameter/factory boilerplate for the ITB: a single "size"
// parameter defaulting to 48 entries.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaITB)

    INIT_PARAM_DFLT(size, "TLB size", 48)

END_INIT_SIM_OBJECT_PARAMS(AlphaITB)


CREATE_SIM_OBJECT(AlphaITB)
{
    return new AlphaITB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaITB", AlphaITB)

// SimObject parameter/factory boilerplate for the DTB: a single "size"
// parameter defaulting to 64 entries.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaDTB)

    INIT_PARAM_DFLT(size, "TLB size", 64)

END_INIT_SIM_OBJECT_PARAMS(AlphaDTB)


CREATE_SIM_OBJECT(AlphaDTB)
{
    return new AlphaDTB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaDTB", AlphaDTB)
653