/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <cstring>
#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/generic/debugfaults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "sim/full_system.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

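// Convert a mode_type privilege level into a one-hot bit so it can be
// tested against a TLB entry's per-mode read/write permission masks
// (xre/xwe below).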
#define MODE2MASK(X) (1 << (X))

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
}

TLB::~TLB()
{
    delete [] table;
}

void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB access violations");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits");

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses");

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations");

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses");

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits");

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses");

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations");

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses");

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits");

    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses");

    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations");

    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses");

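    // The combined data-side statistics are formulas derived from the
    // read/write counters, recomputed whenever stats are dumped.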
    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

// look up an entry in the TLB
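// The common case is served by EntryCache, a small most-recently-used
// cache of up to three entry pointers; on a cache miss we fall back to
// lookupTable, a multimap from virtual page number to table index, and
// promote any match via updateCache().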
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i != lookupTable.end() && i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */

    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncacheable address in Alpha
        // as we don't have a ROM and we don't want to try to fetch from a
        // device register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");
    }
    return NoFault;
}

// insert a new TLB entry
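// Replacement is round-robin: nlu indexes the next victim slot and
// nextnlu() advances it after each fill.  Evicting a valid entry also
// means removing its stale mapping from lookupTable.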
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

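// Flush all non-global entries: entries with the address space match
// (asma) bit set are shared across processes and survive this flush.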
void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

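// Invalidate every entry matching a single virtual page, honoring the
// same ASN/asma match rule used by lookup().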
void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

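// Checkpointing: each entry is written under its own "<name>.EntryN"
// section; on restore, the vpn -> index lookupTable is rebuilt from the
// valid entries.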
void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

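// Instruction translation: PAL-mode PCs bypass the ITB entirely, the EV6
// kernel superpage (VA<47:41> == 0x7e) maps straight onto the physical
// address space, and everything else takes a TLB lookup tagged with the
// current ASN plus an execute-permission check against the current mode.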
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // If this is a PAL PC, then set PHYSICAL
    if (FullSystem && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                fetch_misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset() & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return new MachineCheckFault();
    }

    return checkCacheability(req, true);
}

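// Data translation mirrors the instruction path but adds an alignment
// check, honors the ALT_MODE privilege override while in PAL mode, and
// encodes the cause of each fault in MM_STAT flag bits (write, bad VA,
// ACV, DTB miss, fault-on-read/write).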
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(req->getPC())) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);
                return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                       flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(),
                                              req->getFlags(), flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(),
                                              req->getFlags(), flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // data write access violation
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return new MachineCheckFault();
    }

    return checkCacheability(req);
}

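// Hand out the entry at the replacement pointer, optionally advancing it.
// (The caller, presumably the IPR-based TLB-write path, fills the entry
// in place.)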
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

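// Dispatch to the instruction or data path; Write vs. Read is folded into
// translateData's single boolean.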
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

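// The Alpha TLB models no translation latency, so the timing interface
// simply completes the atomic translation immediately.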
void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

} // namespace AlphaISA

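// SimObject factory: instantiated from the Python AlphaTLB params object.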
AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}