alpha: get rid of all turbolaser remnants
[gem5.git] / src/arch/alpha/tlb.cc
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

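// Convert a mode_type privilege level (kernel/executive/supervisor/user)
// into a one-hot mask; TLB entries carry per-mode read- and write-enable
// bit vectors (xre/xwe, see the TlbEntry definition in pagetable.hh)
// that the translate functions below test against this mask.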
#define MODE2MASK(X) (1 << (X))

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
}

TLB::~TLB()
{
    delete [] table;
}

void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB access violations");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits");
    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses");
    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations");
    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses");

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits");
    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses");
    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations");
    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses");

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits");
    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses");
    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations");
    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses");

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

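// Look up an entry in the TLB. A three-entry MRU cache (EntryCache,
// refilled by updateCache() and cleared by flushCache()) is consulted
// first; on a miss there we walk the lookupTable multimap across all
// entries that share the VPN. An entry matches when its address space
// number (ASN) equals the requested one, or when its address-space-match
// (asma) bit marks it as global.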
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    for (int i = 0; i < 3 && EntryCache[i]; i++) {
        if (vpn == EntryCache[i]->tag &&
            (EntryCache[i]->asma || EntryCache[i]->asn == asn)) {
            retval = EntryCache[i];
            break;
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        while (i != lookupTable.end() && i->first == vpn) {
            int index = i->second;
            TlbEntry *entry = &table[index];
            assert(entry->valid);
            if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                retval = updateCache(entry);
                break;
            }

            ++i;
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

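// A worked example of the check below, assuming PAddrUncachedBit43 is
// bit 43 of the physical address (0x80000000000, defined elsewhere in
// the Alpha headers): PA 0x80000001000 falls in uncacheable I/O space,
// while PA 0x1000 is ordinary cacheable memory.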
Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // In Alpha, cacheability is controlled by upper-level bits of the
    // physical address.

    /*
     * The uncacheable bit was historically supported in either bit 39
     * or bit 40: the Turbolaser platform (and EV5) put it in bit 39,
     * while Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40. Now that Turbolaser support has
     * been removed, only the EV6-style check below remains; the
     * DEBUG-only uncacheBit39/uncacheBit40 flags above date from the
     * dual-bit scheme.
     */
    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to fetch from an uncacheable address in
        // Alpha as we don't have a ROM, and we don't want to try to fetch
        // from a device register, since that would destroy any data that
        // is clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");
    }
    return NoFault;
}

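// Replacement is round-robin: nlu names the victim slot and nextnlu()
// (presumably advancing modulo size; see tlb.hh) moves it along after
// every insertion. Because lookupTable is a multimap keyed by VPN,
// evicting the victim requires walking all entries with the same tag
// to find the one that points at the victim slot.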
// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

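// Flush all process-specific entries, as on an address-space switch:
// entries whose address-space-match (asma) bit is set are shared across
// address spaces and survive the flush.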
void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

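// Invalidate any entry mapping the given virtual address under the given
// ASN (or mapping it globally via asma): a single-page invalidation in
// the style of the Alpha TBIS ("translation buffer invalidate single")
// PALcode operation.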
void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

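// Checkpointing: each TLB entry is written to its own checkpoint section
// named "<tlb-name>.Entry<i>", so unserialize() can restore the table
// and rebuild the VPN lookup multimap from the valid entries.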
void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

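// Instruction-side translation. The checks run in order: PAL-mode PCs
// are already physical (the low PC bits mark PAL code); explicitly
// physical requests pass straight through; the EV6 "superpage" region
// (VA<47:41> == 0x7e) maps directly to physical addresses but only in
// kernel mode; everything else goes through a TLB lookup keyed by VPN
// and the current ASN, followed by an execute-permission check.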
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // if this is a PAL pc, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) != mode_kernel) {
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                fetch_misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset() & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req, true);
}

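// Data-side translation: check alignment first, then resolve the
// effective privilege mode (PAL code may override it through
// IPR_ALT_MODE), then take the same physical / superpage / TLB-lookup
// paths as the instruction side. Each fault carries MM_STAT_* flag
// masks, presumably used by the fault handler to populate the MM_STAT
// IPR.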
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    Addr pc = tc->readPC();

    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(pc)) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);
                return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                       flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // write access violation
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

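// Expose the entry at the current replacement slot, with 'advance'
// controlling whether the slot pointer moves on afterwards. The callers
// live outside this file (likely the ISA's TLB-write IPR handlers), so
// this description is inferred from the interface.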
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

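// Both translation interfaces funnel into the same code: the timing
// variant completes immediately by calling the atomic variant and then
// invoking the callback, since this TLB model never needs to defer a
// translation (e.g. for a hardware page-table walk).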
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

} // end namespace AlphaISA

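// Factory hooked into the Python configuration system: instantiating an
// AlphaTLB SimObject in a config script ends up here with the generated
// params struct.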
AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}