Merge zizzer.eecs.umich.edu:/bk/newmem
[gem5.git] / src / arch / sparc / tlb.cc
1 /*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 #include <cstring>
32
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/miscregfile.hh"
35 #include "arch/sparc/tlb.hh"
36 #include "base/bitfield.hh"
37 #include "base/trace.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/base.hh"
40 #include "mem/packet_access.hh"
41 #include "mem/request.hh"
42 #include "sim/builder.hh"
43
44 /* @todo remove some of the magic constants. -- ali
45 * */
46 namespace SparcISA {
47
48 TLB::TLB(const std::string &name, int s)
49 : SimObject(name), size(s), usedEntries(0), lastReplaced(0),
50 cacheValid(false)
51 {
52 // To make this work you'll have to change the hypervisor and OS
53 if (size > 64)
54 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
55
56 tlb = new TlbEntry[size];
57 std::memset(tlb, 0, sizeof(TlbEntry) * size);
58
59 for (int x = 0; x < size; x++)
60 freeList.push_back(&tlb[x]);
61 }
62
63 void
64 TLB::clearUsedBits()
65 {
66 MapIter i;
67 for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
68 TlbEntry *t = i->second;
69 if (!t->pte.locked()) {
70 t->used = false;
71 usedEntries--;
72 }
73 }
74 }
75
76
77 void
78 TLB::insert(Addr va, int partition_id, int context_id, bool real,
79 const PageTableEntry& PTE, int entry)
80 {
81
82
83 MapIter i;
84 TlbEntry *new_entry = NULL;
85 // TlbRange tr;
86 int x;
87
88 cacheValid = false;
89 va &= ~(PTE.size()-1);
90 /* tr.va = va;
91 tr.size = PTE.size() - 1;
92 tr.contextId = context_id;
93 tr.partitionId = partition_id;
94 tr.real = real;
95 */
96
97 DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
98 va, PTE.paddr(), partition_id, context_id, (int)real, entry);
99
100 // Demap any entry that conflicts
101 for (x = 0; x < size; x++) {
102 if (tlb[x].range.real == real &&
103 tlb[x].range.partitionId == partition_id &&
104 tlb[x].range.va < va + PTE.size() - 1 &&
105 tlb[x].range.va + tlb[x].range.size >= va &&
106 (real || tlb[x].range.contextId == context_id ))
107 {
108 if (tlb[x].valid) {
109 freeList.push_front(&tlb[x]);
110 DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
111
112 tlb[x].valid = false;
113 if (tlb[x].used) {
114 tlb[x].used = false;
115 usedEntries--;
116 }
117 lookupTable.erase(tlb[x].range);
118 }
119 }
120 }
121
122
123 /*
124 i = lookupTable.find(tr);
125 if (i != lookupTable.end()) {
126 i->second->valid = false;
127 if (i->second->used) {
128 i->second->used = false;
129 usedEntries--;
130 }
131 freeList.push_front(i->second);
132 DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
133 i->second);
134 lookupTable.erase(i);
135 }
136 */
137
138 if (entry != -1) {
139 assert(entry < size && entry >= 0);
140 new_entry = &tlb[entry];
141 } else {
142 if (!freeList.empty()) {
143 new_entry = freeList.front();
144 } else {
145 x = lastReplaced;
146 do {
147 ++x;
148 if (x == size)
149 x = 0;
150 if (x == lastReplaced)
151 goto insertAllLocked;
152 } while (tlb[x].pte.locked());
153 lastReplaced = x;
154 new_entry = &tlb[x];
155 }
156 /*
157 for (x = 0; x < size; x++) {
158 if (!tlb[x].valid || !tlb[x].used) {
159 new_entry = &tlb[x];
160 break;
161 }
162 }*/
163 }
164
165 insertAllLocked:
166 // Update the last ently if their all locked
167 if (!new_entry) {
168 new_entry = &tlb[size-1];
169 }
170
171 freeList.remove(new_entry);
172 if (new_entry->valid && new_entry->used)
173 usedEntries--;
174 if (new_entry->valid)
175 lookupTable.erase(new_entry->range);
176
177
178 assert(PTE.valid());
179 new_entry->range.va = va;
180 new_entry->range.size = PTE.size() - 1;
181 new_entry->range.partitionId = partition_id;
182 new_entry->range.contextId = context_id;
183 new_entry->range.real = real;
184 new_entry->pte = PTE;
185 new_entry->used = true;;
186 new_entry->valid = true;
187 usedEntries++;
188
189
190
191 i = lookupTable.insert(new_entry->range, new_entry);
192 assert(i != lookupTable.end());
193
194 // If all entries have there used bit set, clear it on them all, but the
195 // one we just inserted
196 if (usedEntries == size) {
197 clearUsedBits();
198 new_entry->used = true;
199 usedEntries++;
200 }
201
202 }
203
204
205 TlbEntry*
206 TLB::lookup(Addr va, int partition_id, bool real, int context_id, bool
207 update_used)
208 {
209 MapIter i;
210 TlbRange tr;
211 TlbEntry *t;
212
213 DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
214 va, partition_id, context_id, real);
215 // Assemble full address structure
216 tr.va = va;
217 tr.size = MachineBytes;
218 tr.contextId = context_id;
219 tr.partitionId = partition_id;
220 tr.real = real;
221
222 // Try to find the entry
223 i = lookupTable.find(tr);
224 if (i == lookupTable.end()) {
225 DPRINTF(TLB, "TLB: No valid entry found\n");
226 return NULL;
227 }
228
229 // Mark the entries used bit and clear other used bits in needed
230 t = i->second;
231 DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
232 t->pte.size());
233
234 // Update the used bits only if this is a real access (not a fake one from
235 // virttophys()
236 if (!t->used && update_used) {
237 t->used = true;
238 usedEntries++;
239 if (usedEntries == size) {
240 clearUsedBits();
241 t->used = true;
242 usedEntries++;
243 }
244 }
245
246 return t;
247 }
248
249 void
250 TLB::dumpAll()
251 {
252 MapIter i;
253 for (int x = 0; x < size; x++) {
254 if (tlb[x].valid) {
255 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
256 x, tlb[x].range.partitionId, tlb[x].range.contextId,
257 tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
258 tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
259 }
260 }
261 }
262
263 void
264 TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
265 {
266 TlbRange tr;
267 MapIter i;
268
269 DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
270 va, partition_id, context_id, real);
271
272 cacheValid = false;
273
274 // Assemble full address structure
275 tr.va = va;
276 tr.size = MachineBytes;
277 tr.contextId = context_id;
278 tr.partitionId = partition_id;
279 tr.real = real;
280
281 // Demap any entry that conflicts
282 i = lookupTable.find(tr);
283 if (i != lookupTable.end()) {
284 DPRINTF(IPR, "TLB: Demapped page\n");
285 i->second->valid = false;
286 if (i->second->used) {
287 i->second->used = false;
288 usedEntries--;
289 }
290 freeList.push_front(i->second);
291 lookupTable.erase(i);
292 }
293 }
294
295 void
296 TLB::demapContext(int partition_id, int context_id)
297 {
298 int x;
299 DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
300 partition_id, context_id);
301 cacheValid = false;
302 for (x = 0; x < size; x++) {
303 if (tlb[x].range.contextId == context_id &&
304 tlb[x].range.partitionId == partition_id) {
305 if (tlb[x].valid == true) {
306 freeList.push_front(&tlb[x]);
307 }
308 tlb[x].valid = false;
309 if (tlb[x].used) {
310 tlb[x].used = false;
311 usedEntries--;
312 }
313 lookupTable.erase(tlb[x].range);
314 }
315 }
316 }
317
318 void
319 TLB::demapAll(int partition_id)
320 {
321 int x;
322 DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
323 cacheValid = false;
324 for (x = 0; x < size; x++) {
325 if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
326 if (tlb[x].valid == true){
327 freeList.push_front(&tlb[x]);
328 }
329 tlb[x].valid = false;
330 if (tlb[x].used) {
331 tlb[x].used = false;
332 usedEntries--;
333 }
334 lookupTable.erase(tlb[x].range);
335 }
336 }
337 }
338
339 void
340 TLB::invalidateAll()
341 {
342 int x;
343 cacheValid = false;
344
345 freeList.clear();
346 lookupTable.clear();
347 for (x = 0; x < size; x++) {
348 if (tlb[x].valid == true)
349 freeList.push_back(&tlb[x]);
350 tlb[x].valid = false;
351 tlb[x].used = false;
352 }
353 usedEntries = 0;
354 }
355
356 uint64_t
357 TLB::TteRead(int entry) {
358 if (entry >= size)
359 panic("entry: %d\n", entry);
360
361 assert(entry < size);
362 if (tlb[entry].valid)
363 return tlb[entry].pte();
364 else
365 return (uint64_t)-1ll;
366 }
367
368 uint64_t
369 TLB::TagRead(int entry) {
370 assert(entry < size);
371 uint64_t tag;
372 if (!tlb[entry].valid)
373 return (uint64_t)-1ll;
374
375 tag = tlb[entry].range.contextId;
376 tag |= tlb[entry].range.va;
377 tag |= (uint64_t)tlb[entry].range.partitionId << 61;
378 tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
379 tag |= (uint64_t)~tlb[entry].pte._size() << 56;
380 return tag;
381 }
382
383 bool
384 TLB::validVirtualAddress(Addr va, bool am)
385 {
386 if (am)
387 return true;
388 if (va >= StartVAddrHole && va <= EndVAddrHole)
389 return false;
390 return true;
391 }
392
393 void
394 TLB::writeSfsr(ThreadContext *tc, int reg, bool write, ContextType ct,
395 bool se, FaultTypes ft, int asi)
396 {
397 uint64_t sfsr;
398 sfsr = tc->readMiscReg(reg);
399
400 if (sfsr & 0x1)
401 sfsr = 0x3;
402 else
403 sfsr = 1;
404
405 if (write)
406 sfsr |= 1 << 2;
407 sfsr |= ct << 4;
408 if (se)
409 sfsr |= 1 << 6;
410 sfsr |= ft << 7;
411 sfsr |= asi << 16;
412 tc->setMiscRegWithEffect(reg, sfsr);
413 }
414
415 void
416 TLB::writeTagAccess(ThreadContext *tc, int reg, Addr va, int context)
417 {
418 DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
419 va, context, mbits(va, 63,13) | mbits(context,12,0));
420
421 tc->setMiscRegWithEffect(reg, mbits(va, 63,13) | mbits(context,12,0));
422 }
423
424 void
425 ITB::writeSfsr(ThreadContext *tc, bool write, ContextType ct,
426 bool se, FaultTypes ft, int asi)
427 {
428 DPRINTF(TLB, "TLB: ITB Fault: w=%d ct=%d ft=%d asi=%d\n",
429 (int)write, ct, ft, asi);
430 TLB::writeSfsr(tc, MISCREG_MMU_ITLB_SFSR, write, ct, se, ft, asi);
431 }
432
433 void
434 ITB::writeTagAccess(ThreadContext *tc, Addr va, int context)
435 {
436 TLB::writeTagAccess(tc, MISCREG_MMU_ITLB_TAG_ACCESS, va, context);
437 }
438
439 void
440 DTB::writeSfr(ThreadContext *tc, Addr a, bool write, ContextType ct,
441 bool se, FaultTypes ft, int asi)
442 {
443 DPRINTF(TLB, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
444 a, (int)write, ct, ft, asi);
445 TLB::writeSfsr(tc, MISCREG_MMU_DTLB_SFSR, write, ct, se, ft, asi);
446 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_SFAR, a);
447 }
448
449 void
450 DTB::writeTagAccess(ThreadContext *tc, Addr va, int context)
451 {
452 TLB::writeTagAccess(tc, MISCREG_MMU_DTLB_TAG_ACCESS, va, context);
453 }
454
455
456
457 Fault
458 ITB::translate(RequestPtr &req, ThreadContext *tc)
459 {
460 uint64_t tlbdata = tc->readMiscReg(MISCREG_TLB_DATA);
461
462 Addr vaddr = req->getVaddr();
463 TlbEntry *e;
464
465 assert(req->getAsi() == ASI_IMPLICIT);
466
467 DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
468 vaddr, req->getSize());
469
470 // Be fast if we can!
471 if (cacheValid && cacheState == tlbdata) {
472 if (cacheEntry) {
473 if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
474 cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
475 req->setPaddr(cacheEntry->pte.paddr() & ~(cacheEntry->pte.size()-1) |
476 vaddr & cacheEntry->pte.size()-1 );
477 return NoFault;
478 }
479 } else {
480 req->setPaddr(vaddr & PAddrImplMask);
481 return NoFault;
482 }
483 }
484
485 bool hpriv = bits(tlbdata,0,0);
486 bool red = bits(tlbdata,1,1);
487 bool priv = bits(tlbdata,2,2);
488 bool addr_mask = bits(tlbdata,3,3);
489 bool lsu_im = bits(tlbdata,4,4);
490
491 int part_id = bits(tlbdata,15,8);
492 int tl = bits(tlbdata,18,16);
493 int pri_context = bits(tlbdata,47,32);
494 int context;
495 ContextType ct;
496 int asi;
497 bool real = false;
498
499 DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
500 priv, hpriv, red, lsu_im, part_id);
501
502 if (tl > 0) {
503 asi = ASI_N;
504 ct = Nucleus;
505 context = 0;
506 } else {
507 asi = ASI_P;
508 ct = Primary;
509 context = pri_context;
510 }
511
512 if ( hpriv || red ) {
513 cacheValid = true;
514 cacheState = tlbdata;
515 cacheEntry = NULL;
516 req->setPaddr(vaddr & PAddrImplMask);
517 return NoFault;
518 }
519
520 // If the access is unaligned trap
521 if (vaddr & 0x3) {
522 writeSfsr(tc, false, ct, false, OtherFault, asi);
523 return new MemAddressNotAligned;
524 }
525
526 if (addr_mask)
527 vaddr = vaddr & VAddrAMask;
528
529 if (!validVirtualAddress(vaddr, addr_mask)) {
530 writeSfsr(tc, false, ct, false, VaOutOfRange, asi);
531 return new InstructionAccessException;
532 }
533
534 if (!lsu_im) {
535 e = lookup(vaddr, part_id, true);
536 real = true;
537 context = 0;
538 } else {
539 e = lookup(vaddr, part_id, false, context);
540 }
541
542 if (e == NULL || !e->valid) {
543 writeTagAccess(tc, vaddr, context);
544 if (real)
545 return new InstructionRealTranslationMiss;
546 else
547 return new FastInstructionAccessMMUMiss;
548 }
549
550 // were not priviledged accesing priv page
551 if (!priv && e->pte.priv()) {
552 writeTagAccess(tc, vaddr, context);
553 writeSfsr(tc, false, ct, false, PrivViolation, asi);
554 return new InstructionAccessException;
555 }
556
557 // cache translation date for next translation
558 cacheValid = true;
559 cacheState = tlbdata;
560 cacheEntry = e;
561
562 req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
563 vaddr & e->pte.size()-1 );
564 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
565 return NoFault;
566 }
567
568
569
570 Fault
571 DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
572 {
573 /* @todo this could really use some profiling and fixing to make it faster! */
574 uint64_t tlbdata = tc->readMiscReg(MISCREG_TLB_DATA);
575 Addr vaddr = req->getVaddr();
576 Addr size = req->getSize();
577 ASI asi;
578 asi = (ASI)req->getAsi();
579 bool implicit = false;
580 bool hpriv = bits(tlbdata,0,0);
581
582 DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
583 vaddr, size, asi);
584
585 if (lookupTable.size() != 64 - freeList.size())
586 panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
587 freeList.size());
588 if (asi == ASI_IMPLICIT)
589 implicit = true;
590
591 if (hpriv && implicit) {
592 req->setPaddr(vaddr & PAddrImplMask);
593 return NoFault;
594 }
595
596 // Be fast if we can!
597 if (cacheValid && cacheState == tlbdata) {
598
599
600
601 if (cacheEntry[0]) {
602 TlbEntry *ce = cacheEntry[0];
603 Addr ce_va = ce->range.va;
604 if (cacheAsi[0] == asi &&
605 ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
606 (!write || ce->pte.writable())) {
607 req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
608 if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
609 req->setFlags(req->getFlags() | UNCACHEABLE);
610 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
611 return NoFault;
612 } // if matched
613 } // if cache entry valid
614 if (cacheEntry[1]) {
615 TlbEntry *ce = cacheEntry[1];
616 Addr ce_va = ce->range.va;
617 if (cacheAsi[1] == asi &&
618 ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
619 (!write || ce->pte.writable())) {
620 req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
621 if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
622 req->setFlags(req->getFlags() | UNCACHEABLE);
623 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
624 return NoFault;
625 } // if matched
626 } // if cache entry valid
627 }
628
629 bool red = bits(tlbdata,1,1);
630 bool priv = bits(tlbdata,2,2);
631 bool addr_mask = bits(tlbdata,3,3);
632 bool lsu_dm = bits(tlbdata,5,5);
633
634 int part_id = bits(tlbdata,15,8);
635 int tl = bits(tlbdata,18,16);
636 int pri_context = bits(tlbdata,47,32);
637 int sec_context = bits(tlbdata,63,48);
638
639 bool real = false;
640 ContextType ct = Primary;
641 int context = 0;
642
643 TlbEntry *e;
644
645 DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
646 priv, hpriv, red, lsu_dm, part_id);
647
648 if (implicit) {
649 if (tl > 0) {
650 asi = ASI_N;
651 ct = Nucleus;
652 context = 0;
653 } else {
654 asi = ASI_P;
655 ct = Primary;
656 context = pri_context;
657 }
658 } else {
659 // We need to check for priv level/asi priv
660 if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
661 // It appears that context should be Nucleus in these cases?
662 writeSfr(tc, vaddr, write, Nucleus, false, IllegalAsi, asi);
663 return new PrivilegedAction;
664 }
665
666 if (!hpriv && AsiIsHPriv(asi)) {
667 writeSfr(tc, vaddr, write, Nucleus, false, IllegalAsi, asi);
668 return new DataAccessException;
669 }
670
671 if (AsiIsPrimary(asi)) {
672 context = pri_context;
673 ct = Primary;
674 } else if (AsiIsSecondary(asi)) {
675 context = sec_context;
676 ct = Secondary;
677 } else if (AsiIsNucleus(asi)) {
678 ct = Nucleus;
679 context = 0;
680 } else { // ????
681 ct = Primary;
682 context = pri_context;
683 }
684 }
685
686 if (!implicit && asi != ASI_P && asi != ASI_S) {
687 if (AsiIsLittle(asi))
688 panic("Little Endian ASIs not supported\n");
689 if (AsiIsNoFault(asi))
690 panic("No Fault ASIs not supported\n");
691
692 if (AsiIsPartialStore(asi))
693 panic("Partial Store ASIs not supported\n");
694 if (AsiIsInterrupt(asi))
695 panic("Interrupt ASIs not supported\n");
696
697 if (AsiIsMmu(asi))
698 goto handleMmuRegAccess;
699 if (AsiIsScratchPad(asi))
700 goto handleScratchRegAccess;
701 if (AsiIsQueue(asi))
702 goto handleQueueRegAccess;
703 if (AsiIsSparcError(asi))
704 goto handleSparcErrorRegAccess;
705
706 if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
707 !AsiIsTwin(asi) && !AsiIsBlock(asi))
708 panic("Accessing ASI %#X. Should we?\n", asi);
709 }
710
711 // If the asi is unaligned trap
712 if (vaddr & size-1) {
713 writeSfr(tc, vaddr, false, ct, false, OtherFault, asi);
714 return new MemAddressNotAligned;
715 }
716
717 if (addr_mask)
718 vaddr = vaddr & VAddrAMask;
719
720 if (!validVirtualAddress(vaddr, addr_mask)) {
721 writeSfr(tc, vaddr, false, ct, true, VaOutOfRange, asi);
722 return new DataAccessException;
723 }
724
725
726 if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
727 real = true;
728 context = 0;
729 };
730
731 if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
732 req->setPaddr(vaddr & PAddrImplMask);
733 return NoFault;
734 }
735
736 e = lookup(vaddr, part_id, real, context);
737
738 if (e == NULL || !e->valid) {
739 writeTagAccess(tc, vaddr, context);
740 DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
741 if (real)
742 return new DataRealTranslationMiss;
743 else
744 return new FastDataAccessMMUMiss;
745
746 }
747
748 if (!priv && e->pte.priv()) {
749 writeTagAccess(tc, vaddr, context);
750 writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
751 return new DataAccessException;
752 }
753
754 if (write && !e->pte.writable()) {
755 writeTagAccess(tc, vaddr, context);
756 writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
757 return new FastDataAccessProtection;
758 }
759
760 if (e->pte.nofault() && !AsiIsNoFault(asi)) {
761 writeTagAccess(tc, vaddr, context);
762 writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
763 return new DataAccessException;
764 }
765
766 if (e->pte.sideffect() && AsiIsNoFault(asi)) {
767 writeTagAccess(tc, vaddr, context);
768 writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
769 return new DataAccessException;
770 }
771
772
773 if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
774 req->setFlags(req->getFlags() | UNCACHEABLE);
775
776 // cache translation date for next translation
777 cacheState = tlbdata;
778 if (!cacheValid) {
779 cacheEntry[1] = NULL;
780 cacheEntry[0] = NULL;
781 }
782
783 if (cacheEntry[0] != e && cacheEntry[1] != e) {
784 cacheEntry[1] = cacheEntry[0];
785 cacheEntry[0] = e;
786 cacheAsi[1] = cacheAsi[0];
787 cacheAsi[0] = asi;
788 if (implicit)
789 cacheAsi[0] = (ASI)0;
790 }
791 cacheValid = true;
792 req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
793 vaddr & e->pte.size()-1);
794 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
795 return NoFault;
796 /** Normal flow ends here. */
797
798 handleScratchRegAccess:
799 if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
800 writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
801 return new DataAccessException;
802 }
803 goto regAccessOk;
804
805 handleQueueRegAccess:
806 if (!priv && !hpriv) {
807 writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
808 return new PrivilegedAction;
809 }
810 if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
811 writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
812 return new DataAccessException;
813 }
814 goto regAccessOk;
815
816 handleSparcErrorRegAccess:
817 if (!hpriv) {
818 writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
819 if (priv)
820 return new DataAccessException;
821 else
822 return new PrivilegedAction;
823 }
824 goto regAccessOk;
825
826
827 regAccessOk:
828 handleMmuRegAccess:
829 DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
830 req->setMmapedIpr(true);
831 req->setPaddr(req->getVaddr());
832 return NoFault;
833 };
834
835 Tick
836 DTB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
837 {
838 Addr va = pkt->getAddr();
839 ASI asi = (ASI)pkt->req->getAsi();
840 uint64_t temp;
841
842 DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
843 (uint32_t)pkt->req->getAsi(), pkt->getAddr());
844
845 switch (asi) {
846 case ASI_LSU_CONTROL_REG:
847 assert(va == 0);
848 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_LSU_CTRL));
849 break;
850 case ASI_MMU:
851 switch (va) {
852 case 0x8:
853 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_P_CONTEXT));
854 break;
855 case 0x10:
856 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_S_CONTEXT));
857 break;
858 default:
859 goto doMmuReadError;
860 }
861 break;
862 case ASI_QUEUE:
863 pkt->set(tc->readMiscRegWithEffect(MISCREG_QUEUE_CPU_MONDO_HEAD +
864 (va >> 4) - 0x3c));
865 break;
866 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
867 assert(va == 0);
868 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS0));
869 break;
870 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
871 assert(va == 0);
872 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS1));
873 break;
874 case ASI_DMMU_CTXT_ZERO_CONFIG:
875 assert(va == 0);
876 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_CONFIG));
877 break;
878 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
879 assert(va == 0);
880 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS0));
881 break;
882 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
883 assert(va == 0);
884 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS1));
885 break;
886 case ASI_IMMU_CTXT_ZERO_CONFIG:
887 assert(va == 0);
888 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_CONFIG));
889 break;
890 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
891 assert(va == 0);
892 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS0));
893 break;
894 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
895 assert(va == 0);
896 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS1));
897 break;
898 case ASI_DMMU_CTXT_NONZERO_CONFIG:
899 assert(va == 0);
900 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_CONFIG));
901 break;
902 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
903 assert(va == 0);
904 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS0));
905 break;
906 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
907 assert(va == 0);
908 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS1));
909 break;
910 case ASI_IMMU_CTXT_NONZERO_CONFIG:
911 assert(va == 0);
912 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_CONFIG));
913 break;
914 case ASI_SPARC_ERROR_STATUS_REG:
915 pkt->set((uint64_t)0);
916 break;
917 case ASI_HYP_SCRATCHPAD:
918 case ASI_SCRATCHPAD:
919 pkt->set(tc->readMiscRegWithEffect(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
920 break;
921 case ASI_IMMU:
922 switch (va) {
923 case 0x0:
924 temp = tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_TAG_ACCESS);
925 pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
926 break;
927 case 0x18:
928 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_SFSR));
929 break;
930 case 0x30:
931 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_TAG_ACCESS));
932 break;
933 default:
934 goto doMmuReadError;
935 }
936 break;
937 case ASI_DMMU:
938 switch (va) {
939 case 0x0:
940 temp = tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_TAG_ACCESS);
941 pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
942 break;
943 case 0x18:
944 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_SFSR));
945 break;
946 case 0x20:
947 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_SFAR));
948 break;
949 case 0x30:
950 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_TAG_ACCESS));
951 break;
952 case 0x80:
953 pkt->set(tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID));
954 break;
955 default:
956 goto doMmuReadError;
957 }
958 break;
959 case ASI_DMMU_TSB_PS0_PTR_REG:
960 pkt->set(MakeTsbPtr(Ps0,
961 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_TAG_ACCESS),
962 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS0),
963 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_CONFIG),
964 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS0),
965 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_CONFIG)));
966 break;
967 case ASI_DMMU_TSB_PS1_PTR_REG:
968 pkt->set(MakeTsbPtr(Ps1,
969 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_TAG_ACCESS),
970 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS1),
971 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_CONFIG),
972 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS1),
973 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_CONFIG)));
974 break;
975 case ASI_IMMU_TSB_PS0_PTR_REG:
976 pkt->set(MakeTsbPtr(Ps0,
977 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_TAG_ACCESS),
978 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS0),
979 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_CONFIG),
980 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS0),
981 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_CONFIG)));
982 break;
983 case ASI_IMMU_TSB_PS1_PTR_REG:
984 pkt->set(MakeTsbPtr(Ps1,
985 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_TAG_ACCESS),
986 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS1),
987 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_CONFIG),
988 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS1),
989 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_CONFIG)));
990 break;
991
992 default:
993 doMmuReadError:
994 panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
995 (uint32_t)asi, va);
996 }
997 pkt->result = Packet::Success;
998 return tc->getCpuPtr()->cycles(1);
999 }
1000
1001 Tick
1002 DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
1003 {
1004 uint64_t data = gtoh(pkt->get<uint64_t>());
1005 Addr va = pkt->getAddr();
1006 ASI asi = (ASI)pkt->req->getAsi();
1007
1008 Addr ta_insert;
1009 Addr va_insert;
1010 Addr ct_insert;
1011 int part_insert;
1012 int entry_insert = -1;
1013 bool real_insert;
1014 bool ignore;
1015 int part_id;
1016 int ctx_id;
1017 PageTableEntry pte;
1018
1019 DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1020 (uint32_t)asi, va, data);
1021
1022 switch (asi) {
1023 case ASI_LSU_CONTROL_REG:
1024 assert(va == 0);
1025 tc->setMiscRegWithEffect(MISCREG_MMU_LSU_CTRL, data);
1026 break;
1027 case ASI_MMU:
1028 switch (va) {
1029 case 0x8:
1030 tc->setMiscRegWithEffect(MISCREG_MMU_P_CONTEXT, data);
1031 break;
1032 case 0x10:
1033 tc->setMiscRegWithEffect(MISCREG_MMU_S_CONTEXT, data);
1034 break;
1035 default:
1036 goto doMmuWriteError;
1037 }
1038 break;
1039 case ASI_QUEUE:
1040 assert(mbits(data,13,6) == data);
1041 tc->setMiscRegWithEffect(MISCREG_QUEUE_CPU_MONDO_HEAD +
1042 (va >> 4) - 0x3c, data);
1043 break;
1044 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
1045 assert(va == 0);
1046 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS0, data);
1047 break;
1048 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
1049 assert(va == 0);
1050 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS1, data);
1051 break;
1052 case ASI_DMMU_CTXT_ZERO_CONFIG:
1053 assert(va == 0);
1054 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_C0_CONFIG, data);
1055 break;
1056 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
1057 assert(va == 0);
1058 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS0, data);
1059 break;
1060 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
1061 assert(va == 0);
1062 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS1, data);
1063 break;
1064 case ASI_IMMU_CTXT_ZERO_CONFIG:
1065 assert(va == 0);
1066 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_C0_CONFIG, data);
1067 break;
1068 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
1069 assert(va == 0);
1070 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS0, data);
1071 break;
1072 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
1073 assert(va == 0);
1074 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS1, data);
1075 break;
1076 case ASI_DMMU_CTXT_NONZERO_CONFIG:
1077 assert(va == 0);
1078 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_CX_CONFIG, data);
1079 break;
1080 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
1081 assert(va == 0);
1082 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS0, data);
1083 break;
1084 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
1085 assert(va == 0);
1086 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS1, data);
1087 break;
1088 case ASI_IMMU_CTXT_NONZERO_CONFIG:
1089 assert(va == 0);
1090 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_CX_CONFIG, data);
1091 break;
1092 case ASI_SPARC_ERROR_EN_REG:
1093 case ASI_SPARC_ERROR_STATUS_REG:
1094 warn("Ignoring write to SPARC ERROR regsiter\n");
1095 break;
1096 case ASI_HYP_SCRATCHPAD:
1097 case ASI_SCRATCHPAD:
1098 tc->setMiscRegWithEffect(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1099 break;
1100 case ASI_IMMU:
1101 switch (va) {
1102 case 0x18:
1103 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_SFSR, data);
1104 break;
1105 case 0x30:
1106 sext<59>(bits(data, 59,0));
1107 tc->setMiscRegWithEffect(MISCREG_MMU_ITLB_TAG_ACCESS, data);
1108 break;
1109 default:
1110 goto doMmuWriteError;
1111 }
1112 break;
1113 case ASI_ITLB_DATA_ACCESS_REG:
1114 entry_insert = bits(va, 8,3);
1115 case ASI_ITLB_DATA_IN_REG:
1116 assert(entry_insert != -1 || mbits(va,10,9) == va);
1117 ta_insert = tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_TAG_ACCESS);
1118 va_insert = mbits(ta_insert, 63,13);
1119 ct_insert = mbits(ta_insert, 12,0);
1120 part_insert = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID);
1121 real_insert = bits(va, 9,9);
1122 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1123 PageTableEntry::sun4u);
1124 tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
1125 pte, entry_insert);
1126 break;
1127 case ASI_DTLB_DATA_ACCESS_REG:
1128 entry_insert = bits(va, 8,3);
1129 case ASI_DTLB_DATA_IN_REG:
1130 assert(entry_insert != -1 || mbits(va,10,9) == va);
1131 ta_insert = tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_TAG_ACCESS);
1132 va_insert = mbits(ta_insert, 63,13);
1133 ct_insert = mbits(ta_insert, 12,0);
1134 part_insert = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID);
1135 real_insert = bits(va, 9,9);
1136 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1137 PageTableEntry::sun4u);
1138 insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
1139 break;
1140 case ASI_IMMU_DEMAP:
1141 ignore = false;
1142 ctx_id = -1;
1143 part_id = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID);
1144 switch (bits(va,5,4)) {
1145 case 0:
1146 ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_P_CONTEXT);
1147 break;
1148 case 1:
1149 ignore = true;
1150 break;
1151 case 3:
1152 ctx_id = 0;
1153 break;
1154 default:
1155 ignore = true;
1156 }
1157
1158 switch(bits(va,7,6)) {
1159 case 0: // demap page
1160 if (!ignore)
1161 tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
1162 bits(va,9,9), ctx_id);
1163 break;
1164 case 1: //demap context
1165 if (!ignore)
1166 tc->getITBPtr()->demapContext(part_id, ctx_id);
1167 break;
1168 case 2:
1169 tc->getITBPtr()->demapAll(part_id);
1170 break;
1171 default:
1172 panic("Invalid type for IMMU demap\n");
1173 }
1174 break;
1175 case ASI_DMMU:
1176 switch (va) {
1177 case 0x18:
1178 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_SFSR, data);
1179 break;
1180 case 0x30:
1181 sext<59>(bits(data, 59,0));
1182 tc->setMiscRegWithEffect(MISCREG_MMU_DTLB_TAG_ACCESS, data);
1183 break;
1184 case 0x80:
1185 tc->setMiscRegWithEffect(MISCREG_MMU_PART_ID, data);
1186 break;
1187 default:
1188 goto doMmuWriteError;
1189 }
1190 break;
1191 case ASI_DMMU_DEMAP:
1192 ignore = false;
1193 ctx_id = -1;
1194 part_id = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID);
1195 switch (bits(va,5,4)) {
1196 case 0:
1197 ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_P_CONTEXT);
1198 break;
1199 case 1:
1200 ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_S_CONTEXT);
1201 break;
1202 case 3:
1203 ctx_id = 0;
1204 break;
1205 default:
1206 ignore = true;
1207 }
1208
1209 switch(bits(va,7,6)) {
1210 case 0: // demap page
1211 if (!ignore)
1212 demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1213 break;
1214 case 1: //demap context
1215 if (!ignore)
1216 demapContext(part_id, ctx_id);
1217 break;
1218 case 2:
1219 demapAll(part_id);
1220 break;
1221 default:
1222 panic("Invalid type for IMMU demap\n");
1223 }
1224 break;
1225 default:
1226 doMmuWriteError:
1227 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1228 (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
1229 }
1230 pkt->result = Packet::Success;
1231 return tc->getCpuPtr()->cycles(1);
1232 }
1233
1234 void
1235 DTB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
1236 {
1237 uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1238 ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1239 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS0),
1240 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_CONFIG),
1241 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS0),
1242 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_CONFIG));
1243 ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1244 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_TSB_PS1),
1245 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_C0_CONFIG),
1246 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_TSB_PS1),
1247 tc->readMiscRegWithEffect(MISCREG_MMU_DTLB_CX_CONFIG));
1248 ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1249 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS0),
1250 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_CONFIG),
1251 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS0),
1252 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_CONFIG));
1253 ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1254 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_TSB_PS1),
1255 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_C0_CONFIG),
1256 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_TSB_PS1),
1257 tc->readMiscRegWithEffect(MISCREG_MMU_ITLB_CX_CONFIG));
1258 }
1259
1260
1261
1262
1263
1264 uint64_t
1265 DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1266 uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1267 {
1268 uint64_t tsb;
1269 uint64_t config;
1270
1271 if (bits(tag_access, 12,0) == 0) {
1272 tsb = c0_tsb;
1273 config = c0_config;
1274 } else {
1275 tsb = cX_tsb;
1276 config = cX_config;
1277 }
1278
1279 uint64_t ptr = mbits(tsb,63,13);
1280 bool split = bits(tsb,12,12);
1281 int tsb_size = bits(tsb,3,0);
1282 int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1283
1284 if (ps == Ps1 && split)
1285 ptr |= ULL(1) << (13 + tsb_size);
1286 ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1287
1288 return ptr;
1289 }
1290
1291
1292 void
1293 TLB::serialize(std::ostream &os)
1294 {
1295 SERIALIZE_SCALAR(size);
1296 SERIALIZE_SCALAR(usedEntries);
1297 SERIALIZE_SCALAR(lastReplaced);
1298
1299 // convert the pointer based free list into an index based one
1300 int *free_list = (int*)malloc(sizeof(int) * size);
1301 int cntr = 0;
1302 std::list<TlbEntry*>::iterator i;
1303 i = freeList.begin();
1304 while (i != freeList.end()) {
1305 free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
1306 i++;
1307 }
1308 SERIALIZE_SCALAR(cntr);
1309 SERIALIZE_ARRAY(free_list, cntr);
1310
1311 for (int x = 0; x < size; x++) {
1312 nameOut(os, csprintf("%s.PTE%d", name(), x));
1313 tlb[x].serialize(os);
1314 }
1315 }
1316
1317 void
1318 TLB::unserialize(Checkpoint *cp, const std::string &section)
1319 {
1320 int oldSize;
1321
1322 paramIn(cp, section, "size", oldSize);
1323 if (oldSize != size)
1324 panic("Don't support unserializing different sized TLBs\n");
1325 UNSERIALIZE_SCALAR(usedEntries);
1326 UNSERIALIZE_SCALAR(lastReplaced);
1327
1328 int cntr;
1329 UNSERIALIZE_SCALAR(cntr);
1330
1331 int *free_list = (int*)malloc(sizeof(int) * cntr);
1332 freeList.clear();
1333 UNSERIALIZE_ARRAY(free_list, cntr);
1334 for (int x = 0; x < cntr; x++)
1335 freeList.push_back(&tlb[free_list[x]]);
1336
1337 lookupTable.clear();
1338 for (int x = 0; x < size; x++) {
1339 tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
1340 if (tlb[x].valid)
1341 lookupTable.insert(tlb[x].range, &tlb[x]);
1342
1343 }
1344 }
1345
1346 /* end namespace SparcISA */ }
1347
1348 using namespace SparcISA;
1349
1350 DEFINE_SIM_OBJECT_CLASS_NAME("SparcTLB", TLB)
1351
1352 BEGIN_DECLARE_SIM_OBJECT_PARAMS(ITB)
1353
1354 Param<int> size;
1355
1356 END_DECLARE_SIM_OBJECT_PARAMS(ITB)
1357
1358 BEGIN_INIT_SIM_OBJECT_PARAMS(ITB)
1359
1360 INIT_PARAM_DFLT(size, "TLB size", 48)
1361
1362 END_INIT_SIM_OBJECT_PARAMS(ITB)
1363
1364
1365 CREATE_SIM_OBJECT(ITB)
1366 {
1367 return new ITB(getInstanceName(), size);
1368 }
1369
1370 REGISTER_SIM_OBJECT("SparcITB", ITB)
1371
1372 BEGIN_DECLARE_SIM_OBJECT_PARAMS(DTB)
1373
1374 Param<int> size;
1375
1376 END_DECLARE_SIM_OBJECT_PARAMS(DTB)
1377
1378 BEGIN_INIT_SIM_OBJECT_PARAMS(DTB)
1379
1380 INIT_PARAM_DFLT(size, "TLB size", 64)
1381
1382 END_INIT_SIM_OBJECT_PARAMS(DTB)
1383
1384
1385 CREATE_SIM_OBJECT(DTB)
1386 {
1387 return new DTB(getInstanceName(), size);
1388 }
1389
1390 REGISTER_SIM_OBJECT("SparcDTB", DTB)