TLB: Make a TLB base class and put a virtual demapPage function in it.
[gem5.git] / src / arch / sparc / tlb.cc
1 /*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 #include <cstring>
32
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/miscregfile.hh"
35 #include "arch/sparc/tlb.hh"
36 #include "base/bitfield.hh"
37 #include "base/trace.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/base.hh"
40 #include "mem/packet_access.hh"
41 #include "mem/request.hh"
42 #include "sim/system.hh"
43
44 /* @todo remove some of the magic constants. -- ali
45 * */
46 namespace SparcISA {
47
48 TLB::TLB(const Params *p)
49 : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
50 cacheValid(false)
51 {
52 // To make this work you'll have to change the hypervisor and OS
53 if (size > 64)
54 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
55
56 tlb = new TlbEntry[size];
57 std::memset(tlb, 0, sizeof(TlbEntry) * size);
58
59 for (int x = 0; x < size; x++)
60 freeList.push_back(&tlb[x]);
61
62 c0_tsb_ps0 = 0;
63 c0_tsb_ps1 = 0;
64 c0_config = 0;
65 cx_tsb_ps0 = 0;
66 cx_tsb_ps1 = 0;
67 cx_config = 0;
68 sfsr = 0;
69 tag_access = 0;
70 }
71
72 void
73 TLB::clearUsedBits()
74 {
75 MapIter i;
76 for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
77 TlbEntry *t = i->second;
78 if (!t->pte.locked()) {
79 t->used = false;
80 usedEntries--;
81 }
82 }
83 }
84
85
/**
 * Insert a translation into the TLB.
 *
 * If @p entry is -1 a victim slot is chosen automatically: the first free
 * entry if any exist, otherwise a round-robin sweep (starting just past the
 * last victim) over unlocked entries. If every entry is locked, the last
 * hardware entry is overwritten. A non-negative @p entry overwrites that
 * specific hardware slot directly (used by the DATA_ACCESS register writes).
 *
 * @param va           virtual address of the mapping (aligned down to the
 *                     page size below)
 * @param partition_id partition the mapping belongs to
 * @param context_id   context the mapping belongs to (ignored for matching
 *                     when real is set)
 * @param real         true for a real (physically-addressed) translation
 * @param PTE          the page table entry to install; must be valid
 * @param entry        explicit hardware entry index, or -1 to auto-select
 */
void
TLB::insert(Addr va, int partition_id, int context_id, bool real,
        const PageTableEntry& PTE, int entry)
{


    MapIter i;
    TlbEntry *new_entry = NULL;
//  TlbRange tr;
    int x;

    // Any insertion invalidates the single-entry translation cache.
    cacheValid = false;
    // Align the VA down to the page boundary implied by the PTE's size.
    va &= ~(PTE.size()-1);
 /*   tr.va = va;
    tr.size = PTE.size() - 1;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;
*/

    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
            va, PTE.paddr(), partition_id, context_id, (int)real, entry);

    // Demap any entry that conflicts: same partition and real-ness, an
    // overlapping VA range, and (for virtual translations) the same context.
    for (x = 0; x < size; x++) {
        if (tlb[x].range.real == real &&
            tlb[x].range.partitionId == partition_id &&
            tlb[x].range.va < va + PTE.size() - 1 &&
            tlb[x].range.va + tlb[x].range.size >= va &&
            (real || tlb[x].range.contextId == context_id ))
        {
            if (tlb[x].valid) {
                freeList.push_front(&tlb[x]);
                DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);

                tlb[x].valid = false;
                if (tlb[x].used) {
                    tlb[x].used = false;
                    usedEntries--;
                }
                lookupTable.erase(tlb[x].range);
            }
        }
    }


/*
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
                i->second);
        lookupTable.erase(i);
    }
*/

    if (entry != -1) {
        // Caller named an explicit hardware slot.
        assert(entry < size && entry >= 0);
        new_entry = &tlb[entry];
    } else {
        if (!freeList.empty()) {
            new_entry = freeList.front();
        } else {
            // Round-robin victim search over unlocked entries, starting
            // just past the last victim. If we wrap all the way around,
            // everything is locked and new_entry stays NULL.
            x = lastReplaced;
            do {
                ++x;
                if (x == size)
                    x = 0;
                if (x == lastReplaced)
                    goto insertAllLocked;
            } while (tlb[x].pte.locked());
            lastReplaced = x;
            new_entry = &tlb[x];
        }
        /*
        for (x = 0; x < size; x++) {
            if (!tlb[x].valid || !tlb[x].used)  {
                new_entry = &tlb[x];
                break;
            }
        }*/
    }

insertAllLocked:
    // Overwrite the last entry if they're all locked (new_entry is NULL
    // only on the goto path above).
    if (!new_entry) {
        new_entry = &tlb[size-1];
    }

    // Evict whatever currently occupies the chosen slot.
    freeList.remove(new_entry);
    if (new_entry->valid && new_entry->used)
        usedEntries--;
    if (new_entry->valid)
        lookupTable.erase(new_entry->range);


    // Fill in the slot with the new translation.
    assert(PTE.valid());
    new_entry->range.va = va;
    new_entry->range.size = PTE.size() - 1;
    new_entry->range.partitionId = partition_id;
    new_entry->range.contextId = context_id;
    new_entry->range.real = real;
    new_entry->pte = PTE;
    new_entry->used = true;;
    new_entry->valid = true;
    usedEntries++;



    i = lookupTable.insert(new_entry->range, new_entry);
    assert(i != lookupTable.end());

    // If all entries have their used bit set, clear it on them all, but the
    // one we just inserted
    if (usedEntries == size) {
        clearUsedBits();
        new_entry->used = true;
        usedEntries++;
    }

}
212
213
214 TlbEntry*
215 TLB::lookup(Addr va, int partition_id, bool real, int context_id, bool
216 update_used)
217 {
218 MapIter i;
219 TlbRange tr;
220 TlbEntry *t;
221
222 DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
223 va, partition_id, context_id, real);
224 // Assemble full address structure
225 tr.va = va;
226 tr.size = 1;
227 tr.contextId = context_id;
228 tr.partitionId = partition_id;
229 tr.real = real;
230
231 // Try to find the entry
232 i = lookupTable.find(tr);
233 if (i == lookupTable.end()) {
234 DPRINTF(TLB, "TLB: No valid entry found\n");
235 return NULL;
236 }
237
238 // Mark the entries used bit and clear other used bits in needed
239 t = i->second;
240 DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
241 t->pte.size());
242
243 // Update the used bits only if this is a real access (not a fake one from
244 // virttophys()
245 if (!t->used && update_used) {
246 t->used = true;
247 usedEntries++;
248 if (usedEntries == size) {
249 clearUsedBits();
250 t->used = true;
251 usedEntries++;
252 }
253 }
254
255 return t;
256 }
257
258 void
259 TLB::dumpAll()
260 {
261 MapIter i;
262 for (int x = 0; x < size; x++) {
263 if (tlb[x].valid) {
264 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
265 x, tlb[x].range.partitionId, tlb[x].range.contextId,
266 tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
267 tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
268 }
269 }
270 }
271
272 void
273 TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
274 {
275 TlbRange tr;
276 MapIter i;
277
278 DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
279 va, partition_id, context_id, real);
280
281 cacheValid = false;
282
283 // Assemble full address structure
284 tr.va = va;
285 tr.size = 1;
286 tr.contextId = context_id;
287 tr.partitionId = partition_id;
288 tr.real = real;
289
290 // Demap any entry that conflicts
291 i = lookupTable.find(tr);
292 if (i != lookupTable.end()) {
293 DPRINTF(IPR, "TLB: Demapped page\n");
294 i->second->valid = false;
295 if (i->second->used) {
296 i->second->used = false;
297 usedEntries--;
298 }
299 freeList.push_front(i->second);
300 lookupTable.erase(i);
301 }
302 }
303
304 void
305 TLB::demapContext(int partition_id, int context_id)
306 {
307 int x;
308 DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
309 partition_id, context_id);
310 cacheValid = false;
311 for (x = 0; x < size; x++) {
312 if (tlb[x].range.contextId == context_id &&
313 tlb[x].range.partitionId == partition_id) {
314 if (tlb[x].valid == true) {
315 freeList.push_front(&tlb[x]);
316 }
317 tlb[x].valid = false;
318 if (tlb[x].used) {
319 tlb[x].used = false;
320 usedEntries--;
321 }
322 lookupTable.erase(tlb[x].range);
323 }
324 }
325 }
326
327 void
328 TLB::demapAll(int partition_id)
329 {
330 int x;
331 DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
332 cacheValid = false;
333 for (x = 0; x < size; x++) {
334 if (tlb[x].valid && !tlb[x].pte.locked() &&
335 tlb[x].range.partitionId == partition_id) {
336 freeList.push_front(&tlb[x]);
337 tlb[x].valid = false;
338 if (tlb[x].used) {
339 tlb[x].used = false;
340 usedEntries--;
341 }
342 lookupTable.erase(tlb[x].range);
343 }
344 }
345 }
346
347 void
348 TLB::invalidateAll()
349 {
350 int x;
351 cacheValid = false;
352
353 lookupTable.clear();
354 for (x = 0; x < size; x++) {
355 if (tlb[x].valid == true)
356 freeList.push_back(&tlb[x]);
357 tlb[x].valid = false;
358 tlb[x].used = false;
359 }
360 usedEntries = 0;
361 }
362
363 uint64_t
364 TLB::TteRead(int entry) {
365 if (entry >= size)
366 panic("entry: %d\n", entry);
367
368 assert(entry < size);
369 if (tlb[entry].valid)
370 return tlb[entry].pte();
371 else
372 return (uint64_t)-1ll;
373 }
374
375 uint64_t
376 TLB::TagRead(int entry) {
377 assert(entry < size);
378 uint64_t tag;
379 if (!tlb[entry].valid)
380 return (uint64_t)-1ll;
381
382 tag = tlb[entry].range.contextId;
383 tag |= tlb[entry].range.va;
384 tag |= (uint64_t)tlb[entry].range.partitionId << 61;
385 tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
386 tag |= (uint64_t)~tlb[entry].pte._size() << 56;
387 return tag;
388 }
389
390 bool
391 TLB::validVirtualAddress(Addr va, bool am)
392 {
393 if (am)
394 return true;
395 if (va >= StartVAddrHole && va <= EndVAddrHole)
396 return false;
397 return true;
398 }
399
400 void
401 TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
402 {
403 if (sfsr & 0x1)
404 sfsr = 0x3;
405 else
406 sfsr = 1;
407
408 if (write)
409 sfsr |= 1 << 2;
410 sfsr |= ct << 4;
411 if (se)
412 sfsr |= 1 << 6;
413 sfsr |= ft << 7;
414 sfsr |= asi << 16;
415 }
416
417 void
418 TLB::writeTagAccess(Addr va, int context)
419 {
420 DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
421 va, context, mbits(va, 63,13) | mbits(context,12,0));
422
423 tag_access = mbits(va, 63,13) | mbits(context,12,0);
424 }
425
426 void
427 ITB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
428 {
429 DPRINTF(TLB, "TLB: ITB Fault: w=%d ct=%d ft=%d asi=%d\n",
430 (int)write, ct, ft, asi);
431 TLB::writeSfsr(write, ct, se, ft, asi);
432 }
433
434 void
435 DTB::writeSfsr(Addr a, bool write, ContextType ct,
436 bool se, FaultTypes ft, int asi)
437 {
438 DPRINTF(TLB, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
439 a, (int)write, ct, ft, asi);
440 TLB::writeSfsr(write, ct, se, ft, asi);
441 sfar = a;
442 }
443
/**
 * Translate an instruction fetch.
 *
 * Consults a one-entry translation cache first; otherwise decodes the
 * packed MISCREG_TLB_DATA state (privilege bits, partition, trap level,
 * contexts), performs alignment/VA-range checks, looks up the TLB and
 * either fills in the request's physical address or returns the
 * appropriate fault.
 *
 * @param req the memory request to translate (paddr is set on success)
 * @param tc  thread context supplying MMU state
 * @return NoFault on success, otherwise the fault to raise
 */
Fault
ITB::translate(RequestPtr &req, ThreadContext *tc)
{
    // All the MMU state needed here is packed into one misc register.
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);

    Addr vaddr = req->getVaddr();
    TlbEntry *e;

    // Instruction fetches always use the implicit ASI.
    assert(req->getAsi() == ASI_IMPLICIT);

    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
            vaddr, req->getSize());

    // Fast path: if the MMU state is unchanged since the last fetch, reuse
    // the cached entry (or cached bypass when cacheEntry is NULL).
    if (cacheValid && cacheState == tlbdata) {
        if (cacheEntry) {
            if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
                cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
                // NB: '&' binds tighter than '|', so this parses as
                // (paddr & ~(size-1)) | (vaddr & (size-1)).
                req->setPaddr(cacheEntry->pte.paddr() & ~(cacheEntry->pte.size()-1) |
                              vaddr & cacheEntry->pte.size()-1 );
                return NoFault;
            }
        } else {
            // Cached hpriv/red bypass: VA is used as PA directly.
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }
    }

    // Unpack the MMU state bits.
    bool hpriv = bits(tlbdata,0,0);
    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_im = bits(tlbdata,4,4);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int context;
    ContextType ct;
    int asi;
    bool real = false;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
            priv, hpriv, red, lsu_im, part_id);

    // At trap level > 0 fetches use the nucleus context, else primary.
    if (tl > 0) {
        asi = ASI_N;
        ct = Nucleus;
        context = 0;
    } else {
        asi = ASI_P;
        ct = Primary;
        context = pri_context;
    }

    // Hypervisor-privileged or RED-state fetches bypass translation.
    if ( hpriv || red ) {
        cacheValid = true;
        cacheState = tlbdata;
        cacheEntry = NULL;
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    // If the access is unaligned trap (instructions are 4-byte aligned).
    if (vaddr & 0x3) {
        writeSfsr(false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    // 32-bit address masking truncates the VA before validation.
    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(false, ct, false, VaOutOfRange, asi);
        return new InstructionAccessException;
    }

    // With the I-MMU disabled, look up a real translation instead.
    if (!lsu_im) {
        e = lookup(vaddr, part_id, true);
        real = true;
        context = 0;
    } else {
        e = lookup(vaddr, part_id, false, context);
    }

    // TLB miss: record the tag and raise the appropriate miss fault.
    if (e == NULL || !e->valid) {
        writeTagAccess(vaddr, context);
        if (real)
            return new InstructionRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastInstructionAccessMMUMiss;
#else
            return new FastInstructionAccessMMUMiss(req->getVaddr());
#endif
    }

    // We're not privileged but are accessing a privileged page.
    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(false, ct, false, PrivViolation, asi);
        return new InstructionAccessException;
    }

    // Cache translation data for the next translation.
    cacheValid = true;
    cacheState = tlbdata;
    cacheEntry = e;

    // Same precedence note as above: page base OR'd with page offset.
    req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
                  vaddr & e->pte.size()-1 );
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
}
558
559
560
/**
 * Translate a data access.
 *
 * Handles the two-entry translation cache fast path, ASI privilege
 * checking and classification, alignment/VA-range checks, TLB lookup with
 * permission checks, and dispatch (via the goto labels at the bottom) of
 * the ASIs that map internal MMU/interrupt/scratchpad/queue registers to
 * memory-mapped IPR accesses.
 *
 * @param req   the memory request to translate (paddr set on success)
 * @param tc    thread context supplying MMU state
 * @param write true for stores, false for loads
 * @return NoFault on success, otherwise the fault to raise
 */
Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
    /* @todo this could really use some profiling and fixing to make it faster! */
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
    Addr vaddr = req->getVaddr();
    Addr size = req->getSize();
    ASI asi;
    asi = (ASI)req->getAsi();
    bool implicit = false;
    bool hpriv = bits(tlbdata,0,0);
    // Accesses must be naturally aligned to their size.
    bool unaligned = (vaddr & size-1);

    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
            vaddr, size, asi);

    // Consistency check: every entry is either mapped or on the free list.
    // NOTE(review): uses the hard-coded 64 rather than `size`.
    if (lookupTable.size() != 64 - freeList.size())
        panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
              freeList.size());
    if (asi == ASI_IMPLICIT)
        implicit = true;

    // Only use the fast path here if there doesn't need to be an unaligned
    // trap later
    if (!unaligned) {
        // Hypervisor implicit accesses bypass translation entirely.
        if (hpriv && implicit) {
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }

        // Be fast if we can! Check the two most recent translations.
        if (cacheValid && cacheState == tlbdata) {



            if (cacheEntry[0]) {
                TlbEntry *ce = cacheEntry[0];
                Addr ce_va = ce->range.va;
                if (cacheAsi[0] == asi &&
                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                    (!write || ce->pte.writable())) {
                    // '&' binds tighter than '|': page base | page offset.
                    req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
                    // Side-effect pages and I/O space (PA bit 39) are
                    // uncacheable.
                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                        req->setFlags(req->getFlags() | UNCACHEABLE);
                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                    return NoFault;
                } // if matched
            } // if cache entry valid
            if (cacheEntry[1]) {
                TlbEntry *ce = cacheEntry[1];
                Addr ce_va = ce->range.va;
                if (cacheAsi[1] == asi &&
                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                    (!write || ce->pte.writable())) {
                    req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                        req->setFlags(req->getFlags() | UNCACHEABLE);
                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                    return NoFault;
                } // if matched
            } // if cache entry valid
        }
    }

    // Unpack the remaining MMU state bits.
    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_dm = bits(tlbdata,5,5);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int sec_context = bits(tlbdata,63,48);

    bool real = false;
    ContextType ct = Primary;
    int context = 0;

    TlbEntry *e;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
            priv, hpriv, red, lsu_dm, part_id);

    if (implicit) {
        // Implicit accesses pick nucleus vs primary based on trap level.
        if (tl > 0) {
            asi = ASI_N;
            ct = Nucleus;
            context = 0;
        } else {
            asi = ASI_P;
            ct = Primary;
            context = pri_context;
        }
    } else {
        // We need to check for priv level/asi priv
        if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
            // It appears that context should be Nucleus in these cases?
            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new PrivilegedAction;
        }

        if (!hpriv && AsiIsHPriv(asi)) {
            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new DataAccessException;
        }

        // Classify the ASI into a context type and context number.
        if (AsiIsPrimary(asi)) {
            context = pri_context;
            ct = Primary;
        } else if (AsiIsSecondary(asi)) {
            context = sec_context;
            ct = Secondary;
        } else if (AsiIsNucleus(asi)) {
            ct = Nucleus;
            context = 0;
        } else {  // ????
            ct = Primary;
            context = pri_context;
        }
    }

    // Non-ordinary ASIs: reject the unimplemented ones and dispatch the
    // register-mapped ones to the handlers at the bottom of the function.
    if (!implicit && asi != ASI_P && asi != ASI_S) {
        if (AsiIsLittle(asi))
            panic("Little Endian ASIs not supported\n");

        //XXX It's unclear from looking at the documentation how a no fault
        //load differs from a regular one, other than what happens concerning
        //nfo and e bits in the TTE
//        if (AsiIsNoFault(asi))
//            panic("No Fault ASIs not supported\n");

        if (AsiIsPartialStore(asi))
            panic("Partial Store ASIs not supported\n");

        if (AsiIsCmt(asi))
            panic("Cmt ASI registers not implmented\n");

        if (AsiIsInterrupt(asi))
            goto handleIntRegAccess;
        if (AsiIsMmu(asi))
            goto handleMmuRegAccess;
        if (AsiIsScratchPad(asi))
            goto handleScratchRegAccess;
        if (AsiIsQueue(asi))
            goto handleQueueRegAccess;
        if (AsiIsSparcError(asi))
            goto handleSparcErrorRegAccess;

        if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
            !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
            panic("Accessing ASI %#X. Should we?\n", asi);
    }

    // If the access is unaligned trap.
    if (unaligned) {
        writeSfsr(vaddr, false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
        return new DataAccessException;
    }


    // D-MMU disabled (outside hpriv/red) or an explicitly real ASI means
    // a real-address lookup.
    if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
        real = true;
        context = 0;
    };

    // Hypervisor accesses that aren't as-if-user or real bypass the TLB.
    if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    e = lookup(vaddr, part_id, real, context);

    // Miss: record the tag and raise the matching miss fault.
    if (e == NULL || !e->valid) {
        writeTagAccess(vaddr, context);
        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
        if (real)
            return new DataRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastDataAccessMMUMiss;
#else
            return new FastDataAccessMMUMiss(req->getVaddr());
#endif

    }

    // Unprivileged access to a privileged page.
    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
        return new DataAccessException;
    }

    // Store to a read-only page.
    if (write && !e->pte.writable()) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
        return new FastDataAccessProtection;
    }

    // NFO page accessed without a no-fault ASI.
    if (e->pte.nofault() && !AsiIsNoFault(asi)) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
        return new DataAccessException;
    }

    // Side-effect page accessed with a no-fault ASI.
    if (e->pte.sideffect() && AsiIsNoFault(asi)) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
        return new DataAccessException;
    }


    if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
        req->setFlags(req->getFlags() | UNCACHEABLE);

    // Cache translation data for the next translation: maintain a
    // two-entry MRU list in cacheEntry[0..1]/cacheAsi[0..1].
    cacheState = tlbdata;
    if (!cacheValid) {
        cacheEntry[1] = NULL;
        cacheEntry[0] = NULL;
    }

    if (cacheEntry[0] != e && cacheEntry[1] != e) {
        cacheEntry[1] = cacheEntry[0];
        cacheEntry[0] = e;
        cacheAsi[1] = cacheAsi[0];
        cacheAsi[0] = asi;
        // Implicit accesses are cached under ASI 0 so the fast path only
        // matches other implicit accesses.
        if (implicit)
            cacheAsi[0] = (ASI)0;
    }
    cacheValid = true;
    // '&' binds tighter than '|': page base | page offset.
    req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
                  vaddr & e->pte.size()-1);
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;

/** Normal flow ends here. */
handleIntRegAccess:
    // Interrupt registers are hypervisor-only.
    if (!hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }

    // The interrupt write register is write-only, the read one read-only.
    if (asi == ASI_SWVR_UDB_INTR_W && !write ||
        asi == ASI_SWVR_UDB_INTR_R && write) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }

    goto regAccessOk;


handleScratchRegAccess:
    // Scratchpad registers 4-5 (offsets 0x20-0x2f) are hypervisor-only.
    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleQueueRegAccess:
    if (!priv  && !hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new PrivilegedAction;
    }
    // Queue registers live at 16-byte-aligned offsets in [0x3c0, 0x3f8].
    if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleSparcErrorRegAccess:
    // Error registers are hypervisor-only.
    if (!hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }
    goto regAccessOk;


regAccessOk:
handleMmuRegAccess:
    // Route the access to doMmuRegRead/doMmuRegWrite via the memory-
    // mapped IPR mechanism; the VA is passed through as the "paddr".
    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
    req->setMmapedIpr(true);
    req->setPaddr(req->getVaddr());
    return NoFault;
};
859
860 #if FULL_SYSTEM
861
/**
 * Handle a memory-mapped read of an internal MMU register, dispatched
 * here by DTB::translate()'s IPR path. The ASI selects the register
 * group; the address selects the register within the group. I-side
 * registers are reached through the thread context's ITB pointer.
 *
 * @param tc  thread context owning the MMU state
 * @param pkt packet whose address/ASI name the register; the read value
 *            is stored into the packet
 * @return the access latency (one CPU cycle)
 */
Tick
DTB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();
    uint64_t temp;

    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
         (uint32_t)pkt->req->getAsi(), pkt->getAddr());

    ITB * itb = tc->getITBPtr();

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
        break;
      case ASI_MMU:
        switch (va) {
          case 0x8:
            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
            break;
          case 0x10:
            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_QUEUE:
        // Queue head/tail registers are 16 bytes apart starting at 0x3c0.
        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c));
        break;
      // TSB base/config registers for the data side...
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(c0_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(c0_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(c0_config);
        break;
      // ...and the corresponding instruction-side registers.
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->c0_config);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(cx_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(cx_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(cx_config);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->cx_config);
        break;
      case ASI_SPARC_ERROR_STATUS_REG:
        // Error reporting isn't modeled; always reads as zero.
        pkt->set((uint64_t)0);
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x0:
            // Tag target: VA bits in the low part, context in the top.
            temp = itb->tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(itb->sfsr);
            break;
          case 0x30:
            pkt->set(itb->tag_access);
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x0:
            // Tag target, as above but for the data side.
            temp = tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(sfsr);
            break;
          case 0x20:
            pkt->set(sfar);
            break;
          case 0x30:
            pkt->set(tag_access);
            break;
          case 0x80:
            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
            break;
          default:
                goto doMmuReadError;
        }
        break;
      // Hardware TSB pointer registers, computed from the current tag
      // access and TSB base/config state.
      case ASI_DMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
            tag_access,
            c0_tsb_ps0,
            c0_config,
            cx_tsb_ps0,
            cx_config));
        break;
      case ASI_DMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
                tag_access,
                c0_tsb_ps1,
                c0_config,
                cx_tsb_ps1,
                cx_config));
        break;
      case ASI_IMMU_TSB_PS0_PTR_REG:
          pkt->set(MakeTsbPtr(Ps0,
                itb->tag_access,
                itb->c0_tsb_ps0,
                itb->c0_config,
                itb->cx_tsb_ps0,
                itb->cx_config));
        break;
      case ASI_IMMU_TSB_PS1_PTR_REG:
          pkt->set(MakeTsbPtr(Ps1,
                itb->tag_access,
                itb->c0_tsb_ps1,
                itb->c0_config,
                itb->cx_tsb_ps1,
                itb->cx_config));
        break;
      case ASI_SWVR_INTR_RECEIVE:
        pkt->set(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
        break;
      case ASI_SWVR_UDB_INTR_R:
        // Reading the interrupt register returns the highest pending
        // vector and clears it as a side effect.
        temp = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
        tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, temp);
        pkt->set(temp);
        break;
      default:
doMmuReadError:
        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
            (uint32_t)asi, va);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->ticks(1);
}
1036
1037 Tick
1038 DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
1039 {
1040 uint64_t data = gtoh(pkt->get<uint64_t>());
1041 Addr va = pkt->getAddr();
1042 ASI asi = (ASI)pkt->req->getAsi();
1043
1044 Addr ta_insert;
1045 Addr va_insert;
1046 Addr ct_insert;
1047 int part_insert;
1048 int entry_insert = -1;
1049 bool real_insert;
1050 bool ignore;
1051 int part_id;
1052 int ctx_id;
1053 PageTableEntry pte;
1054
1055 DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1056 (uint32_t)asi, va, data);
1057
1058 ITB * itb = tc->getITBPtr();
1059
1060 switch (asi) {
1061 case ASI_LSU_CONTROL_REG:
1062 assert(va == 0);
1063 tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1064 break;
1065 case ASI_MMU:
1066 switch (va) {
1067 case 0x8:
1068 tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1069 break;
1070 case 0x10:
1071 tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1072 break;
1073 default:
1074 goto doMmuWriteError;
1075 }
1076 break;
1077 case ASI_QUEUE:
1078 assert(mbits(data,13,6) == data);
1079 tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
1080 (va >> 4) - 0x3c, data);
1081 break;
1082 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
1083 assert(va == 0);
1084 c0_tsb_ps0 = data;
1085 break;
1086 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
1087 assert(va == 0);
1088 c0_tsb_ps1 = data;
1089 break;
1090 case ASI_DMMU_CTXT_ZERO_CONFIG:
1091 assert(va == 0);
1092 c0_config = data;
1093 break;
1094 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
1095 assert(va == 0);
1096 itb->c0_tsb_ps0 = data;
1097 break;
1098 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
1099 assert(va == 0);
1100 itb->c0_tsb_ps1 = data;
1101 break;
1102 case ASI_IMMU_CTXT_ZERO_CONFIG:
1103 assert(va == 0);
1104 itb->c0_config = data;
1105 break;
1106 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
1107 assert(va == 0);
1108 cx_tsb_ps0 = data;
1109 break;
1110 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
1111 assert(va == 0);
1112 cx_tsb_ps1 = data;
1113 break;
1114 case ASI_DMMU_CTXT_NONZERO_CONFIG:
1115 assert(va == 0);
1116 cx_config = data;
1117 break;
1118 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
1119 assert(va == 0);
1120 itb->cx_tsb_ps0 = data;
1121 break;
1122 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
1123 assert(va == 0);
1124 itb->cx_tsb_ps1 = data;
1125 break;
1126 case ASI_IMMU_CTXT_NONZERO_CONFIG:
1127 assert(va == 0);
1128 itb->cx_config = data;
1129 break;
1130 case ASI_SPARC_ERROR_EN_REG:
1131 case ASI_SPARC_ERROR_STATUS_REG:
1132 warn("Ignoring write to SPARC ERROR regsiter\n");
1133 break;
1134 case ASI_HYP_SCRATCHPAD:
1135 case ASI_SCRATCHPAD:
1136 tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1137 break;
1138 case ASI_IMMU:
1139 switch (va) {
1140 case 0x18:
1141 itb->sfsr = data;
1142 break;
1143 case 0x30:
1144 sext<59>(bits(data, 59,0));
1145 itb->tag_access = data;
1146 break;
1147 default:
1148 goto doMmuWriteError;
1149 }
1150 break;
1151 case ASI_ITLB_DATA_ACCESS_REG:
1152 entry_insert = bits(va, 8,3);
1153 case ASI_ITLB_DATA_IN_REG:
1154 assert(entry_insert != -1 || mbits(va,10,9) == va);
1155 ta_insert = itb->tag_access;
1156 va_insert = mbits(ta_insert, 63,13);
1157 ct_insert = mbits(ta_insert, 12,0);
1158 part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1159 real_insert = bits(va, 9,9);
1160 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1161 PageTableEntry::sun4u);
1162 tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
1163 pte, entry_insert);
1164 break;
1165 case ASI_DTLB_DATA_ACCESS_REG:
1166 entry_insert = bits(va, 8,3);
1167 case ASI_DTLB_DATA_IN_REG:
1168 assert(entry_insert != -1 || mbits(va,10,9) == va);
1169 ta_insert = tag_access;
1170 va_insert = mbits(ta_insert, 63,13);
1171 ct_insert = mbits(ta_insert, 12,0);
1172 part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1173 real_insert = bits(va, 9,9);
1174 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1175 PageTableEntry::sun4u);
1176 insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
1177 break;
1178 case ASI_IMMU_DEMAP:
1179 ignore = false;
1180 ctx_id = -1;
1181 part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1182 switch (bits(va,5,4)) {
1183 case 0:
1184 ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1185 break;
1186 case 1:
1187 ignore = true;
1188 break;
1189 case 3:
1190 ctx_id = 0;
1191 break;
1192 default:
1193 ignore = true;
1194 }
1195
1196 switch(bits(va,7,6)) {
1197 case 0: // demap page
1198 if (!ignore)
1199 tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
1200 bits(va,9,9), ctx_id);
1201 break;
1202 case 1: //demap context
1203 if (!ignore)
1204 tc->getITBPtr()->demapContext(part_id, ctx_id);
1205 break;
1206 case 2:
1207 tc->getITBPtr()->demapAll(part_id);
1208 break;
1209 default:
1210 panic("Invalid type for IMMU demap\n");
1211 }
1212 break;
1213 case ASI_DMMU:
1214 switch (va) {
1215 case 0x18:
1216 sfsr = data;
1217 break;
1218 case 0x30:
1219 sext<59>(bits(data, 59,0));
1220 tag_access = data;
1221 break;
1222 case 0x80:
1223 tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1224 break;
1225 default:
1226 goto doMmuWriteError;
1227 }
1228 break;
1229 case ASI_DMMU_DEMAP:
1230 ignore = false;
1231 ctx_id = -1;
1232 part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1233 switch (bits(va,5,4)) {
1234 case 0:
1235 ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1236 break;
1237 case 1:
1238 ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1239 break;
1240 case 3:
1241 ctx_id = 0;
1242 break;
1243 default:
1244 ignore = true;
1245 }
1246
1247 switch(bits(va,7,6)) {
1248 case 0: // demap page
1249 if (!ignore)
1250 demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1251 break;
1252 case 1: //demap context
1253 if (!ignore)
1254 demapContext(part_id, ctx_id);
1255 break;
1256 case 2:
1257 demapAll(part_id);
1258 break;
1259 default:
1260 panic("Invalid type for IMMU demap\n");
1261 }
1262 break;
1263 case ASI_SWVR_INTR_RECEIVE:
1264 int msb;
1265 // clear all the interrupts that aren't set in the write
1266 while(tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data) {
1267 msb = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data);
1268 tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, msb);
1269 }
1270 break;
1271 case ASI_SWVR_UDB_INTR_W:
1272 tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1273 post_interrupt(bits(data,5,0),0);
1274 break;
1275 default:
1276 doMmuWriteError:
1277 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1278 (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
1279 }
1280 pkt->makeAtomicResponse();
1281 return tc->getCpuPtr()->ticks(1);
1282 }
1283
1284 #endif
1285
1286 void
1287 DTB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
1288 {
1289 uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1290 ITB * itb = tc->getITBPtr();
1291 ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1292 c0_tsb_ps0,
1293 c0_config,
1294 cx_tsb_ps0,
1295 cx_config);
1296 ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1297 c0_tsb_ps1,
1298 c0_config,
1299 cx_tsb_ps1,
1300 cx_config);
1301 ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1302 itb->c0_tsb_ps0,
1303 itb->c0_config,
1304 itb->cx_tsb_ps0,
1305 itb->cx_config);
1306 ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1307 itb->c0_tsb_ps1,
1308 itb->c0_config,
1309 itb->cx_tsb_ps1,
1310 itb->cx_config);
1311 }
1312
1313
1314
1315
1316
1317 uint64_t
1318 DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1319 uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1320 {
1321 uint64_t tsb;
1322 uint64_t config;
1323
1324 if (bits(tag_access, 12,0) == 0) {
1325 tsb = c0_tsb;
1326 config = c0_config;
1327 } else {
1328 tsb = cX_tsb;
1329 config = cX_config;
1330 }
1331
1332 uint64_t ptr = mbits(tsb,63,13);
1333 bool split = bits(tsb,12,12);
1334 int tsb_size = bits(tsb,3,0);
1335 int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1336
1337 if (ps == Ps1 && split)
1338 ptr |= ULL(1) << (13 + tsb_size);
1339 ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1340
1341 return ptr;
1342 }
1343
1344
1345 void
1346 TLB::serialize(std::ostream &os)
1347 {
1348 SERIALIZE_SCALAR(size);
1349 SERIALIZE_SCALAR(usedEntries);
1350 SERIALIZE_SCALAR(lastReplaced);
1351
1352 // convert the pointer based free list into an index based one
1353 int *free_list = (int*)malloc(sizeof(int) * size);
1354 int cntr = 0;
1355 std::list<TlbEntry*>::iterator i;
1356 i = freeList.begin();
1357 while (i != freeList.end()) {
1358 free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
1359 i++;
1360 }
1361 SERIALIZE_SCALAR(cntr);
1362 SERIALIZE_ARRAY(free_list, cntr);
1363
1364 SERIALIZE_SCALAR(c0_tsb_ps0);
1365 SERIALIZE_SCALAR(c0_tsb_ps1);
1366 SERIALIZE_SCALAR(c0_config);
1367 SERIALIZE_SCALAR(cx_tsb_ps0);
1368 SERIALIZE_SCALAR(cx_tsb_ps1);
1369 SERIALIZE_SCALAR(cx_config);
1370 SERIALIZE_SCALAR(sfsr);
1371 SERIALIZE_SCALAR(tag_access);
1372
1373 for (int x = 0; x < size; x++) {
1374 nameOut(os, csprintf("%s.PTE%d", name(), x));
1375 tlb[x].serialize(os);
1376 }
1377 }
1378
1379 void
1380 TLB::unserialize(Checkpoint *cp, const std::string &section)
1381 {
1382 int oldSize;
1383
1384 paramIn(cp, section, "size", oldSize);
1385 if (oldSize != size)
1386 panic("Don't support unserializing different sized TLBs\n");
1387 UNSERIALIZE_SCALAR(usedEntries);
1388 UNSERIALIZE_SCALAR(lastReplaced);
1389
1390 int cntr;
1391 UNSERIALIZE_SCALAR(cntr);
1392
1393 int *free_list = (int*)malloc(sizeof(int) * cntr);
1394 freeList.clear();
1395 UNSERIALIZE_ARRAY(free_list, cntr);
1396 for (int x = 0; x < cntr; x++)
1397 freeList.push_back(&tlb[free_list[x]]);
1398
1399 UNSERIALIZE_SCALAR(c0_tsb_ps0);
1400 UNSERIALIZE_SCALAR(c0_tsb_ps1);
1401 UNSERIALIZE_SCALAR(c0_config);
1402 UNSERIALIZE_SCALAR(cx_tsb_ps0);
1403 UNSERIALIZE_SCALAR(cx_tsb_ps1);
1404 UNSERIALIZE_SCALAR(cx_config);
1405 UNSERIALIZE_SCALAR(sfsr);
1406 UNSERIALIZE_SCALAR(tag_access);
1407
1408 lookupTable.clear();
1409 for (int x = 0; x < size; x++) {
1410 tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
1411 if (tlb[x].valid)
1412 lookupTable.insert(tlb[x].range, &tlb[x]);
1413
1414 }
1415 }
1416
void
DTB::serialize(std::ostream &os)
{
    // Checkpoint the shared TLB state, then the DTB-only sfar register.
    TLB::serialize(os);
    SERIALIZE_SCALAR(sfar);
}
1423
void
DTB::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the shared TLB state, then the DTB-only sfar register.
    TLB::unserialize(cp, section);
    UNSERIALIZE_SCALAR(sfar);
}
1430
1431 /* end namespace SparcISA */ }
1432
// Config-system factory: instantiate a SPARC instruction TLB from its
// parameter object.
SparcISA::ITB *
SparcITBParams::create()
{
    return new SparcISA::ITB(this);
}
1438
// Config-system factory: instantiate a SPARC data TLB from its
// parameter object.
SparcISA::DTB *
SparcDTBParams::create()
{
    return new SparcISA::DTB(this);
}