tlb: Don't separate the TLB classes into an instruction TLB and a data TLB
[gem5.git] / src / arch / sparc / tlb.cc
1 /*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 #include <cstring>
32
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/miscregfile.hh"
35 #include "arch/sparc/tlb.hh"
36 #include "base/bitfield.hh"
37 #include "base/trace.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/base.hh"
40 #include "mem/packet_access.hh"
41 #include "mem/request.hh"
42 #include "sim/system.hh"
43
44 /* @todo remove some of the magic constants. -- ali
45 * */
46 namespace SparcISA {
47
48 TLB::TLB(const Params *p)
49 : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
50 cacheValid(false)
51 {
52 // To make this work you'll have to change the hypervisor and OS
53 if (size > 64)
54 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
55
56 tlb = new TlbEntry[size];
57 std::memset(tlb, 0, sizeof(TlbEntry) * size);
58
59 for (int x = 0; x < size; x++)
60 freeList.push_back(&tlb[x]);
61
62 c0_tsb_ps0 = 0;
63 c0_tsb_ps1 = 0;
64 c0_config = 0;
65 cx_tsb_ps0 = 0;
66 cx_tsb_ps1 = 0;
67 cx_config = 0;
68 sfsr = 0;
69 tag_access = 0;
70 sfar = 0;
71 cacheEntry[0] = NULL;
72 cacheEntry[1] = NULL;
73 }
74
75 void
76 TLB::clearUsedBits()
77 {
78 MapIter i;
79 for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
80 TlbEntry *t = i->second;
81 if (!t->pte.locked()) {
82 t->used = false;
83 usedEntries--;
84 }
85 }
86 }
87
88
89 void
90 TLB::insert(Addr va, int partition_id, int context_id, bool real,
91 const PageTableEntry& PTE, int entry)
92 {
93 MapIter i;
94 TlbEntry *new_entry = NULL;
95 // TlbRange tr;
96 int x;
97
98 cacheValid = false;
99 va &= ~(PTE.size()-1);
100 /* tr.va = va;
101 tr.size = PTE.size() - 1;
102 tr.contextId = context_id;
103 tr.partitionId = partition_id;
104 tr.real = real;
105 */
106
107 DPRINTF(TLB,
108 "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
109 va, PTE.paddr(), partition_id, context_id, (int)real, entry);
110
111 // Demap any entry that conflicts
112 for (x = 0; x < size; x++) {
113 if (tlb[x].range.real == real &&
114 tlb[x].range.partitionId == partition_id &&
115 tlb[x].range.va < va + PTE.size() - 1 &&
116 tlb[x].range.va + tlb[x].range.size >= va &&
117 (real || tlb[x].range.contextId == context_id ))
118 {
119 if (tlb[x].valid) {
120 freeList.push_front(&tlb[x]);
121 DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
122
123 tlb[x].valid = false;
124 if (tlb[x].used) {
125 tlb[x].used = false;
126 usedEntries--;
127 }
128 lookupTable.erase(tlb[x].range);
129 }
130 }
131 }
132
133 /*
134 i = lookupTable.find(tr);
135 if (i != lookupTable.end()) {
136 i->second->valid = false;
137 if (i->second->used) {
138 i->second->used = false;
139 usedEntries--;
140 }
141 freeList.push_front(i->second);
142 DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
143 i->second);
144 lookupTable.erase(i);
145 }
146 */
147
148 if (entry != -1) {
149 assert(entry < size && entry >= 0);
150 new_entry = &tlb[entry];
151 } else {
152 if (!freeList.empty()) {
153 new_entry = freeList.front();
154 } else {
155 x = lastReplaced;
156 do {
157 ++x;
158 if (x == size)
159 x = 0;
160 if (x == lastReplaced)
161 goto insertAllLocked;
162 } while (tlb[x].pte.locked());
163 lastReplaced = x;
164 new_entry = &tlb[x];
165 }
166 /*
167 for (x = 0; x < size; x++) {
168 if (!tlb[x].valid || !tlb[x].used) {
169 new_entry = &tlb[x];
170 break;
171 }
172 }*/
173 }
174
175 insertAllLocked:
176 // Update the last ently if their all locked
177 if (!new_entry) {
178 new_entry = &tlb[size-1];
179 }
180
181 freeList.remove(new_entry);
182 if (new_entry->valid && new_entry->used)
183 usedEntries--;
184 if (new_entry->valid)
185 lookupTable.erase(new_entry->range);
186
187
188 assert(PTE.valid());
189 new_entry->range.va = va;
190 new_entry->range.size = PTE.size() - 1;
191 new_entry->range.partitionId = partition_id;
192 new_entry->range.contextId = context_id;
193 new_entry->range.real = real;
194 new_entry->pte = PTE;
195 new_entry->used = true;;
196 new_entry->valid = true;
197 usedEntries++;
198
199 i = lookupTable.insert(new_entry->range, new_entry);
200 assert(i != lookupTable.end());
201
202 // If all entries have their used bit set, clear it on them all,
203 // but the one we just inserted
204 if (usedEntries == size) {
205 clearUsedBits();
206 new_entry->used = true;
207 usedEntries++;
208 }
209 }
210
211
212 TlbEntry*
213 TLB::lookup(Addr va, int partition_id, bool real, int context_id,
214 bool update_used)
215 {
216 MapIter i;
217 TlbRange tr;
218 TlbEntry *t;
219
220 DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
221 va, partition_id, context_id, real);
222 // Assemble full address structure
223 tr.va = va;
224 tr.size = 1;
225 tr.contextId = context_id;
226 tr.partitionId = partition_id;
227 tr.real = real;
228
229 // Try to find the entry
230 i = lookupTable.find(tr);
231 if (i == lookupTable.end()) {
232 DPRINTF(TLB, "TLB: No valid entry found\n");
233 return NULL;
234 }
235
236 // Mark the entries used bit and clear other used bits in needed
237 t = i->second;
238 DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
239 t->pte.size());
240
241 // Update the used bits only if this is a real access (not a fake
242 // one from virttophys()
243 if (!t->used && update_used) {
244 t->used = true;
245 usedEntries++;
246 if (usedEntries == size) {
247 clearUsedBits();
248 t->used = true;
249 usedEntries++;
250 }
251 }
252
253 return t;
254 }
255
256 void
257 TLB::dumpAll()
258 {
259 MapIter i;
260 for (int x = 0; x < size; x++) {
261 if (tlb[x].valid) {
262 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
263 x, tlb[x].range.partitionId, tlb[x].range.contextId,
264 tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
265 tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
266 }
267 }
268 }
269
270 void
271 TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
272 {
273 TlbRange tr;
274 MapIter i;
275
276 DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
277 va, partition_id, context_id, real);
278
279 cacheValid = false;
280
281 // Assemble full address structure
282 tr.va = va;
283 tr.size = 1;
284 tr.contextId = context_id;
285 tr.partitionId = partition_id;
286 tr.real = real;
287
288 // Demap any entry that conflicts
289 i = lookupTable.find(tr);
290 if (i != lookupTable.end()) {
291 DPRINTF(IPR, "TLB: Demapped page\n");
292 i->second->valid = false;
293 if (i->second->used) {
294 i->second->used = false;
295 usedEntries--;
296 }
297 freeList.push_front(i->second);
298 lookupTable.erase(i);
299 }
300 }
301
302 void
303 TLB::demapContext(int partition_id, int context_id)
304 {
305 DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
306 partition_id, context_id);
307 cacheValid = false;
308 for (int x = 0; x < size; x++) {
309 if (tlb[x].range.contextId == context_id &&
310 tlb[x].range.partitionId == partition_id) {
311 if (tlb[x].valid == true) {
312 freeList.push_front(&tlb[x]);
313 }
314 tlb[x].valid = false;
315 if (tlb[x].used) {
316 tlb[x].used = false;
317 usedEntries--;
318 }
319 lookupTable.erase(tlb[x].range);
320 }
321 }
322 }
323
324 void
325 TLB::demapAll(int partition_id)
326 {
327 DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
328 cacheValid = false;
329 for (int x = 0; x < size; x++) {
330 if (tlb[x].valid && !tlb[x].pte.locked() &&
331 tlb[x].range.partitionId == partition_id) {
332 freeList.push_front(&tlb[x]);
333 tlb[x].valid = false;
334 if (tlb[x].used) {
335 tlb[x].used = false;
336 usedEntries--;
337 }
338 lookupTable.erase(tlb[x].range);
339 }
340 }
341 }
342
343 void
344 TLB::invalidateAll()
345 {
346 cacheValid = false;
347 lookupTable.clear();
348
349 for (int x = 0; x < size; x++) {
350 if (tlb[x].valid == true)
351 freeList.push_back(&tlb[x]);
352 tlb[x].valid = false;
353 tlb[x].used = false;
354 }
355 usedEntries = 0;
356 }
357
358 uint64_t
359 TLB::TteRead(int entry)
360 {
361 if (entry >= size)
362 panic("entry: %d\n", entry);
363
364 assert(entry < size);
365 if (tlb[entry].valid)
366 return tlb[entry].pte();
367 else
368 return (uint64_t)-1ll;
369 }
370
371 uint64_t
372 TLB::TagRead(int entry)
373 {
374 assert(entry < size);
375 uint64_t tag;
376 if (!tlb[entry].valid)
377 return (uint64_t)-1ll;
378
379 tag = tlb[entry].range.contextId;
380 tag |= tlb[entry].range.va;
381 tag |= (uint64_t)tlb[entry].range.partitionId << 61;
382 tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
383 tag |= (uint64_t)~tlb[entry].pte._size() << 56;
384 return tag;
385 }
386
387 bool
388 TLB::validVirtualAddress(Addr va, bool am)
389 {
390 if (am)
391 return true;
392 if (va >= StartVAddrHole && va <= EndVAddrHole)
393 return false;
394 return true;
395 }
396
397 void
398 TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
399 {
400 if (sfsr & 0x1)
401 sfsr = 0x3;
402 else
403 sfsr = 1;
404
405 if (write)
406 sfsr |= 1 << 2;
407 sfsr |= ct << 4;
408 if (se)
409 sfsr |= 1 << 6;
410 sfsr |= ft << 7;
411 sfsr |= asi << 16;
412 }
413
414 void
415 TLB::writeTagAccess(Addr va, int context)
416 {
417 DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
418 va, context, mbits(va, 63,13) | mbits(context,12,0));
419
420 tag_access = mbits(va, 63,13) | mbits(context,12,0);
421 }
422
423 void
424 TLB::writeSfsr(Addr a, bool write, ContextType ct,
425 bool se, FaultTypes ft, int asi)
426 {
427 DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
428 a, (int)write, ct, ft, asi);
429 TLB::writeSfsr(write, ct, se, ft, asi);
430 sfar = a;
431 }
432
/**
 * Translate an instruction fetch address.
 *
 * Checks the one-entry ITB translation cache first, then walks the usual
 * MMU checks (hpriv/red bypass, alignment, VA-hole, TLB lookup,
 * privilege) before producing the physical address.
 *
 * @param req the fetch request; its paddr is set on success
 * @param tc  thread context supplying MMU state
 * @return NoFault on success, otherwise the appropriate SPARC fault
 */
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // Packed copy of the MMU-relevant state (priv levels, LSU enables,
    // partition/context ids) maintained in MISCREG_TLB_DATA.
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);

    Addr vaddr = req->getVaddr();
    TlbEntry *e;

    // Instruction fetches always use the implicit ASI.
    assert(req->getAsi() == ASI_IMPLICIT);

    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
            vaddr, req->getSize());

    // Be fast if we can! (cache is valid only while MMU state is unchanged)
    if (cacheValid && cacheState == tlbdata) {
        if (cacheEntry[0]) {
            if (cacheEntry[0]->range.va < vaddr + sizeof(MachInst) &&
                cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
                req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
                return NoFault;
            }
        } else {
            // A cached NULL entry records that translation was bypassed
            // (hpriv/red state below): identity-map the address.
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }
    }

    // Unpack the MMU state bits we need.
    bool hpriv = bits(tlbdata,0,0);
    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_im = bits(tlbdata,4,4);       // instruction MMU enable

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);          // trap level
    int pri_context = bits(tlbdata,47,32);
    int context;
    ContextType ct;
    int asi;
    bool real = false;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
            priv, hpriv, red, lsu_im, part_id);

    // At elevated trap level fetches use the nucleus context.
    if (tl > 0) {
        asi = ASI_N;
        ct = Nucleus;
        context = 0;
    } else {
        asi = ASI_P;
        ct = Primary;
        context = pri_context;
    }

    // Hyperprivileged or RED state bypasses translation entirely;
    // remember that in the cache (NULL entry).
    if ( hpriv || red ) {
        cacheValid = true;
        cacheState = tlbdata;
        cacheEntry[0] = NULL;
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    // If the access is unaligned trap (instructions are 4-byte aligned)
    if (vaddr & 0x3) {
        writeSfsr(false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    // 32-bit address masking truncates the VA.
    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(false, ct, false, VaOutOfRange, asi);
        return new InstructionAccessException;
    }

    // With the IMMU disabled, perform a real (VA==RA) translation.
    if (!lsu_im) {
        e = lookup(vaddr, part_id, true);
        real = true;
        context = 0;
    } else {
        e = lookup(vaddr, part_id, false, context);
    }

    if (e == NULL || !e->valid) {
        // Miss: record the faulting tag, then raise the appropriate miss.
        writeTagAccess(vaddr, context);
        if (real)
            return new InstructionRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastInstructionAccessMMUMiss;
#else
            return new FastInstructionAccessMMUMiss(req->getVaddr());
#endif
    }

    // We're not privileged but accessing a privileged page.
    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(false, ct, false, PrivViolation, asi);
        return new InstructionAccessException;
    }

    // Cache translation data for the next translation.
    cacheValid = true;
    cacheState = tlbdata;
    cacheEntry[0] = e;

    req->setPaddr(e->pte.translate(vaddr));
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
}
545
546 Fault
547 TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
548 {
549 /*
550 * @todo this could really use some profiling and fixing to make
551 * it faster!
552 */
553 uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
554 Addr vaddr = req->getVaddr();
555 Addr size = req->getSize();
556 ASI asi;
557 asi = (ASI)req->getAsi();
558 bool implicit = false;
559 bool hpriv = bits(tlbdata,0,0);
560 bool unaligned = vaddr & (size - 1);
561
562 DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
563 vaddr, size, asi);
564
565 if (lookupTable.size() != 64 - freeList.size())
566 panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
567 freeList.size());
568 if (asi == ASI_IMPLICIT)
569 implicit = true;
570
571 // Only use the fast path here if there doesn't need to be an unaligned
572 // trap later
573 if (!unaligned) {
574 if (hpriv && implicit) {
575 req->setPaddr(vaddr & PAddrImplMask);
576 return NoFault;
577 }
578
579 // Be fast if we can!
580 if (cacheValid && cacheState == tlbdata) {
581
582
583
584 if (cacheEntry[0]) {
585 TlbEntry *ce = cacheEntry[0];
586 Addr ce_va = ce->range.va;
587 if (cacheAsi[0] == asi &&
588 ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
589 (!write || ce->pte.writable())) {
590 req->setPaddr(ce->pte.translate(vaddr));
591 if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
592 req->setFlags(Request::UNCACHEABLE);
593 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
594 return NoFault;
595 } // if matched
596 } // if cache entry valid
597 if (cacheEntry[1]) {
598 TlbEntry *ce = cacheEntry[1];
599 Addr ce_va = ce->range.va;
600 if (cacheAsi[1] == asi &&
601 ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
602 (!write || ce->pte.writable())) {
603 req->setPaddr(ce->pte.translate(vaddr));
604 if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
605 req->setFlags(Request::UNCACHEABLE);
606 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
607 return NoFault;
608 } // if matched
609 } // if cache entry valid
610 }
611 }
612
613 bool red = bits(tlbdata,1,1);
614 bool priv = bits(tlbdata,2,2);
615 bool addr_mask = bits(tlbdata,3,3);
616 bool lsu_dm = bits(tlbdata,5,5);
617
618 int part_id = bits(tlbdata,15,8);
619 int tl = bits(tlbdata,18,16);
620 int pri_context = bits(tlbdata,47,32);
621 int sec_context = bits(tlbdata,63,48);
622
623 bool real = false;
624 ContextType ct = Primary;
625 int context = 0;
626
627 TlbEntry *e;
628
629 DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
630 priv, hpriv, red, lsu_dm, part_id);
631
632 if (implicit) {
633 if (tl > 0) {
634 asi = ASI_N;
635 ct = Nucleus;
636 context = 0;
637 } else {
638 asi = ASI_P;
639 ct = Primary;
640 context = pri_context;
641 }
642 } else {
643 // We need to check for priv level/asi priv
644 if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
645 // It appears that context should be Nucleus in these cases?
646 writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
647 return new PrivilegedAction;
648 }
649
650 if (!hpriv && AsiIsHPriv(asi)) {
651 writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
652 return new DataAccessException;
653 }
654
655 if (AsiIsPrimary(asi)) {
656 context = pri_context;
657 ct = Primary;
658 } else if (AsiIsSecondary(asi)) {
659 context = sec_context;
660 ct = Secondary;
661 } else if (AsiIsNucleus(asi)) {
662 ct = Nucleus;
663 context = 0;
664 } else { // ????
665 ct = Primary;
666 context = pri_context;
667 }
668 }
669
670 if (!implicit && asi != ASI_P && asi != ASI_S) {
671 if (AsiIsLittle(asi))
672 panic("Little Endian ASIs not supported\n");
673
674 //XXX It's unclear from looking at the documentation how a no fault
675 //load differs from a regular one, other than what happens concerning
676 //nfo and e bits in the TTE
677 // if (AsiIsNoFault(asi))
678 // panic("No Fault ASIs not supported\n");
679
680 if (AsiIsPartialStore(asi))
681 panic("Partial Store ASIs not supported\n");
682
683 if (AsiIsCmt(asi))
684 panic("Cmt ASI registers not implmented\n");
685
686 if (AsiIsInterrupt(asi))
687 goto handleIntRegAccess;
688 if (AsiIsMmu(asi))
689 goto handleMmuRegAccess;
690 if (AsiIsScratchPad(asi))
691 goto handleScratchRegAccess;
692 if (AsiIsQueue(asi))
693 goto handleQueueRegAccess;
694 if (AsiIsSparcError(asi))
695 goto handleSparcErrorRegAccess;
696
697 if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
698 !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
699 panic("Accessing ASI %#X. Should we?\n", asi);
700 }
701
702 // If the asi is unaligned trap
703 if (unaligned) {
704 writeSfsr(vaddr, false, ct, false, OtherFault, asi);
705 return new MemAddressNotAligned;
706 }
707
708 if (addr_mask)
709 vaddr = vaddr & VAddrAMask;
710
711 if (!validVirtualAddress(vaddr, addr_mask)) {
712 writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
713 return new DataAccessException;
714 }
715
716 if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
717 real = true;
718 context = 0;
719 }
720
721 if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
722 req->setPaddr(vaddr & PAddrImplMask);
723 return NoFault;
724 }
725
726 e = lookup(vaddr, part_id, real, context);
727
728 if (e == NULL || !e->valid) {
729 writeTagAccess(vaddr, context);
730 DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
731 if (real)
732 return new DataRealTranslationMiss;
733 else
734 #if FULL_SYSTEM
735 return new FastDataAccessMMUMiss;
736 #else
737 return new FastDataAccessMMUMiss(req->getVaddr());
738 #endif
739
740 }
741
742 if (!priv && e->pte.priv()) {
743 writeTagAccess(vaddr, context);
744 writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
745 return new DataAccessException;
746 }
747
748 if (write && !e->pte.writable()) {
749 writeTagAccess(vaddr, context);
750 writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
751 return new FastDataAccessProtection;
752 }
753
754 if (e->pte.nofault() && !AsiIsNoFault(asi)) {
755 writeTagAccess(vaddr, context);
756 writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
757 return new DataAccessException;
758 }
759
760 if (e->pte.sideffect() && AsiIsNoFault(asi)) {
761 writeTagAccess(vaddr, context);
762 writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
763 return new DataAccessException;
764 }
765
766 if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
767 req->setFlags(Request::UNCACHEABLE);
768
769 // cache translation date for next translation
770 cacheState = tlbdata;
771 if (!cacheValid) {
772 cacheEntry[1] = NULL;
773 cacheEntry[0] = NULL;
774 }
775
776 if (cacheEntry[0] != e && cacheEntry[1] != e) {
777 cacheEntry[1] = cacheEntry[0];
778 cacheEntry[0] = e;
779 cacheAsi[1] = cacheAsi[0];
780 cacheAsi[0] = asi;
781 if (implicit)
782 cacheAsi[0] = (ASI)0;
783 }
784 cacheValid = true;
785 req->setPaddr(e->pte.translate(vaddr));
786 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
787 return NoFault;
788
789 /** Normal flow ends here. */
790 handleIntRegAccess:
791 if (!hpriv) {
792 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
793 if (priv)
794 return new DataAccessException;
795 else
796 return new PrivilegedAction;
797 }
798
799 if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
800 (asi == ASI_SWVR_UDB_INTR_R && write)) {
801 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
802 return new DataAccessException;
803 }
804
805 goto regAccessOk;
806
807
808 handleScratchRegAccess:
809 if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
810 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
811 return new DataAccessException;
812 }
813 goto regAccessOk;
814
815 handleQueueRegAccess:
816 if (!priv && !hpriv) {
817 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
818 return new PrivilegedAction;
819 }
820 if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
821 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
822 return new DataAccessException;
823 }
824 goto regAccessOk;
825
826 handleSparcErrorRegAccess:
827 if (!hpriv) {
828 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
829 if (priv)
830 return new DataAccessException;
831 else
832 return new PrivilegedAction;
833 }
834 goto regAccessOk;
835
836
837 regAccessOk:
838 handleMmuRegAccess:
839 DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
840 req->setMmapedIpr(true);
841 req->setPaddr(req->getVaddr());
842 return NoFault;
843 };
844
845 Fault
846 TLB::translateAtomic(RequestPtr req, ThreadContext *tc,
847 bool write, bool execute)
848 {
849 if (execute)
850 return translateInst(req, tc);
851 else
852 return translateData(req, tc, write);
853 }
854
855 void
856 TLB::translateTiming(RequestPtr req, ThreadContext *tc,
857 Translation *translation, bool write, bool execute)
858 {
859 assert(translation);
860 translation->finish(translateAtomic(req, tc, write, execute),
861 req, tc, write, execute);
862 }
863
864 #if FULL_SYSTEM
865
/**
 * Handle a memory-mapped read of an MMU/IPR register.
 *
 * The DTB instance services both D-side registers (its own members) and
 * I-side registers (through the ITB pointer), since both are reached via
 * data ASIs.
 *
 * @param tc  thread context owning the registers
 * @param pkt the read packet; its data is filled in here
 * @return latency of the access in ticks
 */
Tick
TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();
    uint64_t temp;

    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
            (uint32_t)pkt->req->getAsi(), pkt->getAddr());

    // I-side registers are accessed through the ITB.
    TLB *itb = tc->getITBPtr();

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
        break;
      case ASI_MMU:
        // Primary (0x8) / secondary (0x10) context registers.
        switch (va) {
          case 0x8:
            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
            break;
          case 0x10:
            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_QUEUE:
        // Queue head/tail registers, indexed by (va >> 4) - 0x3c.
        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c));
        break;
      // D-side TSB base/config registers (this TLB's members).
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(c0_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(c0_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(c0_config);
        break;
      // I-side TSB base/config registers (read from the ITB).
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->c0_config);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(cx_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(cx_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(cx_config);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->cx_config);
        break;
      case ASI_SPARC_ERROR_STATUS_REG:
        // Error status is not modeled; always reads zero.
        pkt->set((uint64_t)0);
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x0:
            // Tag target: VA bits in the low part, context in the high.
            temp = itb->tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(itb->sfsr);
            break;
          case 0x30:
            pkt->set(itb->tag_access);
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x0:
            // Tag target, same layout as the IMMU case above.
            temp = tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(sfsr);
            break;
          case 0x20:
            pkt->set(sfar);
            break;
          case 0x30:
            pkt->set(tag_access);
            break;
          case 0x80:
            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
            break;
          default:
                goto doMmuReadError;
        }
        break;
      // TSB pointer registers, computed from tag access + TSB config.
      case ASI_DMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
            tag_access,
            c0_tsb_ps0,
            c0_config,
            cx_tsb_ps0,
            cx_config));
        break;
      case ASI_DMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
                tag_access,
                c0_tsb_ps1,
                c0_config,
                cx_tsb_ps1,
                cx_config));
        break;
      case ASI_IMMU_TSB_PS0_PTR_REG:
          pkt->set(MakeTsbPtr(Ps0,
                itb->tag_access,
                itb->c0_tsb_ps0,
                itb->c0_config,
                itb->cx_tsb_ps0,
                itb->cx_config));
        break;
      case ASI_IMMU_TSB_PS1_PTR_REG:
          pkt->set(MakeTsbPtr(Ps1,
                itb->tag_access,
                itb->c0_tsb_ps1,
                itb->c0_config,
                itb->cx_tsb_ps1,
                itb->cx_config));
        break;
      case ASI_SWVR_INTR_RECEIVE:
        {
            // Read the pending interrupt vector.
            SparcISA::Interrupts * interrupts =
                dynamic_cast<SparcISA::Interrupts *>(
                        tc->getCpuPtr()->getInterruptController());
            pkt->set(interrupts->get_vec(IT_INT_VEC));
        }
        break;
      case ASI_SWVR_UDB_INTR_R:
        {
            // Reading this register pops (clears) the highest pending
            // interrupt and returns its number.
            SparcISA::Interrupts * interrupts =
                dynamic_cast<SparcISA::Interrupts *>(
                        tc->getCpuPtr()->getInterruptController());
            temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
            tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, temp);
            pkt->set(temp);
        }
        break;
      default:
doMmuReadError:
        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
            (uint32_t)asi, va);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->ticks(1);
}
1050
1051 Tick
1052 TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
1053 {
1054 uint64_t data = gtoh(pkt->get<uint64_t>());
1055 Addr va = pkt->getAddr();
1056 ASI asi = (ASI)pkt->req->getAsi();
1057
1058 Addr ta_insert;
1059 Addr va_insert;
1060 Addr ct_insert;
1061 int part_insert;
1062 int entry_insert = -1;
1063 bool real_insert;
1064 bool ignore;
1065 int part_id;
1066 int ctx_id;
1067 PageTableEntry pte;
1068
1069 DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1070 (uint32_t)asi, va, data);
1071
1072 TLB *itb = tc->getITBPtr();
1073
1074 switch (asi) {
1075 case ASI_LSU_CONTROL_REG:
1076 assert(va == 0);
1077 tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1078 break;
1079 case ASI_MMU:
1080 switch (va) {
1081 case 0x8:
1082 tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1083 break;
1084 case 0x10:
1085 tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1086 break;
1087 default:
1088 goto doMmuWriteError;
1089 }
1090 break;
1091 case ASI_QUEUE:
1092 assert(mbits(data,13,6) == data);
1093 tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
1094 (va >> 4) - 0x3c, data);
1095 break;
1096 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
1097 assert(va == 0);
1098 c0_tsb_ps0 = data;
1099 break;
1100 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
1101 assert(va == 0);
1102 c0_tsb_ps1 = data;
1103 break;
1104 case ASI_DMMU_CTXT_ZERO_CONFIG:
1105 assert(va == 0);
1106 c0_config = data;
1107 break;
1108 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
1109 assert(va == 0);
1110 itb->c0_tsb_ps0 = data;
1111 break;
1112 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
1113 assert(va == 0);
1114 itb->c0_tsb_ps1 = data;
1115 break;
1116 case ASI_IMMU_CTXT_ZERO_CONFIG:
1117 assert(va == 0);
1118 itb->c0_config = data;
1119 break;
1120 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
1121 assert(va == 0);
1122 cx_tsb_ps0 = data;
1123 break;
1124 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
1125 assert(va == 0);
1126 cx_tsb_ps1 = data;
1127 break;
1128 case ASI_DMMU_CTXT_NONZERO_CONFIG:
1129 assert(va == 0);
1130 cx_config = data;
1131 break;
1132 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
1133 assert(va == 0);
1134 itb->cx_tsb_ps0 = data;
1135 break;
1136 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
1137 assert(va == 0);
1138 itb->cx_tsb_ps1 = data;
1139 break;
1140 case ASI_IMMU_CTXT_NONZERO_CONFIG:
1141 assert(va == 0);
1142 itb->cx_config = data;
1143 break;
1144 case ASI_SPARC_ERROR_EN_REG:
1145 case ASI_SPARC_ERROR_STATUS_REG:
1146 inform("Ignoring write to SPARC ERROR regsiter\n");
1147 break;
1148 case ASI_HYP_SCRATCHPAD:
1149 case ASI_SCRATCHPAD:
1150 tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1151 break;
1152 case ASI_IMMU:
1153 switch (va) {
1154 case 0x18:
1155 itb->sfsr = data;
1156 break;
1157 case 0x30:
1158 sext<59>(bits(data, 59,0));
1159 itb->tag_access = data;
1160 break;
1161 default:
1162 goto doMmuWriteError;
1163 }
1164 break;
1165 case ASI_ITLB_DATA_ACCESS_REG:
1166 entry_insert = bits(va, 8,3);
1167 case ASI_ITLB_DATA_IN_REG:
1168 assert(entry_insert != -1 || mbits(va,10,9) == va);
1169 ta_insert = itb->tag_access;
1170 va_insert = mbits(ta_insert, 63,13);
1171 ct_insert = mbits(ta_insert, 12,0);
1172 part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1173 real_insert = bits(va, 9,9);
1174 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1175 PageTableEntry::sun4u);
1176 tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
1177 pte, entry_insert);
1178 break;
1179 case ASI_DTLB_DATA_ACCESS_REG:
1180 entry_insert = bits(va, 8,3);
1181 case ASI_DTLB_DATA_IN_REG:
1182 assert(entry_insert != -1 || mbits(va,10,9) == va);
1183 ta_insert = tag_access;
1184 va_insert = mbits(ta_insert, 63,13);
1185 ct_insert = mbits(ta_insert, 12,0);
1186 part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1187 real_insert = bits(va, 9,9);
1188 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1189 PageTableEntry::sun4u);
1190 insert(va_insert, part_insert, ct_insert, real_insert, pte,
1191 entry_insert);
1192 break;
1193 case ASI_IMMU_DEMAP:
1194 ignore = false;
1195 ctx_id = -1;
1196 part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1197 switch (bits(va,5,4)) {
1198 case 0:
1199 ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1200 break;
1201 case 1:
1202 ignore = true;
1203 break;
1204 case 3:
1205 ctx_id = 0;
1206 break;
1207 default:
1208 ignore = true;
1209 }
1210
1211 switch(bits(va,7,6)) {
1212 case 0: // demap page
1213 if (!ignore)
1214 tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
1215 bits(va,9,9), ctx_id);
1216 break;
1217 case 1: //demap context
1218 if (!ignore)
1219 tc->getITBPtr()->demapContext(part_id, ctx_id);
1220 break;
1221 case 2:
1222 tc->getITBPtr()->demapAll(part_id);
1223 break;
1224 default:
1225 panic("Invalid type for IMMU demap\n");
1226 }
1227 break;
1228 case ASI_DMMU:
1229 switch (va) {
1230 case 0x18:
1231 sfsr = data;
1232 break;
1233 case 0x30:
1234 sext<59>(bits(data, 59,0));
1235 tag_access = data;
1236 break;
1237 case 0x80:
1238 tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1239 break;
1240 default:
1241 goto doMmuWriteError;
1242 }
1243 break;
1244 case ASI_DMMU_DEMAP:
1245 ignore = false;
1246 ctx_id = -1;
1247 part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1248 switch (bits(va,5,4)) {
1249 case 0:
1250 ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1251 break;
1252 case 1:
1253 ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1254 break;
1255 case 3:
1256 ctx_id = 0;
1257 break;
1258 default:
1259 ignore = true;
1260 }
1261
1262 switch(bits(va,7,6)) {
1263 case 0: // demap page
1264 if (!ignore)
1265 demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1266 break;
1267 case 1: //demap context
1268 if (!ignore)
1269 demapContext(part_id, ctx_id);
1270 break;
1271 case 2:
1272 demapAll(part_id);
1273 break;
1274 default:
1275 panic("Invalid type for IMMU demap\n");
1276 }
1277 break;
1278 case ASI_SWVR_INTR_RECEIVE:
1279 {
1280 int msb;
1281 // clear all the interrupts that aren't set in the write
1282 SparcISA::Interrupts * interrupts =
1283 dynamic_cast<SparcISA::Interrupts *>(
1284 tc->getCpuPtr()->getInterruptController());
1285 while (interrupts->get_vec(IT_INT_VEC) & data) {
1286 msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
1287 tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, msb);
1288 }
1289 }
1290 break;
1291 case ASI_SWVR_UDB_INTR_W:
1292 tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1293 postInterrupt(bits(data, 5, 0), 0);
1294 break;
1295 default:
1296 doMmuWriteError:
1297 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1298 (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
1299 }
1300 pkt->makeAtomicResponse();
1301 return tc->getCpuPtr()->ticks(1);
1302 }
1303
1304 #endif
1305
1306 void
1307 TLB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
1308 {
1309 uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1310 TLB * itb = tc->getITBPtr();
1311 ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1312 c0_tsb_ps0,
1313 c0_config,
1314 cx_tsb_ps0,
1315 cx_config);
1316 ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1317 c0_tsb_ps1,
1318 c0_config,
1319 cx_tsb_ps1,
1320 cx_config);
1321 ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1322 itb->c0_tsb_ps0,
1323 itb->c0_config,
1324 itb->cx_tsb_ps0,
1325 itb->cx_config);
1326 ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1327 itb->c0_tsb_ps1,
1328 itb->c0_config,
1329 itb->cx_tsb_ps1,
1330 itb->cx_config);
1331 }
1332
1333 uint64_t
1334 TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1335 uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1336 {
1337 uint64_t tsb;
1338 uint64_t config;
1339
1340 if (bits(tag_access, 12,0) == 0) {
1341 tsb = c0_tsb;
1342 config = c0_config;
1343 } else {
1344 tsb = cX_tsb;
1345 config = cX_config;
1346 }
1347
1348 uint64_t ptr = mbits(tsb,63,13);
1349 bool split = bits(tsb,12,12);
1350 int tsb_size = bits(tsb,3,0);
1351 int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1352
1353 if (ps == Ps1 && split)
1354 ptr |= ULL(1) << (13 + tsb_size);
1355 ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1356
1357 return ptr;
1358 }
1359
1360 void
1361 TLB::serialize(std::ostream &os)
1362 {
1363 SERIALIZE_SCALAR(size);
1364 SERIALIZE_SCALAR(usedEntries);
1365 SERIALIZE_SCALAR(lastReplaced);
1366
1367 // convert the pointer based free list into an index based one
1368 int *free_list = (int*)malloc(sizeof(int) * size);
1369 int cntr = 0;
1370 std::list<TlbEntry*>::iterator i;
1371 i = freeList.begin();
1372 while (i != freeList.end()) {
1373 free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
1374 i++;
1375 }
1376 SERIALIZE_SCALAR(cntr);
1377 SERIALIZE_ARRAY(free_list, cntr);
1378
1379 SERIALIZE_SCALAR(c0_tsb_ps0);
1380 SERIALIZE_SCALAR(c0_tsb_ps1);
1381 SERIALIZE_SCALAR(c0_config);
1382 SERIALIZE_SCALAR(cx_tsb_ps0);
1383 SERIALIZE_SCALAR(cx_tsb_ps1);
1384 SERIALIZE_SCALAR(cx_config);
1385 SERIALIZE_SCALAR(sfsr);
1386 SERIALIZE_SCALAR(tag_access);
1387
1388 for (int x = 0; x < size; x++) {
1389 nameOut(os, csprintf("%s.PTE%d", name(), x));
1390 tlb[x].serialize(os);
1391 }
1392 SERIALIZE_SCALAR(sfar);
1393 }
1394
1395 void
1396 TLB::unserialize(Checkpoint *cp, const std::string &section)
1397 {
1398 int oldSize;
1399
1400 paramIn(cp, section, "size", oldSize);
1401 if (oldSize != size)
1402 panic("Don't support unserializing different sized TLBs\n");
1403 UNSERIALIZE_SCALAR(usedEntries);
1404 UNSERIALIZE_SCALAR(lastReplaced);
1405
1406 int cntr;
1407 UNSERIALIZE_SCALAR(cntr);
1408
1409 int *free_list = (int*)malloc(sizeof(int) * cntr);
1410 freeList.clear();
1411 UNSERIALIZE_ARRAY(free_list, cntr);
1412 for (int x = 0; x < cntr; x++)
1413 freeList.push_back(&tlb[free_list[x]]);
1414
1415 UNSERIALIZE_SCALAR(c0_tsb_ps0);
1416 UNSERIALIZE_SCALAR(c0_tsb_ps1);
1417 UNSERIALIZE_SCALAR(c0_config);
1418 UNSERIALIZE_SCALAR(cx_tsb_ps0);
1419 UNSERIALIZE_SCALAR(cx_tsb_ps1);
1420 UNSERIALIZE_SCALAR(cx_config);
1421 UNSERIALIZE_SCALAR(sfsr);
1422 UNSERIALIZE_SCALAR(tag_access);
1423
1424 lookupTable.clear();
1425 for (int x = 0; x < size; x++) {
1426 tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
1427 if (tlb[x].valid)
1428 lookupTable.insert(tlb[x].range, &tlb[x]);
1429
1430 }
1431 UNSERIALIZE_SCALAR(sfar);
1432 }
1433
1434 /* end namespace SparcISA */ }
1435
1436 SparcISA::TLB *
1437 SparcTLBParams::create()
1438 {
1439 return new SparcISA::TLB(this);
1440 }