2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/faults.hh"
35 #include "arch/sparc/registers.hh"
36 #include "arch/sparc/tlb.hh"
37 #include "base/bitfield.hh"
38 #include "base/trace.hh"
39 #include "cpu/base.hh"
40 #include "cpu/thread_context.hh"
41 #include "debug/IPR.hh"
42 #include "debug/TLB.hh"
43 #include "mem/packet_access.hh"
44 #include "mem/request.hh"
45 #include "sim/system.hh"
47 /* @todo remove some of the magic constants. -- ali
51 TLB::TLB(const Params
*p
)
52 : BaseTLB(p
), size(p
->size
), usedEntries(0), lastReplaced(0),
55 // To make this work you'll have to change the hypervisor and OS
57 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
59 tlb
= new TlbEntry
[size
];
60 std::memset(tlb
, 0, sizeof(TlbEntry
) * size
);
62 for (int x
= 0; x
< size
; x
++)
63 freeList
.push_back(&tlb
[x
]);
82 for (i
= lookupTable
.begin(); i
!= lookupTable
.end(); i
++) {
83 TlbEntry
*t
= i
->second
;
84 if (!t
->pte
.locked()) {
93 TLB::insert(Addr va
, int partition_id
, int context_id
, bool real
,
94 const PageTableEntry
& PTE
, int entry
)
97 TlbEntry
*new_entry
= NULL
;
102 va
&= ~(PTE
.size()-1);
104 tr.size = PTE.size() - 1;
105 tr.contextId = context_id;
106 tr.partitionId = partition_id;
111 "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
112 va
, PTE
.paddr(), partition_id
, context_id
, (int)real
, entry
);
114 // Demap any entry that conflicts
115 for (x
= 0; x
< size
; x
++) {
116 if (tlb
[x
].range
.real
== real
&&
117 tlb
[x
].range
.partitionId
== partition_id
&&
118 tlb
[x
].range
.va
< va
+ PTE
.size() - 1 &&
119 tlb
[x
].range
.va
+ tlb
[x
].range
.size
>= va
&&
120 (real
|| tlb
[x
].range
.contextId
== context_id
))
123 freeList
.push_front(&tlb
[x
]);
124 DPRINTF(TLB
, "TLB: Conflicting entry %#X , deleting it\n", x
);
126 tlb
[x
].valid
= false;
131 lookupTable
.erase(tlb
[x
].range
);
137 assert(entry
< size
&& entry
>= 0);
138 new_entry
= &tlb
[entry
];
140 if (!freeList
.empty()) {
141 new_entry
= freeList
.front();
148 if (x
== lastReplaced
)
149 goto insertAllLocked
;
150 } while (tlb
[x
].pte
.locked());
157 // Update the last entry if they're all locked
159 new_entry
= &tlb
[size
-1];
162 freeList
.remove(new_entry
);
163 if (new_entry
->valid
&& new_entry
->used
)
165 if (new_entry
->valid
)
166 lookupTable
.erase(new_entry
->range
);
170 new_entry
->range
.va
= va
;
171 new_entry
->range
.size
= PTE
.size() - 1;
172 new_entry
->range
.partitionId
= partition_id
;
173 new_entry
->range
.contextId
= context_id
;
174 new_entry
->range
.real
= real
;
175 new_entry
->pte
= PTE
;
176 new_entry
->used
= true;;
177 new_entry
->valid
= true;
180 i
= lookupTable
.insert(new_entry
->range
, new_entry
);
181 assert(i
!= lookupTable
.end());
183 // If all entries have their used bit set, clear it on them all,
184 // but the one we just inserted
185 if (usedEntries
== size
) {
187 new_entry
->used
= true;
194 TLB::lookup(Addr va
, int partition_id
, bool real
, int context_id
,
201 DPRINTF(TLB
, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
202 va
, partition_id
, context_id
, real
);
203 // Assemble full address structure
206 tr
.contextId
= context_id
;
207 tr
.partitionId
= partition_id
;
210 // Try to find the entry
211 i
= lookupTable
.find(tr
);
212 if (i
== lookupTable
.end()) {
213 DPRINTF(TLB
, "TLB: No valid entry found\n");
217 // Mark the entry's used bit and clear other used bits if needed
219 DPRINTF(TLB
, "TLB: Valid entry found pa: %#x size: %#x\n", t
->pte
.paddr(),
222 // Update the used bits only if this is a real access (not a fake
223 // one from virttophys())
224 if (!t
->used
&& update_used
) {
227 if (usedEntries
== size
) {
241 for (int x
= 0; x
< size
; x
++) {
243 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
244 x
, tlb
[x
].range
.partitionId
, tlb
[x
].range
.contextId
,
245 tlb
[x
].range
.real
? 'R' : ' ', tlb
[x
].range
.size
,
246 tlb
[x
].range
.va
, tlb
[x
].pte
.paddr(), tlb
[x
].pte());
252 TLB::demapPage(Addr va
, int partition_id
, bool real
, int context_id
)
257 DPRINTF(IPR
, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
258 va
, partition_id
, context_id
, real
);
262 // Assemble full address structure
265 tr
.contextId
= context_id
;
266 tr
.partitionId
= partition_id
;
269 // Demap any entry that conflicts
270 i
= lookupTable
.find(tr
);
271 if (i
!= lookupTable
.end()) {
272 DPRINTF(IPR
, "TLB: Demapped page\n");
273 i
->second
->valid
= false;
274 if (i
->second
->used
) {
275 i
->second
->used
= false;
278 freeList
.push_front(i
->second
);
279 lookupTable
.erase(i
);
284 TLB::demapContext(int partition_id
, int context_id
)
286 DPRINTF(IPR
, "TLB: Demapping Context pid=%#d cid=%d\n",
287 partition_id
, context_id
);
289 for (int x
= 0; x
< size
; x
++) {
290 if (tlb
[x
].range
.contextId
== context_id
&&
291 tlb
[x
].range
.partitionId
== partition_id
) {
292 if (tlb
[x
].valid
== true) {
293 freeList
.push_front(&tlb
[x
]);
295 tlb
[x
].valid
= false;
300 lookupTable
.erase(tlb
[x
].range
);
306 TLB::demapAll(int partition_id
)
308 DPRINTF(TLB
, "TLB: Demapping All pid=%#d\n", partition_id
);
310 for (int x
= 0; x
< size
; x
++) {
311 if (tlb
[x
].valid
&& !tlb
[x
].pte
.locked() &&
312 tlb
[x
].range
.partitionId
== partition_id
) {
313 freeList
.push_front(&tlb
[x
]);
314 tlb
[x
].valid
= false;
319 lookupTable
.erase(tlb
[x
].range
);
330 for (int x
= 0; x
< size
; x
++) {
331 if (tlb
[x
].valid
== true)
332 freeList
.push_back(&tlb
[x
]);
333 tlb
[x
].valid
= false;
340 TLB::TteRead(int entry
)
343 panic("entry: %d\n", entry
);
345 assert(entry
< size
);
346 if (tlb
[entry
].valid
)
347 return tlb
[entry
].pte();
349 return (uint64_t)-1ll;
353 TLB::TagRead(int entry
)
355 assert(entry
< size
);
357 if (!tlb
[entry
].valid
)
358 return (uint64_t)-1ll;
360 tag
= tlb
[entry
].range
.contextId
;
361 tag
|= tlb
[entry
].range
.va
;
362 tag
|= (uint64_t)tlb
[entry
].range
.partitionId
<< 61;
363 tag
|= tlb
[entry
].range
.real
? ULL(1) << 60 : 0;
364 tag
|= (uint64_t)~tlb
[entry
].pte
._size() << 56;
369 TLB::validVirtualAddress(Addr va
, bool am
)
373 if (va
>= StartVAddrHole
&& va
<= EndVAddrHole
)
379 TLB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
396 TLB::writeTagAccess(Addr va
, int context
)
398 DPRINTF(TLB
, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
399 va
, context
, mbits(va
, 63,13) | mbits(context
,12,0));
401 tag_access
= mbits(va
, 63,13) | mbits(context
,12,0);
405 TLB::writeSfsr(Addr a
, bool write
, ContextType ct
,
406 bool se
, FaultTypes ft
, int asi
)
408 DPRINTF(TLB
, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
409 a
, (int)write
, ct
, ft
, asi
);
410 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
415 TLB::translateInst(RequestPtr req
, ThreadContext
*tc
)
417 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
419 Addr vaddr
= req
->getVaddr();
422 assert(req
->getAsi() == ASI_IMPLICIT
);
424 DPRINTF(TLB
, "TLB: ITB Request to translate va=%#x size=%d\n",
425 vaddr
, req
->getSize());
427 // Be fast if we can!
428 if (cacheValid
&& cacheState
== tlbdata
) {
430 if (cacheEntry
[0]->range
.va
< vaddr
+ sizeof(MachInst
) &&
431 cacheEntry
[0]->range
.va
+ cacheEntry
[0]->range
.size
>= vaddr
) {
432 req
->setPaddr(cacheEntry
[0]->pte
.translate(vaddr
));
436 req
->setPaddr(vaddr
& PAddrImplMask
);
441 bool hpriv
= bits(tlbdata
,0,0);
442 bool red
= bits(tlbdata
,1,1);
443 bool priv
= bits(tlbdata
,2,2);
444 bool addr_mask
= bits(tlbdata
,3,3);
445 bool lsu_im
= bits(tlbdata
,4,4);
447 int part_id
= bits(tlbdata
,15,8);
448 int tl
= bits(tlbdata
,18,16);
449 int pri_context
= bits(tlbdata
,47,32);
455 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
456 priv
, hpriv
, red
, lsu_im
, part_id
);
465 context
= pri_context
;
468 if ( hpriv
|| red
) {
470 cacheState
= tlbdata
;
471 cacheEntry
[0] = NULL
;
472 req
->setPaddr(vaddr
& PAddrImplMask
);
476 // If the access is unaligned, trap
478 writeSfsr(false, ct
, false, OtherFault
, asi
);
479 return new MemAddressNotAligned
;
483 vaddr
= vaddr
& VAddrAMask
;
485 if (!validVirtualAddress(vaddr
, addr_mask
)) {
486 writeSfsr(false, ct
, false, VaOutOfRange
, asi
);
487 return new InstructionAccessException
;
491 e
= lookup(vaddr
, part_id
, true);
495 e
= lookup(vaddr
, part_id
, false, context
);
498 if (e
== NULL
|| !e
->valid
) {
499 writeTagAccess(vaddr
, context
);
501 return new InstructionRealTranslationMiss
;
504 return new FastInstructionAccessMMUMiss
;
506 return new FastInstructionAccessMMUMiss(req
->getVaddr());
510 // we're not privileged but are accessing a privileged page
511 if (!priv
&& e
->pte
.priv()) {
512 writeTagAccess(vaddr
, context
);
513 writeSfsr(false, ct
, false, PrivViolation
, asi
);
514 return new InstructionAccessException
;
517 // cache translation data for the next translation
519 cacheState
= tlbdata
;
522 req
->setPaddr(e
->pte
.translate(vaddr
));
523 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
528 TLB::translateData(RequestPtr req
, ThreadContext
*tc
, bool write
)
531 * @todo this could really use some profiling and fixing to make
534 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
535 Addr vaddr
= req
->getVaddr();
536 Addr size
= req
->getSize();
538 asi
= (ASI
)req
->getAsi();
539 bool implicit
= false;
540 bool hpriv
= bits(tlbdata
,0,0);
541 bool unaligned
= vaddr
& (size
- 1);
543 DPRINTF(TLB
, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
546 if (lookupTable
.size() != 64 - freeList
.size())
547 panic("Lookup table size: %d tlb size: %d\n", lookupTable
.size(),
549 if (asi
== ASI_IMPLICIT
)
552 // Only use the fast path here if there doesn't need to be an unaligned
555 if (hpriv
&& implicit
) {
556 req
->setPaddr(vaddr
& PAddrImplMask
);
560 // Be fast if we can!
561 if (cacheValid
&& cacheState
== tlbdata
) {
566 TlbEntry
*ce
= cacheEntry
[0];
567 Addr ce_va
= ce
->range
.va
;
568 if (cacheAsi
[0] == asi
&&
569 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
570 (!write
|| ce
->pte
.writable())) {
571 req
->setPaddr(ce
->pte
.translate(vaddr
));
572 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
573 req
->setFlags(Request::UNCACHEABLE
);
574 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
577 } // if cache entry valid
579 TlbEntry
*ce
= cacheEntry
[1];
580 Addr ce_va
= ce
->range
.va
;
581 if (cacheAsi
[1] == asi
&&
582 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
583 (!write
|| ce
->pte
.writable())) {
584 req
->setPaddr(ce
->pte
.translate(vaddr
));
585 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
586 req
->setFlags(Request::UNCACHEABLE
);
587 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
590 } // if cache entry valid
594 bool red
= bits(tlbdata
,1,1);
595 bool priv
= bits(tlbdata
,2,2);
596 bool addr_mask
= bits(tlbdata
,3,3);
597 bool lsu_dm
= bits(tlbdata
,5,5);
599 int part_id
= bits(tlbdata
,15,8);
600 int tl
= bits(tlbdata
,18,16);
601 int pri_context
= bits(tlbdata
,47,32);
602 int sec_context
= bits(tlbdata
,63,48);
605 ContextType ct
= Primary
;
610 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
611 priv
, hpriv
, red
, lsu_dm
, part_id
);
621 context
= pri_context
;
624 // We need to check for priv level/asi priv
625 if (!priv
&& !hpriv
&& !asiIsUnPriv(asi
)) {
626 // It appears that context should be Nucleus in these cases?
627 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
628 return new PrivilegedAction
;
631 if (!hpriv
&& asiIsHPriv(asi
)) {
632 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
633 return new DataAccessException
;
636 if (asiIsPrimary(asi
)) {
637 context
= pri_context
;
639 } else if (asiIsSecondary(asi
)) {
640 context
= sec_context
;
642 } else if (asiIsNucleus(asi
)) {
647 context
= pri_context
;
651 if (!implicit
&& asi
!= ASI_P
&& asi
!= ASI_S
) {
652 if (asiIsLittle(asi
))
653 panic("Little Endian ASIs not supported\n");
655 //XXX It's unclear from looking at the documentation how a no fault
656 // load differs from a regular one, other than what happens concerning
657 // nfo and e bits in the TTE
658 // if (asiIsNoFault(asi))
659 // panic("No Fault ASIs not supported\n");
661 if (asiIsPartialStore(asi
))
662 panic("Partial Store ASIs not supported\n");
665 panic("Cmt ASI registers not implmented\n");
667 if (asiIsInterrupt(asi
))
668 goto handleIntRegAccess
;
670 goto handleMmuRegAccess
;
671 if (asiIsScratchPad(asi
))
672 goto handleScratchRegAccess
;
674 goto handleQueueRegAccess
;
675 if (asiIsSparcError(asi
))
676 goto handleSparcErrorRegAccess
;
678 if (!asiIsReal(asi
) && !asiIsNucleus(asi
) && !asiIsAsIfUser(asi
) &&
679 !asiIsTwin(asi
) && !asiIsBlock(asi
) && !asiIsNoFault(asi
))
680 panic("Accessing ASI %#X. Should we?\n", asi
);
683 // If the access is unaligned, trap
685 writeSfsr(vaddr
, false, ct
, false, OtherFault
, asi
);
686 return new MemAddressNotAligned
;
690 vaddr
= vaddr
& VAddrAMask
;
692 if (!validVirtualAddress(vaddr
, addr_mask
)) {
693 writeSfsr(vaddr
, false, ct
, true, VaOutOfRange
, asi
);
694 return new DataAccessException
;
697 if ((!lsu_dm
&& !hpriv
&& !red
) || asiIsReal(asi
)) {
702 if (hpriv
&& (implicit
|| (!asiIsAsIfUser(asi
) && !asiIsReal(asi
)))) {
703 req
->setPaddr(vaddr
& PAddrImplMask
);
707 e
= lookup(vaddr
, part_id
, real
, context
);
709 if (e
== NULL
|| !e
->valid
) {
710 writeTagAccess(vaddr
, context
);
711 DPRINTF(TLB
, "TLB: DTB Failed to find matching TLB entry\n");
713 return new DataRealTranslationMiss
;
716 return new FastDataAccessMMUMiss
;
718 return new FastDataAccessMMUMiss(req
->getVaddr());
723 if (!priv
&& e
->pte
.priv()) {
724 writeTagAccess(vaddr
, context
);
725 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), PrivViolation
, asi
);
726 return new DataAccessException
;
729 if (write
&& !e
->pte
.writable()) {
730 writeTagAccess(vaddr
, context
);
731 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), OtherFault
, asi
);
732 return new FastDataAccessProtection
;
735 if (e
->pte
.nofault() && !asiIsNoFault(asi
)) {
736 writeTagAccess(vaddr
, context
);
737 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), LoadFromNfo
, asi
);
738 return new DataAccessException
;
741 if (e
->pte
.sideffect() && asiIsNoFault(asi
)) {
742 writeTagAccess(vaddr
, context
);
743 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), SideEffect
, asi
);
744 return new DataAccessException
;
747 if (e
->pte
.sideffect() || (e
->pte
.paddr() >> 39) & 1)
748 req
->setFlags(Request::UNCACHEABLE
);
750 // cache translation data for the next translation
751 cacheState
= tlbdata
;
753 cacheEntry
[1] = NULL
;
754 cacheEntry
[0] = NULL
;
757 if (cacheEntry
[0] != e
&& cacheEntry
[1] != e
) {
758 cacheEntry
[1] = cacheEntry
[0];
760 cacheAsi
[1] = cacheAsi
[0];
763 cacheAsi
[0] = (ASI
)0;
766 req
->setPaddr(e
->pte
.translate(vaddr
));
767 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
770 /** Normal flow ends here. */
773 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
775 return new DataAccessException
;
777 return new PrivilegedAction
;
780 if ((asi
== ASI_SWVR_UDB_INTR_W
&& !write
) ||
781 (asi
== ASI_SWVR_UDB_INTR_R
&& write
)) {
782 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
783 return new DataAccessException
;
789 handleScratchRegAccess
:
790 if (vaddr
> 0x38 || (vaddr
>= 0x20 && vaddr
< 0x30 && !hpriv
)) {
791 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
792 return new DataAccessException
;
796 handleQueueRegAccess
:
797 if (!priv
&& !hpriv
) {
798 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
799 return new PrivilegedAction
;
801 if ((!hpriv
&& vaddr
& 0xF) || vaddr
> 0x3f8 || vaddr
< 0x3c0) {
802 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
803 return new DataAccessException
;
807 handleSparcErrorRegAccess
:
809 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
811 return new DataAccessException
;
813 return new PrivilegedAction
;
820 DPRINTF(TLB
, "TLB: DTB Translating MM IPR access\n");
821 req
->setFlags(Request::MMAPPED_IPR
);
822 req
->setPaddr(req
->getVaddr());
827 TLB::translateAtomic(RequestPtr req
, ThreadContext
*tc
, Mode mode
)
830 return translateInst(req
, tc
);
832 return translateData(req
, tc
, mode
== Write
);
836 TLB::translateTiming(RequestPtr req
, ThreadContext
*tc
,
837 Translation
*translation
, Mode mode
)
840 translation
->finish(translateAtomic(req
, tc
, mode
), req
, tc
, mode
);
846 TLB::doMmuRegRead(ThreadContext
*tc
, Packet
*pkt
)
848 Addr va
= pkt
->getAddr();
849 ASI asi
= (ASI
)pkt
->req
->getAsi();
852 DPRINTF(IPR
, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
853 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr());
855 TLB
*itb
= tc
->getITBPtr();
858 case ASI_LSU_CONTROL_REG
:
860 pkt
->set(tc
->readMiscReg(MISCREG_MMU_LSU_CTRL
));
865 pkt
->set(tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
));
868 pkt
->set(tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
));
875 pkt
->set(tc
->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
878 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
880 pkt
->set(c0_tsb_ps0
);
882 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
884 pkt
->set(c0_tsb_ps1
);
886 case ASI_DMMU_CTXT_ZERO_CONFIG
:
890 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
892 pkt
->set(itb
->c0_tsb_ps0
);
894 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
896 pkt
->set(itb
->c0_tsb_ps1
);
898 case ASI_IMMU_CTXT_ZERO_CONFIG
:
900 pkt
->set(itb
->c0_config
);
902 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
904 pkt
->set(cx_tsb_ps0
);
906 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
908 pkt
->set(cx_tsb_ps1
);
910 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
914 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
916 pkt
->set(itb
->cx_tsb_ps0
);
918 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
920 pkt
->set(itb
->cx_tsb_ps1
);
922 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
924 pkt
->set(itb
->cx_config
);
926 case ASI_SPARC_ERROR_STATUS_REG
:
927 pkt
->set((uint64_t)0);
929 case ASI_HYP_SCRATCHPAD
:
931 pkt
->set(tc
->readMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3)));
936 temp
= itb
->tag_access
;
937 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
943 pkt
->set(itb
->tag_access
);
953 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
962 pkt
->set(tag_access
);
965 pkt
->set(tc
->readMiscReg(MISCREG_MMU_PART_ID
));
971 case ASI_DMMU_TSB_PS0_PTR_REG
:
972 pkt
->set(MakeTsbPtr(Ps0
,
979 case ASI_DMMU_TSB_PS1_PTR_REG
:
980 pkt
->set(MakeTsbPtr(Ps1
,
987 case ASI_IMMU_TSB_PS0_PTR_REG
:
988 pkt
->set(MakeTsbPtr(Ps0
,
995 case ASI_IMMU_TSB_PS1_PTR_REG
:
996 pkt
->set(MakeTsbPtr(Ps1
,
1003 case ASI_SWVR_INTR_RECEIVE
:
1005 SparcISA::Interrupts
* interrupts
=
1006 dynamic_cast<SparcISA::Interrupts
*>(
1007 tc
->getCpuPtr()->getInterruptController());
1008 pkt
->set(interrupts
->get_vec(IT_INT_VEC
));
1011 case ASI_SWVR_UDB_INTR_R
:
1013 SparcISA::Interrupts
* interrupts
=
1014 dynamic_cast<SparcISA::Interrupts
*>(
1015 tc
->getCpuPtr()->getInterruptController());
1016 temp
= findMsbSet(interrupts
->get_vec(IT_INT_VEC
));
1017 tc
->getCpuPtr()->clearInterrupt(IT_INT_VEC
, temp
);
1023 panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1026 pkt
->makeAtomicResponse();
1027 return tc
->getCpuPtr()->ticks(1);
1031 TLB::doMmuRegWrite(ThreadContext
*tc
, Packet
*pkt
)
1033 uint64_t data
= pkt
->get
<uint64_t>();
1034 Addr va
= pkt
->getAddr();
1035 ASI asi
= (ASI
)pkt
->req
->getAsi();
1041 int entry_insert
= -1;
1048 DPRINTF(IPR
, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1049 (uint32_t)asi
, va
, data
);
1051 TLB
*itb
= tc
->getITBPtr();
1054 case ASI_LSU_CONTROL_REG
:
1056 tc
->setMiscReg(MISCREG_MMU_LSU_CTRL
, data
);
1061 tc
->setMiscReg(MISCREG_MMU_P_CONTEXT
, data
);
1064 tc
->setMiscReg(MISCREG_MMU_S_CONTEXT
, data
);
1067 goto doMmuWriteError
;
1071 assert(mbits(data
,13,6) == data
);
1072 tc
->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
1073 (va
>> 4) - 0x3c, data
);
1075 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
1079 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
1083 case ASI_DMMU_CTXT_ZERO_CONFIG
:
1087 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
1089 itb
->c0_tsb_ps0
= data
;
1091 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
1093 itb
->c0_tsb_ps1
= data
;
1095 case ASI_IMMU_CTXT_ZERO_CONFIG
:
1097 itb
->c0_config
= data
;
1099 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1103 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1107 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
1111 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1113 itb
->cx_tsb_ps0
= data
;
1115 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1117 itb
->cx_tsb_ps1
= data
;
1119 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
1121 itb
->cx_config
= data
;
1123 case ASI_SPARC_ERROR_EN_REG
:
1124 case ASI_SPARC_ERROR_STATUS_REG
:
1125 inform("Ignoring write to SPARC ERROR regsiter\n");
1127 case ASI_HYP_SCRATCHPAD
:
1128 case ASI_SCRATCHPAD
:
1129 tc
->setMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3), data
);
1137 sext
<59>(bits(data
, 59,0));
1138 itb
->tag_access
= data
;
1141 goto doMmuWriteError
;
1144 case ASI_ITLB_DATA_ACCESS_REG
:
1145 entry_insert
= bits(va
, 8,3);
1146 case ASI_ITLB_DATA_IN_REG
:
1147 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1148 ta_insert
= itb
->tag_access
;
1149 va_insert
= mbits(ta_insert
, 63,13);
1150 ct_insert
= mbits(ta_insert
, 12,0);
1151 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1152 real_insert
= bits(va
, 9,9);
1153 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1154 PageTableEntry::sun4u
);
1155 tc
->getITBPtr()->insert(va_insert
, part_insert
, ct_insert
, real_insert
,
1158 case ASI_DTLB_DATA_ACCESS_REG
:
1159 entry_insert
= bits(va
, 8,3);
1160 case ASI_DTLB_DATA_IN_REG
:
1161 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1162 ta_insert
= tag_access
;
1163 va_insert
= mbits(ta_insert
, 63,13);
1164 ct_insert
= mbits(ta_insert
, 12,0);
1165 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1166 real_insert
= bits(va
, 9,9);
1167 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1168 PageTableEntry::sun4u
);
1169 insert(va_insert
, part_insert
, ct_insert
, real_insert
, pte
,
1172 case ASI_IMMU_DEMAP
:
1175 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1176 switch (bits(va
,5,4)) {
1178 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1190 switch (bits(va
,7,6)) {
1191 case 0: // demap page
1193 tc
->getITBPtr()->demapPage(mbits(va
,63,13), part_id
,
1194 bits(va
,9,9), ctx_id
);
1196 case 1: // demap context
1198 tc
->getITBPtr()->demapContext(part_id
, ctx_id
);
1201 tc
->getITBPtr()->demapAll(part_id
);
1204 panic("Invalid type for IMMU demap\n");
1213 sext
<59>(bits(data
, 59,0));
1217 tc
->setMiscReg(MISCREG_MMU_PART_ID
, data
);
1220 goto doMmuWriteError
;
1223 case ASI_DMMU_DEMAP
:
1226 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1227 switch (bits(va
,5,4)) {
1229 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1232 ctx_id
= tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
);
1241 switch (bits(va
,7,6)) {
1242 case 0: // demap page
1244 demapPage(mbits(va
,63,13), part_id
, bits(va
,9,9), ctx_id
);
1246 case 1: // demap context
1248 demapContext(part_id
, ctx_id
);
1254 panic("Invalid type for IMMU demap\n");
1257 case ASI_SWVR_INTR_RECEIVE
:
1260 // clear all the interrupts that aren't set in the write
1261 SparcISA::Interrupts
* interrupts
=
1262 dynamic_cast<SparcISA::Interrupts
*>(
1263 tc
->getCpuPtr()->getInterruptController());
1264 while (interrupts
->get_vec(IT_INT_VEC
) & data
) {
1265 msb
= findMsbSet(interrupts
->get_vec(IT_INT_VEC
) & data
);
1266 tc
->getCpuPtr()->clearInterrupt(IT_INT_VEC
, msb
);
1270 case ASI_SWVR_UDB_INTR_W
:
1271 tc
->getSystemPtr()->threadContexts
[bits(data
,12,8)]->getCpuPtr()->
1272 postInterrupt(bits(data
, 5, 0), 0);
1276 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1277 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr(), data
);
1279 pkt
->makeAtomicResponse();
1280 return tc
->getCpuPtr()->ticks(1);
1286 TLB::GetTsbPtr(ThreadContext
*tc
, Addr addr
, int ctx
, Addr
*ptrs
)
1288 uint64_t tag_access
= mbits(addr
,63,13) | mbits(ctx
,12,0);
1289 TLB
* itb
= tc
->getITBPtr();
1290 ptrs
[0] = MakeTsbPtr(Ps0
, tag_access
,
1295 ptrs
[1] = MakeTsbPtr(Ps1
, tag_access
,
1300 ptrs
[2] = MakeTsbPtr(Ps0
, tag_access
,
1305 ptrs
[3] = MakeTsbPtr(Ps1
, tag_access
,
1313 TLB::MakeTsbPtr(TsbPageSize ps
, uint64_t tag_access
, uint64_t c0_tsb
,
1314 uint64_t c0_config
, uint64_t cX_tsb
, uint64_t cX_config
)
1319 if (bits(tag_access
, 12,0) == 0) {
1327 uint64_t ptr
= mbits(tsb
,63,13);
1328 bool split
= bits(tsb
,12,12);
1329 int tsb_size
= bits(tsb
,3,0);
1330 int page_size
= (ps
== Ps0
) ? bits(config
, 2,0) : bits(config
,10,8);
1332 if (ps
== Ps1
&& split
)
1333 ptr
|= ULL(1) << (13 + tsb_size
);
1334 ptr
|= (tag_access
>> (9 + page_size
* 3)) & mask(12+tsb_size
, 4);
1340 TLB::serialize(std::ostream
&os
)
1342 SERIALIZE_SCALAR(size
);
1343 SERIALIZE_SCALAR(usedEntries
);
1344 SERIALIZE_SCALAR(lastReplaced
);
1346 // convert the pointer based free list into an index based one
1347 int *free_list
= (int*)malloc(sizeof(int) * size
);
1349 std::list
<TlbEntry
*>::iterator i
;
1350 i
= freeList
.begin();
1351 while (i
!= freeList
.end()) {
1352 free_list
[cntr
++] = ((size_t)*i
- (size_t)tlb
)/ sizeof(TlbEntry
);
1355 SERIALIZE_SCALAR(cntr
);
1356 SERIALIZE_ARRAY(free_list
, cntr
);
1358 SERIALIZE_SCALAR(c0_tsb_ps0
);
1359 SERIALIZE_SCALAR(c0_tsb_ps1
);
1360 SERIALIZE_SCALAR(c0_config
);
1361 SERIALIZE_SCALAR(cx_tsb_ps0
);
1362 SERIALIZE_SCALAR(cx_tsb_ps1
);
1363 SERIALIZE_SCALAR(cx_config
);
1364 SERIALIZE_SCALAR(sfsr
);
1365 SERIALIZE_SCALAR(tag_access
);
1367 for (int x
= 0; x
< size
; x
++) {
1368 nameOut(os
, csprintf("%s.PTE%d", name(), x
));
1369 tlb
[x
].serialize(os
);
1371 SERIALIZE_SCALAR(sfar
);
1375 TLB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1379 paramIn(cp
, section
, "size", oldSize
);
1380 if (oldSize
!= size
)
1381 panic("Don't support unserializing different sized TLBs\n");
1382 UNSERIALIZE_SCALAR(usedEntries
);
1383 UNSERIALIZE_SCALAR(lastReplaced
);
1386 UNSERIALIZE_SCALAR(cntr
);
1388 int *free_list
= (int*)malloc(sizeof(int) * cntr
);
1390 UNSERIALIZE_ARRAY(free_list
, cntr
);
1391 for (int x
= 0; x
< cntr
; x
++)
1392 freeList
.push_back(&tlb
[free_list
[x
]]);
1394 UNSERIALIZE_SCALAR(c0_tsb_ps0
);
1395 UNSERIALIZE_SCALAR(c0_tsb_ps1
);
1396 UNSERIALIZE_SCALAR(c0_config
);
1397 UNSERIALIZE_SCALAR(cx_tsb_ps0
);
1398 UNSERIALIZE_SCALAR(cx_tsb_ps1
);
1399 UNSERIALIZE_SCALAR(cx_config
);
1400 UNSERIALIZE_SCALAR(sfsr
);
1401 UNSERIALIZE_SCALAR(tag_access
);
1403 lookupTable
.clear();
1404 for (int x
= 0; x
< size
; x
++) {
1405 tlb
[x
].unserialize(cp
, csprintf("%s.PTE%d", section
, x
));
1407 lookupTable
.insert(tlb
[x
].range
, &tlb
[x
]);
1410 UNSERIALIZE_SCALAR(sfar
);
1413 } // namespace SparcISA
1416 SparcTLBParams::create()
1418 return new SparcISA::TLB(this);