2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/miscregfile.hh"
35 #include "arch/sparc/tlb.hh"
36 #include "base/bitfield.hh"
37 #include "base/trace.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/base.hh"
40 #include "mem/packet_access.hh"
41 #include "mem/request.hh"
42 #include "sim/system.hh"
44 /* @todo remove some of the magic constants. -- ali
48 TLB::TLB(const Params
*p
)
49 : BaseTLB(p
), size(p
->size
), usedEntries(0), lastReplaced(0),
52 // To make this work you'll have to change the hypervisor and OS
54 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
56 tlb
= new TlbEntry
[size
];
57 std::memset(tlb
, 0, sizeof(TlbEntry
) * size
);
59 for (int x
= 0; x
< size
; x
++)
60 freeList
.push_back(&tlb
[x
]);
76 for (i
= lookupTable
.begin(); i
!= lookupTable
.end(); i
++) {
77 TlbEntry
*t
= i
->second
;
78 if (!t
->pte
.locked()) {
87 TLB::insert(Addr va
, int partition_id
, int context_id
, bool real
,
88 const PageTableEntry
& PTE
, int entry
)
93 TlbEntry
*new_entry
= NULL
;
98 va
&= ~(PTE
.size()-1);
100 tr.size = PTE.size() - 1;
101 tr.contextId = context_id;
102 tr.partitionId = partition_id;
106 DPRINTF(TLB
, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
107 va
, PTE
.paddr(), partition_id
, context_id
, (int)real
, entry
);
109 // Demap any entry that conflicts
110 for (x
= 0; x
< size
; x
++) {
111 if (tlb
[x
].range
.real
== real
&&
112 tlb
[x
].range
.partitionId
== partition_id
&&
113 tlb
[x
].range
.va
< va
+ PTE
.size() - 1 &&
114 tlb
[x
].range
.va
+ tlb
[x
].range
.size
>= va
&&
115 (real
|| tlb
[x
].range
.contextId
== context_id
))
118 freeList
.push_front(&tlb
[x
]);
119 DPRINTF(TLB
, "TLB: Conflicting entry %#X , deleting it\n", x
);
121 tlb
[x
].valid
= false;
126 lookupTable
.erase(tlb
[x
].range
);
133 i = lookupTable.find(tr);
134 if (i != lookupTable.end()) {
135 i->second->valid = false;
136 if (i->second->used) {
137 i->second->used = false;
140 freeList.push_front(i->second);
141 DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
143 lookupTable.erase(i);
148 assert(entry
< size
&& entry
>= 0);
149 new_entry
= &tlb
[entry
];
151 if (!freeList
.empty()) {
152 new_entry
= freeList
.front();
159 if (x
== lastReplaced
)
160 goto insertAllLocked
;
161 } while (tlb
[x
].pte
.locked());
166 for (x = 0; x < size; x++) {
167 if (!tlb[x].valid || !tlb[x].used) {
175 // Update the last entry if they're all locked
177 new_entry
= &tlb
[size
-1];
180 freeList
.remove(new_entry
);
181 if (new_entry
->valid
&& new_entry
->used
)
183 if (new_entry
->valid
)
184 lookupTable
.erase(new_entry
->range
);
188 new_entry
->range
.va
= va
;
189 new_entry
->range
.size
= PTE
.size() - 1;
190 new_entry
->range
.partitionId
= partition_id
;
191 new_entry
->range
.contextId
= context_id
;
192 new_entry
->range
.real
= real
;
193 new_entry
->pte
= PTE
;
194 new_entry
->used
= true;;
195 new_entry
->valid
= true;
200 i
= lookupTable
.insert(new_entry
->range
, new_entry
);
201 assert(i
!= lookupTable
.end());
203 // If all entries have their used bit set, clear it on them all, but the
204 // one we just inserted
205 if (usedEntries
== size
) {
207 new_entry
->used
= true;
215 TLB::lookup(Addr va
, int partition_id
, bool real
, int context_id
, bool
222 DPRINTF(TLB
, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
223 va
, partition_id
, context_id
, real
);
224 // Assemble full address structure
227 tr
.contextId
= context_id
;
228 tr
.partitionId
= partition_id
;
231 // Try to find the entry
232 i
= lookupTable
.find(tr
);
233 if (i
== lookupTable
.end()) {
234 DPRINTF(TLB
, "TLB: No valid entry found\n");
238 // Mark the entry's used bit and clear other used bits if needed
240 DPRINTF(TLB
, "TLB: Valid entry found pa: %#x size: %#x\n", t
->pte
.paddr(),
243 // Update the used bits only if this is a real access (not a fake one from
245 if (!t
->used
&& update_used
) {
248 if (usedEntries
== size
) {
262 for (int x
= 0; x
< size
; x
++) {
264 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
265 x
, tlb
[x
].range
.partitionId
, tlb
[x
].range
.contextId
,
266 tlb
[x
].range
.real
? 'R' : ' ', tlb
[x
].range
.size
,
267 tlb
[x
].range
.va
, tlb
[x
].pte
.paddr(), tlb
[x
].pte());
273 TLB::demapPage(Addr va
, int partition_id
, bool real
, int context_id
)
278 DPRINTF(IPR
, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
279 va
, partition_id
, context_id
, real
);
283 // Assemble full address structure
286 tr
.contextId
= context_id
;
287 tr
.partitionId
= partition_id
;
290 // Demap any entry that conflicts
291 i
= lookupTable
.find(tr
);
292 if (i
!= lookupTable
.end()) {
293 DPRINTF(IPR
, "TLB: Demapped page\n");
294 i
->second
->valid
= false;
295 if (i
->second
->used
) {
296 i
->second
->used
= false;
299 freeList
.push_front(i
->second
);
300 lookupTable
.erase(i
);
305 TLB::demapContext(int partition_id
, int context_id
)
308 DPRINTF(IPR
, "TLB: Demapping Context pid=%#d cid=%d\n",
309 partition_id
, context_id
);
311 for (x
= 0; x
< size
; x
++) {
312 if (tlb
[x
].range
.contextId
== context_id
&&
313 tlb
[x
].range
.partitionId
== partition_id
) {
314 if (tlb
[x
].valid
== true) {
315 freeList
.push_front(&tlb
[x
]);
317 tlb
[x
].valid
= false;
322 lookupTable
.erase(tlb
[x
].range
);
328 TLB::demapAll(int partition_id
)
331 DPRINTF(TLB
, "TLB: Demapping All pid=%#d\n", partition_id
);
333 for (x
= 0; x
< size
; x
++) {
334 if (tlb
[x
].valid
&& !tlb
[x
].pte
.locked() &&
335 tlb
[x
].range
.partitionId
== partition_id
) {
336 freeList
.push_front(&tlb
[x
]);
337 tlb
[x
].valid
= false;
342 lookupTable
.erase(tlb
[x
].range
);
354 for (x
= 0; x
< size
; x
++) {
355 if (tlb
[x
].valid
== true)
356 freeList
.push_back(&tlb
[x
]);
357 tlb
[x
].valid
= false;
364 TLB::TteRead(int entry
) {
366 panic("entry: %d\n", entry
);
368 assert(entry
< size
);
369 if (tlb
[entry
].valid
)
370 return tlb
[entry
].pte();
372 return (uint64_t)-1ll;
376 TLB::TagRead(int entry
) {
377 assert(entry
< size
);
379 if (!tlb
[entry
].valid
)
380 return (uint64_t)-1ll;
382 tag
= tlb
[entry
].range
.contextId
;
383 tag
|= tlb
[entry
].range
.va
;
384 tag
|= (uint64_t)tlb
[entry
].range
.partitionId
<< 61;
385 tag
|= tlb
[entry
].range
.real
? ULL(1) << 60 : 0;
386 tag
|= (uint64_t)~tlb
[entry
].pte
._size() << 56;
391 TLB::validVirtualAddress(Addr va
, bool am
)
395 if (va
>= StartVAddrHole
&& va
<= EndVAddrHole
)
401 TLB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
418 TLB::writeTagAccess(Addr va
, int context
)
420 DPRINTF(TLB
, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
421 va
, context
, mbits(va
, 63,13) | mbits(context
,12,0));
423 tag_access
= mbits(va
, 63,13) | mbits(context
,12,0);
427 ITB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
429 DPRINTF(TLB
, "TLB: ITB Fault: w=%d ct=%d ft=%d asi=%d\n",
430 (int)write
, ct
, ft
, asi
);
431 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
435 DTB::writeSfsr(Addr a
, bool write
, ContextType ct
,
436 bool se
, FaultTypes ft
, int asi
)
438 DPRINTF(TLB
, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
439 a
, (int)write
, ct
, ft
, asi
);
440 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
445 ITB::translate(RequestPtr
&req
, ThreadContext
*tc
)
447 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
449 Addr vaddr
= req
->getVaddr();
452 assert(req
->getAsi() == ASI_IMPLICIT
);
454 DPRINTF(TLB
, "TLB: ITB Request to translate va=%#x size=%d\n",
455 vaddr
, req
->getSize());
457 // Be fast if we can!
458 if (cacheValid
&& cacheState
== tlbdata
) {
460 if (cacheEntry
->range
.va
< vaddr
+ sizeof(MachInst
) &&
461 cacheEntry
->range
.va
+ cacheEntry
->range
.size
>= vaddr
) {
462 req
->setPaddr(cacheEntry
->pte
.paddr() & ~(cacheEntry
->pte
.size()-1) |
463 vaddr
& cacheEntry
->pte
.size()-1 );
467 req
->setPaddr(vaddr
& PAddrImplMask
);
472 bool hpriv
= bits(tlbdata
,0,0);
473 bool red
= bits(tlbdata
,1,1);
474 bool priv
= bits(tlbdata
,2,2);
475 bool addr_mask
= bits(tlbdata
,3,3);
476 bool lsu_im
= bits(tlbdata
,4,4);
478 int part_id
= bits(tlbdata
,15,8);
479 int tl
= bits(tlbdata
,18,16);
480 int pri_context
= bits(tlbdata
,47,32);
486 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
487 priv
, hpriv
, red
, lsu_im
, part_id
);
496 context
= pri_context
;
499 if ( hpriv
|| red
) {
501 cacheState
= tlbdata
;
503 req
->setPaddr(vaddr
& PAddrImplMask
);
507 // If the access is unaligned trap
509 writeSfsr(false, ct
, false, OtherFault
, asi
);
510 return new MemAddressNotAligned
;
514 vaddr
= vaddr
& VAddrAMask
;
516 if (!validVirtualAddress(vaddr
, addr_mask
)) {
517 writeSfsr(false, ct
, false, VaOutOfRange
, asi
);
518 return new InstructionAccessException
;
522 e
= lookup(vaddr
, part_id
, true);
526 e
= lookup(vaddr
, part_id
, false, context
);
529 if (e
== NULL
|| !e
->valid
) {
530 writeTagAccess(vaddr
, context
);
532 return new InstructionRealTranslationMiss
;
535 return new FastInstructionAccessMMUMiss
;
537 return new FastInstructionAccessMMUMiss(req
->getVaddr());
541 // we're not privileged but accessing a privileged page
542 if (!priv
&& e
->pte
.priv()) {
543 writeTagAccess(vaddr
, context
);
544 writeSfsr(false, ct
, false, PrivViolation
, asi
);
545 return new InstructionAccessException
;
548 // cache translation data for next translation
550 cacheState
= tlbdata
;
553 req
->setPaddr(e
->pte
.paddr() & ~(e
->pte
.size()-1) |
554 vaddr
& e
->pte
.size()-1 );
555 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
562 DTB::translate(RequestPtr
&req
, ThreadContext
*tc
, bool write
)
564 /* @todo this could really use some profiling and fixing to make it faster! */
565 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
566 Addr vaddr
= req
->getVaddr();
567 Addr size
= req
->getSize();
569 asi
= (ASI
)req
->getAsi();
570 bool implicit
= false;
571 bool hpriv
= bits(tlbdata
,0,0);
572 bool unaligned
= (vaddr
& size
-1);
574 DPRINTF(TLB
, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
577 if (lookupTable
.size() != 64 - freeList
.size())
578 panic("Lookup table size: %d tlb size: %d\n", lookupTable
.size(),
580 if (asi
== ASI_IMPLICIT
)
583 // Only use the fast path here if there doesn't need to be an unaligned
586 if (hpriv
&& implicit
) {
587 req
->setPaddr(vaddr
& PAddrImplMask
);
591 // Be fast if we can!
592 if (cacheValid
&& cacheState
== tlbdata
) {
597 TlbEntry
*ce
= cacheEntry
[0];
598 Addr ce_va
= ce
->range
.va
;
599 if (cacheAsi
[0] == asi
&&
600 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
601 (!write
|| ce
->pte
.writable())) {
602 req
->setPaddr(ce
->pte
.paddrMask() | vaddr
& ce
->pte
.sizeMask());
603 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
604 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
605 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
608 } // if cache entry valid
610 TlbEntry
*ce
= cacheEntry
[1];
611 Addr ce_va
= ce
->range
.va
;
612 if (cacheAsi
[1] == asi
&&
613 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
614 (!write
|| ce
->pte
.writable())) {
615 req
->setPaddr(ce
->pte
.paddrMask() | vaddr
& ce
->pte
.sizeMask());
616 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
617 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
618 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
621 } // if cache entry valid
625 bool red
= bits(tlbdata
,1,1);
626 bool priv
= bits(tlbdata
,2,2);
627 bool addr_mask
= bits(tlbdata
,3,3);
628 bool lsu_dm
= bits(tlbdata
,5,5);
630 int part_id
= bits(tlbdata
,15,8);
631 int tl
= bits(tlbdata
,18,16);
632 int pri_context
= bits(tlbdata
,47,32);
633 int sec_context
= bits(tlbdata
,63,48);
636 ContextType ct
= Primary
;
641 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
642 priv
, hpriv
, red
, lsu_dm
, part_id
);
652 context
= pri_context
;
655 // We need to check for priv level/asi priv
656 if (!priv
&& !hpriv
&& !AsiIsUnPriv(asi
)) {
657 // It appears that context should be Nucleus in these cases?
658 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
659 return new PrivilegedAction
;
662 if (!hpriv
&& AsiIsHPriv(asi
)) {
663 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
664 return new DataAccessException
;
667 if (AsiIsPrimary(asi
)) {
668 context
= pri_context
;
670 } else if (AsiIsSecondary(asi
)) {
671 context
= sec_context
;
673 } else if (AsiIsNucleus(asi
)) {
678 context
= pri_context
;
682 if (!implicit
&& asi
!= ASI_P
&& asi
!= ASI_S
) {
683 if (AsiIsLittle(asi
))
684 panic("Little Endian ASIs not supported\n");
686 //XXX It's unclear from looking at the documentation how a no fault
687 //load differs from a regular one, other than what happens concerning
688 //nfo and e bits in the TTE
689 // if (AsiIsNoFault(asi))
690 // panic("No Fault ASIs not supported\n");
692 if (AsiIsPartialStore(asi
))
693 panic("Partial Store ASIs not supported\n");
696 panic("Cmt ASI registers not implmented\n");
698 if (AsiIsInterrupt(asi
))
699 goto handleIntRegAccess
;
701 goto handleMmuRegAccess
;
702 if (AsiIsScratchPad(asi
))
703 goto handleScratchRegAccess
;
705 goto handleQueueRegAccess
;
706 if (AsiIsSparcError(asi
))
707 goto handleSparcErrorRegAccess
;
709 if (!AsiIsReal(asi
) && !AsiIsNucleus(asi
) && !AsiIsAsIfUser(asi
) &&
710 !AsiIsTwin(asi
) && !AsiIsBlock(asi
) && !AsiIsNoFault(asi
))
711 panic("Accessing ASI %#X. Should we?\n", asi
);
714 // If the asi is unaligned trap
716 writeSfsr(vaddr
, false, ct
, false, OtherFault
, asi
);
717 return new MemAddressNotAligned
;
721 vaddr
= vaddr
& VAddrAMask
;
723 if (!validVirtualAddress(vaddr
, addr_mask
)) {
724 writeSfsr(vaddr
, false, ct
, true, VaOutOfRange
, asi
);
725 return new DataAccessException
;
729 if ((!lsu_dm
&& !hpriv
&& !red
) || AsiIsReal(asi
)) {
734 if (hpriv
&& (implicit
|| (!AsiIsAsIfUser(asi
) && !AsiIsReal(asi
)))) {
735 req
->setPaddr(vaddr
& PAddrImplMask
);
739 e
= lookup(vaddr
, part_id
, real
, context
);
741 if (e
== NULL
|| !e
->valid
) {
742 writeTagAccess(vaddr
, context
);
743 DPRINTF(TLB
, "TLB: DTB Failed to find matching TLB entry\n");
745 return new DataRealTranslationMiss
;
748 return new FastDataAccessMMUMiss
;
750 return new FastDataAccessMMUMiss(req
->getVaddr());
755 if (!priv
&& e
->pte
.priv()) {
756 writeTagAccess(vaddr
, context
);
757 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), PrivViolation
, asi
);
758 return new DataAccessException
;
761 if (write
&& !e
->pte
.writable()) {
762 writeTagAccess(vaddr
, context
);
763 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), OtherFault
, asi
);
764 return new FastDataAccessProtection
;
767 if (e
->pte
.nofault() && !AsiIsNoFault(asi
)) {
768 writeTagAccess(vaddr
, context
);
769 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), LoadFromNfo
, asi
);
770 return new DataAccessException
;
773 if (e
->pte
.sideffect() && AsiIsNoFault(asi
)) {
774 writeTagAccess(vaddr
, context
);
775 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), SideEffect
, asi
);
776 return new DataAccessException
;
780 if (e
->pte
.sideffect() || (e
->pte
.paddr() >> 39) & 1)
781 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
783 // cache translation data for next translation
784 cacheState
= tlbdata
;
786 cacheEntry
[1] = NULL
;
787 cacheEntry
[0] = NULL
;
790 if (cacheEntry
[0] != e
&& cacheEntry
[1] != e
) {
791 cacheEntry
[1] = cacheEntry
[0];
793 cacheAsi
[1] = cacheAsi
[0];
796 cacheAsi
[0] = (ASI
)0;
799 req
->setPaddr(e
->pte
.paddr() & ~(e
->pte
.size()-1) |
800 vaddr
& e
->pte
.size()-1);
801 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
804 /** Normal flow ends here. */
807 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
809 return new DataAccessException
;
811 return new PrivilegedAction
;
814 if (asi
== ASI_SWVR_UDB_INTR_W
&& !write
||
815 asi
== ASI_SWVR_UDB_INTR_R
&& write
) {
816 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
817 return new DataAccessException
;
823 handleScratchRegAccess
:
824 if (vaddr
> 0x38 || (vaddr
>= 0x20 && vaddr
< 0x30 && !hpriv
)) {
825 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
826 return new DataAccessException
;
830 handleQueueRegAccess
:
831 if (!priv
&& !hpriv
) {
832 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
833 return new PrivilegedAction
;
835 if (!hpriv
&& vaddr
& 0xF || vaddr
> 0x3f8 || vaddr
< 0x3c0) {
836 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
837 return new DataAccessException
;
841 handleSparcErrorRegAccess
:
843 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
845 return new DataAccessException
;
847 return new PrivilegedAction
;
854 DPRINTF(TLB
, "TLB: DTB Translating MM IPR access\n");
855 req
->setMmapedIpr(true);
856 req
->setPaddr(req
->getVaddr());
863 DTB::doMmuRegRead(ThreadContext
*tc
, Packet
*pkt
)
865 Addr va
= pkt
->getAddr();
866 ASI asi
= (ASI
)pkt
->req
->getAsi();
869 DPRINTF(IPR
, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
870 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr());
872 ITB
* itb
= tc
->getITBPtr();
875 case ASI_LSU_CONTROL_REG
:
877 pkt
->set(tc
->readMiscReg(MISCREG_MMU_LSU_CTRL
));
882 pkt
->set(tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
));
885 pkt
->set(tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
));
892 pkt
->set(tc
->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
895 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
897 pkt
->set(c0_tsb_ps0
);
899 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
901 pkt
->set(c0_tsb_ps1
);
903 case ASI_DMMU_CTXT_ZERO_CONFIG
:
907 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
909 pkt
->set(itb
->c0_tsb_ps0
);
911 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
913 pkt
->set(itb
->c0_tsb_ps1
);
915 case ASI_IMMU_CTXT_ZERO_CONFIG
:
917 pkt
->set(itb
->c0_config
);
919 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
921 pkt
->set(cx_tsb_ps0
);
923 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
925 pkt
->set(cx_tsb_ps1
);
927 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
931 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
933 pkt
->set(itb
->cx_tsb_ps0
);
935 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
937 pkt
->set(itb
->cx_tsb_ps1
);
939 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
941 pkt
->set(itb
->cx_config
);
943 case ASI_SPARC_ERROR_STATUS_REG
:
944 pkt
->set((uint64_t)0);
946 case ASI_HYP_SCRATCHPAD
:
948 pkt
->set(tc
->readMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3)));
953 temp
= itb
->tag_access
;
954 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
960 pkt
->set(itb
->tag_access
);
970 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
979 pkt
->set(tag_access
);
982 pkt
->set(tc
->readMiscReg(MISCREG_MMU_PART_ID
));
988 case ASI_DMMU_TSB_PS0_PTR_REG
:
989 pkt
->set(MakeTsbPtr(Ps0
,
996 case ASI_DMMU_TSB_PS1_PTR_REG
:
997 pkt
->set(MakeTsbPtr(Ps1
,
1004 case ASI_IMMU_TSB_PS0_PTR_REG
:
1005 pkt
->set(MakeTsbPtr(Ps0
,
1012 case ASI_IMMU_TSB_PS1_PTR_REG
:
1013 pkt
->set(MakeTsbPtr(Ps1
,
1020 case ASI_SWVR_INTR_RECEIVE
:
1021 pkt
->set(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
));
1023 case ASI_SWVR_UDB_INTR_R
:
1024 temp
= findMsbSet(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
));
1025 tc
->getCpuPtr()->clear_interrupt(IT_INT_VEC
, temp
);
1030 panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1033 pkt
->makeAtomicResponse();
1034 return tc
->getCpuPtr()->ticks(1);
1038 DTB::doMmuRegWrite(ThreadContext
*tc
, Packet
*pkt
)
1040 uint64_t data
= gtoh(pkt
->get
<uint64_t>());
1041 Addr va
= pkt
->getAddr();
1042 ASI asi
= (ASI
)pkt
->req
->getAsi();
1048 int entry_insert
= -1;
1055 DPRINTF(IPR
, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1056 (uint32_t)asi
, va
, data
);
1058 ITB
* itb
= tc
->getITBPtr();
1061 case ASI_LSU_CONTROL_REG
:
1063 tc
->setMiscReg(MISCREG_MMU_LSU_CTRL
, data
);
1068 tc
->setMiscReg(MISCREG_MMU_P_CONTEXT
, data
);
1071 tc
->setMiscReg(MISCREG_MMU_S_CONTEXT
, data
);
1074 goto doMmuWriteError
;
1078 assert(mbits(data
,13,6) == data
);
1079 tc
->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
1080 (va
>> 4) - 0x3c, data
);
1082 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
1086 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
1090 case ASI_DMMU_CTXT_ZERO_CONFIG
:
1094 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
1096 itb
->c0_tsb_ps0
= data
;
1098 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
1100 itb
->c0_tsb_ps1
= data
;
1102 case ASI_IMMU_CTXT_ZERO_CONFIG
:
1104 itb
->c0_config
= data
;
1106 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1110 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1114 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
1118 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1120 itb
->cx_tsb_ps0
= data
;
1122 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1124 itb
->cx_tsb_ps1
= data
;
1126 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
1128 itb
->cx_config
= data
;
1130 case ASI_SPARC_ERROR_EN_REG
:
1131 case ASI_SPARC_ERROR_STATUS_REG
:
1132 warn("Ignoring write to SPARC ERROR regsiter\n");
1134 case ASI_HYP_SCRATCHPAD
:
1135 case ASI_SCRATCHPAD
:
1136 tc
->setMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3), data
);
1144 sext
<59>(bits(data
, 59,0));
1145 itb
->tag_access
= data
;
1148 goto doMmuWriteError
;
1151 case ASI_ITLB_DATA_ACCESS_REG
:
1152 entry_insert
= bits(va
, 8,3);
1153 case ASI_ITLB_DATA_IN_REG
:
1154 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1155 ta_insert
= itb
->tag_access
;
1156 va_insert
= mbits(ta_insert
, 63,13);
1157 ct_insert
= mbits(ta_insert
, 12,0);
1158 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1159 real_insert
= bits(va
, 9,9);
1160 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1161 PageTableEntry::sun4u
);
1162 tc
->getITBPtr()->insert(va_insert
, part_insert
, ct_insert
, real_insert
,
1165 case ASI_DTLB_DATA_ACCESS_REG
:
1166 entry_insert
= bits(va
, 8,3);
1167 case ASI_DTLB_DATA_IN_REG
:
1168 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1169 ta_insert
= tag_access
;
1170 va_insert
= mbits(ta_insert
, 63,13);
1171 ct_insert
= mbits(ta_insert
, 12,0);
1172 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1173 real_insert
= bits(va
, 9,9);
1174 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1175 PageTableEntry::sun4u
);
1176 insert(va_insert
, part_insert
, ct_insert
, real_insert
, pte
, entry_insert
);
1178 case ASI_IMMU_DEMAP
:
1181 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1182 switch (bits(va
,5,4)) {
1184 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1196 switch(bits(va
,7,6)) {
1197 case 0: // demap page
1199 tc
->getITBPtr()->demapPage(mbits(va
,63,13), part_id
,
1200 bits(va
,9,9), ctx_id
);
1202 case 1: //demap context
1204 tc
->getITBPtr()->demapContext(part_id
, ctx_id
);
1207 tc
->getITBPtr()->demapAll(part_id
);
1210 panic("Invalid type for IMMU demap\n");
1219 sext
<59>(bits(data
, 59,0));
1223 tc
->setMiscReg(MISCREG_MMU_PART_ID
, data
);
1226 goto doMmuWriteError
;
1229 case ASI_DMMU_DEMAP
:
1232 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1233 switch (bits(va
,5,4)) {
1235 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1238 ctx_id
= tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
);
1247 switch(bits(va
,7,6)) {
1248 case 0: // demap page
1250 demapPage(mbits(va
,63,13), part_id
, bits(va
,9,9), ctx_id
);
1252 case 1: //demap context
1254 demapContext(part_id
, ctx_id
);
1260 panic("Invalid type for IMMU demap\n");
1263 case ASI_SWVR_INTR_RECEIVE
:
1265 // clear all the interrupts that aren't set in the write
1266 while(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
) & data
) {
1267 msb
= findMsbSet(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
) & data
);
1268 tc
->getCpuPtr()->clear_interrupt(IT_INT_VEC
, msb
);
1271 case ASI_SWVR_UDB_INTR_W
:
1272 tc
->getSystemPtr()->threadContexts
[bits(data
,12,8)]->getCpuPtr()->
1273 post_interrupt(bits(data
,5,0),0);
1277 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1278 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr(), data
);
1280 pkt
->makeAtomicResponse();
1281 return tc
->getCpuPtr()->ticks(1);
1287 DTB::GetTsbPtr(ThreadContext
*tc
, Addr addr
, int ctx
, Addr
*ptrs
)
1289 uint64_t tag_access
= mbits(addr
,63,13) | mbits(ctx
,12,0);
1290 ITB
* itb
= tc
->getITBPtr();
1291 ptrs
[0] = MakeTsbPtr(Ps0
, tag_access
,
1296 ptrs
[1] = MakeTsbPtr(Ps1
, tag_access
,
1301 ptrs
[2] = MakeTsbPtr(Ps0
, tag_access
,
1306 ptrs
[3] = MakeTsbPtr(Ps1
, tag_access
,
1318 DTB::MakeTsbPtr(TsbPageSize ps
, uint64_t tag_access
, uint64_t c0_tsb
,
1319 uint64_t c0_config
, uint64_t cX_tsb
, uint64_t cX_config
)
1324 if (bits(tag_access
, 12,0) == 0) {
1332 uint64_t ptr
= mbits(tsb
,63,13);
1333 bool split
= bits(tsb
,12,12);
1334 int tsb_size
= bits(tsb
,3,0);
1335 int page_size
= (ps
== Ps0
) ? bits(config
, 2,0) : bits(config
,10,8);
1337 if (ps
== Ps1
&& split
)
1338 ptr
|= ULL(1) << (13 + tsb_size
);
1339 ptr
|= (tag_access
>> (9 + page_size
* 3)) & mask(12+tsb_size
, 4);
1346 TLB::serialize(std::ostream
&os
)
1348 SERIALIZE_SCALAR(size
);
1349 SERIALIZE_SCALAR(usedEntries
);
1350 SERIALIZE_SCALAR(lastReplaced
);
1352 // convert the pointer based free list into an index based one
1353 int *free_list
= (int*)malloc(sizeof(int) * size
);
1355 std::list
<TlbEntry
*>::iterator i
;
1356 i
= freeList
.begin();
1357 while (i
!= freeList
.end()) {
1358 free_list
[cntr
++] = ((size_t)*i
- (size_t)tlb
)/ sizeof(TlbEntry
);
1361 SERIALIZE_SCALAR(cntr
);
1362 SERIALIZE_ARRAY(free_list
, cntr
);
1364 SERIALIZE_SCALAR(c0_tsb_ps0
);
1365 SERIALIZE_SCALAR(c0_tsb_ps1
);
1366 SERIALIZE_SCALAR(c0_config
);
1367 SERIALIZE_SCALAR(cx_tsb_ps0
);
1368 SERIALIZE_SCALAR(cx_tsb_ps1
);
1369 SERIALIZE_SCALAR(cx_config
);
1370 SERIALIZE_SCALAR(sfsr
);
1371 SERIALIZE_SCALAR(tag_access
);
1373 for (int x
= 0; x
< size
; x
++) {
1374 nameOut(os
, csprintf("%s.PTE%d", name(), x
));
1375 tlb
[x
].serialize(os
);
1380 TLB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1384 paramIn(cp
, section
, "size", oldSize
);
1385 if (oldSize
!= size
)
1386 panic("Don't support unserializing different sized TLBs\n");
1387 UNSERIALIZE_SCALAR(usedEntries
);
1388 UNSERIALIZE_SCALAR(lastReplaced
);
1391 UNSERIALIZE_SCALAR(cntr
);
1393 int *free_list
= (int*)malloc(sizeof(int) * cntr
);
1395 UNSERIALIZE_ARRAY(free_list
, cntr
);
1396 for (int x
= 0; x
< cntr
; x
++)
1397 freeList
.push_back(&tlb
[free_list
[x
]]);
1399 UNSERIALIZE_SCALAR(c0_tsb_ps0
);
1400 UNSERIALIZE_SCALAR(c0_tsb_ps1
);
1401 UNSERIALIZE_SCALAR(c0_config
);
1402 UNSERIALIZE_SCALAR(cx_tsb_ps0
);
1403 UNSERIALIZE_SCALAR(cx_tsb_ps1
);
1404 UNSERIALIZE_SCALAR(cx_config
);
1405 UNSERIALIZE_SCALAR(sfsr
);
1406 UNSERIALIZE_SCALAR(tag_access
);
1408 lookupTable
.clear();
1409 for (int x
= 0; x
< size
; x
++) {
1410 tlb
[x
].unserialize(cp
, csprintf("%s.PTE%d", section
, x
));
1412 lookupTable
.insert(tlb
[x
].range
, &tlb
[x
]);
1418 DTB::serialize(std::ostream
&os
)
1421 SERIALIZE_SCALAR(sfar
);
1425 DTB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1427 TLB::unserialize(cp
, section
);
1428 UNSERIALIZE_SCALAR(sfar
);
1431 /* end namespace SparcISA */ }
1434 SparcITBParams::create()
1436 return new SparcISA::ITB(this);
1440 SparcDTBParams::create()
1442 return new SparcISA::DTB(this);