2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/miscregfile.hh"
35 #include "arch/sparc/tlb.hh"
36 #include "base/bitfield.hh"
37 #include "base/trace.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/base.hh"
40 #include "mem/packet_access.hh"
41 #include "mem/request.hh"
42 #include "sim/system.hh"
44 /* @todo remove some of the magic constants. -- ali
48 TLB::TLB(const Params
*p
)
49 : BaseTLB(p
), size(p
->size
), usedEntries(0), lastReplaced(0),
52 // To make this work you'll have to change the hypervisor and OS
54 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
56 tlb
= new TlbEntry
[size
];
57 std::memset(tlb
, 0, sizeof(TlbEntry
) * size
);
59 for (int x
= 0; x
< size
; x
++)
60 freeList
.push_back(&tlb
[x
]);
76 for (i
= lookupTable
.begin(); i
!= lookupTable
.end(); i
++) {
77 TlbEntry
*t
= i
->second
;
78 if (!t
->pte
.locked()) {
87 TLB::insert(Addr va
, int partition_id
, int context_id
, bool real
,
88 const PageTableEntry
& PTE
, int entry
)
91 TlbEntry
*new_entry
= NULL
;
96 va
&= ~(PTE
.size()-1);
98 tr.size = PTE.size() - 1;
99 tr.contextId = context_id;
100 tr.partitionId = partition_id;
105 "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
106 va
, PTE
.paddr(), partition_id
, context_id
, (int)real
, entry
);
108 // Demap any entry that conflicts
109 for (x
= 0; x
< size
; x
++) {
110 if (tlb
[x
].range
.real
== real
&&
111 tlb
[x
].range
.partitionId
== partition_id
&&
112 tlb
[x
].range
.va
< va
+ PTE
.size() - 1 &&
113 tlb
[x
].range
.va
+ tlb
[x
].range
.size
>= va
&&
114 (real
|| tlb
[x
].range
.contextId
== context_id
))
117 freeList
.push_front(&tlb
[x
]);
118 DPRINTF(TLB
, "TLB: Conflicting entry %#X , deleting it\n", x
);
120 tlb
[x
].valid
= false;
125 lookupTable
.erase(tlb
[x
].range
);
131 i = lookupTable.find(tr);
132 if (i != lookupTable.end()) {
133 i->second->valid = false;
134 if (i->second->used) {
135 i->second->used = false;
138 freeList.push_front(i->second);
139 DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
141 lookupTable.erase(i);
146 assert(entry
< size
&& entry
>= 0);
147 new_entry
= &tlb
[entry
];
149 if (!freeList
.empty()) {
150 new_entry
= freeList
.front();
157 if (x
== lastReplaced
)
158 goto insertAllLocked
;
159 } while (tlb
[x
].pte
.locked());
164 for (x = 0; x < size; x++) {
165 if (!tlb[x].valid || !tlb[x].used) {
173 // Update to the last entry if they're all locked
175 new_entry
= &tlb
[size
-1];
178 freeList
.remove(new_entry
);
179 if (new_entry
->valid
&& new_entry
->used
)
181 if (new_entry
->valid
)
182 lookupTable
.erase(new_entry
->range
);
186 new_entry
->range
.va
= va
;
187 new_entry
->range
.size
= PTE
.size() - 1;
188 new_entry
->range
.partitionId
= partition_id
;
189 new_entry
->range
.contextId
= context_id
;
190 new_entry
->range
.real
= real
;
191 new_entry
->pte
= PTE
;
192 new_entry
->used
= true;;
193 new_entry
->valid
= true;
196 i
= lookupTable
.insert(new_entry
->range
, new_entry
);
197 assert(i
!= lookupTable
.end());
199 // If all entries have their used bit set, clear it on them all,
200 // but the one we just inserted
201 if (usedEntries
== size
) {
203 new_entry
->used
= true;
210 TLB::lookup(Addr va
, int partition_id
, bool real
, int context_id
,
217 DPRINTF(TLB
, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
218 va
, partition_id
, context_id
, real
);
219 // Assemble full address structure
222 tr
.contextId
= context_id
;
223 tr
.partitionId
= partition_id
;
226 // Try to find the entry
227 i
= lookupTable
.find(tr
);
228 if (i
== lookupTable
.end()) {
229 DPRINTF(TLB
, "TLB: No valid entry found\n");
233 // Mark the entry's used bit and clear other used bits if needed
235 DPRINTF(TLB
, "TLB: Valid entry found pa: %#x size: %#x\n", t
->pte
.paddr(),
238 // Update the used bits only if this is a real access (not a fake
239 // one from virtToPhys())
240 if (!t
->used
&& update_used
) {
243 if (usedEntries
== size
) {
257 for (int x
= 0; x
< size
; x
++) {
259 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
260 x
, tlb
[x
].range
.partitionId
, tlb
[x
].range
.contextId
,
261 tlb
[x
].range
.real
? 'R' : ' ', tlb
[x
].range
.size
,
262 tlb
[x
].range
.va
, tlb
[x
].pte
.paddr(), tlb
[x
].pte());
268 TLB::demapPage(Addr va
, int partition_id
, bool real
, int context_id
)
273 DPRINTF(IPR
, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
274 va
, partition_id
, context_id
, real
);
278 // Assemble full address structure
281 tr
.contextId
= context_id
;
282 tr
.partitionId
= partition_id
;
285 // Demap any entry that conflicts
286 i
= lookupTable
.find(tr
);
287 if (i
!= lookupTable
.end()) {
288 DPRINTF(IPR
, "TLB: Demapped page\n");
289 i
->second
->valid
= false;
290 if (i
->second
->used
) {
291 i
->second
->used
= false;
294 freeList
.push_front(i
->second
);
295 lookupTable
.erase(i
);
300 TLB::demapContext(int partition_id
, int context_id
)
302 DPRINTF(IPR
, "TLB: Demapping Context pid=%#d cid=%d\n",
303 partition_id
, context_id
);
305 for (int x
= 0; x
< size
; x
++) {
306 if (tlb
[x
].range
.contextId
== context_id
&&
307 tlb
[x
].range
.partitionId
== partition_id
) {
308 if (tlb
[x
].valid
== true) {
309 freeList
.push_front(&tlb
[x
]);
311 tlb
[x
].valid
= false;
316 lookupTable
.erase(tlb
[x
].range
);
322 TLB::demapAll(int partition_id
)
324 DPRINTF(TLB
, "TLB: Demapping All pid=%#d\n", partition_id
);
326 for (int x
= 0; x
< size
; x
++) {
327 if (tlb
[x
].valid
&& !tlb
[x
].pte
.locked() &&
328 tlb
[x
].range
.partitionId
== partition_id
) {
329 freeList
.push_front(&tlb
[x
]);
330 tlb
[x
].valid
= false;
335 lookupTable
.erase(tlb
[x
].range
);
346 for (int x
= 0; x
< size
; x
++) {
347 if (tlb
[x
].valid
== true)
348 freeList
.push_back(&tlb
[x
]);
349 tlb
[x
].valid
= false;
356 TLB::TteRead(int entry
)
359 panic("entry: %d\n", entry
);
361 assert(entry
< size
);
362 if (tlb
[entry
].valid
)
363 return tlb
[entry
].pte();
365 return (uint64_t)-1ll;
369 TLB::TagRead(int entry
)
371 assert(entry
< size
);
373 if (!tlb
[entry
].valid
)
374 return (uint64_t)-1ll;
376 tag
= tlb
[entry
].range
.contextId
;
377 tag
|= tlb
[entry
].range
.va
;
378 tag
|= (uint64_t)tlb
[entry
].range
.partitionId
<< 61;
379 tag
|= tlb
[entry
].range
.real
? ULL(1) << 60 : 0;
380 tag
|= (uint64_t)~tlb
[entry
].pte
._size() << 56;
385 TLB::validVirtualAddress(Addr va
, bool am
)
389 if (va
>= StartVAddrHole
&& va
<= EndVAddrHole
)
395 TLB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
412 TLB::writeTagAccess(Addr va
, int context
)
414 DPRINTF(TLB
, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
415 va
, context
, mbits(va
, 63,13) | mbits(context
,12,0));
417 tag_access
= mbits(va
, 63,13) | mbits(context
,12,0);
421 ITB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
423 DPRINTF(TLB
, "TLB: ITB Fault: w=%d ct=%d ft=%d asi=%d\n",
424 (int)write
, ct
, ft
, asi
);
425 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
429 DTB::writeSfsr(Addr a
, bool write
, ContextType ct
,
430 bool se
, FaultTypes ft
, int asi
)
432 DPRINTF(TLB
, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
433 a
, (int)write
, ct
, ft
, asi
);
434 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
439 ITB::translateAtomic(RequestPtr req
, ThreadContext
*tc
)
441 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
443 Addr vaddr
= req
->getVaddr();
446 assert(req
->getAsi() == ASI_IMPLICIT
);
448 DPRINTF(TLB
, "TLB: ITB Request to translate va=%#x size=%d\n",
449 vaddr
, req
->getSize());
451 // Be fast if we can!
452 if (cacheValid
&& cacheState
== tlbdata
) {
454 if (cacheEntry
->range
.va
< vaddr
+ sizeof(MachInst
) &&
455 cacheEntry
->range
.va
+ cacheEntry
->range
.size
>= vaddr
) {
456 req
->setPaddr(cacheEntry
->pte
.translate(vaddr
));
460 req
->setPaddr(vaddr
& PAddrImplMask
);
465 bool hpriv
= bits(tlbdata
,0,0);
466 bool red
= bits(tlbdata
,1,1);
467 bool priv
= bits(tlbdata
,2,2);
468 bool addr_mask
= bits(tlbdata
,3,3);
469 bool lsu_im
= bits(tlbdata
,4,4);
471 int part_id
= bits(tlbdata
,15,8);
472 int tl
= bits(tlbdata
,18,16);
473 int pri_context
= bits(tlbdata
,47,32);
479 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
480 priv
, hpriv
, red
, lsu_im
, part_id
);
489 context
= pri_context
;
492 if ( hpriv
|| red
) {
494 cacheState
= tlbdata
;
496 req
->setPaddr(vaddr
& PAddrImplMask
);
500 // If the access is unaligned trap
502 writeSfsr(false, ct
, false, OtherFault
, asi
);
503 return new MemAddressNotAligned
;
507 vaddr
= vaddr
& VAddrAMask
;
509 if (!validVirtualAddress(vaddr
, addr_mask
)) {
510 writeSfsr(false, ct
, false, VaOutOfRange
, asi
);
511 return new InstructionAccessException
;
515 e
= lookup(vaddr
, part_id
, true);
519 e
= lookup(vaddr
, part_id
, false, context
);
522 if (e
== NULL
|| !e
->valid
) {
523 writeTagAccess(vaddr
, context
);
525 return new InstructionRealTranslationMiss
;
528 return new FastInstructionAccessMMUMiss
;
530 return new FastInstructionAccessMMUMiss(req
->getVaddr());
534 // we're not privileged but accessing a privileged page
535 if (!priv
&& e
->pte
.priv()) {
536 writeTagAccess(vaddr
, context
);
537 writeSfsr(false, ct
, false, PrivViolation
, asi
);
538 return new InstructionAccessException
;
541 // cache translation data for next translation
543 cacheState
= tlbdata
;
546 req
->setPaddr(e
->pte
.translate(vaddr
));
547 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
552 ITB::translateTiming(RequestPtr req
, ThreadContext
*tc
,
553 Translation
*translation
)
556 translation
->finish(translateAtomic(req
, tc
), req
, tc
, false);
560 DTB::translateAtomic(RequestPtr req
, ThreadContext
*tc
, bool write
)
563 * @todo this could really use some profiling and fixing to make
566 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
567 Addr vaddr
= req
->getVaddr();
568 Addr size
= req
->getSize();
570 asi
= (ASI
)req
->getAsi();
571 bool implicit
= false;
572 bool hpriv
= bits(tlbdata
,0,0);
573 bool unaligned
= vaddr
& (size
- 1);
575 DPRINTF(TLB
, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
578 if (lookupTable
.size() != 64 - freeList
.size())
579 panic("Lookup table size: %d tlb size: %d\n", lookupTable
.size(),
581 if (asi
== ASI_IMPLICIT
)
584 // Only use the fast path here if there doesn't need to be an unaligned
587 if (hpriv
&& implicit
) {
588 req
->setPaddr(vaddr
& PAddrImplMask
);
592 // Be fast if we can!
593 if (cacheValid
&& cacheState
== tlbdata
) {
598 TlbEntry
*ce
= cacheEntry
[0];
599 Addr ce_va
= ce
->range
.va
;
600 if (cacheAsi
[0] == asi
&&
601 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
602 (!write
|| ce
->pte
.writable())) {
603 req
->setPaddr(ce
->pte
.translate(vaddr
));
604 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
605 req
->setFlags(Request::UNCACHEABLE
);
606 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
609 } // if cache entry valid
611 TlbEntry
*ce
= cacheEntry
[1];
612 Addr ce_va
= ce
->range
.va
;
613 if (cacheAsi
[1] == asi
&&
614 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
615 (!write
|| ce
->pte
.writable())) {
616 req
->setPaddr(ce
->pte
.translate(vaddr
));
617 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
618 req
->setFlags(Request::UNCACHEABLE
);
619 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
622 } // if cache entry valid
626 bool red
= bits(tlbdata
,1,1);
627 bool priv
= bits(tlbdata
,2,2);
628 bool addr_mask
= bits(tlbdata
,3,3);
629 bool lsu_dm
= bits(tlbdata
,5,5);
631 int part_id
= bits(tlbdata
,15,8);
632 int tl
= bits(tlbdata
,18,16);
633 int pri_context
= bits(tlbdata
,47,32);
634 int sec_context
= bits(tlbdata
,63,48);
637 ContextType ct
= Primary
;
642 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
643 priv
, hpriv
, red
, lsu_dm
, part_id
);
653 context
= pri_context
;
656 // We need to check for priv level/asi priv
657 if (!priv
&& !hpriv
&& !AsiIsUnPriv(asi
)) {
658 // It appears that context should be Nucleus in these cases?
659 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
660 return new PrivilegedAction
;
663 if (!hpriv
&& AsiIsHPriv(asi
)) {
664 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
665 return new DataAccessException
;
668 if (AsiIsPrimary(asi
)) {
669 context
= pri_context
;
671 } else if (AsiIsSecondary(asi
)) {
672 context
= sec_context
;
674 } else if (AsiIsNucleus(asi
)) {
679 context
= pri_context
;
683 if (!implicit
&& asi
!= ASI_P
&& asi
!= ASI_S
) {
684 if (AsiIsLittle(asi
))
685 panic("Little Endian ASIs not supported\n");
687 //XXX It's unclear from looking at the documentation how a no fault
688 //load differs from a regular one, other than what happens concerning
689 //nfo and e bits in the TTE
690 // if (AsiIsNoFault(asi))
691 // panic("No Fault ASIs not supported\n");
693 if (AsiIsPartialStore(asi
))
694 panic("Partial Store ASIs not supported\n");
697 panic("Cmt ASI registers not implmented\n");
699 if (AsiIsInterrupt(asi
))
700 goto handleIntRegAccess
;
702 goto handleMmuRegAccess
;
703 if (AsiIsScratchPad(asi
))
704 goto handleScratchRegAccess
;
706 goto handleQueueRegAccess
;
707 if (AsiIsSparcError(asi
))
708 goto handleSparcErrorRegAccess
;
710 if (!AsiIsReal(asi
) && !AsiIsNucleus(asi
) && !AsiIsAsIfUser(asi
) &&
711 !AsiIsTwin(asi
) && !AsiIsBlock(asi
) && !AsiIsNoFault(asi
))
712 panic("Accessing ASI %#X. Should we?\n", asi
);
715 // If the asi is unaligned trap
717 writeSfsr(vaddr
, false, ct
, false, OtherFault
, asi
);
718 return new MemAddressNotAligned
;
722 vaddr
= vaddr
& VAddrAMask
;
724 if (!validVirtualAddress(vaddr
, addr_mask
)) {
725 writeSfsr(vaddr
, false, ct
, true, VaOutOfRange
, asi
);
726 return new DataAccessException
;
729 if ((!lsu_dm
&& !hpriv
&& !red
) || AsiIsReal(asi
)) {
734 if (hpriv
&& (implicit
|| (!AsiIsAsIfUser(asi
) && !AsiIsReal(asi
)))) {
735 req
->setPaddr(vaddr
& PAddrImplMask
);
739 e
= lookup(vaddr
, part_id
, real
, context
);
741 if (e
== NULL
|| !e
->valid
) {
742 writeTagAccess(vaddr
, context
);
743 DPRINTF(TLB
, "TLB: DTB Failed to find matching TLB entry\n");
745 return new DataRealTranslationMiss
;
748 return new FastDataAccessMMUMiss
;
750 return new FastDataAccessMMUMiss(req
->getVaddr());
755 if (!priv
&& e
->pte
.priv()) {
756 writeTagAccess(vaddr
, context
);
757 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), PrivViolation
, asi
);
758 return new DataAccessException
;
761 if (write
&& !e
->pte
.writable()) {
762 writeTagAccess(vaddr
, context
);
763 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), OtherFault
, asi
);
764 return new FastDataAccessProtection
;
767 if (e
->pte
.nofault() && !AsiIsNoFault(asi
)) {
768 writeTagAccess(vaddr
, context
);
769 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), LoadFromNfo
, asi
);
770 return new DataAccessException
;
773 if (e
->pte
.sideffect() && AsiIsNoFault(asi
)) {
774 writeTagAccess(vaddr
, context
);
775 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), SideEffect
, asi
);
776 return new DataAccessException
;
779 if (e
->pte
.sideffect() || (e
->pte
.paddr() >> 39) & 1)
780 req
->setFlags(Request::UNCACHEABLE
);
782 // cache translation data for next translation
783 cacheState
= tlbdata
;
785 cacheEntry
[1] = NULL
;
786 cacheEntry
[0] = NULL
;
789 if (cacheEntry
[0] != e
&& cacheEntry
[1] != e
) {
790 cacheEntry
[1] = cacheEntry
[0];
792 cacheAsi
[1] = cacheAsi
[0];
795 cacheAsi
[0] = (ASI
)0;
798 req
->setPaddr(e
->pte
.translate(vaddr
));
799 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
802 /** Normal flow ends here. */
805 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
807 return new DataAccessException
;
809 return new PrivilegedAction
;
812 if ((asi
== ASI_SWVR_UDB_INTR_W
&& !write
) ||
813 (asi
== ASI_SWVR_UDB_INTR_R
&& write
)) {
814 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
815 return new DataAccessException
;
821 handleScratchRegAccess
:
822 if (vaddr
> 0x38 || (vaddr
>= 0x20 && vaddr
< 0x30 && !hpriv
)) {
823 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
824 return new DataAccessException
;
828 handleQueueRegAccess
:
829 if (!priv
&& !hpriv
) {
830 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
831 return new PrivilegedAction
;
833 if ((!hpriv
&& vaddr
& 0xF) || vaddr
> 0x3f8 || vaddr
< 0x3c0) {
834 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
835 return new DataAccessException
;
839 handleSparcErrorRegAccess
:
841 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
843 return new DataAccessException
;
845 return new PrivilegedAction
;
852 DPRINTF(TLB
, "TLB: DTB Translating MM IPR access\n");
853 req
->setMmapedIpr(true);
854 req
->setPaddr(req
->getVaddr());
859 DTB::translateTiming(RequestPtr req
, ThreadContext
*tc
,
860 Translation
*translation
, bool write
)
863 translation
->finish(translateAtomic(req
, tc
, write
), req
, tc
, write
);
869 DTB::doMmuRegRead(ThreadContext
*tc
, Packet
*pkt
)
871 Addr va
= pkt
->getAddr();
872 ASI asi
= (ASI
)pkt
->req
->getAsi();
875 DPRINTF(IPR
, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
876 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr());
878 ITB
*itb
= tc
->getITBPtr();
881 case ASI_LSU_CONTROL_REG
:
883 pkt
->set(tc
->readMiscReg(MISCREG_MMU_LSU_CTRL
));
888 pkt
->set(tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
));
891 pkt
->set(tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
));
898 pkt
->set(tc
->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
901 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
903 pkt
->set(c0_tsb_ps0
);
905 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
907 pkt
->set(c0_tsb_ps1
);
909 case ASI_DMMU_CTXT_ZERO_CONFIG
:
913 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
915 pkt
->set(itb
->c0_tsb_ps0
);
917 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
919 pkt
->set(itb
->c0_tsb_ps1
);
921 case ASI_IMMU_CTXT_ZERO_CONFIG
:
923 pkt
->set(itb
->c0_config
);
925 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
927 pkt
->set(cx_tsb_ps0
);
929 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
931 pkt
->set(cx_tsb_ps1
);
933 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
937 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
939 pkt
->set(itb
->cx_tsb_ps0
);
941 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
943 pkt
->set(itb
->cx_tsb_ps1
);
945 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
947 pkt
->set(itb
->cx_config
);
949 case ASI_SPARC_ERROR_STATUS_REG
:
950 pkt
->set((uint64_t)0);
952 case ASI_HYP_SCRATCHPAD
:
954 pkt
->set(tc
->readMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3)));
959 temp
= itb
->tag_access
;
960 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
966 pkt
->set(itb
->tag_access
);
976 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
985 pkt
->set(tag_access
);
988 pkt
->set(tc
->readMiscReg(MISCREG_MMU_PART_ID
));
994 case ASI_DMMU_TSB_PS0_PTR_REG
:
995 pkt
->set(MakeTsbPtr(Ps0
,
1002 case ASI_DMMU_TSB_PS1_PTR_REG
:
1003 pkt
->set(MakeTsbPtr(Ps1
,
1010 case ASI_IMMU_TSB_PS0_PTR_REG
:
1011 pkt
->set(MakeTsbPtr(Ps0
,
1018 case ASI_IMMU_TSB_PS1_PTR_REG
:
1019 pkt
->set(MakeTsbPtr(Ps1
,
1026 case ASI_SWVR_INTR_RECEIVE
:
1028 SparcISA::Interrupts
* interrupts
=
1029 dynamic_cast<SparcISA::Interrupts
*>(
1030 tc
->getCpuPtr()->getInterruptController());
1031 pkt
->set(interrupts
->get_vec(IT_INT_VEC
));
1034 case ASI_SWVR_UDB_INTR_R
:
1036 SparcISA::Interrupts
* interrupts
=
1037 dynamic_cast<SparcISA::Interrupts
*>(
1038 tc
->getCpuPtr()->getInterruptController());
1039 temp
= findMsbSet(interrupts
->get_vec(IT_INT_VEC
));
1040 tc
->getCpuPtr()->clearInterrupt(IT_INT_VEC
, temp
);
1046 panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1049 pkt
->makeAtomicResponse();
1050 return tc
->getCpuPtr()->ticks(1);
1054 DTB::doMmuRegWrite(ThreadContext
*tc
, Packet
*pkt
)
1056 uint64_t data
= gtoh(pkt
->get
<uint64_t>());
1057 Addr va
= pkt
->getAddr();
1058 ASI asi
= (ASI
)pkt
->req
->getAsi();
1064 int entry_insert
= -1;
1071 DPRINTF(IPR
, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1072 (uint32_t)asi
, va
, data
);
1074 ITB
*itb
= tc
->getITBPtr();
1077 case ASI_LSU_CONTROL_REG
:
1079 tc
->setMiscReg(MISCREG_MMU_LSU_CTRL
, data
);
1084 tc
->setMiscReg(MISCREG_MMU_P_CONTEXT
, data
);
1087 tc
->setMiscReg(MISCREG_MMU_S_CONTEXT
, data
);
1090 goto doMmuWriteError
;
1094 assert(mbits(data
,13,6) == data
);
1095 tc
->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
1096 (va
>> 4) - 0x3c, data
);
1098 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
1102 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
1106 case ASI_DMMU_CTXT_ZERO_CONFIG
:
1110 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
1112 itb
->c0_tsb_ps0
= data
;
1114 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
1116 itb
->c0_tsb_ps1
= data
;
1118 case ASI_IMMU_CTXT_ZERO_CONFIG
:
1120 itb
->c0_config
= data
;
1122 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1126 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1130 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
1134 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1136 itb
->cx_tsb_ps0
= data
;
1138 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1140 itb
->cx_tsb_ps1
= data
;
1142 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
1144 itb
->cx_config
= data
;
1146 case ASI_SPARC_ERROR_EN_REG
:
1147 case ASI_SPARC_ERROR_STATUS_REG
:
1148 inform("Ignoring write to SPARC ERROR regsiter\n");
1150 case ASI_HYP_SCRATCHPAD
:
1151 case ASI_SCRATCHPAD
:
1152 tc
->setMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3), data
);
1160 sext
<59>(bits(data
, 59,0));
1161 itb
->tag_access
= data
;
1164 goto doMmuWriteError
;
1167 case ASI_ITLB_DATA_ACCESS_REG
:
1168 entry_insert
= bits(va
, 8,3);
1169 case ASI_ITLB_DATA_IN_REG
:
1170 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1171 ta_insert
= itb
->tag_access
;
1172 va_insert
= mbits(ta_insert
, 63,13);
1173 ct_insert
= mbits(ta_insert
, 12,0);
1174 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1175 real_insert
= bits(va
, 9,9);
1176 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1177 PageTableEntry::sun4u
);
1178 tc
->getITBPtr()->insert(va_insert
, part_insert
, ct_insert
, real_insert
,
1181 case ASI_DTLB_DATA_ACCESS_REG
:
1182 entry_insert
= bits(va
, 8,3);
1183 case ASI_DTLB_DATA_IN_REG
:
1184 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1185 ta_insert
= tag_access
;
1186 va_insert
= mbits(ta_insert
, 63,13);
1187 ct_insert
= mbits(ta_insert
, 12,0);
1188 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1189 real_insert
= bits(va
, 9,9);
1190 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1191 PageTableEntry::sun4u
);
1192 insert(va_insert
, part_insert
, ct_insert
, real_insert
, pte
,
1195 case ASI_IMMU_DEMAP
:
1198 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1199 switch (bits(va
,5,4)) {
1201 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1213 switch(bits(va
,7,6)) {
1214 case 0: // demap page
1216 tc
->getITBPtr()->demapPage(mbits(va
,63,13), part_id
,
1217 bits(va
,9,9), ctx_id
);
1219 case 1: //demap context
1221 tc
->getITBPtr()->demapContext(part_id
, ctx_id
);
1224 tc
->getITBPtr()->demapAll(part_id
);
1227 panic("Invalid type for IMMU demap\n");
1236 sext
<59>(bits(data
, 59,0));
1240 tc
->setMiscReg(MISCREG_MMU_PART_ID
, data
);
1243 goto doMmuWriteError
;
1246 case ASI_DMMU_DEMAP
:
1249 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1250 switch (bits(va
,5,4)) {
1252 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1255 ctx_id
= tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
);
1264 switch(bits(va
,7,6)) {
1265 case 0: // demap page
1267 demapPage(mbits(va
,63,13), part_id
, bits(va
,9,9), ctx_id
);
1269 case 1: //demap context
1271 demapContext(part_id
, ctx_id
);
1277 panic("Invalid type for IMMU demap\n");
1280 case ASI_SWVR_INTR_RECEIVE
:
1283 // clear all the interrupts that aren't set in the write
1284 SparcISA::Interrupts
* interrupts
=
1285 dynamic_cast<SparcISA::Interrupts
*>(
1286 tc
->getCpuPtr()->getInterruptController());
1287 while (interrupts
->get_vec(IT_INT_VEC
) & data
) {
1288 msb
= findMsbSet(interrupts
->get_vec(IT_INT_VEC
) & data
);
1289 tc
->getCpuPtr()->clearInterrupt(IT_INT_VEC
, msb
);
1293 case ASI_SWVR_UDB_INTR_W
:
1294 tc
->getSystemPtr()->threadContexts
[bits(data
,12,8)]->getCpuPtr()->
1295 postInterrupt(bits(data
, 5, 0), 0);
1299 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1300 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr(), data
);
1302 pkt
->makeAtomicResponse();
1303 return tc
->getCpuPtr()->ticks(1);
1309 DTB::GetTsbPtr(ThreadContext
*tc
, Addr addr
, int ctx
, Addr
*ptrs
)
1311 uint64_t tag_access
= mbits(addr
,63,13) | mbits(ctx
,12,0);
1312 ITB
* itb
= tc
->getITBPtr();
1313 ptrs
[0] = MakeTsbPtr(Ps0
, tag_access
,
1318 ptrs
[1] = MakeTsbPtr(Ps1
, tag_access
,
1323 ptrs
[2] = MakeTsbPtr(Ps0
, tag_access
,
1328 ptrs
[3] = MakeTsbPtr(Ps1
, tag_access
,
1336 DTB::MakeTsbPtr(TsbPageSize ps
, uint64_t tag_access
, uint64_t c0_tsb
,
1337 uint64_t c0_config
, uint64_t cX_tsb
, uint64_t cX_config
)
1342 if (bits(tag_access
, 12,0) == 0) {
1350 uint64_t ptr
= mbits(tsb
,63,13);
1351 bool split
= bits(tsb
,12,12);
1352 int tsb_size
= bits(tsb
,3,0);
1353 int page_size
= (ps
== Ps0
) ? bits(config
, 2,0) : bits(config
,10,8);
1355 if (ps
== Ps1
&& split
)
1356 ptr
|= ULL(1) << (13 + tsb_size
);
1357 ptr
|= (tag_access
>> (9 + page_size
* 3)) & mask(12+tsb_size
, 4);
1363 TLB::serialize(std::ostream
&os
)
1365 SERIALIZE_SCALAR(size
);
1366 SERIALIZE_SCALAR(usedEntries
);
1367 SERIALIZE_SCALAR(lastReplaced
);
1369 // convert the pointer based free list into an index based one
1370 int *free_list
= (int*)malloc(sizeof(int) * size
);
1372 std::list
<TlbEntry
*>::iterator i
;
1373 i
= freeList
.begin();
1374 while (i
!= freeList
.end()) {
1375 free_list
[cntr
++] = ((size_t)*i
- (size_t)tlb
)/ sizeof(TlbEntry
);
1378 SERIALIZE_SCALAR(cntr
);
1379 SERIALIZE_ARRAY(free_list
, cntr
);
1381 SERIALIZE_SCALAR(c0_tsb_ps0
);
1382 SERIALIZE_SCALAR(c0_tsb_ps1
);
1383 SERIALIZE_SCALAR(c0_config
);
1384 SERIALIZE_SCALAR(cx_tsb_ps0
);
1385 SERIALIZE_SCALAR(cx_tsb_ps1
);
1386 SERIALIZE_SCALAR(cx_config
);
1387 SERIALIZE_SCALAR(sfsr
);
1388 SERIALIZE_SCALAR(tag_access
);
1390 for (int x
= 0; x
< size
; x
++) {
1391 nameOut(os
, csprintf("%s.PTE%d", name(), x
));
1392 tlb
[x
].serialize(os
);
1397 TLB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1401 paramIn(cp
, section
, "size", oldSize
);
1402 if (oldSize
!= size
)
1403 panic("Don't support unserializing different sized TLBs\n");
1404 UNSERIALIZE_SCALAR(usedEntries
);
1405 UNSERIALIZE_SCALAR(lastReplaced
);
1408 UNSERIALIZE_SCALAR(cntr
);
1410 int *free_list
= (int*)malloc(sizeof(int) * cntr
);
1412 UNSERIALIZE_ARRAY(free_list
, cntr
);
1413 for (int x
= 0; x
< cntr
; x
++)
1414 freeList
.push_back(&tlb
[free_list
[x
]]);
1416 UNSERIALIZE_SCALAR(c0_tsb_ps0
);
1417 UNSERIALIZE_SCALAR(c0_tsb_ps1
);
1418 UNSERIALIZE_SCALAR(c0_config
);
1419 UNSERIALIZE_SCALAR(cx_tsb_ps0
);
1420 UNSERIALIZE_SCALAR(cx_tsb_ps1
);
1421 UNSERIALIZE_SCALAR(cx_config
);
1422 UNSERIALIZE_SCALAR(sfsr
);
1423 UNSERIALIZE_SCALAR(tag_access
);
1425 lookupTable
.clear();
1426 for (int x
= 0; x
< size
; x
++) {
1427 tlb
[x
].unserialize(cp
, csprintf("%s.PTE%d", section
, x
));
1429 lookupTable
.insert(tlb
[x
].range
, &tlb
[x
]);
1435 DTB::serialize(std::ostream
&os
)
1438 SERIALIZE_SCALAR(sfar
);
1442 DTB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1444 TLB::unserialize(cp
, section
);
1445 UNSERIALIZE_SCALAR(sfar
);
1448 /* end namespace SparcISA */ }
1451 SparcITBParams::create()
1453 return new SparcISA::ITB(this);
1457 SparcDTBParams::create()
1459 return new SparcISA::DTB(this);