2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/miscregfile.hh"
35 #include "arch/sparc/tlb.hh"
36 #include "base/bitfield.hh"
37 #include "base/trace.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/base.hh"
40 #include "mem/packet_access.hh"
41 #include "mem/request.hh"
42 #include "params/SparcDTB.hh"
43 #include "params/SparcITB.hh"
44 #include "sim/system.hh"
46 /* @todo remove some of the magic constants. -- ali
50 TLB::TLB(const std::string
&name
, int s
)
51 : SimObject(name
), size(s
), usedEntries(0), lastReplaced(0),
54 // To make this work you'll have to change the hypervisor and OS
56 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
58 tlb
= new TlbEntry
[size
];
59 std::memset(tlb
, 0, sizeof(TlbEntry
) * size
);
61 for (int x
= 0; x
< size
; x
++)
62 freeList
.push_back(&tlb
[x
]);
78 for (i
= lookupTable
.begin(); i
!= lookupTable
.end(); i
++) {
79 TlbEntry
*t
= i
->second
;
80 if (!t
->pte
.locked()) {
89 TLB::insert(Addr va
, int partition_id
, int context_id
, bool real
,
90 const PageTableEntry
& PTE
, int entry
)
95 TlbEntry
*new_entry
= NULL
;
100 va
&= ~(PTE
.size()-1);
102 tr.size = PTE.size() - 1;
103 tr.contextId = context_id;
104 tr.partitionId = partition_id;
108 DPRINTF(TLB
, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
109 va
, PTE
.paddr(), partition_id
, context_id
, (int)real
, entry
);
111 // Demap any entry that conflicts
112 for (x
= 0; x
< size
; x
++) {
113 if (tlb
[x
].range
.real
== real
&&
114 tlb
[x
].range
.partitionId
== partition_id
&&
115 tlb
[x
].range
.va
< va
+ PTE
.size() - 1 &&
116 tlb
[x
].range
.va
+ tlb
[x
].range
.size
>= va
&&
117 (real
|| tlb
[x
].range
.contextId
== context_id
))
120 freeList
.push_front(&tlb
[x
]);
121 DPRINTF(TLB
, "TLB: Conflicting entry %#X , deleting it\n", x
);
123 tlb
[x
].valid
= false;
128 lookupTable
.erase(tlb
[x
].range
);
135 i = lookupTable.find(tr);
136 if (i != lookupTable.end()) {
137 i->second->valid = false;
138 if (i->second->used) {
139 i->second->used = false;
142 freeList.push_front(i->second);
143 DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
145 lookupTable.erase(i);
150 assert(entry
< size
&& entry
>= 0);
151 new_entry
= &tlb
[entry
];
153 if (!freeList
.empty()) {
154 new_entry
= freeList
.front();
161 if (x
== lastReplaced
)
162 goto insertAllLocked
;
163 } while (tlb
[x
].pte
.locked());
168 for (x = 0; x < size; x++) {
169 if (!tlb[x].valid || !tlb[x].used) {
177 // Update the last entry if they're all locked
179 new_entry
= &tlb
[size
-1];
182 freeList
.remove(new_entry
);
183 if (new_entry
->valid
&& new_entry
->used
)
185 if (new_entry
->valid
)
186 lookupTable
.erase(new_entry
->range
);
190 new_entry
->range
.va
= va
;
191 new_entry
->range
.size
= PTE
.size() - 1;
192 new_entry
->range
.partitionId
= partition_id
;
193 new_entry
->range
.contextId
= context_id
;
194 new_entry
->range
.real
= real
;
195 new_entry
->pte
= PTE
;
196 new_entry
->used
= true;;
197 new_entry
->valid
= true;
202 i
= lookupTable
.insert(new_entry
->range
, new_entry
);
203 assert(i
!= lookupTable
.end());
205 // If all entries have their used bit set, clear it on them all, but the
206 // one we just inserted
207 if (usedEntries
== size
) {
209 new_entry
->used
= true;
217 TLB::lookup(Addr va
, int partition_id
, bool real
, int context_id
, bool
224 DPRINTF(TLB
, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
225 va
, partition_id
, context_id
, real
);
226 // Assemble full address structure
228 tr
.size
= MachineBytes
;
229 tr
.contextId
= context_id
;
230 tr
.partitionId
= partition_id
;
233 // Try to find the entry
234 i
= lookupTable
.find(tr
);
235 if (i
== lookupTable
.end()) {
236 DPRINTF(TLB
, "TLB: No valid entry found\n");
240 // Mark the entry's used bit and clear other used bits if needed
242 DPRINTF(TLB
, "TLB: Valid entry found pa: %#x size: %#x\n", t
->pte
.paddr(),
245 // Update the used bits only if this is a real access (not a fake one from
247 if (!t
->used
&& update_used
) {
250 if (usedEntries
== size
) {
264 for (int x
= 0; x
< size
; x
++) {
266 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
267 x
, tlb
[x
].range
.partitionId
, tlb
[x
].range
.contextId
,
268 tlb
[x
].range
.real
? 'R' : ' ', tlb
[x
].range
.size
,
269 tlb
[x
].range
.va
, tlb
[x
].pte
.paddr(), tlb
[x
].pte());
275 TLB::demapPage(Addr va
, int partition_id
, bool real
, int context_id
)
280 DPRINTF(IPR
, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
281 va
, partition_id
, context_id
, real
);
285 // Assemble full address structure
287 tr
.size
= MachineBytes
;
288 tr
.contextId
= context_id
;
289 tr
.partitionId
= partition_id
;
292 // Demap any entry that conflicts
293 i
= lookupTable
.find(tr
);
294 if (i
!= lookupTable
.end()) {
295 DPRINTF(IPR
, "TLB: Demapped page\n");
296 i
->second
->valid
= false;
297 if (i
->second
->used
) {
298 i
->second
->used
= false;
301 freeList
.push_front(i
->second
);
302 lookupTable
.erase(i
);
307 TLB::demapContext(int partition_id
, int context_id
)
310 DPRINTF(IPR
, "TLB: Demapping Context pid=%#d cid=%d\n",
311 partition_id
, context_id
);
313 for (x
= 0; x
< size
; x
++) {
314 if (tlb
[x
].range
.contextId
== context_id
&&
315 tlb
[x
].range
.partitionId
== partition_id
) {
316 if (tlb
[x
].valid
== true) {
317 freeList
.push_front(&tlb
[x
]);
319 tlb
[x
].valid
= false;
324 lookupTable
.erase(tlb
[x
].range
);
330 TLB::demapAll(int partition_id
)
333 DPRINTF(TLB
, "TLB: Demapping All pid=%#d\n", partition_id
);
335 for (x
= 0; x
< size
; x
++) {
336 if (!tlb
[x
].pte
.locked() && tlb
[x
].range
.partitionId
== partition_id
) {
337 if (tlb
[x
].valid
== true){
338 freeList
.push_front(&tlb
[x
]);
340 tlb
[x
].valid
= false;
345 lookupTable
.erase(tlb
[x
].range
);
358 for (x
= 0; x
< size
; x
++) {
359 if (tlb
[x
].valid
== true)
360 freeList
.push_back(&tlb
[x
]);
361 tlb
[x
].valid
= false;
368 TLB::TteRead(int entry
) {
370 panic("entry: %d\n", entry
);
372 assert(entry
< size
);
373 if (tlb
[entry
].valid
)
374 return tlb
[entry
].pte();
376 return (uint64_t)-1ll;
380 TLB::TagRead(int entry
) {
381 assert(entry
< size
);
383 if (!tlb
[entry
].valid
)
384 return (uint64_t)-1ll;
386 tag
= tlb
[entry
].range
.contextId
;
387 tag
|= tlb
[entry
].range
.va
;
388 tag
|= (uint64_t)tlb
[entry
].range
.partitionId
<< 61;
389 tag
|= tlb
[entry
].range
.real
? ULL(1) << 60 : 0;
390 tag
|= (uint64_t)~tlb
[entry
].pte
._size() << 56;
395 TLB::validVirtualAddress(Addr va
, bool am
)
399 if (va
>= StartVAddrHole
&& va
<= EndVAddrHole
)
405 TLB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
422 TLB::writeTagAccess(Addr va
, int context
)
424 DPRINTF(TLB
, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
425 va
, context
, mbits(va
, 63,13) | mbits(context
,12,0));
427 tag_access
= mbits(va
, 63,13) | mbits(context
,12,0);
431 ITB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
433 DPRINTF(TLB
, "TLB: ITB Fault: w=%d ct=%d ft=%d asi=%d\n",
434 (int)write
, ct
, ft
, asi
);
435 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
439 DTB::writeSfsr(Addr a
, bool write
, ContextType ct
,
440 bool se
, FaultTypes ft
, int asi
)
442 DPRINTF(TLB
, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
443 a
, (int)write
, ct
, ft
, asi
);
444 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
449 ITB::translate(RequestPtr
&req
, ThreadContext
*tc
)
451 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
453 Addr vaddr
= req
->getVaddr();
456 assert(req
->getAsi() == ASI_IMPLICIT
);
458 DPRINTF(TLB
, "TLB: ITB Request to translate va=%#x size=%d\n",
459 vaddr
, req
->getSize());
461 // Be fast if we can!
462 if (cacheValid
&& cacheState
== tlbdata
) {
464 if (cacheEntry
->range
.va
< vaddr
+ sizeof(MachInst
) &&
465 cacheEntry
->range
.va
+ cacheEntry
->range
.size
>= vaddr
) {
466 req
->setPaddr(cacheEntry
->pte
.paddr() & ~(cacheEntry
->pte
.size()-1) |
467 vaddr
& cacheEntry
->pte
.size()-1 );
471 req
->setPaddr(vaddr
& PAddrImplMask
);
476 bool hpriv
= bits(tlbdata
,0,0);
477 bool red
= bits(tlbdata
,1,1);
478 bool priv
= bits(tlbdata
,2,2);
479 bool addr_mask
= bits(tlbdata
,3,3);
480 bool lsu_im
= bits(tlbdata
,4,4);
482 int part_id
= bits(tlbdata
,15,8);
483 int tl
= bits(tlbdata
,18,16);
484 int pri_context
= bits(tlbdata
,47,32);
490 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
491 priv
, hpriv
, red
, lsu_im
, part_id
);
500 context
= pri_context
;
503 if ( hpriv
|| red
) {
505 cacheState
= tlbdata
;
507 req
->setPaddr(vaddr
& PAddrImplMask
);
511 // If the access is unaligned, trap
513 writeSfsr(false, ct
, false, OtherFault
, asi
);
514 return new MemAddressNotAligned
;
518 vaddr
= vaddr
& VAddrAMask
;
520 if (!validVirtualAddress(vaddr
, addr_mask
)) {
521 writeSfsr(false, ct
, false, VaOutOfRange
, asi
);
522 return new InstructionAccessException
;
526 e
= lookup(vaddr
, part_id
, true);
530 e
= lookup(vaddr
, part_id
, false, context
);
533 if (e
== NULL
|| !e
->valid
) {
534 writeTagAccess(vaddr
, context
);
536 return new InstructionRealTranslationMiss
;
538 return new FastInstructionAccessMMUMiss
;
541 // we're not privileged and are accessing a privileged page
542 if (!priv
&& e
->pte
.priv()) {
543 writeTagAccess(vaddr
, context
);
544 writeSfsr(false, ct
, false, PrivViolation
, asi
);
545 return new InstructionAccessException
;
548 // cache translation data for next translation
550 cacheState
= tlbdata
;
553 req
->setPaddr(e
->pte
.paddr() & ~(e
->pte
.size()-1) |
554 vaddr
& e
->pte
.size()-1 );
555 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
562 DTB::translate(RequestPtr
&req
, ThreadContext
*tc
, bool write
)
564 /* @todo this could really use some profiling and fixing to make it faster! */
565 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
566 Addr vaddr
= req
->getVaddr();
567 Addr size
= req
->getSize();
569 asi
= (ASI
)req
->getAsi();
570 bool implicit
= false;
571 bool hpriv
= bits(tlbdata
,0,0);
573 DPRINTF(TLB
, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
576 if (lookupTable
.size() != 64 - freeList
.size())
577 panic("Lookup table size: %d tlb size: %d\n", lookupTable
.size(),
579 if (asi
== ASI_IMPLICIT
)
582 if (hpriv
&& implicit
) {
583 req
->setPaddr(vaddr
& PAddrImplMask
);
587 // Be fast if we can!
588 if (cacheValid
&& cacheState
== tlbdata
) {
593 TlbEntry
*ce
= cacheEntry
[0];
594 Addr ce_va
= ce
->range
.va
;
595 if (cacheAsi
[0] == asi
&&
596 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
597 (!write
|| ce
->pte
.writable())) {
598 req
->setPaddr(ce
->pte
.paddrMask() | vaddr
& ce
->pte
.sizeMask());
599 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
600 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
601 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
604 } // if cache entry valid
606 TlbEntry
*ce
= cacheEntry
[1];
607 Addr ce_va
= ce
->range
.va
;
608 if (cacheAsi
[1] == asi
&&
609 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
610 (!write
|| ce
->pte
.writable())) {
611 req
->setPaddr(ce
->pte
.paddrMask() | vaddr
& ce
->pte
.sizeMask());
612 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
613 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
614 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
617 } // if cache entry valid
620 bool red
= bits(tlbdata
,1,1);
621 bool priv
= bits(tlbdata
,2,2);
622 bool addr_mask
= bits(tlbdata
,3,3);
623 bool lsu_dm
= bits(tlbdata
,5,5);
625 int part_id
= bits(tlbdata
,15,8);
626 int tl
= bits(tlbdata
,18,16);
627 int pri_context
= bits(tlbdata
,47,32);
628 int sec_context
= bits(tlbdata
,63,48);
631 ContextType ct
= Primary
;
636 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
637 priv
, hpriv
, red
, lsu_dm
, part_id
);
647 context
= pri_context
;
650 // We need to check for priv level/asi priv
651 if (!priv
&& !hpriv
&& !AsiIsUnPriv(asi
)) {
652 // It appears that context should be Nucleus in these cases?
653 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
654 return new PrivilegedAction
;
657 if (!hpriv
&& AsiIsHPriv(asi
)) {
658 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
659 return new DataAccessException
;
662 if (AsiIsPrimary(asi
)) {
663 context
= pri_context
;
665 } else if (AsiIsSecondary(asi
)) {
666 context
= sec_context
;
668 } else if (AsiIsNucleus(asi
)) {
673 context
= pri_context
;
677 if (!implicit
&& asi
!= ASI_P
&& asi
!= ASI_S
) {
678 if (AsiIsLittle(asi
))
679 panic("Little Endian ASIs not supported\n");
681 //XXX It's unclear from looking at the documentation how a no fault
682 //load differs from a regular one, other than what happens concerning
683 //nfo and e bits in the TTE
684 // if (AsiIsNoFault(asi))
685 // panic("No Fault ASIs not supported\n");
687 if (AsiIsPartialStore(asi
))
688 panic("Partial Store ASIs not supported\n");
691 panic("Cmt ASI registers not implmented\n");
693 if (AsiIsInterrupt(asi
))
694 goto handleIntRegAccess
;
696 goto handleMmuRegAccess
;
697 if (AsiIsScratchPad(asi
))
698 goto handleScratchRegAccess
;
700 goto handleQueueRegAccess
;
701 if (AsiIsSparcError(asi
))
702 goto handleSparcErrorRegAccess
;
704 if (!AsiIsReal(asi
) && !AsiIsNucleus(asi
) && !AsiIsAsIfUser(asi
) &&
705 !AsiIsTwin(asi
) && !AsiIsBlock(asi
) && !AsiIsNoFault(asi
))
706 panic("Accessing ASI %#X. Should we?\n", asi
);
709 // If the access is unaligned, trap
710 if (vaddr
& size
-1) {
711 writeSfsr(vaddr
, false, ct
, false, OtherFault
, asi
);
712 return new MemAddressNotAligned
;
716 vaddr
= vaddr
& VAddrAMask
;
718 if (!validVirtualAddress(vaddr
, addr_mask
)) {
719 writeSfsr(vaddr
, false, ct
, true, VaOutOfRange
, asi
);
720 return new DataAccessException
;
724 if ((!lsu_dm
&& !hpriv
&& !red
) || AsiIsReal(asi
)) {
729 if (hpriv
&& (implicit
|| (!AsiIsAsIfUser(asi
) && !AsiIsReal(asi
)))) {
730 req
->setPaddr(vaddr
& PAddrImplMask
);
734 e
= lookup(vaddr
, part_id
, real
, context
);
736 if (e
== NULL
|| !e
->valid
) {
737 writeTagAccess(vaddr
, context
);
738 DPRINTF(TLB
, "TLB: DTB Failed to find matching TLB entry\n");
740 return new DataRealTranslationMiss
;
742 return new FastDataAccessMMUMiss
;
746 if (!priv
&& e
->pte
.priv()) {
747 writeTagAccess(vaddr
, context
);
748 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), PrivViolation
, asi
);
749 return new DataAccessException
;
752 if (write
&& !e
->pte
.writable()) {
753 writeTagAccess(vaddr
, context
);
754 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), OtherFault
, asi
);
755 return new FastDataAccessProtection
;
758 if (e
->pte
.nofault() && !AsiIsNoFault(asi
)) {
759 writeTagAccess(vaddr
, context
);
760 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), LoadFromNfo
, asi
);
761 return new DataAccessException
;
764 if (e
->pte
.sideffect() && AsiIsNoFault(asi
)) {
765 writeTagAccess(vaddr
, context
);
766 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), SideEffect
, asi
);
767 return new DataAccessException
;
771 if (e
->pte
.sideffect() || (e
->pte
.paddr() >> 39) & 1)
772 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
774 // cache translation data for next translation
775 cacheState
= tlbdata
;
777 cacheEntry
[1] = NULL
;
778 cacheEntry
[0] = NULL
;
781 if (cacheEntry
[0] != e
&& cacheEntry
[1] != e
) {
782 cacheEntry
[1] = cacheEntry
[0];
784 cacheAsi
[1] = cacheAsi
[0];
787 cacheAsi
[0] = (ASI
)0;
790 req
->setPaddr(e
->pte
.paddr() & ~(e
->pte
.size()-1) |
791 vaddr
& e
->pte
.size()-1);
792 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
795 /** Normal flow ends here. */
798 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
800 return new DataAccessException
;
802 return new PrivilegedAction
;
805 if (asi
== ASI_SWVR_UDB_INTR_W
&& !write
||
806 asi
== ASI_SWVR_UDB_INTR_R
&& write
) {
807 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
808 return new DataAccessException
;
814 handleScratchRegAccess
:
815 if (vaddr
> 0x38 || (vaddr
>= 0x20 && vaddr
< 0x30 && !hpriv
)) {
816 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
817 return new DataAccessException
;
821 handleQueueRegAccess
:
822 if (!priv
&& !hpriv
) {
823 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
824 return new PrivilegedAction
;
826 if (!hpriv
&& vaddr
& 0xF || vaddr
> 0x3f8 || vaddr
< 0x3c0) {
827 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
828 return new DataAccessException
;
832 handleSparcErrorRegAccess
:
834 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
836 return new DataAccessException
;
838 return new PrivilegedAction
;
845 DPRINTF(TLB
, "TLB: DTB Translating MM IPR access\n");
846 req
->setMmapedIpr(true);
847 req
->setPaddr(req
->getVaddr());
852 DTB::doMmuRegRead(ThreadContext
*tc
, Packet
*pkt
)
854 Addr va
= pkt
->getAddr();
855 ASI asi
= (ASI
)pkt
->req
->getAsi();
858 DPRINTF(IPR
, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
859 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr());
861 ITB
* itb
= tc
->getITBPtr();
864 case ASI_LSU_CONTROL_REG
:
866 pkt
->set(tc
->readMiscReg(MISCREG_MMU_LSU_CTRL
));
871 pkt
->set(tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
));
874 pkt
->set(tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
));
881 pkt
->set(tc
->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
884 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
886 pkt
->set(c0_tsb_ps0
);
888 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
890 pkt
->set(c0_tsb_ps1
);
892 case ASI_DMMU_CTXT_ZERO_CONFIG
:
896 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
898 pkt
->set(itb
->c0_tsb_ps0
);
900 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
902 pkt
->set(itb
->c0_tsb_ps1
);
904 case ASI_IMMU_CTXT_ZERO_CONFIG
:
906 pkt
->set(itb
->c0_config
);
908 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
910 pkt
->set(cx_tsb_ps0
);
912 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
914 pkt
->set(cx_tsb_ps1
);
916 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
920 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
922 pkt
->set(itb
->cx_tsb_ps0
);
924 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
926 pkt
->set(itb
->cx_tsb_ps1
);
928 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
930 pkt
->set(itb
->cx_config
);
932 case ASI_SPARC_ERROR_STATUS_REG
:
933 pkt
->set((uint64_t)0);
935 case ASI_HYP_SCRATCHPAD
:
937 pkt
->set(tc
->readMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3)));
942 temp
= itb
->tag_access
;
943 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
949 pkt
->set(itb
->tag_access
);
959 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
968 pkt
->set(tag_access
);
971 pkt
->set(tc
->readMiscReg(MISCREG_MMU_PART_ID
));
977 case ASI_DMMU_TSB_PS0_PTR_REG
:
978 pkt
->set(MakeTsbPtr(Ps0
,
985 case ASI_DMMU_TSB_PS1_PTR_REG
:
986 pkt
->set(MakeTsbPtr(Ps1
,
993 case ASI_IMMU_TSB_PS0_PTR_REG
:
994 pkt
->set(MakeTsbPtr(Ps0
,
1001 case ASI_IMMU_TSB_PS1_PTR_REG
:
1002 pkt
->set(MakeTsbPtr(Ps1
,
1009 case ASI_SWVR_INTR_RECEIVE
:
1010 pkt
->set(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
));
1012 case ASI_SWVR_UDB_INTR_R
:
1013 temp
= findMsbSet(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
));
1014 tc
->getCpuPtr()->clear_interrupt(IT_INT_VEC
, temp
);
1019 panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1022 pkt
->makeAtomicResponse();
1023 return tc
->getCpuPtr()->cycles(1);
1027 DTB::doMmuRegWrite(ThreadContext
*tc
, Packet
*pkt
)
1029 uint64_t data
= gtoh(pkt
->get
<uint64_t>());
1030 Addr va
= pkt
->getAddr();
1031 ASI asi
= (ASI
)pkt
->req
->getAsi();
1037 int entry_insert
= -1;
1044 DPRINTF(IPR
, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1045 (uint32_t)asi
, va
, data
);
1047 ITB
* itb
= tc
->getITBPtr();
1050 case ASI_LSU_CONTROL_REG
:
1052 tc
->setMiscReg(MISCREG_MMU_LSU_CTRL
, data
);
1057 tc
->setMiscReg(MISCREG_MMU_P_CONTEXT
, data
);
1060 tc
->setMiscReg(MISCREG_MMU_S_CONTEXT
, data
);
1063 goto doMmuWriteError
;
1067 assert(mbits(data
,13,6) == data
);
1068 tc
->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
1069 (va
>> 4) - 0x3c, data
);
1071 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
1075 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
1079 case ASI_DMMU_CTXT_ZERO_CONFIG
:
1083 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
1085 itb
->c0_tsb_ps0
= data
;
1087 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
1089 itb
->c0_tsb_ps1
= data
;
1091 case ASI_IMMU_CTXT_ZERO_CONFIG
:
1093 itb
->c0_config
= data
;
1095 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1099 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1103 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
1107 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1109 itb
->cx_tsb_ps0
= data
;
1111 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1113 itb
->cx_tsb_ps1
= data
;
1115 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
1117 itb
->cx_config
= data
;
1119 case ASI_SPARC_ERROR_EN_REG
:
1120 case ASI_SPARC_ERROR_STATUS_REG
:
1121 warn("Ignoring write to SPARC ERROR regsiter\n");
1123 case ASI_HYP_SCRATCHPAD
:
1124 case ASI_SCRATCHPAD
:
1125 tc
->setMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3), data
);
1133 sext
<59>(bits(data
, 59,0));
1134 itb
->tag_access
= data
;
1137 goto doMmuWriteError
;
1140 case ASI_ITLB_DATA_ACCESS_REG
:
1141 entry_insert
= bits(va
, 8,3);
1142 case ASI_ITLB_DATA_IN_REG
:
1143 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1144 ta_insert
= itb
->tag_access
;
1145 va_insert
= mbits(ta_insert
, 63,13);
1146 ct_insert
= mbits(ta_insert
, 12,0);
1147 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1148 real_insert
= bits(va
, 9,9);
1149 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1150 PageTableEntry::sun4u
);
1151 tc
->getITBPtr()->insert(va_insert
, part_insert
, ct_insert
, real_insert
,
1154 case ASI_DTLB_DATA_ACCESS_REG
:
1155 entry_insert
= bits(va
, 8,3);
1156 case ASI_DTLB_DATA_IN_REG
:
1157 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1158 ta_insert
= tag_access
;
1159 va_insert
= mbits(ta_insert
, 63,13);
1160 ct_insert
= mbits(ta_insert
, 12,0);
1161 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1162 real_insert
= bits(va
, 9,9);
1163 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1164 PageTableEntry::sun4u
);
1165 insert(va_insert
, part_insert
, ct_insert
, real_insert
, pte
, entry_insert
);
1167 case ASI_IMMU_DEMAP
:
1170 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1171 switch (bits(va
,5,4)) {
1173 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1185 switch(bits(va
,7,6)) {
1186 case 0: // demap page
1188 tc
->getITBPtr()->demapPage(mbits(va
,63,13), part_id
,
1189 bits(va
,9,9), ctx_id
);
1191 case 1: //demap context
1193 tc
->getITBPtr()->demapContext(part_id
, ctx_id
);
1196 tc
->getITBPtr()->demapAll(part_id
);
1199 panic("Invalid type for IMMU demap\n");
1208 sext
<59>(bits(data
, 59,0));
1212 tc
->setMiscReg(MISCREG_MMU_PART_ID
, data
);
1215 goto doMmuWriteError
;
1218 case ASI_DMMU_DEMAP
:
1221 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1222 switch (bits(va
,5,4)) {
1224 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1227 ctx_id
= tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
);
1236 switch(bits(va
,7,6)) {
1237 case 0: // demap page
1239 demapPage(mbits(va
,63,13), part_id
, bits(va
,9,9), ctx_id
);
1241 case 1: //demap context
1243 demapContext(part_id
, ctx_id
);
1249 panic("Invalid type for IMMU demap\n");
1252 case ASI_SWVR_INTR_RECEIVE
:
1254 // clear all the interrupts that aren't set in the write
1255 while(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
) & data
) {
1256 msb
= findMsbSet(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
) & data
);
1257 tc
->getCpuPtr()->clear_interrupt(IT_INT_VEC
, msb
);
1260 case ASI_SWVR_UDB_INTR_W
:
1261 tc
->getSystemPtr()->threadContexts
[bits(data
,12,8)]->getCpuPtr()->
1262 post_interrupt(bits(data
,5,0),0);
1266 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1267 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr(), data
);
1269 pkt
->makeAtomicResponse();
1270 return tc
->getCpuPtr()->cycles(1);
1274 DTB::GetTsbPtr(ThreadContext
*tc
, Addr addr
, int ctx
, Addr
*ptrs
)
1276 uint64_t tag_access
= mbits(addr
,63,13) | mbits(ctx
,12,0);
1277 ITB
* itb
= tc
->getITBPtr();
1278 ptrs
[0] = MakeTsbPtr(Ps0
, tag_access
,
1283 ptrs
[1] = MakeTsbPtr(Ps1
, tag_access
,
1288 ptrs
[2] = MakeTsbPtr(Ps0
, tag_access
,
1293 ptrs
[3] = MakeTsbPtr(Ps1
, tag_access
,
1305 DTB::MakeTsbPtr(TsbPageSize ps
, uint64_t tag_access
, uint64_t c0_tsb
,
1306 uint64_t c0_config
, uint64_t cX_tsb
, uint64_t cX_config
)
1311 if (bits(tag_access
, 12,0) == 0) {
1319 uint64_t ptr
= mbits(tsb
,63,13);
1320 bool split
= bits(tsb
,12,12);
1321 int tsb_size
= bits(tsb
,3,0);
1322 int page_size
= (ps
== Ps0
) ? bits(config
, 2,0) : bits(config
,10,8);
1324 if (ps
== Ps1
&& split
)
1325 ptr
|= ULL(1) << (13 + tsb_size
);
1326 ptr
|= (tag_access
>> (9 + page_size
* 3)) & mask(12+tsb_size
, 4);
1333 TLB::serialize(std::ostream
&os
)
1335 SERIALIZE_SCALAR(size
);
1336 SERIALIZE_SCALAR(usedEntries
);
1337 SERIALIZE_SCALAR(lastReplaced
);
1339 // convert the pointer based free list into an index based one
1340 int *free_list
= (int*)malloc(sizeof(int) * size
);
1342 std::list
<TlbEntry
*>::iterator i
;
1343 i
= freeList
.begin();
1344 while (i
!= freeList
.end()) {
1345 free_list
[cntr
++] = ((size_t)*i
- (size_t)tlb
)/ sizeof(TlbEntry
);
1348 SERIALIZE_SCALAR(cntr
);
1349 SERIALIZE_ARRAY(free_list
, cntr
);
1351 for (int x
= 0; x
< size
; x
++) {
1352 nameOut(os
, csprintf("%s.PTE%d", name(), x
));
1353 tlb
[x
].serialize(os
);
1356 SERIALIZE_SCALAR(c0_tsb_ps0
);
1357 SERIALIZE_SCALAR(c0_tsb_ps1
);
1358 SERIALIZE_SCALAR(c0_config
);
1359 SERIALIZE_SCALAR(cx_tsb_ps0
);
1360 SERIALIZE_SCALAR(cx_tsb_ps1
);
1361 SERIALIZE_SCALAR(cx_config
);
1362 SERIALIZE_SCALAR(sfsr
);
1363 SERIALIZE_SCALAR(tag_access
);
1367 TLB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1371 paramIn(cp
, section
, "size", oldSize
);
1372 if (oldSize
!= size
)
1373 panic("Don't support unserializing different sized TLBs\n");
1374 UNSERIALIZE_SCALAR(usedEntries
);
1375 UNSERIALIZE_SCALAR(lastReplaced
);
1378 UNSERIALIZE_SCALAR(cntr
);
1380 int *free_list
= (int*)malloc(sizeof(int) * cntr
);
1382 UNSERIALIZE_ARRAY(free_list
, cntr
);
1383 for (int x
= 0; x
< cntr
; x
++)
1384 freeList
.push_back(&tlb
[free_list
[x
]]);
1386 lookupTable
.clear();
1387 for (int x
= 0; x
< size
; x
++) {
1388 tlb
[x
].unserialize(cp
, csprintf("%s.PTE%d", section
, x
));
1390 lookupTable
.insert(tlb
[x
].range
, &tlb
[x
]);
1394 UNSERIALIZE_SCALAR(c0_tsb_ps0
);
1395 UNSERIALIZE_SCALAR(c0_tsb_ps1
);
1396 UNSERIALIZE_SCALAR(c0_config
);
1397 UNSERIALIZE_SCALAR(cx_tsb_ps0
);
1398 UNSERIALIZE_SCALAR(cx_tsb_ps1
);
1399 UNSERIALIZE_SCALAR(cx_config
);
1400 UNSERIALIZE_SCALAR(sfsr
);
1401 UNSERIALIZE_SCALAR(tag_access
);
1405 DTB::serialize(std::ostream
&os
)
1408 SERIALIZE_SCALAR(sfar
);
1412 DTB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1414 TLB::unserialize(cp
, section
);
1415 UNSERIALIZE_SCALAR(sfar
);
1418 /* end namespace SparcISA */ }
1421 SparcITBParams::create()
1423 return new SparcISA::ITB(name
, size
);
1427 SparcDTBParams::create()
1429 return new SparcISA::DTB(name
, size
);