2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/miscregfile.hh"
35 #include "arch/sparc/tlb.hh"
36 #include "base/bitfield.hh"
37 #include "base/trace.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/base.hh"
40 #include "mem/packet_access.hh"
41 #include "mem/request.hh"
42 #include "params/SparcDTB.hh"
43 #include "params/SparcITB.hh"
44 #include "sim/system.hh"
46 /* @todo remove some of the magic constants. -- ali
50 TLB::TLB(const std::string
&name
, int s
)
51 : SimObject(name
), size(s
), usedEntries(0), lastReplaced(0),
54 // To make this work you'll have to change the hypervisor and OS
56 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
58 tlb
= new TlbEntry
[size
];
59 std::memset(tlb
, 0, sizeof(TlbEntry
) * size
);
61 for (int x
= 0; x
< size
; x
++)
62 freeList
.push_back(&tlb
[x
]);
78 for (i
= lookupTable
.begin(); i
!= lookupTable
.end(); i
++) {
79 TlbEntry
*t
= i
->second
;
80 if (!t
->pte
.locked()) {
89 TLB::insert(Addr va
, int partition_id
, int context_id
, bool real
,
90 const PageTableEntry
& PTE
, int entry
)
95 TlbEntry
*new_entry
= NULL
;
100 va
&= ~(PTE
.size()-1);
102 tr.size = PTE.size() - 1;
103 tr.contextId = context_id;
104 tr.partitionId = partition_id;
108 DPRINTF(TLB
, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
109 va
, PTE
.paddr(), partition_id
, context_id
, (int)real
, entry
);
111 // Demap any entry that conflicts
112 for (x
= 0; x
< size
; x
++) {
113 if (tlb
[x
].range
.real
== real
&&
114 tlb
[x
].range
.partitionId
== partition_id
&&
115 tlb
[x
].range
.va
< va
+ PTE
.size() - 1 &&
116 tlb
[x
].range
.va
+ tlb
[x
].range
.size
>= va
&&
117 (real
|| tlb
[x
].range
.contextId
== context_id
))
120 freeList
.push_front(&tlb
[x
]);
121 DPRINTF(TLB
, "TLB: Conflicting entry %#X , deleting it\n", x
);
123 tlb
[x
].valid
= false;
128 lookupTable
.erase(tlb
[x
].range
);
135 i = lookupTable.find(tr);
136 if (i != lookupTable.end()) {
137 i->second->valid = false;
138 if (i->second->used) {
139 i->second->used = false;
142 freeList.push_front(i->second);
143 DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
145 lookupTable.erase(i);
150 assert(entry
< size
&& entry
>= 0);
151 new_entry
= &tlb
[entry
];
153 if (!freeList
.empty()) {
154 new_entry
= freeList
.front();
161 if (x
== lastReplaced
)
162 goto insertAllLocked
;
163 } while (tlb
[x
].pte
.locked());
168 for (x = 0; x < size; x++) {
169 if (!tlb[x].valid || !tlb[x].used) {
177 // Update the last entry if they're all locked
179 new_entry
= &tlb
[size
-1];
182 freeList
.remove(new_entry
);
183 if (new_entry
->valid
&& new_entry
->used
)
185 if (new_entry
->valid
)
186 lookupTable
.erase(new_entry
->range
);
190 new_entry
->range
.va
= va
;
191 new_entry
->range
.size
= PTE
.size() - 1;
192 new_entry
->range
.partitionId
= partition_id
;
193 new_entry
->range
.contextId
= context_id
;
194 new_entry
->range
.real
= real
;
195 new_entry
->pte
= PTE
;
196 new_entry
->used
= true;;
197 new_entry
->valid
= true;
202 i
= lookupTable
.insert(new_entry
->range
, new_entry
);
203 assert(i
!= lookupTable
.end());
205 // If all entries have their used bit set, clear it on them all, but the
206 // one we just inserted
207 if (usedEntries
== size
) {
209 new_entry
->used
= true;
217 TLB::lookup(Addr va
, int partition_id
, bool real
, int context_id
, bool
224 DPRINTF(TLB
, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
225 va
, partition_id
, context_id
, real
);
226 // Assemble full address structure
228 tr
.size
= MachineBytes
;
229 tr
.contextId
= context_id
;
230 tr
.partitionId
= partition_id
;
233 // Try to find the entry
234 i
= lookupTable
.find(tr
);
235 if (i
== lookupTable
.end()) {
236 DPRINTF(TLB
, "TLB: No valid entry found\n");
240 // Mark the entry's used bit and clear other used bits if needed
242 DPRINTF(TLB
, "TLB: Valid entry found pa: %#x size: %#x\n", t
->pte
.paddr(),
245 // Update the used bits only if this is a real access (not a fake one from
247 if (!t
->used
&& update_used
) {
250 if (usedEntries
== size
) {
264 for (int x
= 0; x
< size
; x
++) {
266 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
267 x
, tlb
[x
].range
.partitionId
, tlb
[x
].range
.contextId
,
268 tlb
[x
].range
.real
? 'R' : ' ', tlb
[x
].range
.size
,
269 tlb
[x
].range
.va
, tlb
[x
].pte
.paddr(), tlb
[x
].pte());
275 TLB::demapPage(Addr va
, int partition_id
, bool real
, int context_id
)
280 DPRINTF(IPR
, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
281 va
, partition_id
, context_id
, real
);
285 // Assemble full address structure
287 tr
.size
= MachineBytes
;
288 tr
.contextId
= context_id
;
289 tr
.partitionId
= partition_id
;
292 // Demap any entry that conflicts
293 i
= lookupTable
.find(tr
);
294 if (i
!= lookupTable
.end()) {
295 DPRINTF(IPR
, "TLB: Demapped page\n");
296 i
->second
->valid
= false;
297 if (i
->second
->used
) {
298 i
->second
->used
= false;
301 freeList
.push_front(i
->second
);
302 lookupTable
.erase(i
);
307 TLB::demapContext(int partition_id
, int context_id
)
310 DPRINTF(IPR
, "TLB: Demapping Context pid=%#d cid=%d\n",
311 partition_id
, context_id
);
313 for (x
= 0; x
< size
; x
++) {
314 if (tlb
[x
].range
.contextId
== context_id
&&
315 tlb
[x
].range
.partitionId
== partition_id
) {
316 if (tlb
[x
].valid
== true) {
317 freeList
.push_front(&tlb
[x
]);
319 tlb
[x
].valid
= false;
324 lookupTable
.erase(tlb
[x
].range
);
330 TLB::demapAll(int partition_id
)
333 DPRINTF(TLB
, "TLB: Demapping All pid=%#d\n", partition_id
);
335 for (x
= 0; x
< size
; x
++) {
336 if (!tlb
[x
].pte
.locked() && tlb
[x
].range
.partitionId
== partition_id
) {
337 if (tlb
[x
].valid
== true){
338 freeList
.push_front(&tlb
[x
]);
340 tlb
[x
].valid
= false;
345 lookupTable
.erase(tlb
[x
].range
);
358 for (x
= 0; x
< size
; x
++) {
359 if (tlb
[x
].valid
== true)
360 freeList
.push_back(&tlb
[x
]);
361 tlb
[x
].valid
= false;
368 TLB::TteRead(int entry
) {
370 panic("entry: %d\n", entry
);
372 assert(entry
< size
);
373 if (tlb
[entry
].valid
)
374 return tlb
[entry
].pte();
376 return (uint64_t)-1ll;
380 TLB::TagRead(int entry
) {
381 assert(entry
< size
);
383 if (!tlb
[entry
].valid
)
384 return (uint64_t)-1ll;
386 tag
= tlb
[entry
].range
.contextId
;
387 tag
|= tlb
[entry
].range
.va
;
388 tag
|= (uint64_t)tlb
[entry
].range
.partitionId
<< 61;
389 tag
|= tlb
[entry
].range
.real
? ULL(1) << 60 : 0;
390 tag
|= (uint64_t)~tlb
[entry
].pte
._size() << 56;
395 TLB::validVirtualAddress(Addr va
, bool am
)
399 if (va
>= StartVAddrHole
&& va
<= EndVAddrHole
)
405 TLB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
422 TLB::writeTagAccess(Addr va
, int context
)
424 DPRINTF(TLB
, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
425 va
, context
, mbits(va
, 63,13) | mbits(context
,12,0));
427 tag_access
= mbits(va
, 63,13) | mbits(context
,12,0);
431 ITB::writeSfsr(bool write
, ContextType ct
, bool se
, FaultTypes ft
, int asi
)
433 DPRINTF(TLB
, "TLB: ITB Fault: w=%d ct=%d ft=%d asi=%d\n",
434 (int)write
, ct
, ft
, asi
);
435 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
439 DTB::writeSfsr(Addr a
, bool write
, ContextType ct
,
440 bool se
, FaultTypes ft
, int asi
)
442 DPRINTF(TLB
, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
443 a
, (int)write
, ct
, ft
, asi
);
444 TLB::writeSfsr(write
, ct
, se
, ft
, asi
);
449 ITB::translate(RequestPtr
&req
, ThreadContext
*tc
)
451 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
453 Addr vaddr
= req
->getVaddr();
456 assert(req
->getAsi() == ASI_IMPLICIT
);
458 DPRINTF(TLB
, "TLB: ITB Request to translate va=%#x size=%d\n",
459 vaddr
, req
->getSize());
461 // Be fast if we can!
462 if (cacheValid
&& cacheState
== tlbdata
) {
464 if (cacheEntry
->range
.va
< vaddr
+ sizeof(MachInst
) &&
465 cacheEntry
->range
.va
+ cacheEntry
->range
.size
>= vaddr
) {
466 req
->setPaddr(cacheEntry
->pte
.paddr() & ~(cacheEntry
->pte
.size()-1) |
467 vaddr
& cacheEntry
->pte
.size()-1 );
471 req
->setPaddr(vaddr
& PAddrImplMask
);
476 bool hpriv
= bits(tlbdata
,0,0);
477 bool red
= bits(tlbdata
,1,1);
478 bool priv
= bits(tlbdata
,2,2);
479 bool addr_mask
= bits(tlbdata
,3,3);
480 bool lsu_im
= bits(tlbdata
,4,4);
482 int part_id
= bits(tlbdata
,15,8);
483 int tl
= bits(tlbdata
,18,16);
484 int pri_context
= bits(tlbdata
,47,32);
490 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
491 priv
, hpriv
, red
, lsu_im
, part_id
);
500 context
= pri_context
;
503 if ( hpriv
|| red
) {
505 cacheState
= tlbdata
;
507 req
->setPaddr(vaddr
& PAddrImplMask
);
511 // If the access is unaligned trap
513 writeSfsr(false, ct
, false, OtherFault
, asi
);
514 return new MemAddressNotAligned
;
518 vaddr
= vaddr
& VAddrAMask
;
520 if (!validVirtualAddress(vaddr
, addr_mask
)) {
521 writeSfsr(false, ct
, false, VaOutOfRange
, asi
);
522 return new InstructionAccessException
;
526 e
= lookup(vaddr
, part_id
, true);
530 e
= lookup(vaddr
, part_id
, false, context
);
533 if (e
== NULL
|| !e
->valid
) {
534 writeTagAccess(vaddr
, context
);
536 return new InstructionRealTranslationMiss
;
539 return new FastInstructionAccessMMUMiss
;
541 return new FastInstructionAccessMMUMiss(req
->getVaddr());
545 // we're not privileged, accessing a privileged page
546 if (!priv
&& e
->pte
.priv()) {
547 writeTagAccess(vaddr
, context
);
548 writeSfsr(false, ct
, false, PrivViolation
, asi
);
549 return new InstructionAccessException
;
552 // cache translation data for next translation
554 cacheState
= tlbdata
;
557 req
->setPaddr(e
->pte
.paddr() & ~(e
->pte
.size()-1) |
558 vaddr
& e
->pte
.size()-1 );
559 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
566 DTB::translate(RequestPtr
&req
, ThreadContext
*tc
, bool write
)
568 /* @todo this could really use some profiling and fixing to make it faster! */
569 uint64_t tlbdata
= tc
->readMiscRegNoEffect(MISCREG_TLB_DATA
);
570 Addr vaddr
= req
->getVaddr();
571 Addr size
= req
->getSize();
573 asi
= (ASI
)req
->getAsi();
574 bool implicit
= false;
575 bool hpriv
= bits(tlbdata
,0,0);
576 bool unaligned
= (vaddr
& size
-1);
578 DPRINTF(TLB
, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
581 if (lookupTable
.size() != 64 - freeList
.size())
582 panic("Lookup table size: %d tlb size: %d\n", lookupTable
.size(),
584 if (asi
== ASI_IMPLICIT
)
587 // Only use the fast path here if there doesn't need to be an unaligned
590 if (hpriv
&& implicit
) {
591 req
->setPaddr(vaddr
& PAddrImplMask
);
595 // Be fast if we can!
596 if (cacheValid
&& cacheState
== tlbdata
) {
601 TlbEntry
*ce
= cacheEntry
[0];
602 Addr ce_va
= ce
->range
.va
;
603 if (cacheAsi
[0] == asi
&&
604 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
605 (!write
|| ce
->pte
.writable())) {
606 req
->setPaddr(ce
->pte
.paddrMask() | vaddr
& ce
->pte
.sizeMask());
607 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
608 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
609 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
612 } // if cache entry valid
614 TlbEntry
*ce
= cacheEntry
[1];
615 Addr ce_va
= ce
->range
.va
;
616 if (cacheAsi
[1] == asi
&&
617 ce_va
< vaddr
+ size
&& ce_va
+ ce
->range
.size
> vaddr
&&
618 (!write
|| ce
->pte
.writable())) {
619 req
->setPaddr(ce
->pte
.paddrMask() | vaddr
& ce
->pte
.sizeMask());
620 if (ce
->pte
.sideffect() || (ce
->pte
.paddr() >> 39) & 1)
621 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
622 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
625 } // if cache entry valid
629 bool red
= bits(tlbdata
,1,1);
630 bool priv
= bits(tlbdata
,2,2);
631 bool addr_mask
= bits(tlbdata
,3,3);
632 bool lsu_dm
= bits(tlbdata
,5,5);
634 int part_id
= bits(tlbdata
,15,8);
635 int tl
= bits(tlbdata
,18,16);
636 int pri_context
= bits(tlbdata
,47,32);
637 int sec_context
= bits(tlbdata
,63,48);
640 ContextType ct
= Primary
;
645 DPRINTF(TLB
, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
646 priv
, hpriv
, red
, lsu_dm
, part_id
);
656 context
= pri_context
;
659 // We need to check for priv level/asi priv
660 if (!priv
&& !hpriv
&& !AsiIsUnPriv(asi
)) {
661 // It appears that context should be Nucleus in these cases?
662 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
663 return new PrivilegedAction
;
666 if (!hpriv
&& AsiIsHPriv(asi
)) {
667 writeSfsr(vaddr
, write
, Nucleus
, false, IllegalAsi
, asi
);
668 return new DataAccessException
;
671 if (AsiIsPrimary(asi
)) {
672 context
= pri_context
;
674 } else if (AsiIsSecondary(asi
)) {
675 context
= sec_context
;
677 } else if (AsiIsNucleus(asi
)) {
682 context
= pri_context
;
686 if (!implicit
&& asi
!= ASI_P
&& asi
!= ASI_S
) {
687 if (AsiIsLittle(asi
))
688 panic("Little Endian ASIs not supported\n");
690 //XXX It's unclear from looking at the documentation how a no fault
691 //load differs from a regular one, other than what happens concerning
692 //nfo and e bits in the TTE
693 // if (AsiIsNoFault(asi))
694 // panic("No Fault ASIs not supported\n");
696 if (AsiIsPartialStore(asi
))
697 panic("Partial Store ASIs not supported\n");
700 panic("Cmt ASI registers not implmented\n");
702 if (AsiIsInterrupt(asi
))
703 goto handleIntRegAccess
;
705 goto handleMmuRegAccess
;
706 if (AsiIsScratchPad(asi
))
707 goto handleScratchRegAccess
;
709 goto handleQueueRegAccess
;
710 if (AsiIsSparcError(asi
))
711 goto handleSparcErrorRegAccess
;
713 if (!AsiIsReal(asi
) && !AsiIsNucleus(asi
) && !AsiIsAsIfUser(asi
) &&
714 !AsiIsTwin(asi
) && !AsiIsBlock(asi
) && !AsiIsNoFault(asi
))
715 panic("Accessing ASI %#X. Should we?\n", asi
);
718 // If the asi is unaligned trap
720 writeSfsr(vaddr
, false, ct
, false, OtherFault
, asi
);
721 return new MemAddressNotAligned
;
725 vaddr
= vaddr
& VAddrAMask
;
727 if (!validVirtualAddress(vaddr
, addr_mask
)) {
728 writeSfsr(vaddr
, false, ct
, true, VaOutOfRange
, asi
);
729 return new DataAccessException
;
733 if ((!lsu_dm
&& !hpriv
&& !red
) || AsiIsReal(asi
)) {
738 if (hpriv
&& (implicit
|| (!AsiIsAsIfUser(asi
) && !AsiIsReal(asi
)))) {
739 req
->setPaddr(vaddr
& PAddrImplMask
);
743 e
= lookup(vaddr
, part_id
, real
, context
);
745 if (e
== NULL
|| !e
->valid
) {
746 writeTagAccess(vaddr
, context
);
747 DPRINTF(TLB
, "TLB: DTB Failed to find matching TLB entry\n");
749 return new DataRealTranslationMiss
;
752 return new FastDataAccessMMUMiss
;
754 return new FastDataAccessMMUMiss(req
->getVaddr());
759 if (!priv
&& e
->pte
.priv()) {
760 writeTagAccess(vaddr
, context
);
761 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), PrivViolation
, asi
);
762 return new DataAccessException
;
765 if (write
&& !e
->pte
.writable()) {
766 writeTagAccess(vaddr
, context
);
767 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), OtherFault
, asi
);
768 return new FastDataAccessProtection
;
771 if (e
->pte
.nofault() && !AsiIsNoFault(asi
)) {
772 writeTagAccess(vaddr
, context
);
773 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), LoadFromNfo
, asi
);
774 return new DataAccessException
;
777 if (e
->pte
.sideffect() && AsiIsNoFault(asi
)) {
778 writeTagAccess(vaddr
, context
);
779 writeSfsr(vaddr
, write
, ct
, e
->pte
.sideffect(), SideEffect
, asi
);
780 return new DataAccessException
;
784 if (e
->pte
.sideffect() || (e
->pte
.paddr() >> 39) & 1)
785 req
->setFlags(req
->getFlags() | UNCACHEABLE
);
787 // cache translation data for next translation
788 cacheState
= tlbdata
;
790 cacheEntry
[1] = NULL
;
791 cacheEntry
[0] = NULL
;
794 if (cacheEntry
[0] != e
&& cacheEntry
[1] != e
) {
795 cacheEntry
[1] = cacheEntry
[0];
797 cacheAsi
[1] = cacheAsi
[0];
800 cacheAsi
[0] = (ASI
)0;
803 req
->setPaddr(e
->pte
.paddr() & ~(e
->pte
.size()-1) |
804 vaddr
& e
->pte
.size()-1);
805 DPRINTF(TLB
, "TLB: %#X -> %#X\n", vaddr
, req
->getPaddr());
808 /** Normal flow ends here. */
811 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
813 return new DataAccessException
;
815 return new PrivilegedAction
;
818 if (asi
== ASI_SWVR_UDB_INTR_W
&& !write
||
819 asi
== ASI_SWVR_UDB_INTR_R
&& write
) {
820 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
821 return new DataAccessException
;
827 handleScratchRegAccess
:
828 if (vaddr
> 0x38 || (vaddr
>= 0x20 && vaddr
< 0x30 && !hpriv
)) {
829 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
830 return new DataAccessException
;
834 handleQueueRegAccess
:
835 if (!priv
&& !hpriv
) {
836 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
837 return new PrivilegedAction
;
839 if (!hpriv
&& vaddr
& 0xF || vaddr
> 0x3f8 || vaddr
< 0x3c0) {
840 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
841 return new DataAccessException
;
845 handleSparcErrorRegAccess
:
847 writeSfsr(vaddr
, write
, Primary
, true, IllegalAsi
, asi
);
849 return new DataAccessException
;
851 return new PrivilegedAction
;
858 DPRINTF(TLB
, "TLB: DTB Translating MM IPR access\n");
859 req
->setMmapedIpr(true);
860 req
->setPaddr(req
->getVaddr());
867 DTB::doMmuRegRead(ThreadContext
*tc
, Packet
*pkt
)
869 Addr va
= pkt
->getAddr();
870 ASI asi
= (ASI
)pkt
->req
->getAsi();
873 DPRINTF(IPR
, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
874 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr());
876 ITB
* itb
= tc
->getITBPtr();
879 case ASI_LSU_CONTROL_REG
:
881 pkt
->set(tc
->readMiscReg(MISCREG_MMU_LSU_CTRL
));
886 pkt
->set(tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
));
889 pkt
->set(tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
));
896 pkt
->set(tc
->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
899 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
901 pkt
->set(c0_tsb_ps0
);
903 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
905 pkt
->set(c0_tsb_ps1
);
907 case ASI_DMMU_CTXT_ZERO_CONFIG
:
911 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
913 pkt
->set(itb
->c0_tsb_ps0
);
915 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
917 pkt
->set(itb
->c0_tsb_ps1
);
919 case ASI_IMMU_CTXT_ZERO_CONFIG
:
921 pkt
->set(itb
->c0_config
);
923 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
925 pkt
->set(cx_tsb_ps0
);
927 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
929 pkt
->set(cx_tsb_ps1
);
931 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
935 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
937 pkt
->set(itb
->cx_tsb_ps0
);
939 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
941 pkt
->set(itb
->cx_tsb_ps1
);
943 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
945 pkt
->set(itb
->cx_config
);
947 case ASI_SPARC_ERROR_STATUS_REG
:
948 pkt
->set((uint64_t)0);
950 case ASI_HYP_SCRATCHPAD
:
952 pkt
->set(tc
->readMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3)));
957 temp
= itb
->tag_access
;
958 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
964 pkt
->set(itb
->tag_access
);
974 pkt
->set(bits(temp
,63,22) | bits(temp
,12,0) << 48);
983 pkt
->set(tag_access
);
986 pkt
->set(tc
->readMiscReg(MISCREG_MMU_PART_ID
));
992 case ASI_DMMU_TSB_PS0_PTR_REG
:
993 pkt
->set(MakeTsbPtr(Ps0
,
1000 case ASI_DMMU_TSB_PS1_PTR_REG
:
1001 pkt
->set(MakeTsbPtr(Ps1
,
1008 case ASI_IMMU_TSB_PS0_PTR_REG
:
1009 pkt
->set(MakeTsbPtr(Ps0
,
1016 case ASI_IMMU_TSB_PS1_PTR_REG
:
1017 pkt
->set(MakeTsbPtr(Ps1
,
1024 case ASI_SWVR_INTR_RECEIVE
:
1025 pkt
->set(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
));
1027 case ASI_SWVR_UDB_INTR_R
:
1028 temp
= findMsbSet(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
));
1029 tc
->getCpuPtr()->clear_interrupt(IT_INT_VEC
, temp
);
1034 panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1037 pkt
->makeAtomicResponse();
1038 return tc
->getCpuPtr()->cycles(1);
1042 DTB::doMmuRegWrite(ThreadContext
*tc
, Packet
*pkt
)
1044 uint64_t data
= gtoh(pkt
->get
<uint64_t>());
1045 Addr va
= pkt
->getAddr();
1046 ASI asi
= (ASI
)pkt
->req
->getAsi();
1052 int entry_insert
= -1;
1059 DPRINTF(IPR
, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1060 (uint32_t)asi
, va
, data
);
1062 ITB
* itb
= tc
->getITBPtr();
1065 case ASI_LSU_CONTROL_REG
:
1067 tc
->setMiscReg(MISCREG_MMU_LSU_CTRL
, data
);
1072 tc
->setMiscReg(MISCREG_MMU_P_CONTEXT
, data
);
1075 tc
->setMiscReg(MISCREG_MMU_S_CONTEXT
, data
);
1078 goto doMmuWriteError
;
1082 assert(mbits(data
,13,6) == data
);
1083 tc
->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD
+
1084 (va
>> 4) - 0x3c, data
);
1086 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
:
1090 case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
:
1094 case ASI_DMMU_CTXT_ZERO_CONFIG
:
1098 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
:
1100 itb
->c0_tsb_ps0
= data
;
1102 case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
:
1104 itb
->c0_tsb_ps1
= data
;
1106 case ASI_IMMU_CTXT_ZERO_CONFIG
:
1108 itb
->c0_config
= data
;
1110 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1114 case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1118 case ASI_DMMU_CTXT_NONZERO_CONFIG
:
1122 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
:
1124 itb
->cx_tsb_ps0
= data
;
1126 case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
:
1128 itb
->cx_tsb_ps1
= data
;
1130 case ASI_IMMU_CTXT_NONZERO_CONFIG
:
1132 itb
->cx_config
= data
;
1134 case ASI_SPARC_ERROR_EN_REG
:
1135 case ASI_SPARC_ERROR_STATUS_REG
:
1136 warn("Ignoring write to SPARC ERROR regsiter\n");
1138 case ASI_HYP_SCRATCHPAD
:
1139 case ASI_SCRATCHPAD
:
1140 tc
->setMiscReg(MISCREG_SCRATCHPAD_R0
+ (va
>> 3), data
);
1148 sext
<59>(bits(data
, 59,0));
1149 itb
->tag_access
= data
;
1152 goto doMmuWriteError
;
1155 case ASI_ITLB_DATA_ACCESS_REG
:
1156 entry_insert
= bits(va
, 8,3);
1157 case ASI_ITLB_DATA_IN_REG
:
1158 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1159 ta_insert
= itb
->tag_access
;
1160 va_insert
= mbits(ta_insert
, 63,13);
1161 ct_insert
= mbits(ta_insert
, 12,0);
1162 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1163 real_insert
= bits(va
, 9,9);
1164 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1165 PageTableEntry::sun4u
);
1166 tc
->getITBPtr()->insert(va_insert
, part_insert
, ct_insert
, real_insert
,
1169 case ASI_DTLB_DATA_ACCESS_REG
:
1170 entry_insert
= bits(va
, 8,3);
1171 case ASI_DTLB_DATA_IN_REG
:
1172 assert(entry_insert
!= -1 || mbits(va
,10,9) == va
);
1173 ta_insert
= tag_access
;
1174 va_insert
= mbits(ta_insert
, 63,13);
1175 ct_insert
= mbits(ta_insert
, 12,0);
1176 part_insert
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1177 real_insert
= bits(va
, 9,9);
1178 pte
.populate(data
, bits(va
,10,10) ? PageTableEntry::sun4v
:
1179 PageTableEntry::sun4u
);
1180 insert(va_insert
, part_insert
, ct_insert
, real_insert
, pte
, entry_insert
);
1182 case ASI_IMMU_DEMAP
:
1185 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1186 switch (bits(va
,5,4)) {
1188 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1200 switch(bits(va
,7,6)) {
1201 case 0: // demap page
1203 tc
->getITBPtr()->demapPage(mbits(va
,63,13), part_id
,
1204 bits(va
,9,9), ctx_id
);
1206 case 1: //demap context
1208 tc
->getITBPtr()->demapContext(part_id
, ctx_id
);
1211 tc
->getITBPtr()->demapAll(part_id
);
1214 panic("Invalid type for IMMU demap\n");
1223 sext
<59>(bits(data
, 59,0));
1227 tc
->setMiscReg(MISCREG_MMU_PART_ID
, data
);
1230 goto doMmuWriteError
;
1233 case ASI_DMMU_DEMAP
:
1236 part_id
= tc
->readMiscReg(MISCREG_MMU_PART_ID
);
1237 switch (bits(va
,5,4)) {
1239 ctx_id
= tc
->readMiscReg(MISCREG_MMU_P_CONTEXT
);
1242 ctx_id
= tc
->readMiscReg(MISCREG_MMU_S_CONTEXT
);
1251 switch(bits(va
,7,6)) {
1252 case 0: // demap page
1254 demapPage(mbits(va
,63,13), part_id
, bits(va
,9,9), ctx_id
);
1256 case 1: //demap context
1258 demapContext(part_id
, ctx_id
);
1264 panic("Invalid type for IMMU demap\n");
1267 case ASI_SWVR_INTR_RECEIVE
:
1269 // clear all the interrupts that aren't set in the write
1270 while(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
) & data
) {
1271 msb
= findMsbSet(tc
->getCpuPtr()->get_interrupts(IT_INT_VEC
) & data
);
1272 tc
->getCpuPtr()->clear_interrupt(IT_INT_VEC
, msb
);
1275 case ASI_SWVR_UDB_INTR_W
:
1276 tc
->getSystemPtr()->threadContexts
[bits(data
,12,8)]->getCpuPtr()->
1277 post_interrupt(bits(data
,5,0),0);
1281 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1282 (uint32_t)pkt
->req
->getAsi(), pkt
->getAddr(), data
);
1284 pkt
->makeAtomicResponse();
1285 return tc
->getCpuPtr()->cycles(1);
1291 DTB::GetTsbPtr(ThreadContext
*tc
, Addr addr
, int ctx
, Addr
*ptrs
)
1293 uint64_t tag_access
= mbits(addr
,63,13) | mbits(ctx
,12,0);
1294 ITB
* itb
= tc
->getITBPtr();
1295 ptrs
[0] = MakeTsbPtr(Ps0
, tag_access
,
1300 ptrs
[1] = MakeTsbPtr(Ps1
, tag_access
,
1305 ptrs
[2] = MakeTsbPtr(Ps0
, tag_access
,
1310 ptrs
[3] = MakeTsbPtr(Ps1
, tag_access
,
1322 DTB::MakeTsbPtr(TsbPageSize ps
, uint64_t tag_access
, uint64_t c0_tsb
,
1323 uint64_t c0_config
, uint64_t cX_tsb
, uint64_t cX_config
)
1328 if (bits(tag_access
, 12,0) == 0) {
1336 uint64_t ptr
= mbits(tsb
,63,13);
1337 bool split
= bits(tsb
,12,12);
1338 int tsb_size
= bits(tsb
,3,0);
1339 int page_size
= (ps
== Ps0
) ? bits(config
, 2,0) : bits(config
,10,8);
1341 if (ps
== Ps1
&& split
)
1342 ptr
|= ULL(1) << (13 + tsb_size
);
1343 ptr
|= (tag_access
>> (9 + page_size
* 3)) & mask(12+tsb_size
, 4);
1350 TLB::serialize(std::ostream
&os
)
1352 SERIALIZE_SCALAR(size
);
1353 SERIALIZE_SCALAR(usedEntries
);
1354 SERIALIZE_SCALAR(lastReplaced
);
1356 // convert the pointer based free list into an index based one
1357 int *free_list
= (int*)malloc(sizeof(int) * size
);
1359 std::list
<TlbEntry
*>::iterator i
;
1360 i
= freeList
.begin();
1361 while (i
!= freeList
.end()) {
1362 free_list
[cntr
++] = ((size_t)*i
- (size_t)tlb
)/ sizeof(TlbEntry
);
1365 SERIALIZE_SCALAR(cntr
);
1366 SERIALIZE_ARRAY(free_list
, cntr
);
1368 for (int x
= 0; x
< size
; x
++) {
1369 nameOut(os
, csprintf("%s.PTE%d", name(), x
));
1370 tlb
[x
].serialize(os
);
1373 SERIALIZE_SCALAR(c0_tsb_ps0
);
1374 SERIALIZE_SCALAR(c0_tsb_ps1
);
1375 SERIALIZE_SCALAR(c0_config
);
1376 SERIALIZE_SCALAR(cx_tsb_ps0
);
1377 SERIALIZE_SCALAR(cx_tsb_ps1
);
1378 SERIALIZE_SCALAR(cx_config
);
1379 SERIALIZE_SCALAR(sfsr
);
1380 SERIALIZE_SCALAR(tag_access
);
1384 TLB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1388 paramIn(cp
, section
, "size", oldSize
);
1389 if (oldSize
!= size
)
1390 panic("Don't support unserializing different sized TLBs\n");
1391 UNSERIALIZE_SCALAR(usedEntries
);
1392 UNSERIALIZE_SCALAR(lastReplaced
);
1395 UNSERIALIZE_SCALAR(cntr
);
1397 int *free_list
= (int*)malloc(sizeof(int) * cntr
);
1399 UNSERIALIZE_ARRAY(free_list
, cntr
);
1400 for (int x
= 0; x
< cntr
; x
++)
1401 freeList
.push_back(&tlb
[free_list
[x
]]);
1403 lookupTable
.clear();
1404 for (int x
= 0; x
< size
; x
++) {
1405 tlb
[x
].unserialize(cp
, csprintf("%s.PTE%d", section
, x
));
1407 lookupTable
.insert(tlb
[x
].range
, &tlb
[x
]);
1411 UNSERIALIZE_SCALAR(c0_tsb_ps0
);
1412 UNSERIALIZE_SCALAR(c0_tsb_ps1
);
1413 UNSERIALIZE_SCALAR(c0_config
);
1414 UNSERIALIZE_SCALAR(cx_tsb_ps0
);
1415 UNSERIALIZE_SCALAR(cx_tsb_ps1
);
1416 UNSERIALIZE_SCALAR(cx_config
);
1417 UNSERIALIZE_SCALAR(sfsr
);
1418 UNSERIALIZE_SCALAR(tag_access
);
1422 DTB::serialize(std::ostream
&os
)
1425 SERIALIZE_SCALAR(sfar
);
1429 DTB::unserialize(Checkpoint
*cp
, const std::string
§ion
)
1431 TLB::unserialize(cp
, section
);
1432 UNSERIALIZE_SCALAR(sfar
);
1435 /* end namespace SparcISA */ }
1438 SparcITBParams::create()
1440 return new SparcISA::ITB(name
, size
);
1444 SparcDTBParams::create()
1446 return new SparcISA::DTB(name
, size
);