2 * Copyright (c) 2013, 2018-2019 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Authors: Stan Czerniawski
40 #include "dev/arm/smmu_v3.hh"
45 #include "base/bitfield.hh"
46 #include "base/cast.hh"
47 #include "base/logging.hh"
48 #include "base/trace.hh"
49 #include "base/types.hh"
50 #include "debug/Checkpoint.hh"
51 #include "debug/SMMUv3.hh"
52 #include "dev/arm/smmu_v3_transl.hh"
53 #include "mem/packet_access.hh"
54 #include "sim/system.hh"
// Constructor: wires up ports, caches, semaphores and latencies from the
// Python-side SMMUv3Params, zeroes the register file, and seeds the
// read-only ID registers from the parameters.
// NOTE(review): this extraction is line-mangled (each original statement is
// split over several lines and some original lines are missing); code below
// is kept byte-identical to the extraction.
56 SMMUv3::SMMUv3(SMMUv3Params
*params
) :
57 ClockedObject(params
),
58 system(*params
->system
),
59 masterId(params
->system
->getMasterId(this)),
60 masterPort(name() + ".master", *this),
61 masterTableWalkPort(name() + ".master_walker", *this),
62 controlPort(name() + ".control", *this, params
->reg_map
),
// Translation-structure caches: main TLB, config (STE/CD) cache, IPA
// cache and the per-level walk cache, all sized from parameters.
63 tlb(params
->tlb_entries
, params
->tlb_assoc
, params
->tlb_policy
),
64 configCache(params
->cfg_entries
, params
->cfg_assoc
, params
->cfg_policy
),
65 ipaCache(params
->ipa_entries
, params
->ipa_assoc
, params
->ipa_policy
),
// Walk cache takes one size per stage/level pair (S1 L0-L3, S2 L0-L3).
66 walkCache({ { params
->walk_S1L0
, params
->walk_S1L1
,
67 params
->walk_S1L2
, params
->walk_S1L3
,
68 params
->walk_S2L0
, params
->walk_S2L1
,
69 params
->walk_S2L2
, params
->walk_S2L3
} },
70 params
->walk_assoc
, params
->walk_policy
),
71 tlbEnable(params
->tlb_enable
),
72 configCacheEnable(params
->cfg_enable
),
73 ipaCacheEnable(params
->ipa_enable
),
74 walkCacheEnable(params
->walk_enable
),
// Disabled here; init() enables it if the walker port is connected.
75 tableWalkPortEnable(false),
76 walkCacheNonfinalEnable(params
->wc_nonfinal_enable
),
77 walkCacheS1Levels(params
->wc_s1_levels
),
78 walkCacheS2Levels(params
->wc_s2_levels
),
79 masterPortWidth(params
->master_port_width
),
// Semaphores limiting the number of in-flight operations per resource.
80 tlbSem(params
->tlb_slots
),
83 configSem(params
->cfg_slots
),
84 ipaSem(params
->ipa_slots
),
85 walkSem(params
->walk_slots
),
87 transSem(params
->xlate_slots
),
88 ptwSem(params
->ptw_slots
),
// Modelled access latencies (in cycles — TODO confirm units from header).
90 tlbLat(params
->tlb_lat
),
91 ifcSmmuLat(params
->ifc_smmu_lat
),
92 smmuIfcLat(params
->smmu_ifc_lat
),
93 configLat(params
->cfg_lat
),
94 ipaLat(params
->ipa_lat
),
95 walkLat(params
->walk_lat
),
96 slaveInterfaces(params
->slave_interfaces
),
97 commandExecutor(name() + ".cmd_exec", *this),
98 regsMap(params
->reg_map
),
99 processCommandsEvent(this)
// The memory-mapped register window must exactly match the register file.
101 fatal_if(regsMap
.size() != SMMU_REG_SIZE
,
102 "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n",
103 regsMap
.size(), SMMU_REG_SIZE
);
105 // Init smmu registers to 0
// NOTE(review): "®s" below is mojibake for "&regs" — encoding damage in
// this extraction, left byte-identical here.
106 memset(®s
, 0, sizeof(regs
));
108 // Setup RO ID registers
109 regs
.idr0
= params
->smmu_idr0
;
110 regs
.idr1
= params
->smmu_idr1
;
111 regs
.idr2
= params
->smmu_idr2
;
112 regs
.idr3
= params
->smmu_idr3
;
113 regs
.idr4
= params
->smmu_idr4
;
114 regs
.idr5
= params
->smmu_idr5
;
115 regs
.iidr
= params
->smmu_iidr
;
116 regs
.aidr
= params
->smmu_aidr
;
118 // TODO: At the moment it is possible to set the ID registers to hold
119 // any arbitrary value. It would be nice to have a sanity check here
120 // at construction time in case some ID registers are programmed to
121 // store unallowed values or if there are configuration conflicts.
122 warn("SMMUv3 IDx register values unchecked\n");
// Loop body missing from this extraction (original line 125); presumably
// registers this SMMU with each slave interface — TODO confirm.
124 for (auto ifc
: slaveInterfaces
)
// Timing-mode response on the master (downstream memory) port: recover the
// originating SMMUProcess from the packet's sender state and resume it.
129 SMMUv3::masterRecvTimingResp(PacketPtr pkt
)
131 DPRINTF(SMMUv3
, "[t] master resp addr=%#x size=%#x\n",
132 pkt
->getAddr(), pkt
->getSize());
134 // @todo: We need to pay for this and not just zero it out
135 pkt
->headerDelay
= pkt
->payloadDelay
= 0;
// NOTE(review): the declaration of `proc` (original line 137) is missing
// from this extraction; the safe_cast result is assigned to it.
138 safe_cast
<SMMUProcess
*>(pkt
->popSenderState());
140 runProcessTiming(proc
, pkt
);
// Retry callback on the master port: drain the queue of packets that
// previously failed to send, stopping again at the first rejection.
146 SMMUv3::masterRecvReqRetry()
148 assert(!packetsToRetry
.empty());
150 while (!packetsToRetry
.empty()) {
151 SMMUAction a
= packetsToRetry
.front();
// Only request-type actions are ever queued for retry on this port.
153 assert(a
.type
==ACTION_SEND_REQ
|| a
.type
==ACTION_SEND_REQ_FINAL
);
155 DPRINTF(SMMUv3
, "[t] master retr addr=%#x size=%#x\n",
156 a
.pkt
->getAddr(), a
.pkt
->getSize());
// If the port rejects again, stop (break on original line 159, missing
// here) and wait for the next retry callback.
158 if (!masterPort
.sendTimingReq(a
.pkt
))
161 packetsToRetry
.pop();
164 * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
165 * on the master interface; this means that we no longer hold on to
166 * that transaction and therefore can accept a new one.
167 * If the slave port was stalled then unstall it (send retry).
169 if (a
.type
== ACTION_SEND_REQ_FINAL
)
170 scheduleSlaveRetries();
// Timing-mode response on the dedicated hardware-table-walk port; mirrors
// masterRecvTimingResp() for walk traffic.
175 SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt
)
177 DPRINTF(SMMUv3
, "[t] master HWTW resp addr=%#x size=%#x\n",
178 pkt
->getAddr(), pkt
->getSize());
180 // @todo: We need to pay for this and not just zero it out
181 pkt
->headerDelay
= pkt
->payloadDelay
= 0;
// NOTE(review): declaration of `proc` (original line 183) missing from
// this extraction; it receives the safe_cast result below.
184 safe_cast
<SMMUProcess
*>(pkt
->popSenderState());
186 runProcessTiming(proc
, pkt
);
// Retry callback on the table-walk port: resend queued walk requests until
// one is rejected again. Only valid when the walker port mode is enabled.
192 SMMUv3::masterTableWalkRecvReqRetry()
194 assert(tableWalkPortEnable
);
195 assert(!packetsTableWalkToRetry
.empty());
197 while (!packetsTableWalkToRetry
.empty()) {
198 SMMUAction a
= packetsTableWalkToRetry
.front();
// Walk traffic is always plain ACTION_SEND_REQ (never _FINAL).
200 assert(a
.type
==ACTION_SEND_REQ
);
202 DPRINTF(SMMUv3
, "[t] master HWTW retr addr=%#x size=%#x\n",
203 a
.pkt
->getAddr(), a
.pkt
->getSize());
// On rejection, stop (break on original line 206, missing here).
205 if (!masterTableWalkPort
.sendTimingReq(a
.pkt
))
208 packetsTableWalkToRetry
.pop();
213 SMMUv3::scheduleSlaveRetries()
215 for (auto ifc
: slaveInterfaces
) {
216 ifc
->scheduleDeviceRetry();
221 SMMUv3::runProcess(SMMUProcess
*proc
, PacketPtr pkt
)
223 if (system
.isAtomicMode()) {
224 return runProcessAtomic(proc
, pkt
);
225 } else if (system
.isTimingMode()) {
226 return runProcessTiming(proc
, pkt
);
228 panic("Not in timing or atomic mode!");
// Atomic-mode engine: repeatedly run the process coroutine, forwarding its
// requested actions synchronously and accumulating the total delay.
// NOTE(review): several original lines (action/delay declarations, loop
// construct, breaks, return) are missing from this extraction.
233 SMMUv3::runProcessAtomic(SMMUProcess
*proc
, PacketPtr pkt
)
237 bool finished
= false;
240 action
= proc
->run(pkt
);
242 switch (action
.type
) {
243 case ACTION_SEND_REQ
:
244 // Send an MMU initiated request on the table walk port if it is
245 // enabled. Otherwise, fall through and handle same as the final
246 // ACTION_SEND_REQ_FINAL request.
247 if (tableWalkPortEnable
) {
248 delay
+= masterTableWalkPort
.sendAtomic(action
.pkt
);
253 case ACTION_SEND_REQ_FINAL
:
254 delay
+= masterPort
.sendAtomic(action
.pkt
);
// Responses in atomic mode just charge the action's own delay.
258 case ACTION_SEND_RESP
:
259 case ACTION_SEND_RESP_ATS
:
265 delay
+= action
.delay
;
268 case ACTION_TERMINATE
:
269 panic("ACTION_TERMINATE in atomic mode\n");
272 panic("Unknown action\n");
// Report the accumulated delay back to the caller via the action.
276 action
.delay
= delay
;
// Timing-mode engine: run the process coroutine once and carry out the
// action it requests (send a request, possibly queueing it for retry, or
// schedule a response on the originating slave interface).
282 SMMUv3::runProcessTiming(SMMUProcess
*proc
, PacketPtr pkt
)
284 SMMUAction action
= proc
->run(pkt
);
286 switch (action
.type
) {
287 case ACTION_SEND_REQ
:
288 // Send an MMU initiated request on the table walk port if it is
289 // enabled. Otherwise, fall through and handle same as the final
290 // ACTION_SEND_REQ_FINAL request.
291 if (tableWalkPortEnable
) {
292 action
.pkt
->pushSenderState(proc
);
294 DPRINTF(SMMUv3
, "[t] master HWTW req addr=%#x size=%#x\n",
295 action
.pkt
->getAddr(), action
.pkt
->getSize());
// Preserve ordering: only try to send if no earlier packet is queued.
297 if (packetsTableWalkToRetry
.empty()
298 && masterTableWalkPort
.sendTimingReq(action
.pkt
)) {
299 scheduleSlaveRetries();
301 DPRINTF(SMMUv3
, "[t] master HWTW req needs retry,"
302 " qlen=%d\n", packetsTableWalkToRetry
.size());
303 packetsTableWalkToRetry
.push(action
);
309 case ACTION_SEND_REQ_FINAL
:
310 action
.pkt
->pushSenderState(proc
);
312 DPRINTF(SMMUv3
, "[t] master req addr=%#x size=%#x\n",
313 action
.pkt
->getAddr(), action
.pkt
->getSize());
315 if (packetsToRetry
.empty() && masterPort
.sendTimingReq(action
.pkt
)) {
316 scheduleSlaveRetries();
318 DPRINTF(SMMUv3
, "[t] master req needs retry, qlen=%d\n",
319 packetsToRetry
.size());
320 packetsToRetry
.push(action
);
325 case ACTION_SEND_RESP
:
326 // @todo: We need to pay for this and not just zero it out
327 action
.pkt
->headerDelay
= action
.pkt
->payloadDelay
= 0;
329 DPRINTF(SMMUv3
, "[t] slave resp addr=%#x size=%#x\n",
330 action
.pkt
->getAddr(),
331 action
.pkt
->getSize());
// Hand the response back to the slave interface that owns the packet.
334 action
.ifc
->schedTimingResp(action
.pkt
);
339 case ACTION_SEND_RESP_ATS
:
340 // @todo: We need to pay for this and not just zero it out
341 action
.pkt
->headerDelay
= action
.pkt
->payloadDelay
= 0;
343 DPRINTF(SMMUv3
, "[t] ATS slave resp addr=%#x size=%#x\n",
344 action
.pkt
->getAddr(), action
.pkt
->getSize());
347 action
.ifc
->schedAtsTimingResp(action
.pkt
);
// ACTION_TERMINATE body (original lines 357-360) missing from this
// extraction.
356 case ACTION_TERMINATE
:
361 panic("Unknown action\n");
// Drive the command-queue executor: synchronously in atomic mode, or kick
// it off in timing mode only if it is not already running.
368 SMMUv3::processCommands()
370 DPRINTF(SMMUv3
, "processCommands()\n");
372 if (system
.isAtomicMode()) {
373 SMMUAction a
= runProcessAtomic(&commandExecutor
, NULL
);
375 } else if (system
.isTimingMode()) {
376 if (!commandExecutor
.isBusy())
377 runProcessTiming(&commandExecutor
, NULL
);
379 panic("Not in timing or atomic mode!");
// Execute a single SMMU command-queue entry, dispatching on its type
// (cmd.dw0.type) to the appropriate configuration/TLB invalidation.
// NOTE(review): several case labels and all break statements are missing
// from this extraction; code is kept byte-identical.
384 SMMUv3::processCommand(const SMMUCommand
&cmd
)
386 switch (cmd
.dw0
.type
) {
// Prefetch commands are accepted but not modelled.
388 DPRINTF(SMMUv3
, "CMD_PREFETCH_CONFIG - ignored\n");
392 DPRINTF(SMMUv3
, "CMD_PREFETCH_ADDR - ignored\n");
// CMD_CFGI_STE: drop the cached STE for a stream id and any TLB entries
// tagged with it in every slave interface.
396 DPRINTF(SMMUv3
, "CMD_CFGI_STE sid=%#x\n", cmd
.dw0
.sid
);
397 configCache
.invalidateSID(cmd
.dw0
.sid
);
399 for (auto slave_interface
: slaveInterfaces
) {
400 slave_interface
->microTLB
->invalidateSID(cmd
.dw0
.sid
);
401 slave_interface
->mainTLB
->invalidateSID(cmd
.dw0
.sid
);
406 case CMD_CFGI_STE_RANGE
: {
407 const auto range
= cmd
.dw1
.range
;
409 // CMD_CFGI_ALL is an alias of CMD_CFGI_STE_RANGE with
// (continuation of the comment above missing from this extraction;
// presumably range == 31 selects the invalidate-all path — TODO confirm)
411 DPRINTF(SMMUv3
, "CMD_CFGI_ALL\n");
412 configCache
.invalidateAll();
414 for (auto slave_interface
: slaveInterfaces
) {
415 slave_interface
->microTLB
->invalidateAll();
416 slave_interface
->mainTLB
->invalidateAll();
// Range form: invalidate every sid in the 2^(range+1)-aligned window
// containing cmd.dw0.sid.
419 DPRINTF(SMMUv3
, "CMD_CFGI_STE_RANGE\n");
420 const auto start_sid
= cmd
.dw0
.sid
& ~((1 << (range
+ 1)) - 1);
421 const auto end_sid
= start_sid
+ (1 << (range
+ 1)) - 1;
422 for (auto sid
= start_sid
; sid
<= end_sid
; sid
++) {
423 configCache
.invalidateSID(sid
);
425 for (auto slave_interface
: slaveInterfaces
) {
426 slave_interface
->microTLB
->invalidateSID(sid
);
427 slave_interface
->mainTLB
->invalidateSID(sid
);
// CMD_CFGI_CD: invalidate a single (sid, ssid) context descriptor.
435 DPRINTF(SMMUv3
, "CMD_CFGI_CD sid=%#x ssid=%#x\n",
436 cmd
.dw0
.sid
, cmd
.dw0
.ssid
);
437 configCache
.invalidateSSID(cmd
.dw0
.sid
, cmd
.dw0
.ssid
);
439 for (auto slave_interface
: slaveInterfaces
) {
440 slave_interface
->microTLB
->invalidateSSID(
441 cmd
.dw0
.sid
, cmd
.dw0
.ssid
);
442 slave_interface
->mainTLB
->invalidateSSID(
443 cmd
.dw0
.sid
, cmd
.dw0
.ssid
);
// CMD_CFGI_CD_ALL: invalidate all context descriptors for a stream id.
448 case CMD_CFGI_CD_ALL
: {
449 DPRINTF(SMMUv3
, "CMD_CFGI_CD_ALL sid=%#x\n", cmd
.dw0
.sid
);
450 configCache
.invalidateSID(cmd
.dw0
.sid
);
452 for (auto slave_interface
: slaveInterfaces
) {
453 slave_interface
->microTLB
->invalidateSID(cmd
.dw0
.sid
);
454 slave_interface
->mainTLB
->invalidateSID(cmd
.dw0
.sid
);
// TLB invalidation by VMID (stage-1, non-hyp).
459 case CMD_TLBI_NH_ALL
: {
460 DPRINTF(SMMUv3
, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd
.dw0
.vmid
);
461 for (auto slave_interface
: slaveInterfaces
) {
462 slave_interface
->microTLB
->invalidateVMID(cmd
.dw0
.vmid
);
463 slave_interface
->mainTLB
->invalidateVMID(cmd
.dw0
.vmid
);
465 tlb
.invalidateVMID(cmd
.dw0
.vmid
);
466 walkCache
.invalidateVMID(cmd
.dw0
.vmid
);
// TLB invalidation by (asid, vmid).
470 case CMD_TLBI_NH_ASID
: {
471 DPRINTF(SMMUv3
, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n",
472 cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
473 for (auto slave_interface
: slaveInterfaces
) {
474 slave_interface
->microTLB
->invalidateASID(
475 cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
476 slave_interface
->mainTLB
->invalidateASID(
477 cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
479 tlb
.invalidateASID(cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
480 walkCache
.invalidateASID(cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
// TLB invalidation by VA for all ASIDs of a vmid. NOTE(review): the
// argument lines of the two interface invalidateVAA calls (original
// lines 490, 492-493) are missing from this extraction.
484 case CMD_TLBI_NH_VAA
: {
485 const Addr addr
= cmd
.addr();
486 DPRINTF(SMMUv3
, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n",
488 for (auto slave_interface
: slaveInterfaces
) {
489 slave_interface
->microTLB
->invalidateVAA(
491 slave_interface
->mainTLB
->invalidateVAA(
494 tlb
.invalidateVAA(addr
, cmd
.dw0
.vmid
);
495 const bool leaf_only
= cmd
.dw1
.leaf
? true : false;
496 walkCache
.invalidateVAA(addr
, cmd
.dw0
.vmid
, leaf_only
);
// TLB invalidation by (va, asid, vmid).
500 case CMD_TLBI_NH_VA
: {
501 const Addr addr
= cmd
.addr();
502 DPRINTF(SMMUv3
, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n",
503 addr
, cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
504 for (auto slave_interface
: slaveInterfaces
) {
505 slave_interface
->microTLB
->invalidateVA(
506 addr
, cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
507 slave_interface
->mainTLB
->invalidateVA(
508 addr
, cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
510 tlb
.invalidateVA(addr
, cmd
.dw0
.asid
, cmd
.dw0
.vmid
);
511 const bool leaf_only
= cmd
.dw1
.leaf
? true : false;
512 walkCache
.invalidateVA(addr
, cmd
.dw0
.asid
, cmd
.dw0
.vmid
,
// Stage-2 invalidation by IPA.
517 case CMD_TLBI_S2_IPA
: {
518 const Addr addr
= cmd
.addr();
519 DPRINTF(SMMUv3
, "CMD_TLBI_S2_IPA ipa=%#08x vmid=%#x\n",
521 // This does not invalidate TLBs containing
522 // combined Stage1 + Stage2 translations, as per the spec.
523 ipaCache
.invalidateIPA(addr
, cmd
.dw0
.vmid
);
526 walkCache
.invalidateVMID(cmd
.dw0
.vmid
);
// Combined stage-1 + stage-2 invalidation for a whole VM.
530 case CMD_TLBI_S12_VMALL
: {
531 DPRINTF(SMMUv3
, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd
.dw0
.vmid
);
532 for (auto slave_interface
: slaveInterfaces
) {
533 slave_interface
->microTLB
->invalidateVMID(cmd
.dw0
.vmid
);
534 slave_interface
->mainTLB
->invalidateVMID(cmd
.dw0
.vmid
);
536 tlb
.invalidateVMID(cmd
.dw0
.vmid
);
537 ipaCache
.invalidateVMID(cmd
.dw0
.vmid
);
538 walkCache
.invalidateVMID(cmd
.dw0
.vmid
);
// Global non-secure invalidation: wipe everything.
542 case CMD_TLBI_NSNH_ALL
: {
543 DPRINTF(SMMUv3
, "CMD_TLBI_NSNH_ALL\n");
544 for (auto slave_interface
: slaveInterfaces
) {
545 slave_interface
->microTLB
->invalidateAll();
546 slave_interface
->mainTLB
->invalidateAll();
549 ipaCache
.invalidateAll();
550 walkCache
.invalidateAll();
// CMD_RESUME (stall-fault resume) is not implemented.
555 DPRINTF(SMMUv3
, "CMD_RESUME\n");
556 panic("resume unimplemented");
// Unknown/unhandled command types are reported but otherwise ignored.
560 warn("Unimplemented command %#x\n", cmd
.dw0
.type
);
566 SMMUv3::getPageTableOps(uint8_t trans_granule
)
568 static V8PageTableOps4k ptOps4k
;
569 static V8PageTableOps16k ptOps16k
;
570 static V8PageTableOps64k ptOps64k
;
572 switch (trans_granule
) {
573 case TRANS_GRANULE_4K
: return &ptOps4k
;
574 case TRANS_GRANULE_16K
: return &ptOps16k
;
575 case TRANS_GRANULE_64K
: return &ptOps64k
;
577 panic("Unknown translation granule size %d", trans_granule
);
// Handle a read of the memory-mapped register file: translate the packet
// address to a byte offset, copy the 32- or 64-bit value little-endian
// into the packet, and complete it as an atomic response.
582 SMMUv3::readControl(PacketPtr pkt
)
584 DPRINTF(SMMUv3
, "readControl: addr=%08x size=%d\n",
585 pkt
->getAddr(), pkt
->getSize());
587 int offset
= pkt
->getAddr() - regsMap
.start();
588 assert(offset
>= 0 && offset
< SMMU_REG_SIZE
);
// Secure register block is not modelled; warn (and presumably return a
// dummy value — handling on the missing original lines 592-593).
590 if (inSecureBlock(offset
)) {
591 warn("smmu: secure registers (0x%x) are not implemented\n",
595 auto reg_ptr
= regs
.data
+ offset
;
// Only naturally-sized 32/64-bit accesses are allowed.
597 switch (pkt
->getSize()) {
598 case sizeof(uint32_t):
599 pkt
->setLE
<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr
));
601 case sizeof(uint64_t):
602 pkt
->setLE
<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr
));
605 panic("smmu: unallowed access size: %d bytes\n", pkt
->getSize());
609 pkt
->makeAtomicResponse();
// Handle a write to the memory-mapped register file: dispatch on the byte
// offset (offsetof into SMMURegs) and apply register-specific semantics.
// NOTE(review): the switch statement line and all break statements are
// missing from this extraction; code is kept byte-identical.
615 SMMUv3::writeControl(PacketPtr pkt
)
617 int offset
= pkt
->getAddr() - regsMap
.start();
618 assert(offset
>= 0 && offset
< SMMU_REG_SIZE
);
620 DPRINTF(SMMUv3
, "writeControl: addr=%08x size=%d data=%16x\n",
621 pkt
->getAddr(), pkt
->getSize(),
622 pkt
->getSize() == sizeof(uint64_t) ?
623 pkt
->getLE
<uint64_t>() : pkt
->getLE
<uint32_t>());
// CR0: the ack register mirrors the written value immediately, i.e.
// control changes take effect with no modelled delay.
626 case offsetof(SMMURegs
, cr0
):
627 assert(pkt
->getSize() == sizeof(uint32_t));
628 regs
.cr0
= regs
.cr0ack
= pkt
->getLE
<uint32_t>();
// Plain 32-bit read/write registers: stored as-is.
631 case offsetof(SMMURegs
, cr1
):
632 case offsetof(SMMURegs
, cr2
):
633 case offsetof(SMMURegs
, strtab_base_cfg
):
634 case offsetof(SMMURegs
, eventq_cons
):
635 case offsetof(SMMURegs
, eventq_irq_cfg1
):
636 case offsetof(SMMURegs
, priq_cons
):
637 assert(pkt
->getSize() == sizeof(uint32_t));
638 *reinterpret_cast<uint32_t *>(regs
.data
+ offset
) =
639 pkt
->getLE
<uint32_t>();
// CMDQ_CONS may only be written while the command queue is disabled.
642 case offsetof(SMMURegs
, cmdq_cons
):
643 assert(pkt
->getSize() == sizeof(uint32_t));
644 if (regs
.cr0
& CR0_CMDQEN_MASK
) {
645 warn("CMDQ is enabled: ignoring write to CMDQ_CONS\n");
647 *reinterpret_cast<uint32_t *>(regs
.data
+ offset
) =
648 pkt
->getLE
<uint32_t>();
// Writing CMDQ_PROD kicks off command processing on the next cycle.
652 case offsetof(SMMURegs
, cmdq_prod
):
653 assert(pkt
->getSize() == sizeof(uint32_t));
654 *reinterpret_cast<uint32_t *>(regs
.data
+ offset
) =
655 pkt
->getLE
<uint32_t>();
656 schedule(processCommandsEvent
, nextCycle());
// Plain 64-bit read/write registers.
659 case offsetof(SMMURegs
, strtab_base
):
660 case offsetof(SMMURegs
, eventq_irq_cfg0
):
661 assert(pkt
->getSize() == sizeof(uint64_t));
662 *reinterpret_cast<uint64_t *>(regs
.data
+ offset
) =
663 pkt
->getLE
<uint64_t>();
// CMDQ_BASE likewise requires the command queue to be disabled.
666 case offsetof(SMMURegs
, cmdq_base
):
667 assert(pkt
->getSize() == sizeof(uint64_t));
668 if (regs
.cr0
& CR0_CMDQEN_MASK
) {
669 warn("CMDQ is enabled: ignoring write to CMDQ_BASE\n");
671 *reinterpret_cast<uint64_t *>(regs
.data
+ offset
) =
672 pkt
->getLE
<uint64_t>();
// Re-basing the event queue resets its producer/consumer indices.
678 case offsetof(SMMURegs
, eventq_base
):
679 assert(pkt
->getSize() == sizeof(uint64_t));
680 *reinterpret_cast<uint64_t *>(regs
.data
+ offset
) =
681 pkt
->getLE
<uint64_t>();
682 regs
.eventq_cons
= 0;
683 regs
.eventq_prod
= 0;
686 case offsetof(SMMURegs
, priq_base
):
687 assert(pkt
->getSize() == sizeof(uint64_t));
688 *reinterpret_cast<uint64_t *>(regs
.data
+ offset
) =
689 pkt
->getLE
<uint64_t>();
// Default: secure registers are unimplemented; everything else here is
// read-only or undefined, so the write is dropped with a warning.
695 if (inSecureBlock(offset
)) {
696 warn("smmu: secure registers (0x%x) are not implemented\n",
699 warn("smmu: write to read-only/undefined register at 0x%x\n",
704 pkt
->makeAtomicResponse();
710 SMMUv3::inSecureBlock(uint32_t offs
) const
712 if (offs
>= offsetof(SMMURegs
, _secure_regs
) && offs
< SMMU_SECURE_SZ
)
// Body of SMMUv3::init() — the signature line is missing from this
// extraction. Validates port connectivity, enables the dedicated
// table-walk port mode when that port is wired up, and propagates
// address-range changes.
721 // make sure both sides are connected and have the same block size
722 if (!masterPort
.isConnected())
723 fatal("Master port is not connected.\n");
725 // If the second master port is connected for the table walks, enable
726 // the mode to send table walks through this port instead
727 if (masterTableWalkPort
.isConnected())
728 tableWalkPortEnable
= true;
730 // notify the master side of our address ranges
// Loop body (original lines 732-734) missing from this extraction;
// presumably calls a range-change notification per interface — TODO
// confirm.
731 for (auto ifc
: slaveInterfaces
) {
735 if (controlPort
.isConnected())
736 controlPort
.sendRangeChange();
// Body of SMMUv3::regStats() — the signature line is missing from this
// extraction. Registers statistics for each per-interface TLB, the shared
// caches, and several counters/distributions (the stat variable names on
// the missing lines are implied by the .name() strings below).
742 ClockedObject::regStats();
744 using namespace Stats
;
// Per-slave-interface micro/main TLB stats, suffixed with the index.
746 for (size_t i
= 0; i
< slaveInterfaces
.size(); i
++) {
747 slaveInterfaces
[i
]->microTLB
->regStats(
748 csprintf("%s.utlb%d", name(), i
));
749 slaveInterfaces
[i
]->mainTLB
->regStats(
750 csprintf("%s.maintlb%d", name(), i
));
753 tlb
.regStats(name() + ".tlb");
754 configCache
.regStats(name() + ".cfg");
755 ipaCache
.regStats(name() + ".ipa");
756 walkCache
.regStats(name() + ".walk");
// Stream-table and context-descriptor fetch counters.
759 .name(name() + ".steL1Fetches")
760 .desc("STE L1 fetches")
764 .name(name() + ".steFetches")
769 .name(name() + ".cdL1Fetches")
770 .desc("CD L1 fetches")
774 .name(name() + ".cdFetches")
// Latency distributions (bucketed 0..2000000 in steps of 2000).
779 .init(0, 2000000, 2000)
780 .name(name() + ".translationTimeDist")
781 .desc("Time to translate address")
785 .init(0, 2000000, 2000)
786 .name(name() + ".ptwTimeDist")
787 .desc("Time to walk page tables")
// Body of SMMUv3::drain() — the signature line is missing from this
// extraction. Reports Draining while the command executor still has work,
// Drained otherwise.
794 // Wait until the Command Executor is not busy
795 if (commandExecutor
.isBusy()) {
796 return DrainState::Draining
;
798 return DrainState::Drained
;
802 SMMUv3::serialize(CheckpointOut
&cp
) const
804 DPRINTF(Checkpoint
, "Serializing SMMUv3\n");
806 SERIALIZE_ARRAY(regs
.data
, sizeof(regs
.data
) / sizeof(regs
.data
[0]));
810 SMMUv3::unserialize(CheckpointIn
&cp
)
812 DPRINTF(Checkpoint
, "Unserializing SMMUv3\n");
814 UNSERIALIZE_ARRAY(regs
.data
, sizeof(regs
.data
) / sizeof(regs
.data
[0]));
// Map a port name from the Python configuration to the corresponding port
// object, deferring unknown names to the base class.
// NOTE(review): the return statements for "master" and "control" (original
// lines 821 and 825) are missing from this extraction.
818 SMMUv3::getPort(const std::string
&name
, PortID id
)
820 if (name
== "master") {
822 } else if (name
== "master_walker") {
823 return masterTableWalkPort
;
824 } else if (name
== "control") {
827 return ClockedObject::getPort(name
, id
);
// Standard gem5 params factory: instantiate the SimObject from its params.
832 SMMUv3Params::create()
834 return new SMMUv3(this);