misc: Replaced master/slave terminology
[gem5.git] / src / dev / arm / smmu_v3.cc
1 /*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include "dev/arm/smmu_v3.hh"
39
40 #include <cstddef>
41 #include <cstring>
42
43 #include "base/bitfield.hh"
44 #include "base/cast.hh"
45 #include "base/logging.hh"
46 #include "base/trace.hh"
47 #include "base/types.hh"
48 #include "debug/Checkpoint.hh"
49 #include "debug/SMMUv3.hh"
50 #include "dev/arm/smmu_v3_transl.hh"
51 #include "mem/packet_access.hh"
52 #include "sim/system.hh"
53
/**
 * Construct the SMMUv3 model from its parameter set: wires up the three
 * ports (request, table-walk, control), sizes the TLB/config/IPA/walk
 * caches, and initializes the semaphores and latencies that model
 * internal contention. Registers are zeroed and the read-only ID
 * registers are seeded from the params.
 */
SMMUv3::SMMUv3(SMMUv3Params *params) :
    ClockedObject(params),
    system(*params->system),
    requestorId(params->system->getRequestorId(this)),
    requestPort(name() + ".request", *this),
    tableWalkPort(name() + ".walker", *this),
    controlPort(name() + ".control", *this, params->reg_map),
    tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy),
    configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy),
    ipaCache(params->ipa_entries, params->ipa_assoc, params->ipa_policy),
    walkCache({ { params->walk_S1L0, params->walk_S1L1,
                  params->walk_S1L2, params->walk_S1L3,
                  params->walk_S2L0, params->walk_S2L1,
                  params->walk_S2L2, params->walk_S2L3 } },
              params->walk_assoc, params->walk_policy),
    tlbEnable(params->tlb_enable),
    configCacheEnable(params->cfg_enable),
    ipaCacheEnable(params->ipa_enable),
    walkCacheEnable(params->walk_enable),
    // Only enabled later, in init(), if the walker port is connected.
    tableWalkPortEnable(false),
    walkCacheNonfinalEnable(params->wc_nonfinal_enable),
    walkCacheS1Levels(params->wc_s1_levels),
    walkCacheS2Levels(params->wc_s2_levels),
    requestPortWidth(params->request_port_width),
    tlbSem(params->tlb_slots),
    ifcSmmuSem(1),
    smmuIfcSem(1),
    configSem(params->cfg_slots),
    ipaSem(params->ipa_slots),
    walkSem(params->walk_slots),
    requestPortSem(1),
    transSem(params->xlate_slots),
    ptwSem(params->ptw_slots),
    cycleSem(1),
    tlbLat(params->tlb_lat),
    ifcSmmuLat(params->ifc_smmu_lat),
    smmuIfcLat(params->smmu_ifc_lat),
    configLat(params->cfg_lat),
    ipaLat(params->ipa_lat),
    walkLat(params->walk_lat),
    deviceInterfaces(params->device_interfaces),
    commandExecutor(name() + ".cmd_exec", *this),
    regsMap(params->reg_map),
    processCommandsEvent(this)
{
    fatal_if(regsMap.size() != SMMU_REG_SIZE,
        "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n",
        regsMap.size(), SMMU_REG_SIZE);

    // Init smmu registers to 0
    memset(&regs, 0, sizeof(regs));

    // Setup RO ID registers
    regs.idr0 = params->smmu_idr0;
    regs.idr1 = params->smmu_idr1;
    regs.idr2 = params->smmu_idr2;
    regs.idr3 = params->smmu_idr3;
    regs.idr4 = params->smmu_idr4;
    regs.idr5 = params->smmu_idr5;
    regs.iidr = params->smmu_iidr;
    regs.aidr = params->smmu_aidr;

    // TODO: At the moment it is possible to set the ID registers to hold
    // any possible value. It would be nice to have a sanity check here
    // at construction time in case some idx registers are programmed to
    // store unallowed values or if there are configuration conflicts.
    warn("SMMUv3 IDx register values unchecked\n");

    // Give each device interface a back-pointer to this SMMU.
    for (auto ifc : deviceInterfaces)
        ifc->setSMMU(this);
}
125
126 bool
127 SMMUv3::recvTimingResp(PacketPtr pkt)
128 {
129 DPRINTF(SMMUv3, "[t] requestor resp addr=%#x size=%#x\n",
130 pkt->getAddr(), pkt->getSize());
131
132 // @todo: We need to pay for this and not just zero it out
133 pkt->headerDelay = pkt->payloadDelay = 0;
134
135 SMMUProcess *proc =
136 safe_cast<SMMUProcess *>(pkt->popSenderState());
137
138 runProcessTiming(proc, pkt);
139
140 return true;
141 }
142
143 void
144 SMMUv3::recvReqRetry()
145 {
146 assert(!packetsToRetry.empty());
147
148 while (!packetsToRetry.empty()) {
149 SMMUAction a = packetsToRetry.front();
150
151 assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL);
152
153 DPRINTF(SMMUv3, "[t] requestor retr addr=%#x size=%#x\n",
154 a.pkt->getAddr(), a.pkt->getSize());
155
156 if (!requestPort.sendTimingReq(a.pkt))
157 break;
158
159 packetsToRetry.pop();
160
161 /*
162 * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
163 * on the requestor interface; this means that we no longer hold on to
164 * that transaction and therefore can accept a new one.
165 * If the response port was stalled then unstall it (send retry).
166 */
167 if (a.type == ACTION_SEND_REQ_FINAL)
168 scheduleDeviceRetries();
169 }
170 }
171
172 bool
173 SMMUv3::tableWalkRecvTimingResp(PacketPtr pkt)
174 {
175 DPRINTF(SMMUv3, "[t] requestor HWTW resp addr=%#x size=%#x\n",
176 pkt->getAddr(), pkt->getSize());
177
178 // @todo: We need to pay for this and not just zero it out
179 pkt->headerDelay = pkt->payloadDelay = 0;
180
181 SMMUProcess *proc =
182 safe_cast<SMMUProcess *>(pkt->popSenderState());
183
184 runProcessTiming(proc, pkt);
185
186 return true;
187 }
188
189 void
190 SMMUv3::tableWalkRecvReqRetry()
191 {
192 assert(tableWalkPortEnable);
193 assert(!packetsTableWalkToRetry.empty());
194
195 while (!packetsTableWalkToRetry.empty()) {
196 SMMUAction a = packetsTableWalkToRetry.front();
197
198 assert(a.type==ACTION_SEND_REQ);
199
200 DPRINTF(SMMUv3, "[t] requestor HWTW retr addr=%#x size=%#x\n",
201 a.pkt->getAddr(), a.pkt->getSize());
202
203 if (!tableWalkPort.sendTimingReq(a.pkt))
204 break;
205
206 packetsTableWalkToRetry.pop();
207 }
208 }
209
210 void
211 SMMUv3::scheduleDeviceRetries()
212 {
213 for (auto ifc : deviceInterfaces) {
214 ifc->scheduleDeviceRetry();
215 }
216 }
217
218 SMMUAction
219 SMMUv3::runProcess(SMMUProcess *proc, PacketPtr pkt)
220 {
221 if (system.isAtomicMode()) {
222 return runProcessAtomic(proc, pkt);
223 } else if (system.isTimingMode()) {
224 return runProcessTiming(proc, pkt);
225 } else {
226 panic("Not in timing or atomic mode!");
227 }
228 }
229
/**
 * Run a process to completion in atomic mode, accumulating the latency
 * of every memory access and delay action it performs. The loop keeps
 * feeding the process with the response packet of its own requests
 * until it produces a terminal action (response or sleep).
 */
SMMUAction
SMMUv3::runProcessAtomic(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action;
    Tick delay = 0;
    bool finished = false;

    do {
        action = proc->run(pkt);

        switch (action.type) {
        case ACTION_SEND_REQ:
            // Send an MMU initiated request on the table walk port if
            // it is enabled. Otherwise, fall through and handle same
            // as the final ACTION_SEND_REQ_FINAL request.
            if (tableWalkPortEnable) {
                delay += tableWalkPort.sendAtomic(action.pkt);
                // Feed the (now answered) packet back into the process.
                pkt = action.pkt;
                break;
            }
            M5_FALLTHROUGH;
        case ACTION_SEND_REQ_FINAL:
            delay += requestPort.sendAtomic(action.pkt);
            pkt = action.pkt;
            break;

        case ACTION_SEND_RESP:
        case ACTION_SEND_RESP_ATS:
        case ACTION_SLEEP:
            // Terminal actions: the process is done (for now).
            finished = true;
            break;

        case ACTION_DELAY:
            // Pure modelled latency, no memory access.
            delay += action.delay;
            break;

        case ACTION_TERMINATE:
            panic("ACTION_TERMINATE in atomic mode\n");

        default:
            panic("Unknown action\n");
        }
    } while (!finished);

    // Report the total accumulated latency to the caller.
    action.delay = delay;

    return action;
}
278
/**
 * Advance a process one step in timing mode and carry out the action it
 * returns: issue its memory request (queueing it for retry if the port
 * is busy), schedule its response on the device interface, or clean up
 * the process when it terminates. Unlike the atomic path, the process
 * resumes later from recvTimingResp()/tableWalkRecvTimingResp().
 */
SMMUAction
SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action = proc->run(pkt);

    switch (action.type) {
    case ACTION_SEND_REQ:
        // Send an MMU initiated request on the table walk port if it is
        // enabled. Otherwise, fall through and handle same as the final
        // ACTION_SEND_REQ_FINAL request.
        if (tableWalkPortEnable) {
            // Stash the process so the response handler can resume it.
            action.pkt->pushSenderState(proc);

            DPRINTF(SMMUv3, "[t] requestor HWTW req addr=%#x size=%#x\n",
                    action.pkt->getAddr(), action.pkt->getSize());

            // Preserve ordering: only try to send if nothing is already
            // waiting for a retry on this port.
            if (packetsTableWalkToRetry.empty()
                    && tableWalkPort.sendTimingReq(action.pkt)) {
                scheduleDeviceRetries();
            } else {
                DPRINTF(SMMUv3, "[t] requestor HWTW req needs retry,"
                        " qlen=%d\n", packetsTableWalkToRetry.size());
                packetsTableWalkToRetry.push(action);
            }

            break;
        }
        M5_FALLTHROUGH;
    case ACTION_SEND_REQ_FINAL:
        action.pkt->pushSenderState(proc);

        DPRINTF(SMMUv3, "[t] requestor req addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        if (packetsToRetry.empty() &&
            requestPort.sendTimingReq(action.pkt)) {
            scheduleDeviceRetries();
        } else {
            DPRINTF(SMMUv3, "[t] requestor req needs retry, qlen=%d\n",
                    packetsToRetry.size());
            packetsToRetry.push(action);
        }

        break;

    case ACTION_SEND_RESP:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] responder resp addr=%#x size=%#x\n",
                action.pkt->getAddr(),
                action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedTimingResp(action.pkt);

        // The translation is complete; the process is no longer needed.
        delete proc;
        break;

    case ACTION_SEND_RESP_ATS:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] ATS responder resp addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedAtsTimingResp(action.pkt);

        delete proc;
        break;

    case ACTION_DELAY:
    case ACTION_SLEEP:
        // The process rescheduled itself (or is waiting to be woken);
        // nothing to do here.
        break;

    case ACTION_TERMINATE:
        delete proc;
        break;

    default:
        panic("Unknown action\n");
    }

    return action;
}
365
366 void
367 SMMUv3::processCommands()
368 {
369 DPRINTF(SMMUv3, "processCommands()\n");
370
371 if (system.isAtomicMode()) {
372 SMMUAction a = runProcessAtomic(&commandExecutor, NULL);
373 (void) a;
374 } else if (system.isTimingMode()) {
375 if (!commandExecutor.isBusy())
376 runProcessTiming(&commandExecutor, NULL);
377 } else {
378 panic("Not in timing or atomic mode!");
379 }
380 }
381
/**
 * Execute one command from the SMMU command queue.
 *
 * Configuration-invalidation commands (CFGI_*) drop entries from the
 * config cache and the per-device micro/main TLBs; TLB-invalidation
 * commands (TLBI_*) additionally flush the shared TLB, the IPA cache
 * and/or the walk cache, scoped by stream ID, substream ID, ASID,
 * VMID and/or address as the command specifies.
 */
void
SMMUv3::processCommand(const SMMUCommand &cmd)
{
    switch (cmd.dw0.type) {
    case CMD_PRF_CONFIG:
        // Prefetch commands are performance hints; this model ignores them.
        DPRINTF(SMMUv3, "CMD_PREFETCH_CONFIG - ignored\n");
        break;

    case CMD_PRF_ADDR:
        DPRINTF(SMMUv3, "CMD_PREFETCH_ADDR - ignored\n");
        break;

    case CMD_CFGI_STE: {
        // Invalidate the cached Stream Table Entry for a single stream ID,
        // plus any translations derived from it in the device TLBs.
        DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid);
        configCache.invalidateSID(cmd.dw0.sid);

        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
            dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
        }
        break;
    }

    case CMD_CFGI_STE_RANGE: {
        const auto range = cmd.dw1.range;
        if (range == 31) {
            // CMD_CFGI_ALL is an alias of CMD_CFGI_STE_RANGE with
            // range = 31
            DPRINTF(SMMUv3, "CMD_CFGI_ALL\n");
            configCache.invalidateAll();

            for (auto dev_interface : deviceInterfaces) {
                dev_interface->microTLB->invalidateAll();
                dev_interface->mainTLB->invalidateAll();
            }
        } else {
            DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n");
            // Invalidate the naturally aligned block of 2^(range+1)
            // stream IDs that contains cmd.dw0.sid.
            const auto start_sid = cmd.dw0.sid & ~((1 << (range + 1)) - 1);
            const auto end_sid = start_sid + (1 << (range + 1)) - 1;
            for (auto sid = start_sid; sid <= end_sid; sid++) {
                configCache.invalidateSID(sid);

                for (auto dev_interface : deviceInterfaces) {
                    dev_interface->microTLB->invalidateSID(sid);
                    dev_interface->mainTLB->invalidateSID(sid);
                }
            }
        }
        break;
    }

    case CMD_CFGI_CD: {
        // Invalidate a single Context Descriptor (one substream of one
        // stream) and the translations derived from it.
        DPRINTF(SMMUv3, "CMD_CFGI_CD sid=%#x ssid=%#x\n",
                cmd.dw0.sid, cmd.dw0.ssid);
        configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid);

        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateSSID(
                cmd.dw0.sid, cmd.dw0.ssid);
            dev_interface->mainTLB->invalidateSSID(
                cmd.dw0.sid, cmd.dw0.ssid);
        }
        break;
    }

    case CMD_CFGI_CD_ALL: {
        // Invalidate all Context Descriptors of a stream; implemented as
        // a whole-SID invalidation.
        DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid);
        configCache.invalidateSID(cmd.dw0.sid);

        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
            dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
        }
        break;
    }

    case CMD_TLBI_NH_ALL: {
        // Invalidate all non-secure EL1 stage-1 translations for a VMID.
        DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid);
        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
            dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
        }
        tlb.invalidateVMID(cmd.dw0.vmid);
        walkCache.invalidateVMID(cmd.dw0.vmid);
        break;
    }

    case CMD_TLBI_NH_ASID: {
        // Invalidate by (ASID, VMID) pair.
        DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n",
                cmd.dw0.asid, cmd.dw0.vmid);
        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateASID(
                cmd.dw0.asid, cmd.dw0.vmid);
            dev_interface->mainTLB->invalidateASID(
                cmd.dw0.asid, cmd.dw0.vmid);
        }
        tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
        walkCache.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
        break;
    }

    case CMD_TLBI_NH_VAA: {
        // Invalidate by VA across all ASIDs of a VMID.
        const Addr addr = cmd.addr();
        DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n",
                addr, cmd.dw0.vmid);
        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateVAA(
                addr, cmd.dw0.vmid);
            dev_interface->mainTLB->invalidateVAA(
                addr, cmd.dw0.vmid);
        }
        tlb.invalidateVAA(addr, cmd.dw0.vmid);
        // The "leaf" qualifier restricts the walk-cache flush to
        // last-level (leaf) entries only.
        const bool leaf_only = cmd.dw1.leaf ? true : false;
        walkCache.invalidateVAA(addr, cmd.dw0.vmid, leaf_only);
        break;
    }

    case CMD_TLBI_NH_VA: {
        // Invalidate by (VA, ASID, VMID) triple.
        const Addr addr = cmd.addr();
        DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n",
                addr, cmd.dw0.asid, cmd.dw0.vmid);
        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateVA(
                addr, cmd.dw0.asid, cmd.dw0.vmid);
            dev_interface->mainTLB->invalidateVA(
                addr, cmd.dw0.asid, cmd.dw0.vmid);
        }
        tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid);
        const bool leaf_only = cmd.dw1.leaf ? true : false;
        walkCache.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid,
                               leaf_only);
        break;
    }

    case CMD_TLBI_S2_IPA: {
        // Invalidate stage-2-only translations for an IPA within a VMID.
        const Addr addr = cmd.addr();
        DPRINTF(SMMUv3, "CMD_TLBI_S2_IPA ipa=%#08x vmid=%#x\n",
                addr, cmd.dw0.vmid);
        // This does not invalidate TLBs containing
        // combined Stage1 + Stage2 translations, as per the spec.
        ipaCache.invalidateIPA(addr, cmd.dw0.vmid);

        // Non-leaf invalidation may affect any walk-cache entry of the
        // VMID, so flush them all.
        if (!cmd.dw1.leaf)
            walkCache.invalidateVMID(cmd.dw0.vmid);
        break;
    }

    case CMD_TLBI_S12_VMALL: {
        // Invalidate all stage-1 and stage-2 translations for a VMID.
        DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid);
        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
            dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
        }
        tlb.invalidateVMID(cmd.dw0.vmid);
        ipaCache.invalidateVMID(cmd.dw0.vmid);
        walkCache.invalidateVMID(cmd.dw0.vmid);
        break;
    }

    case CMD_TLBI_NSNH_ALL: {
        // Global invalidation: flush everything, everywhere.
        DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n");
        for (auto dev_interface : deviceInterfaces) {
            dev_interface->microTLB->invalidateAll();
            dev_interface->mainTLB->invalidateAll();
        }
        tlb.invalidateAll();
        ipaCache.invalidateAll();
        walkCache.invalidateAll();
        break;
    }

    case CMD_RESUME:
        // Stall-resume (fault handling) is not modelled yet.
        DPRINTF(SMMUv3, "CMD_RESUME\n");
        panic("resume unimplemented");
        break;

    default:
        warn("Unimplemented command %#x\n", cmd.dw0.type);
        break;
    }
}
563
564 const PageTableOps*
565 SMMUv3::getPageTableOps(uint8_t trans_granule)
566 {
567 static V8PageTableOps4k ptOps4k;
568 static V8PageTableOps16k ptOps16k;
569 static V8PageTableOps64k ptOps64k;
570
571 switch (trans_granule) {
572 case TRANS_GRANULE_4K: return &ptOps4k;
573 case TRANS_GRANULE_16K: return &ptOps16k;
574 case TRANS_GRANULE_64K: return &ptOps64k;
575 default:
576 panic("Unknown translation granule size %d", trans_granule);
577 }
578 }
579
580 Tick
581 SMMUv3::readControl(PacketPtr pkt)
582 {
583 DPRINTF(SMMUv3, "readControl: addr=%08x size=%d\n",
584 pkt->getAddr(), pkt->getSize());
585
586 int offset = pkt->getAddr() - regsMap.start();
587 assert(offset >= 0 && offset < SMMU_REG_SIZE);
588
589 if (inSecureBlock(offset)) {
590 warn("smmu: secure registers (0x%x) are not implemented\n",
591 offset);
592 }
593
594 auto reg_ptr = regs.data + offset;
595
596 switch (pkt->getSize()) {
597 case sizeof(uint32_t):
598 pkt->setLE<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr));
599 break;
600 case sizeof(uint64_t):
601 pkt->setLE<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr));
602 break;
603 default:
604 panic("smmu: unallowed access size: %d bytes\n", pkt->getSize());
605 break;
606 }
607
608 pkt->makeAtomicResponse();
609
610 return 0;
611 }
612
/**
 * Handle a register write arriving on the control port. Writes to the
 * command/event/PRI queue base registers reset the matching producer
 * and consumer indices; a write to CMDQ_PROD kicks command processing.
 * Writes to CMDQ_* are ignored while the command queue is enabled, and
 * writes to secure or read-only/undefined registers only warn.
 */
Tick
SMMUv3::writeControl(PacketPtr pkt)
{
    int offset = pkt->getAddr() - regsMap.start();
    assert(offset >= 0 && offset < SMMU_REG_SIZE);

    DPRINTF(SMMUv3, "writeControl: addr=%08x size=%d data=%16x\n",
            pkt->getAddr(), pkt->getSize(),
            pkt->getSize() == sizeof(uint64_t) ?
            pkt->getLE<uint64_t>() : pkt->getLE<uint32_t>());

    switch (offset) {
    case offsetof(SMMURegs, cr0):
        assert(pkt->getSize() == sizeof(uint32_t));
        // Control updates complete instantly in this model, so CR0ACK
        // mirrors CR0 immediately.
        regs.cr0 = regs.cr0ack = pkt->getLE<uint32_t>();
        break;

    // Plain 32-bit read/write registers with no side effects.
    case offsetof(SMMURegs, cr1):
    case offsetof(SMMURegs, cr2):
    case offsetof(SMMURegs, strtab_base_cfg):
    case offsetof(SMMURegs, eventq_cons):
    case offsetof(SMMURegs, eventq_irq_cfg1):
    case offsetof(SMMURegs, priq_cons):
        assert(pkt->getSize() == sizeof(uint32_t));
        *reinterpret_cast<uint32_t *>(regs.data + offset) =
            pkt->getLE<uint32_t>();
        break;

    case offsetof(SMMURegs, cmdq_cons):
        assert(pkt->getSize() == sizeof(uint32_t));
        if (regs.cr0 & CR0_CMDQEN_MASK) {
            warn("CMDQ is enabled: ignoring write to CMDQ_CONS\n");
        } else {
            *reinterpret_cast<uint32_t *>(regs.data + offset) =
                pkt->getLE<uint32_t>();
        }
        break;

    case offsetof(SMMURegs, cmdq_prod):
        assert(pkt->getSize() == sizeof(uint32_t));
        *reinterpret_cast<uint32_t *>(regs.data + offset) =
            pkt->getLE<uint32_t>();
        // New commands are available: process them next cycle.
        schedule(processCommandsEvent, nextCycle());
        break;

    // Plain 64-bit read/write registers with no side effects.
    case offsetof(SMMURegs, strtab_base):
    case offsetof(SMMURegs, eventq_irq_cfg0):
        assert(pkt->getSize() == sizeof(uint64_t));
        *reinterpret_cast<uint64_t *>(regs.data + offset) =
            pkt->getLE<uint64_t>();
        break;

    case offsetof(SMMURegs, cmdq_base):
        assert(pkt->getSize() == sizeof(uint64_t));
        if (regs.cr0 & CR0_CMDQEN_MASK) {
            warn("CMDQ is enabled: ignoring write to CMDQ_BASE\n");
        } else {
            *reinterpret_cast<uint64_t *>(regs.data + offset) =
                pkt->getLE<uint64_t>();
            // Re-basing the queue resets both indices.
            regs.cmdq_cons = 0;
            regs.cmdq_prod = 0;
        }
        break;

    case offsetof(SMMURegs, eventq_base):
        assert(pkt->getSize() == sizeof(uint64_t));
        *reinterpret_cast<uint64_t *>(regs.data + offset) =
            pkt->getLE<uint64_t>();
        regs.eventq_cons = 0;
        regs.eventq_prod = 0;
        break;

    case offsetof(SMMURegs, priq_base):
        assert(pkt->getSize() == sizeof(uint64_t));
        *reinterpret_cast<uint64_t *>(regs.data + offset) =
            pkt->getLE<uint64_t>();
        regs.priq_cons = 0;
        regs.priq_prod = 0;
        break;

    default:
        if (inSecureBlock(offset)) {
            warn("smmu: secure registers (0x%x) are not implemented\n",
                 offset);
        } else {
            warn("smmu: write to read-only/undefined register at 0x%x\n",
                 offset);
        }
    }

    pkt->makeAtomicResponse();

    return 0;
}
707
708 bool
709 SMMUv3::inSecureBlock(uint32_t offs) const
710 {
711 if (offs >= offsetof(SMMURegs, _secure_regs) && offs < SMMU_SECURE_SZ)
712 return true;
713 else
714 return false;
715 }
716
717 void
718 SMMUv3::init()
719 {
720 // make sure both sides are connected and have the same block size
721 if (!requestPort.isConnected())
722 fatal("Request port is not connected.\n");
723
724 // If the second request port is connected for the table walks, enable
725 // the mode to send table walks through this port instead
726 if (tableWalkPort.isConnected())
727 tableWalkPortEnable = true;
728
729 // notify the request side of our address ranges
730 for (auto ifc : deviceInterfaces) {
731 ifc->sendRange();
732 }
733
734 if (controlPort.isConnected())
735 controlPort.sendRangeChange();
736 }
737
/**
 * Register all statistics: per-interface TLB stats, the shared cache
 * stats, fetch counters for stream/context table entries, and latency
 * distributions for translations and page-table walks.
 */
void
SMMUv3::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    // Per-device-interface TLB statistics, indexed by interface number.
    for (size_t i = 0; i < deviceInterfaces.size(); i++) {
        deviceInterfaces[i]->microTLB->regStats(
            csprintf("%s.utlb%d", name(), i));
        deviceInterfaces[i]->mainTLB->regStats(
            csprintf("%s.maintlb%d", name(), i));
    }

    // Shared structures.
    tlb.regStats(name() + ".tlb");
    configCache.regStats(name() + ".cfg");
    ipaCache.regStats(name() + ".ipa");
    walkCache.regStats(name() + ".walk");

    // Stream Table Entry fetch counters (L1 descriptor and final STE).
    steL1Fetches
        .name(name() + ".steL1Fetches")
        .desc("STE L1 fetches")
        .flags(pdf);

    steFetches
        .name(name() + ".steFetches")
        .desc("STE fetches")
        .flags(pdf);

    // Context Descriptor fetch counters (L1 descriptor and final CD).
    cdL1Fetches
        .name(name() + ".cdL1Fetches")
        .desc("CD L1 fetches")
        .flags(pdf);

    cdFetches
        .name(name() + ".cdFetches")
        .desc("CD fetches")
        .flags(pdf);

    // Latency histograms (ticks): 2000 buckets over [0, 2000000].
    translationTimeDist
        .init(0, 2000000, 2000)
        .name(name() + ".translationTimeDist")
        .desc("Time to translate address")
        .flags(pdf);

    ptwTimeDist
        .init(0, 2000000, 2000)
        .name(name() + ".ptwTimeDist")
        .desc("Time to walk page tables")
        .flags(pdf);
}
789
790 DrainState
791 SMMUv3::drain()
792 {
793 // Wait until the Command Executor is not busy
794 if (commandExecutor.isBusy()) {
795 return DrainState::Draining;
796 }
797 return DrainState::Drained;
798 }
799
800 void
801 SMMUv3::serialize(CheckpointOut &cp) const
802 {
803 DPRINTF(Checkpoint, "Serializing SMMUv3\n");
804
805 SERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
806 }
807
808 void
809 SMMUv3::unserialize(CheckpointIn &cp)
810 {
811 DPRINTF(Checkpoint, "Unserializing SMMUv3\n");
812
813 UNSERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
814 }
815
816 Port&
817 SMMUv3::getPort(const std::string &name, PortID id)
818 {
819 if (name == "request") {
820 return requestPort;
821 } else if (name == "walker") {
822 return tableWalkPort;
823 } else if (name == "control") {
824 return controlPort;
825 } else {
826 return ClockedObject::getPort(name, id);
827 }
828 }
829
// Standard gem5 params-factory hook: build the SimObject from its params.
SMMUv3*
SMMUv3Params::create()
{
    return new SMMUv3(this);
}