dev: Delete the authors list from files in src/dev.
[gem5.git] / src / dev / arm / smmu_v3.cc
1 /*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include "dev/arm/smmu_v3.hh"
39
40 #include <cstddef>
41 #include <cstring>
42
43 #include "base/bitfield.hh"
44 #include "base/cast.hh"
45 #include "base/logging.hh"
46 #include "base/trace.hh"
47 #include "base/types.hh"
48 #include "debug/Checkpoint.hh"
49 #include "debug/SMMUv3.hh"
50 #include "dev/arm/smmu_v3_transl.hh"
51 #include "mem/packet_access.hh"
52 #include "sim/system.hh"
53
/**
 * SMMUv3 constructor: wires up the master/table-walk/control ports,
 * sizes the TLB, configuration, IPA and walk caches, initialises the
 * flow-control semaphores and latencies from the Python-visible
 * parameters, and programs the read-only ID registers.
 */
SMMUv3::SMMUv3(SMMUv3Params *params) :
    ClockedObject(params),
    system(*params->system),
    masterId(params->system->getMasterId(this)),
    masterPort(name() + ".master", *this),
    masterTableWalkPort(name() + ".master_walker", *this),
    controlPort(name() + ".control", *this, params->reg_map),
    tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy),
    configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy),
    ipaCache(params->ipa_entries, params->ipa_assoc, params->ipa_policy),
    // One walk-cache way count per stage/level combination (S1L0..S2L3).
    walkCache({ { params->walk_S1L0, params->walk_S1L1,
                  params->walk_S1L2, params->walk_S1L3,
                  params->walk_S2L0, params->walk_S2L1,
                  params->walk_S2L2, params->walk_S2L3 } },
              params->walk_assoc, params->walk_policy),
    tlbEnable(params->tlb_enable),
    configCacheEnable(params->cfg_enable),
    ipaCacheEnable(params->ipa_enable),
    walkCacheEnable(params->walk_enable),
    // Enabled later in init() if the walker port is actually connected.
    tableWalkPortEnable(false),
    walkCacheNonfinalEnable(params->wc_nonfinal_enable),
    walkCacheS1Levels(params->wc_s1_levels),
    walkCacheS2Levels(params->wc_s2_levels),
    masterPortWidth(params->master_port_width),
    // Semaphores limiting the number of in-flight accesses per resource.
    tlbSem(params->tlb_slots),
    ifcSmmuSem(1),
    smmuIfcSem(1),
    configSem(params->cfg_slots),
    ipaSem(params->ipa_slots),
    walkSem(params->walk_slots),
    masterPortSem(1),
    transSem(params->xlate_slots),
    ptwSem(params->ptw_slots),
    cycleSem(1),
    // Modelled access latencies for each internal structure.
    tlbLat(params->tlb_lat),
    ifcSmmuLat(params->ifc_smmu_lat),
    smmuIfcLat(params->smmu_ifc_lat),
    configLat(params->cfg_lat),
    ipaLat(params->ipa_lat),
    walkLat(params->walk_lat),
    slaveInterfaces(params->slave_interfaces),
    commandExecutor(name() + ".cmd_exec", *this),
    regsMap(params->reg_map),
    processCommandsEvent(this)
{
    fatal_if(regsMap.size() != SMMU_REG_SIZE,
        "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n",
        regsMap.size(), SMMU_REG_SIZE);

    // Init smmu registers to 0
    memset(&regs, 0, sizeof(regs));

    // Setup RO ID registers
    regs.idr0 = params->smmu_idr0;
    regs.idr1 = params->smmu_idr1;
    regs.idr2 = params->smmu_idr2;
    regs.idr3 = params->smmu_idr3;
    regs.idr4 = params->smmu_idr4;
    regs.idr5 = params->smmu_idr5;
    regs.iidr = params->smmu_iidr;
    regs.aidr = params->smmu_aidr;

    // TODO: At the moment it is possible to set the ID registers to hold
    // any value. It would be nice to have a sanity check here at
    // construction time in case some ID registers are programmed to
    // store unallowed values or if there are configuration conflicts.
    warn("SMMUv3 IDx register values unchecked\n");

    // Give each slave interface a back-pointer to this SMMU.
    for (auto ifc : slaveInterfaces)
        ifc->setSMMU(this);
}
125
126 bool
127 SMMUv3::masterRecvTimingResp(PacketPtr pkt)
128 {
129 DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n",
130 pkt->getAddr(), pkt->getSize());
131
132 // @todo: We need to pay for this and not just zero it out
133 pkt->headerDelay = pkt->payloadDelay = 0;
134
135 SMMUProcess *proc =
136 safe_cast<SMMUProcess *>(pkt->popSenderState());
137
138 runProcessTiming(proc, pkt);
139
140 return true;
141 }
142
143 void
144 SMMUv3::masterRecvReqRetry()
145 {
146 assert(!packetsToRetry.empty());
147
148 while (!packetsToRetry.empty()) {
149 SMMUAction a = packetsToRetry.front();
150
151 assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL);
152
153 DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n",
154 a.pkt->getAddr(), a.pkt->getSize());
155
156 if (!masterPort.sendTimingReq(a.pkt))
157 break;
158
159 packetsToRetry.pop();
160
161 /*
162 * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
163 * on the master interface; this means that we no longer hold on to
164 * that transaction and therefore can accept a new one.
165 * If the slave port was stalled then unstall it (send retry).
166 */
167 if (a.type == ACTION_SEND_REQ_FINAL)
168 scheduleSlaveRetries();
169 }
170 }
171
172 bool
173 SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt)
174 {
175 DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n",
176 pkt->getAddr(), pkt->getSize());
177
178 // @todo: We need to pay for this and not just zero it out
179 pkt->headerDelay = pkt->payloadDelay = 0;
180
181 SMMUProcess *proc =
182 safe_cast<SMMUProcess *>(pkt->popSenderState());
183
184 runProcessTiming(proc, pkt);
185
186 return true;
187 }
188
189 void
190 SMMUv3::masterTableWalkRecvReqRetry()
191 {
192 assert(tableWalkPortEnable);
193 assert(!packetsTableWalkToRetry.empty());
194
195 while (!packetsTableWalkToRetry.empty()) {
196 SMMUAction a = packetsTableWalkToRetry.front();
197
198 assert(a.type==ACTION_SEND_REQ);
199
200 DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n",
201 a.pkt->getAddr(), a.pkt->getSize());
202
203 if (!masterTableWalkPort.sendTimingReq(a.pkt))
204 break;
205
206 packetsTableWalkToRetry.pop();
207 }
208 }
209
210 void
211 SMMUv3::scheduleSlaveRetries()
212 {
213 for (auto ifc : slaveInterfaces) {
214 ifc->scheduleDeviceRetry();
215 }
216 }
217
218 SMMUAction
219 SMMUv3::runProcess(SMMUProcess *proc, PacketPtr pkt)
220 {
221 if (system.isAtomicMode()) {
222 return runProcessAtomic(proc, pkt);
223 } else if (system.isTimingMode()) {
224 return runProcessTiming(proc, pkt);
225 } else {
226 panic("Not in timing or atomic mode!");
227 }
228 }
229
/**
 * Drive a translation process to completion in atomic mode: keep
 * re-entering the process until it yields a terminal action
 * (response, ATS response or sleep), accumulating the memory-access
 * and internal delays into the returned action's delay field.
 */
SMMUAction
SMMUv3::runProcessAtomic(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action;
    Tick delay = 0;
    bool finished = false;

    do {
        action = proc->run(pkt);

        switch (action.type) {
          case ACTION_SEND_REQ:
            // Send an MMU initiated request on the table walk port if it is
            // enabled. Otherwise, fall through and handle same as the final
            // ACTION_SEND_REQ_FINAL request.
            if (tableWalkPortEnable) {
                delay += masterTableWalkPort.sendAtomic(action.pkt);
                // Feed the response packet back into the process next round.
                pkt = action.pkt;
                break;
            }
            M5_FALLTHROUGH;
          case ACTION_SEND_REQ_FINAL:
            delay += masterPort.sendAtomic(action.pkt);
            pkt = action.pkt;
            break;

          case ACTION_SEND_RESP:
          case ACTION_SEND_RESP_ATS:
          case ACTION_SLEEP:
            // Terminal actions: the process is done for now.
            finished = true;
            break;

          case ACTION_DELAY:
            // Model internal latency by adding it to the running total.
            delay += action.delay;
            break;

          case ACTION_TERMINATE:
            panic("ACTION_TERMINATE in atomic mode\n");

          default:
            panic("Unknown action\n");
        }
    } while (!finished);

    // Report the total accumulated latency to the caller.
    action.delay = delay;

    return action;
}
278
/**
 * Advance a translation process by one step in timing mode and carry
 * out the single action it returns. Unlike the atomic variant this does
 * not loop: a process that issues a request is resumed later by the
 * corresponding response/retry callback, and a process that produced
 * its final response (or terminated) is deleted here.
 */
SMMUAction
SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action = proc->run(pkt);

    switch (action.type) {
      case ACTION_SEND_REQ:
        // Send an MMU initiated request on the table walk port if it is
        // enabled. Otherwise, fall through and handle same as the final
        // ACTION_SEND_REQ_FINAL request.
        if (tableWalkPortEnable) {
            // Record which process to resume when the response arrives.
            action.pkt->pushSenderState(proc);

            DPRINTF(SMMUv3, "[t] master HWTW req addr=%#x size=%#x\n",
                    action.pkt->getAddr(), action.pkt->getSize());

            // Preserve ordering: only try to send if nothing is already
            // queued waiting for a retry on this port.
            if (packetsTableWalkToRetry.empty()
                    && masterTableWalkPort.sendTimingReq(action.pkt)) {
                scheduleSlaveRetries();
            } else {
                DPRINTF(SMMUv3, "[t] master HWTW req needs retry,"
                        " qlen=%d\n", packetsTableWalkToRetry.size());
                packetsTableWalkToRetry.push(action);
            }

            break;
        }
        M5_FALLTHROUGH;
      case ACTION_SEND_REQ_FINAL:
        action.pkt->pushSenderState(proc);

        DPRINTF(SMMUv3, "[t] master req addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        // Same ordering rule as above, but for the main master port.
        if (packetsToRetry.empty() && masterPort.sendTimingReq(action.pkt)) {
            scheduleSlaveRetries();
        } else {
            DPRINTF(SMMUv3, "[t] master req needs retry, qlen=%d\n",
                    packetsToRetry.size());
            packetsToRetry.push(action);
        }

        break;

      case ACTION_SEND_RESP:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n",
                action.pkt->getAddr(),
                action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedTimingResp(action.pkt);

        // The translation is complete; release the process.
        delete proc;
        break;

      case ACTION_SEND_RESP_ATS:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedAtsTimingResp(action.pkt);

        // The ATS translation is complete; release the process.
        delete proc;
        break;

      case ACTION_DELAY:
      case ACTION_SLEEP:
        // Nothing to do here; the process will be resumed elsewhere.
        break;

      case ACTION_TERMINATE:
        delete proc;
        break;

      default:
        panic("Unknown action\n");
    }

    return action;
}
364
365 void
366 SMMUv3::processCommands()
367 {
368 DPRINTF(SMMUv3, "processCommands()\n");
369
370 if (system.isAtomicMode()) {
371 SMMUAction a = runProcessAtomic(&commandExecutor, NULL);
372 (void) a;
373 } else if (system.isTimingMode()) {
374 if (!commandExecutor.isBusy())
375 runProcessTiming(&commandExecutor, NULL);
376 } else {
377 panic("Not in timing or atomic mode!");
378 }
379 }
380
/**
 * Execute a single SMMU command from the command queue. Most commands
 * invalidate some subset of the cached configuration/translation state:
 * the shared config cache, the per-interface micro/main TLBs, the
 * shared TLB, the IPA (stage-2) cache and the walk cache.
 * Unimplemented commands only warn.
 */
void
SMMUv3::processCommand(const SMMUCommand &cmd)
{
    switch (cmd.dw0.type) {
      case CMD_PRF_CONFIG:
        // Prefetch hints are not modelled.
        DPRINTF(SMMUv3, "CMD_PREFETCH_CONFIG - ignored\n");
        break;

      case CMD_PRF_ADDR:
        // Prefetch hints are not modelled.
        DPRINTF(SMMUv3, "CMD_PREFETCH_ADDR - ignored\n");
        break;

      case CMD_CFGI_STE: {
        // Invalidate all cached state derived from one stream table entry.
        DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid);
        configCache.invalidateSID(cmd.dw0.sid);

        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
            slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
        }
        break;
      }

      case CMD_CFGI_STE_RANGE: {
        const auto range = cmd.dw1.range;
        if (range == 31) {
            // CMD_CFGI_ALL is an alias of CMD_CFGI_STE_RANGE with
            // range = 31
            DPRINTF(SMMUv3, "CMD_CFGI_ALL\n");
            configCache.invalidateAll();

            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateAll();
                slave_interface->mainTLB->invalidateAll();
            }
        } else {
            DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n");
            // The command covers 2^(range+1) stream IDs, aligned to that
            // power of two: mask off the low bits to find the first SID.
            const auto start_sid = cmd.dw0.sid & ~((1 << (range + 1)) - 1);
            const auto end_sid = start_sid + (1 << (range + 1)) - 1;
            for (auto sid = start_sid; sid <= end_sid; sid++) {
                configCache.invalidateSID(sid);

                for (auto slave_interface : slaveInterfaces) {
                    slave_interface->microTLB->invalidateSID(sid);
                    slave_interface->mainTLB->invalidateSID(sid);
                }
            }
        }
        break;
      }

      case CMD_CFGI_CD: {
        // Invalidate state for one context descriptor (SID + SSID pair).
        DPRINTF(SMMUv3, "CMD_CFGI_CD sid=%#x ssid=%#x\n",
                cmd.dw0.sid, cmd.dw0.ssid);
        configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid);

        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateSSID(
                cmd.dw0.sid, cmd.dw0.ssid);
            slave_interface->mainTLB->invalidateSSID(
                cmd.dw0.sid, cmd.dw0.ssid);
        }
        break;
      }

      case CMD_CFGI_CD_ALL: {
        // Invalidate all context descriptors of a stream.
        DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid);
        configCache.invalidateSID(cmd.dw0.sid);

        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
            slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
        }
        break;
      }

      case CMD_TLBI_NH_ALL: {
        // Invalidate all non-secure, non-hyp entries for a VMID.
        DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
            slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
        }
        tlb.invalidateVMID(cmd.dw0.vmid);
        walkCache.invalidateVMID(cmd.dw0.vmid);
        break;
      }

      case CMD_TLBI_NH_ASID: {
        // Invalidate entries matching an (ASID, VMID) pair.
        DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n",
                cmd.dw0.asid, cmd.dw0.vmid);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateASID(
                cmd.dw0.asid, cmd.dw0.vmid);
            slave_interface->mainTLB->invalidateASID(
                cmd.dw0.asid, cmd.dw0.vmid);
        }
        tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
        walkCache.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
        break;
      }

      case CMD_TLBI_NH_VAA: {
        // Invalidate a VA for all ASIDs of a VMID.
        const Addr addr = cmd.addr();
        DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n",
                addr, cmd.dw0.vmid);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVAA(
                addr, cmd.dw0.vmid);
            slave_interface->mainTLB->invalidateVAA(
                addr, cmd.dw0.vmid);
        }
        tlb.invalidateVAA(addr, cmd.dw0.vmid);
        // The leaf bit restricts the walk-cache flush to last-level entries.
        const bool leaf_only = cmd.dw1.leaf ? true : false;
        walkCache.invalidateVAA(addr, cmd.dw0.vmid, leaf_only);
        break;
      }

      case CMD_TLBI_NH_VA: {
        // Invalidate a VA for a specific (ASID, VMID) pair.
        const Addr addr = cmd.addr();
        DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n",
                addr, cmd.dw0.asid, cmd.dw0.vmid);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVA(
                addr, cmd.dw0.asid, cmd.dw0.vmid);
            slave_interface->mainTLB->invalidateVA(
                addr, cmd.dw0.asid, cmd.dw0.vmid);
        }
        tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid);
        const bool leaf_only = cmd.dw1.leaf ? true : false;
        walkCache.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid,
                               leaf_only);
        break;
      }

      case CMD_TLBI_S2_IPA: {
        // Invalidate a stage-2-only IPA translation.
        const Addr addr = cmd.addr();
        DPRINTF(SMMUv3, "CMD_TLBI_S2_IPA ipa=%#08x vmid=%#x\n",
                addr, cmd.dw0.vmid);
        // This does not invalidate TLBs containing
        // combined Stage1 + Stage2 translations, as per the spec.
        ipaCache.invalidateIPA(addr, cmd.dw0.vmid);

        // Non-leaf invalidation is handled conservatively by flushing
        // every walk-cache entry belonging to the VMID.
        if (!cmd.dw1.leaf)
            walkCache.invalidateVMID(cmd.dw0.vmid);
        break;
      }

      case CMD_TLBI_S12_VMALL: {
        // Invalidate all stage-1 and stage-2 state of a VMID.
        DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
            slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
        }
        tlb.invalidateVMID(cmd.dw0.vmid);
        ipaCache.invalidateVMID(cmd.dw0.vmid);
        walkCache.invalidateVMID(cmd.dw0.vmid);
        break;
      }

      case CMD_TLBI_NSNH_ALL: {
        // Invalidate every cached translation in the device.
        DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n");
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateAll();
            slave_interface->mainTLB->invalidateAll();
        }
        tlb.invalidateAll();
        ipaCache.invalidateAll();
        walkCache.invalidateAll();
        break;
      }

      case CMD_RESUME:
        DPRINTF(SMMUv3, "CMD_RESUME\n");
        panic("resume unimplemented");
        break;

      default:
        warn("Unimplemented command %#x\n", cmd.dw0.type);
        break;
    }
}
562
563 const PageTableOps*
564 SMMUv3::getPageTableOps(uint8_t trans_granule)
565 {
566 static V8PageTableOps4k ptOps4k;
567 static V8PageTableOps16k ptOps16k;
568 static V8PageTableOps64k ptOps64k;
569
570 switch (trans_granule) {
571 case TRANS_GRANULE_4K: return &ptOps4k;
572 case TRANS_GRANULE_16K: return &ptOps16k;
573 case TRANS_GRANULE_64K: return &ptOps64k;
574 default:
575 panic("Unknown translation granule size %d", trans_granule);
576 }
577 }
578
/**
 * Handle a read of the memory-mapped SMMU register file. Only 32-bit
 * and 64-bit accesses are accepted; anything else panics. Reads of the
 * secure register block are not implemented and only warn.
 * Returns the access latency (always 0).
 */
Tick
SMMUv3::readControl(PacketPtr pkt)
{
    DPRINTF(SMMUv3, "readControl: addr=%08x size=%d\n",
            pkt->getAddr(), pkt->getSize());

    // Byte offset of the access within the register file.
    int offset = pkt->getAddr() - regsMap.start();
    assert(offset >= 0 && offset < SMMU_REG_SIZE);

    if (inSecureBlock(offset)) {
        warn("smmu: secure registers (0x%x) are not implemented\n",
             offset);
    }

    auto reg_ptr = regs.data + offset;

    // NOTE(review): the casts below assume the access is naturally
    // aligned within the register file -- confirm callers guarantee this.
    switch (pkt->getSize()) {
      case sizeof(uint32_t):
        pkt->setLE<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr));
        break;
      case sizeof(uint64_t):
        pkt->setLE<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr));
        break;
      default:
        panic("smmu: unallowed access size: %d bytes\n", pkt->getSize());
        break;
    }

    pkt->makeAtomicResponse();

    return 0;
}
611
/**
 * Handle a write to the memory-mapped SMMU register file, applying the
 * per-register side effects: CR0 writes are mirrored into CR0ACK,
 * queue-base writes reset the matching producer/consumer indices,
 * CMDQ registers are write-protected while the command queue is
 * enabled, and a CMDQ_PROD write kicks off command processing.
 * Writes to read-only, undefined or (unimplemented) secure registers
 * only warn. Returns the access latency (always 0).
 */
Tick
SMMUv3::writeControl(PacketPtr pkt)
{
    // Byte offset of the access within the register file.
    int offset = pkt->getAddr() - regsMap.start();
    assert(offset >= 0 && offset < SMMU_REG_SIZE);

    DPRINTF(SMMUv3, "writeControl: addr=%08x size=%d data=%16x\n",
            pkt->getAddr(), pkt->getSize(),
            pkt->getSize() == sizeof(uint64_t) ?
            pkt->getLE<uint64_t>() : pkt->getLE<uint32_t>());

    switch (offset) {
      case offsetof(SMMURegs, cr0):
        assert(pkt->getSize() == sizeof(uint32_t));
        // Immediately acknowledge the update by mirroring CR0 into CR0ACK.
        regs.cr0 = regs.cr0ack = pkt->getLE<uint32_t>();
        break;

      // Plain 32-bit registers with no write side effects.
      case offsetof(SMMURegs, cr1):
      case offsetof(SMMURegs, cr2):
      case offsetof(SMMURegs, strtab_base_cfg):
      case offsetof(SMMURegs, eventq_cons):
      case offsetof(SMMURegs, eventq_irq_cfg1):
      case offsetof(SMMURegs, priq_cons):
        assert(pkt->getSize() == sizeof(uint32_t));
        *reinterpret_cast<uint32_t *>(regs.data + offset) =
            pkt->getLE<uint32_t>();
        break;

      case offsetof(SMMURegs, cmdq_cons):
        assert(pkt->getSize() == sizeof(uint32_t));
        // CMDQ_CONS is owned by the SMMU while the queue is enabled.
        if (regs.cr0 & CR0_CMDQEN_MASK) {
            warn("CMDQ is enabled: ignoring write to CMDQ_CONS\n");
        } else {
            *reinterpret_cast<uint32_t *>(regs.data + offset) =
                pkt->getLE<uint32_t>();
        }
        break;

      case offsetof(SMMURegs, cmdq_prod):
        assert(pkt->getSize() == sizeof(uint32_t));
        *reinterpret_cast<uint32_t *>(regs.data + offset) =
            pkt->getLE<uint32_t>();
        // New commands are available: process them on the next cycle.
        schedule(processCommandsEvent, nextCycle());
        break;

      // Plain 64-bit registers with no write side effects.
      case offsetof(SMMURegs, strtab_base):
      case offsetof(SMMURegs, eventq_irq_cfg0):
        assert(pkt->getSize() == sizeof(uint64_t));
        *reinterpret_cast<uint64_t *>(regs.data + offset) =
            pkt->getLE<uint64_t>();
        break;

      case offsetof(SMMURegs, cmdq_base):
        assert(pkt->getSize() == sizeof(uint64_t));
        // CMDQ_BASE is also write-protected while the queue is enabled.
        if (regs.cr0 & CR0_CMDQEN_MASK) {
            warn("CMDQ is enabled: ignoring write to CMDQ_BASE\n");
        } else {
            *reinterpret_cast<uint64_t *>(regs.data + offset) =
                pkt->getLE<uint64_t>();
            // Re-basing the queue resets both indices.
            regs.cmdq_cons = 0;
            regs.cmdq_prod = 0;
        }
        break;

      case offsetof(SMMURegs, eventq_base):
        assert(pkt->getSize() == sizeof(uint64_t));
        *reinterpret_cast<uint64_t *>(regs.data + offset) =
            pkt->getLE<uint64_t>();
        regs.eventq_cons = 0;
        regs.eventq_prod = 0;
        break;

      case offsetof(SMMURegs, priq_base):
        assert(pkt->getSize() == sizeof(uint64_t));
        *reinterpret_cast<uint64_t *>(regs.data + offset) =
            pkt->getLE<uint64_t>();
        regs.priq_cons = 0;
        regs.priq_prod = 0;
        break;

      default:
        if (inSecureBlock(offset)) {
            warn("smmu: secure registers (0x%x) are not implemented\n",
                 offset);
        } else {
            warn("smmu: write to read-only/undefined register at 0x%x\n",
                 offset);
        }
    }

    pkt->makeAtomicResponse();

    return 0;
}
706
707 bool
708 SMMUv3::inSecureBlock(uint32_t offs) const
709 {
710 if (offs >= offsetof(SMMURegs, _secure_regs) && offs < SMMU_SECURE_SZ)
711 return true;
712 else
713 return false;
714 }
715
716 void
717 SMMUv3::init()
718 {
719 // make sure both sides are connected and have the same block size
720 if (!masterPort.isConnected())
721 fatal("Master port is not connected.\n");
722
723 // If the second master port is connected for the table walks, enable
724 // the mode to send table walks through this port instead
725 if (masterTableWalkPort.isConnected())
726 tableWalkPortEnable = true;
727
728 // notify the master side of our address ranges
729 for (auto ifc : slaveInterfaces) {
730 ifc->sendRange();
731 }
732
733 if (controlPort.isConnected())
734 controlPort.sendRangeChange();
735 }
736
/**
 * Register the simulator statistics: per-interface TLB stats, shared
 * cache stats, fetch counters for stream/context table entries, and
 * latency distributions for translations and page-table walks.
 */
void
SMMUv3::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    // Per-slave-interface micro and main TLB statistics.
    for (size_t i = 0; i < slaveInterfaces.size(); i++) {
        slaveInterfaces[i]->microTLB->regStats(
            csprintf("%s.utlb%d", name(), i));
        slaveInterfaces[i]->mainTLB->regStats(
            csprintf("%s.maintlb%d", name(), i));
    }

    // Shared structures.
    tlb.regStats(name() + ".tlb");
    configCache.regStats(name() + ".cfg");
    ipaCache.regStats(name() + ".ipa");
    walkCache.regStats(name() + ".walk");

    steL1Fetches
        .name(name() + ".steL1Fetches")
        .desc("STE L1 fetches")
        .flags(pdf);

    steFetches
        .name(name() + ".steFetches")
        .desc("STE fetches")
        .flags(pdf);

    cdL1Fetches
        .name(name() + ".cdL1Fetches")
        .desc("CD L1 fetches")
        .flags(pdf);

    cdFetches
        .name(name() + ".cdFetches")
        .desc("CD fetches")
        .flags(pdf);

    // Latency histograms: range [0, 2000000) ticks in 2000-tick buckets.
    translationTimeDist
        .init(0, 2000000, 2000)
        .name(name() + ".translationTimeDist")
        .desc("Time to translate address")
        .flags(pdf);

    ptwTimeDist
        .init(0, 2000000, 2000)
        .name(name() + ".ptwTimeDist")
        .desc("Time to walk page tables")
        .flags(pdf);
}
788
789 DrainState
790 SMMUv3::drain()
791 {
792 // Wait until the Command Executor is not busy
793 if (commandExecutor.isBusy()) {
794 return DrainState::Draining;
795 }
796 return DrainState::Drained;
797 }
798
void
SMMUv3::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing SMMUv3\n");

    // The architectural state checkpointed here is the raw register
    // file; dumping the byte array captures all of it.
    SERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
}
806
void
SMMUv3::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing SMMUv3\n");

    // Restore the raw register file saved by serialize().
    UNSERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
}
814
815 Port&
816 SMMUv3::getPort(const std::string &name, PortID id)
817 {
818 if (name == "master") {
819 return masterPort;
820 } else if (name == "master_walker") {
821 return masterTableWalkPort;
822 } else if (name == "control") {
823 return controlPort;
824 } else {
825 return ClockedObject::getPort(name, id);
826 }
827 }
828
SMMUv3*
SMMUv3Params::create()
{
    // gem5 factory hook: build the SimObject from its parameter struct.
    return new SMMUv3(this);
}