misc: Replaced master/slave terminology
[gem5.git] / src / dev / arm / smmu_v3_transl.cc
1 /*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
#include "dev/arm/smmu_v3_transl.hh"

#include <algorithm>

#include "debug/SMMUv3.hh"
#include "debug/SMMUv3Hazard.hh"
#include "dev/arm/amba.hh"
#include "dev/arm/smmu_v3.hh"
#include "sim/system.hh"
45
46 SMMUTranslRequest
47 SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
48 {
49 SMMUTranslRequest req;
50 req.addr = pkt->getAddr();
51 req.size = pkt->getSize();
52 req.sid = pkt->req->streamId();
53 req.ssid = pkt->req->hasSubstreamId() ?
54 pkt->req->substreamId() : 0;
55 req.isWrite = pkt->isWrite();
56 req.isPrefetch = false;
57 req.isAtsRequest = ats;
58 req.pkt = pkt;
59
60 return req;
61 }
62
63 SMMUTranslRequest
64 SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
65 {
66 SMMUTranslRequest req;
67 req.addr = addr;
68 req.size = 0;
69 req.sid = sid;
70 req.ssid = ssid;
71 req.isWrite = false;
72 req.isPrefetch = true;
73 req.isAtsRequest = false;
74 req.pkt = NULL;
75
76 return req;
77 }
78
SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
    SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
  :
    SMMUProcess(name, _smmu),
    ifc(_ifc)
{
    // Decrease number of pending translation slots on the device interface
    assert(ifc.xlateSlotsRemaining > 0);
    ifc.xlateSlotsRemaining--;

    // Track outstanding SMMU activity for drain bookkeeping; the
    // matching decrement happens in the destructor.
    ifc.pendingMemAccesses++;
    reinit();
}
92
SMMUTranslationProcess::~SMMUTranslationProcess()
{
    // Increase number of pending translation slots on the device interface
    // NOTE(review): the translation slot itself (xlateSlotsRemaining) is
    // returned in completeTransaction(), not here.
    assert(ifc.pendingMemAccesses > 0);
    ifc.pendingMemAccesses--;

    // If no more SMMU memory accesses are pending,
    // signal SMMU Device Interface as drained
    if (ifc.pendingMemAccesses == 0) {
        ifc.signalDrainDone();
    }
}
105
// Latch the request into this process and reset the coroutine so that
// main() starts a fresh translation for it.
void
SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
{
    request = req;

    reinit();
}
113
// Restart a transaction that stalled on a fault.
// NOTE: stall resumption is currently disabled -- the assert below
// fires unconditionally if this path is ever taken.
void
SMMUTranslationProcess::resumeTransaction()
{
    assert(smmu.system.isTimingMode());

    assert(!"Stalls are broken");

    Tick resumeTick = curTick();

    // Only consumed by the DPRINTF below; silence unused warnings when
    // debug output is compiled out.
    (void) resumeTick;
    DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
        resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);

    beginTransaction(request);

    smmu.runProcessTiming(this, request.pkt);
}
131
// Coroutine body of a translation: models device-port occupancy, then
// either bypasses (SMMU disabled), services a prefetch, or performs a
// demand translation through the micro TLB, interface TLB and full
// SMMU translation path, honouring same-4k-page and AMBA-ID hazards.
void
SMMUTranslationProcess::main(Yield &yield)
{
    // Hack:
    // The coroutine starts running as soon as it's created.
    // But we need to wait for request data esp. in atomic mode.
    SMMUAction a;
    a.type = ACTION_INITIAL_NOP;
    a.pkt = NULL;
    yield(a);

    // Start of the next 4KB page after the requested address.
    const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;

    if ((request.addr + request.size) > next4k)
        panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
              request.addr, request.size);


    // Device-port occupancy: writes take one beat per portWidth bytes,
    // reads occupy the port for a single beat.
    unsigned numResponderBeats = request.isWrite ?
        (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;

    doSemaphoreDown(yield, ifc.devicePortSem);
    doDelay(yield, Cycles(numResponderBeats));
    doSemaphoreUp(ifc.devicePortSem);


    recvTick = curTick();

    if (!(smmu.regs.cr0 & CR0_SMMUEN_MASK)) {
        // SMMU disabled
        doDelay(yield, Cycles(1));
        completeTransaction(yield, bypass(request.addr));
        return;
    }

    TranslResult tr;
    bool wasPrefetched = false;

    if (request.isPrefetch) {
        // Abort prefetch if:
        // - there's already a transaction looking up the same 4k page, OR
        // - requested address is already in the TLB.
        if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
            completePrefetch(yield); // this never returns

        hazard4kRegister();

        tr = smmuTranslation(yield);

        if (tr.fault == FAULT_NONE)
            ifcTLBUpdate(yield, tr);

        hazard4kRelease();

        completePrefetch(yield);
    } else {
        hazardIdRegister();

        if (!microTLBLookup(yield, tr)) {
            bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
            if (!hit) {
                // Wait for any in-flight transaction on the same 4k
                // page to finish, then retry the interface TLB lookup.
                while (!hit && hazard4kCheck()) {
                    hazard4kHold(yield);
                    hit = ifcTLBLookup(yield, tr, wasPrefetched);
                }
            }

            // Issue prefetch if:
            // - there was a TLB hit and the entry was prefetched, OR
            // - TLB miss was successfully serviced
            if (hit) {
                if (wasPrefetched)
                    issuePrefetch(next4k);
            } else {
                hazard4kRegister();

                tr = smmuTranslation(yield);

                if (tr.fault == FAULT_NONE) {
                    ifcTLBUpdate(yield, tr);

                    issuePrefetch(next4k);
                }

                hazard4kRelease();
            }

            if (tr.fault == FAULT_NONE)
                microTLBUpdate(yield, tr);
        }

        hazardIdHold(yield);
        hazardIdRelease();

        // Faulting demand translations are not modelled further.
        if (tr.fault != FAULT_NONE)
            panic("Translation Fault (addr=%#x, size=%#x, sid=%d, ssid=%d, "
                    "isWrite=%d, isPrefetch=%d, isAtsRequest=%d)\n",
                    request.addr, request.size, request.sid, request.ssid,
                    request.isWrite, request.isPrefetch, request.isAtsRequest);

        completeTransaction(yield, tr);
    }
}
235
236 SMMUTranslationProcess::TranslResult
237 SMMUTranslationProcess::bypass(Addr addr) const
238 {
239 TranslResult tr;
240 tr.fault = FAULT_NONE;
241 tr.addr = addr;
242 tr.addrMask = 0;
243 tr.writable = 1;
244
245 return tr;
246 }
247
// Full SMMU translation: acquire an SMMU credit, cross the pipelined
// interface->SMMU link, resolve the translation configuration (config
// cache, falling back to STE/CD fetch), consult the main TLB and, on a
// miss, perform the page table walk(s).  Returns the translation
// result; tr.fault != FAULT_NONE indicates failure.
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::smmuTranslation(Yield &yield)
{
    TranslResult tr;

    // Need SMMU credit to proceed
    doSemaphoreDown(yield, smmu.transSem);

    // Simulate pipelined IFC->SMMU link
    doSemaphoreDown(yield, smmu.ifcSmmuSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.ifcSmmuSem);
    doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay

    // Resolve the translation context, preferring the config cache.
    bool haveConfig = true;
    if (!configCacheLookup(yield, context)) {
        if (findConfig(yield, context, tr)) {
            configCacheUpdate(yield, context);
        } else {
            haveConfig = false;
        }
    }

    if (haveConfig && !smmuTLBLookup(yield, tr)) {
        // SMMU main TLB miss

        // Need PTW slot to proceed
        doSemaphoreDown(yield, smmu.ptwSem);

        // Page table walk
        Tick ptwStartTick = curTick();

        if (context.stage1Enable) {
            tr = translateStage1And2(yield, request.addr);
        } else if (context.stage2Enable) {
            tr = translateStage2(yield, request.addr, true);
        } else {
            tr = bypass(request.addr);
        }

        if (context.stage1Enable || context.stage2Enable)
            smmu.ptwTimeDist.sample(curTick() - ptwStartTick);

        // Free PTW slot
        doSemaphoreUp(smmu.ptwSem);

        if (tr.fault == FAULT_NONE)
            smmuTLBUpdate(yield, tr);
    }

    // Simulate pipelined SMMU->RESPONSE INTERFACE link
    doSemaphoreDown(yield, smmu.smmuIfcSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.smmuIfcSem);
    doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay

    // return SMMU credit
    doSemaphoreUp(smmu.transSem);

    return tr;
}
309
310 bool
311 SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
312 {
313 if (!ifc.microTLBEnable)
314 return false;
315
316 doSemaphoreDown(yield, ifc.microTLBSem);
317 doDelay(yield, ifc.microTLBLat);
318 const SMMUTLB::Entry *e =
319 ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
320 doSemaphoreUp(ifc.microTLBSem);
321
322 if (!e) {
323 DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
324 request.addr, request.sid, request.ssid);
325
326 return false;
327 }
328
329 DPRINTF(SMMUv3,
330 "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
331 request.addr, e->vaMask, request.sid, request.ssid, e->pa);
332
333 tr.fault = FAULT_NONE;
334 tr.addr = e->pa + (request.addr & ~e->vaMask);;
335 tr.addrMask = e->vaMask;
336 tr.writable = e->permissions;
337
338 return true;
339 }
340
341 bool
342 SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
343 bool &wasPrefetched)
344 {
345 if (!ifc.mainTLBEnable)
346 return false;
347
348 doSemaphoreDown(yield, ifc.mainTLBSem);
349 doDelay(yield, ifc.mainTLBLat);
350 const SMMUTLB::Entry *e =
351 ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
352 doSemaphoreUp(ifc.mainTLBSem);
353
354 if (!e) {
355 DPRINTF(SMMUv3,
356 "RESPONSE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
357 request.addr, request.sid, request.ssid);
358
359 return false;
360 }
361
362 DPRINTF(SMMUv3,
363 "RESPONSE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
364 "paddr=%#x\n", request.addr, e->vaMask, request.sid,
365 request.ssid, e->pa);
366
367 tr.fault = FAULT_NONE;
368 tr.addr = e->pa + (request.addr & ~e->vaMask);;
369 tr.addrMask = e->vaMask;
370 tr.writable = e->permissions;
371 wasPrefetched = e->prefetched;
372
373 return true;
374 }
375
376 bool
377 SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
378 {
379 if (!smmu.tlbEnable)
380 return false;
381
382 doSemaphoreDown(yield, smmu.tlbSem);
383 doDelay(yield, smmu.tlbLat);
384 const ARMArchTLB::Entry *e =
385 smmu.tlb.lookup(request.addr, context.asid, context.vmid);
386 doSemaphoreUp(smmu.tlbSem);
387
388 if (!e) {
389 DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
390 request.addr, context.asid, context.vmid);
391
392 return false;
393 }
394
395 DPRINTF(SMMUv3,
396 "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
397 request.addr, e->vaMask, context.asid, context.vmid, e->pa);
398
399 tr.fault = FAULT_NONE;
400 tr.addr = e->pa + (request.addr & ~e->vaMask);;
401 tr.addrMask = e->vaMask;
402 tr.writable = e->permissions;
403
404 return true;
405 }
406
407 void
408 SMMUTranslationProcess::microTLBUpdate(Yield &yield,
409 const TranslResult &tr)
410 {
411 assert(tr.fault == FAULT_NONE);
412
413 if (!ifc.microTLBEnable)
414 return;
415
416 SMMUTLB::Entry e;
417 e.valid = true;
418 e.prefetched = false;
419 e.sid = request.sid;
420 e.ssid = request.ssid;
421 e.vaMask = tr.addrMask;
422 e.va = request.addr & e.vaMask;
423 e.pa = tr.addr & e.vaMask;
424 e.permissions = tr.writable;
425 e.asid = context.asid;
426 e.vmid = context.vmid;
427
428 doSemaphoreDown(yield, ifc.microTLBSem);
429
430 DPRINTF(SMMUv3,
431 "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
432 e.va, e.vaMask, e.pa, e.sid, e.ssid);
433
434 ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);
435
436 doSemaphoreUp(ifc.microTLBSem);
437 }
438
439 void
440 SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
441 const TranslResult &tr)
442 {
443 assert(tr.fault == FAULT_NONE);
444
445 if (!ifc.mainTLBEnable)
446 return;
447
448 SMMUTLB::Entry e;
449 e.valid = true;
450 e.prefetched = request.isPrefetch;
451 e.sid = request.sid;
452 e.ssid = request.ssid;
453 e.vaMask = tr.addrMask;
454 e.va = request.addr & e.vaMask;
455 e.pa = tr.addr & e.vaMask;
456 e.permissions = tr.writable;
457 e.asid = context.asid;
458 e.vmid = context.vmid;
459
460 SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
461 if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
462 alloc = request.isPrefetch ?
463 SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;
464
465 doSemaphoreDown(yield, ifc.mainTLBSem);
466
467 DPRINTF(SMMUv3,
468 "RESPONSE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
469 "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
470
471 ifc.mainTLB->store(e, alloc);
472
473 doSemaphoreUp(ifc.mainTLBSem);
474 }
475
476 void
477 SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
478 const TranslResult &tr)
479 {
480 assert(tr.fault == FAULT_NONE);
481
482 if (!smmu.tlbEnable)
483 return;
484
485 ARMArchTLB::Entry e;
486 e.valid = true;
487 e.vaMask = tr.addrMask;
488 e.va = request.addr & e.vaMask;
489 e.asid = context.asid;
490 e.vmid = context.vmid;
491 e.pa = tr.addr & e.vaMask;
492 e.permissions = tr.writable;
493
494 doSemaphoreDown(yield, smmu.tlbSem);
495
496 DPRINTF(SMMUv3,
497 "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
498 e.va, e.vaMask, e.pa, e.asid, e.vmid);
499
500 smmu.tlb.store(e);
501
502 doSemaphoreUp(smmu.tlbSem);
503 }
504
505 bool
506 SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
507 {
508 if (!smmu.configCacheEnable)
509 return false;
510
511 doSemaphoreDown(yield, smmu.configSem);
512 doDelay(yield, smmu.configLat);
513 const ConfigCache::Entry *e =
514 smmu.configCache.lookup(request.sid, request.ssid);
515 doSemaphoreUp(smmu.configSem);
516
517 if (!e) {
518 DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
519 request.sid, request.ssid);
520
521 return false;
522 }
523
524 DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
525 request.sid, request.ssid, e->ttb0, e->asid);
526
527 tc.stage1Enable = e->stage1_en;
528 tc.stage2Enable = e->stage2_en;
529
530 tc.ttb0 = e->ttb0;
531 tc.ttb1 = e->ttb1;
532 tc.asid = e->asid;
533 tc.httb = e->httb;
534 tc.vmid = e->vmid;
535
536 tc.stage1TranslGranule = e->stage1_tg;
537 tc.stage2TranslGranule = e->stage2_tg;
538
539 tc.t0sz = e->t0sz;
540 tc.s2t0sz = e->s2t0sz;
541
542 return true;
543 }
544
545 void
546 SMMUTranslationProcess::configCacheUpdate(Yield &yield,
547 const TranslContext &tc)
548 {
549 if (!smmu.configCacheEnable)
550 return;
551
552 ConfigCache::Entry e;
553 e.valid = true;
554 e.sid = request.sid;
555 e.ssid = request.ssid;
556 e.stage1_en = tc.stage1Enable;
557 e.stage2_en = tc.stage2Enable;
558 e.ttb0 = tc.ttb0;
559 e.ttb1 = tc.ttb1;
560 e.asid = tc.asid;
561 e.httb = tc.httb;
562 e.vmid = tc.vmid;
563 e.stage1_tg = tc.stage1TranslGranule;
564 e.stage2_tg = tc.stage2TranslGranule;
565 e.t0sz = tc.t0sz;
566 e.s2t0sz = tc.s2t0sz;
567
568 doSemaphoreDown(yield, smmu.configSem);
569
570 DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);
571
572 smmu.configCache.store(e);
573
574 doSemaphoreUp(smmu.configSem);
575 }
576
577 bool
578 SMMUTranslationProcess::findConfig(Yield &yield,
579 TranslContext &tc,
580 TranslResult &tr)
581 {
582 tc.stage1Enable = false;
583 tc.stage2Enable = false;
584
585 StreamTableEntry ste;
586 doReadSTE(yield, ste, request.sid);
587
588 switch (ste.dw0.config) {
589 case STE_CONFIG_BYPASS:
590 break;
591
592 case STE_CONFIG_STAGE1_ONLY:
593 tc.stage1Enable = true;
594 break;
595
596 case STE_CONFIG_STAGE2_ONLY:
597 tc.stage2Enable = true;
598 break;
599
600 case STE_CONFIG_STAGE1_AND_2:
601 tc.stage1Enable = true;
602 tc.stage2Enable = true;
603 break;
604
605 default:
606 panic("Bad or unimplemented STE config %d\n",
607 ste.dw0.config);
608 }
609
610
611 // Establish stage 2 context first since
612 // Context Descriptors can be in IPA space.
613 if (tc.stage2Enable) {
614 tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
615 tc.vmid = ste.dw2.s2vmid;
616 tc.stage2TranslGranule = ste.dw2.s2tg;
617 tc.s2t0sz = ste.dw2.s2t0sz;
618 } else {
619 tc.httb = 0xdeadbeef;
620 tc.vmid = 0;
621 tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
622 tc.s2t0sz = 0;
623 }
624
625
626 // Now fetch stage 1 config.
627 if (context.stage1Enable) {
628 ContextDescriptor cd;
629 doReadCD(yield, cd, ste, request.sid, request.ssid);
630
631 tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
632 tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
633 tc.asid = cd.dw0.asid;
634 tc.stage1TranslGranule = cd.dw0.tg0;
635 tc.t0sz = cd.dw0.t0sz;
636 } else {
637 tc.ttb0 = 0xcafebabe;
638 tc.ttb1 = 0xcafed00d;
639 tc.asid = 0;
640 tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
641 tc.t0sz = 0;
642 }
643
644 return true;
645 }
646
// Look up a (possibly partial) table-walk result in the walk cache for
// the given stage and level.  On a hit, 'walkEntry' points at the
// cached entry; on a miss (or when caching is disabled for this
// stage/level) it is left unchanged.
void
SMMUTranslationProcess::walkCacheLookup(
        Yield &yield,
        const WalkCache::Entry *&walkEntry,
        Addr addr, uint16_t asid, uint16_t vmid,
        unsigned stage, unsigned level)
{
    // Stage 2 debug output is indented for readability.
    const char *indent = stage==2 ? "  " : "";
    (void) indent; // this is only used in DPRINTFs

    const PageTableOps *pt_ops =
        stage == 1 ?
            smmu.getPageTableOps(context.stage1TranslGranule) :
            smmu.getPageTableOps(context.stage2TranslGranule);

    // Bitmask of levels cached for this stage (0 if the cache is off).
    unsigned walkCacheLevels =
        smmu.walkCacheEnable ?
            (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
            0;

    if ((1 << level) & walkCacheLevels) {
        doSemaphoreDown(yield, smmu.walkSem);
        doDelay(yield, smmu.walkLat);

        walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
                                          asid, vmid, stage, level);

        if (walkEntry) {
            DPRINTF(SMMUv3, "%sWalkCache hit  va=%#x asid=%#x vmid=%#x "
                            "base=%#x (S%d, L%d)\n",
                    indent, addr, asid, vmid, walkEntry->pa, stage, level);
        } else {
            DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
                            "(S%d, L%d)\n",
                    indent, addr, asid, vmid, stage, level);
        }

        doSemaphoreUp(smmu.walkSem);
    }
}
687
// Install one table-walk step into the walk cache, if caching is
// enabled for the given stage/level.  'leaf' marks final-level
// entries; 'permissions' is only meaningful for leaves (callers pass 0
// for intermediate levels).
void
SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
                                        Addr vaMask, Addr pa,
                                        unsigned stage, unsigned level,
                                        bool leaf, uint8_t permissions)
{
    unsigned walkCacheLevels =
        stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;

    if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
        WalkCache::Entry e;
        e.valid = true;
        e.va = va;
        e.vaMask = vaMask;
        // Stage 2 entries are not tagged with an ASID.
        e.asid = stage==1 ? context.asid : 0;
        e.vmid = context.vmid;
        e.stage = stage;
        e.level = level;
        e.leaf = leaf;
        e.pa = pa;
        e.permissions = permissions;

        doSemaphoreDown(yield, smmu.walkSem);

        DPRINTF(SMMUv3, "%sWalkCache upd  va=%#x mask=%#x asid=%#x vmid=%#x "
                        "tpa=%#x leaf=%s (S%d, L%d)\n",
                e.stage==2 ? "  " : "",
                e.va, e.vaMask, e.asid, e.vmid,
                e.pa, e.leaf, e.stage, e.level);

        smmu.walkCache.store(e);

        doSemaphoreUp(smmu.walkSem);
    }
}
723
724 /*
725 * Please note:
726 * This does not deal with the case where stage 1 page size
727 * is larger than stage 2 page size.
728 */
// Walk the stage 1 page table starting at 'walkPtr' / 'level',
// performing nested stage 2 translations of the table pointers when
// stage 2 is enabled.  Returns a fault result on an invalid PTE or a
// write-permission failure; otherwise the combined translation.
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
                                       const PageTableOps *pt_ops,
                                       unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    // One-cycle step charged against the shared cycle semaphore.
    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 1, level);

        DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf  = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, false))
        {
            DPRINTF(SMMUv3, "S1 page not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (leaf)
            break;

        // Table pointers are IPAs when stage 2 is on; translate them
        // to PAs before the next fetch.
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, walkPtr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            walkPtr = s2tr.addr;
        }

        // Cache this intermediate (non-leaf) walk step.
        walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                        1, level, leaf, 0);
    }

    // Assemble the stage 1 result from the leaf PTE.
    TranslResult tr;
    tr.fault    = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr     = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, false);

    // Final stage 2 translation of the stage 1 output address.
    if (context.stage2Enable) {
        TranslResult s2tr = translateStage2(yield, tr.addr, true);
        if (s2tr.fault != FAULT_NONE)
            return s2tr;

        tr = combineTranslations(tr, s2tr);
    }

    // Cache the leaf walk step.
    walkCacheUpdate(yield, addr, tr.addrMask, walkPtr,
                    1, level, true, tr.writable);

    return tr;
}
812
813 SMMUTranslationProcess::TranslResult
814 SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
815 const PageTableOps *pt_ops,
816 unsigned level, Addr walkPtr)
817 {
818 PageTableOps::pte_t pte;
819
820 doSemaphoreDown(yield, smmu.cycleSem);
821 doDelay(yield, Cycles(1));
822 doSemaphoreUp(smmu.cycleSem);
823
824 for (; level <= pt_ops->lastLevel(); level++) {
825 Addr pte_addr = walkPtr + pt_ops->index(addr, level);
826
827 DPRINTF(SMMUv3, " Fetching S2 L%d PTE from pa=%#08x\n",
828 level, pte_addr);
829
830 doReadPTE(yield, addr, pte_addr, &pte, 2, level);
831
832 DPRINTF(SMMUv3, " Got S2 L%d PTE=%#x from pa=%#08x\n",
833 level, pte, pte_addr);
834
835 doSemaphoreDown(yield, smmu.cycleSem);
836 doDelay(yield, Cycles(1));
837 doSemaphoreUp(smmu.cycleSem);
838
839 bool valid = pt_ops->isValid(pte, level);
840 bool leaf = pt_ops->isLeaf(pte, level);
841
842 if (!valid) {
843 DPRINTF(SMMUv3, " S2 PTE not valid - fault\n");
844
845 TranslResult tr;
846 tr.fault = FAULT_TRANSLATION;
847 return tr;
848 }
849
850 if (valid && leaf && request.isWrite &&
851 !pt_ops->isWritable(pte, level, true))
852 {
853 DPRINTF(SMMUv3, " S2 PTE not writable = fault\n");
854
855 TranslResult tr;
856 tr.fault = FAULT_PERMISSION;
857 return tr;
858 }
859
860 walkPtr = pt_ops->nextLevelPointer(pte, level);
861
862 if (final_tr || smmu.walkCacheNonfinalEnable)
863 walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
864 2, level, leaf,
865 leaf ? pt_ops->isWritable(pte, level, true) : 0);
866 if (leaf)
867 break;
868 }
869
870 TranslResult tr;
871 tr.fault = FAULT_NONE;
872 tr.addrMask = pt_ops->pageMask(pte, level);
873 tr.addr = walkPtr + (addr & ~tr.addrMask);
874 tr.writable = pt_ops->isWritable(pte, level, true);
875
876 return tr;
877 }
878
// Translate 'addr' through stage 1 (and nested stage 2 when enabled).
// First probes the walk cache from the deepest level upwards to skip
// as much of the walk as possible, then completes the walk from the
// best cached step (or from the translated TTB on a full miss).
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage1TranslGranule);

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level;

    // Level here is actually (level+1) so we can count down
    // to 0 using unsigned int.
    for (level = pt_ops->lastLevel() + 1;
        level > pt_ops->firstLevel(context.t0sz);
        level--)
    {
        walkCacheLookup(yield, walk_ep, addr,
                        context.asid, context.vmid, 1, level-1);

        if (walk_ep)
            break;
    }

    // Correct level (see above).
    level -= 1;

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            // Cached final mapping: no walk needed.
            tr.fault    = FAULT_NONE;
            tr.addr     = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            // Resume the walk one level below the cached step.
            tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
        }
    } else {
        // Walk cache miss: start from the (stage-2-translated) TTB.
        Addr table_addr = context.ttb0;
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, table_addr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            table_addr = s2tr.addr;
        }

        tr = walkStage1And2(yield, addr, pt_ops,
                            pt_ops->firstLevel(context.t0sz),
                            table_addr);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);

    return tr;
}
934
935 SMMUTranslationProcess::TranslResult
936 SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
937 {
938 const PageTableOps *pt_ops =
939 smmu.getPageTableOps(context.stage2TranslGranule);
940
941 const IPACache::Entry *ipa_ep = NULL;
942 if (smmu.ipaCacheEnable) {
943 doSemaphoreDown(yield, smmu.ipaSem);
944 doDelay(yield, smmu.ipaLat);
945 ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
946 doSemaphoreUp(smmu.ipaSem);
947 }
948
949 if (ipa_ep) {
950 TranslResult tr;
951 tr.fault = FAULT_NONE;
952 tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
953 tr.addrMask = ipa_ep->ipaMask;
954 tr.writable = ipa_ep->permissions;
955
956 DPRINTF(SMMUv3, " IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
957 addr, context.vmid, tr.addr);
958
959 return tr;
960 } else if (smmu.ipaCacheEnable) {
961 DPRINTF(SMMUv3, " IPACache miss ipa=%#x vmid=%#x\n",
962 addr, context.vmid);
963 }
964
965 const WalkCache::Entry *walk_ep = NULL;
966 unsigned level = pt_ops->firstLevel(context.s2t0sz);
967
968 if (final_tr || smmu.walkCacheNonfinalEnable) {
969 // Level here is actually (level+1) so we can count down
970 // to 0 using unsigned int.
971 for (level = pt_ops->lastLevel() + 1;
972 level > pt_ops->firstLevel(context.s2t0sz);
973 level--)
974 {
975 walkCacheLookup(yield, walk_ep, addr,
976 0, context.vmid, 2, level-1);
977
978 if (walk_ep)
979 break;
980 }
981
982 // Correct level (see above).
983 level -= 1;
984 }
985
986 TranslResult tr;
987 if (walk_ep) {
988 if (walk_ep->leaf) {
989 tr.fault = FAULT_NONE;
990 tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
991 tr.addrMask = walk_ep->vaMask;
992 tr.writable = walk_ep->permissions;
993 } else {
994 tr = walkStage2(yield, addr, final_tr, pt_ops,
995 level + 1, walk_ep->pa);
996 }
997 } else {
998 tr = walkStage2(yield, addr, final_tr, pt_ops,
999 pt_ops->firstLevel(context.s2t0sz),
1000 context.httb);
1001 }
1002
1003 if (tr.fault == FAULT_NONE)
1004 DPRINTF(SMMUv3, " Translated %saddr %#x to paddr %#x\n",
1005 context.stage1Enable ? "ip" : "v", addr, tr.addr);
1006
1007 if (smmu.ipaCacheEnable) {
1008 IPACache::Entry e;
1009 e.valid = true;
1010 e.ipaMask = tr.addrMask;
1011 e.ipa = addr & e.ipaMask;
1012 e.pa = tr.addr & tr.addrMask;
1013 e.permissions = tr.writable;
1014 e.vmid = context.vmid;
1015
1016 doSemaphoreDown(yield, smmu.ipaSem);
1017 smmu.ipaCache.store(e);
1018 doSemaphoreUp(smmu.ipaSem);
1019 }
1020
1021 return tr;
1022 }
1023
1024 SMMUTranslationProcess::TranslResult
1025 SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
1026 const TranslResult &s2tr) const
1027 {
1028 if (s2tr.fault != FAULT_NONE)
1029 return s2tr;
1030
1031 assert(s1tr.fault == FAULT_NONE);
1032
1033 TranslResult tr;
1034 tr.fault = FAULT_NONE;
1035 tr.addr = s2tr.addr;
1036 tr.addrMask = s1tr.addrMask | s2tr.addrMask;
1037 tr.writable = s1tr.writable & s2tr.writable;
1038
1039 return tr;
1040 }
1041
1042 bool
1043 SMMUTranslationProcess::hazard4kCheck()
1044 {
1045 Addr addr4k = request.addr & ~0xfffULL;
1046
1047 for (auto it = ifc.duplicateReqs.begin();
1048 it != ifc.duplicateReqs.end();
1049 ++it)
1050 {
1051 Addr other4k = (*it)->request.addr & ~0xfffULL;
1052 if (addr4k == other4k)
1053 return true;
1054 }
1055
1056 return false;
1057 }
1058
// Enqueue this transaction on the interface's in-flight list so that
// later transactions to the same 4k page can detect the hazard
// (see hazard4kCheck()/hazard4kHold()).
void
SMMUTranslationProcess::hazard4kRegister()
{
    DPRINTF(SMMUv3Hazard, "4kReg:  p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    ifc.duplicateReqs.push_back(this);
}
1067
// Block while any OLDER in-flight transaction (queued ahead of this
// process in ifc.duplicateReqs) targets the same 4k page.  Re-scans
// from the start each time a duplicate request is removed.
void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        // Only entries ahead of this process in the queue are hazards.
        for (auto it = ifc.duplicateReqs.begin();
             it!=ifc.duplicateReqs.end() && *it!=this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                        "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                        this, addr4k, *it, other4k);

                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                        this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}
1105
1106 void
1107 SMMUTranslationProcess::hazard4kRelease()
1108 {
1109 DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
1110 this, request.addr & ~0xfffULL);
1111
1112 std::list<SMMUTranslationProcess *>::iterator it;
1113
1114 for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1115 if (*it == this)
1116 break;
1117
1118 if (it == ifc.duplicateReqs.end())
1119 panic("hazard4kRelease: request not found");
1120
1121 ifc.duplicateReqs.erase(it);
1122
1123 doBroadcastSignal(ifc.duplicateReqRemoved);
1124 }
1125
1126 void
1127 SMMUTranslationProcess::hazardIdRegister()
1128 {
1129 auto orderId = AMBA::orderId(request.pkt);
1130
1131 DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);
1132
1133 assert(orderId < SMMU_MAX_TRANS_ID);
1134
1135 std::list<SMMUTranslationProcess *> &depReqs =
1136 request.isWrite ?
1137 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1138 depReqs.push_back(this);
1139 }
1140
1141 void
1142 SMMUTranslationProcess::hazardIdHold(Yield &yield)
1143 {
1144 auto orderId = AMBA::orderId(request.pkt);
1145
1146 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1147
1148 std::list<SMMUTranslationProcess *> &depReqs =
1149 request.isWrite ?
1150 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1151 std::list<SMMUTranslationProcess *>::iterator it;
1152
1153 bool found_hazard;
1154
1155 do {
1156 found_hazard = false;
1157
1158 for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1159 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1160 this, orderId, *it);
1161
1162 if (AMBA::orderId((*it)->request.pkt) == orderId) {
1163 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1164 this, orderId, *it);
1165
1166 doWaitForSignal(yield, ifc.dependentReqRemoved);
1167
1168 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1169 this, orderId);
1170
1171 // This is to avoid checking *it!=this after doWaitForSignal()
1172 // since it could have been deleted.
1173 found_hazard = true;
1174 break;
1175 }
1176 }
1177 } while (found_hazard);
1178 }
1179
1180 void
1181 SMMUTranslationProcess::hazardIdRelease()
1182 {
1183 auto orderId = AMBA::orderId(request.pkt);
1184
1185 DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);
1186
1187 std::list<SMMUTranslationProcess *> &depReqs =
1188 request.isWrite ?
1189 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1190 std::list<SMMUTranslationProcess *>::iterator it;
1191
1192 for (it = depReqs.begin(); it != depReqs.end(); ++it) {
1193 if (*it == this)
1194 break;
1195 }
1196
1197 if (it == depReqs.end())
1198 panic("hazardIdRelease: request not found");
1199
1200 depReqs.erase(it);
1201
1202 doBroadcastSignal(ifc.dependentReqRemoved);
1203 }
1204
1205 void
1206 SMMUTranslationProcess::issuePrefetch(Addr addr)
1207 {
1208 if (!smmu.system.isTimingMode())
1209 return;
1210
1211 if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
1212 return;
1213
1214 std::string proc_name = csprintf("%sprf", name());
1215 SMMUTranslationProcess *proc =
1216 new SMMUTranslationProcess(proc_name, smmu, ifc);
1217
1218 proc->beginTransaction(
1219 SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
1220 proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
1221 }
1222
// Finish a successfully translated transaction: model the occupancy of
// the SMMU's request port, release the slots this request held, then
// forward the translated packet downstream and (for non-ATS requests)
// route the response back to the device interface.  This is coroutine
// code: each yield() hands an action to the SMMU and suspends until the
// corresponding reply arrives.
void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    // Number of beats the payload occupies on the request port:
    // writes carry data (size rounded up to port-width beats), reads
    // occupy a single beat here.
    unsigned numRequestorBeats = request.isWrite ?
        (request.size + (smmu.requestPortWidth-1))
            / smmu.requestPortWidth :
        1;

    // Model port contention: hold the request-port semaphore for the
    // duration of the transfer.
    doSemaphoreDown(yield, smmu.requestPortSem);
    doDelay(yield, Cycles(numRequestorBeats));
    doSemaphoreUp(smmu.requestPortSem);


    smmu.translationTimeDist.sample(curTick() - recvTick);
    // Give back the translation slot, and for device writes also the
    // write-buffer slots this request consumed on the interface.
    ifc.xlateSlotsRemaining++;
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    // Freed slots may unblock devices that were told to retry.
    smmu.scheduleDeviceRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        // ATS translation requests are answered directly with the
        // translated address; no downstream memory access is made here.
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        // Normal transactions are forwarded downstream with the
        // translated (physical) address.
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    // Retarget the packet at the translated address.
    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    yield(a);

    if (!request.isAtsRequest) {
        // The downstream response comes back through the yield; restore
        // the original (untranslated) address before responding to the
        // device.
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}
1284
1285 void
1286 SMMUTranslationProcess::completePrefetch(Yield &yield)
1287 {
1288 ifc.xlateSlotsRemaining++;
1289
1290 SMMUAction a;
1291 a.type = ACTION_TERMINATE;
1292 a.pkt = NULL;
1293 a.ifc = &ifc;
1294 a.delay = 0;
1295 yield(a);
1296 }
1297
// Write an event record into the in-memory event queue and raise the
// event-queue MSI.  Panics (rather than stalling) if the queue is full
// or the MSI is not configured.
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    // Queue size is encoded as log2 in EVENTQ_BASE; sizeMask wraps the
    // producer/consumer indices.
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);

    // Queue is full when advancing the producer would make it equal
    // the consumer.
    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    // Address of the slot the producer index currently points at.
    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
            "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
            event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
            ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    doWrite(yield, event_addr, &ev, sizeof(ev));

    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    // Fire the event-queue MSI: write the configured payload (cfg1) to
    // the configured doorbell address (cfg0).
    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}
1327
// Fetch the Stream Table Entry for stream ID `sid`, supporting both the
// linear and the two-level stream table formats configured in
// STRTAB_BASE_CFG.  Panics on out-of-range SIDs, malformed descriptors
// and invalid STEs.
void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    // LOG2SIZE field bounds the valid stream ID range.
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        // Two-level walk: SID[upper:split] indexes the level-1 table,
        // SID[split-1:0] indexes the level-2 table it points to.
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK) >> ST_CFG_SPLIT_SHIFT;

        if (split!= 7 && split!=8 && split!=16)
            panic("Invalid stream table split %d", split);

        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);

        // Span encodes the number of valid level-2 entries; zero marks
        // an invalid level-1 descriptor.
        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                  sid, 1<<span);

        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_LINEAR) {
        // Linear format: the table is a flat array indexed by SID.
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, "    STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, "    STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, "    STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, "    STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, "    STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, "    STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, "    STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}
1394
// Fetch the Context Descriptor selected by (sid, ssid) under the given
// STE, handling single-CD, one-level and two-level CD table formats.
// When stage-2 translation is enabled, table pointers are IPAs and are
// translated before being dereferenced.  Panics on out-of-range SSIDs
// and invalid CDs.
void
SMMUTranslationProcess::doReadCD(Yield &yield,
                                 ContextDescriptor &cd,
                                 const StreamTableEntry &ste,
                                 uint32_t sid, uint32_t ssid)
{
    Addr cd_addr = 0;

    if (ste.dw0.s1cdmax == 0) {
        // S1CDMax == 0: a single CD, pointed at directly by the STE.
        cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
    } else {
        unsigned max_ssid = 1 << ste.dw0.s1cdmax;
        if (ssid >= max_ssid)
            panic("SSID %#x out of range, max=%#x", ssid, max_ssid);

        if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
            ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
        {
            // Two-level CD table: split point depends on the leaf table
            // granule (4K -> 7 bits, 64K -> 11 bits of SSID per leaf).
            unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;

            uint64_t l2_ptr;
            uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
                bits(ssid, 24, split) * sizeof(l2_ptr);

            // The level-1 pointer is an IPA when stage 2 is enabled.
            if (context.stage2Enable)
                l2_addr = translateStage2(yield, l2_addr, false).addr;

            DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);

            doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);

            DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);

            cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);

            smmu.cdL1Fetches++;
        } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
            // One-level CD table: flat array indexed by SSID.
            cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
        }
    }

    // The CD address itself is an IPA when stage 2 is enabled.
    if (context.stage2Enable)
        cd_addr = translateStage2(yield, cd_addr, false).addr;

    DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);

    doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);

    DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
    DPRINTF(SMMUv3, "    CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
    DPRINTF(SMMUv3, "    CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
    DPRINTF(SMMUv3, "    CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
    DPRINTF(SMMUv3, "    CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
    DPRINTF(SMMUv3, "    CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
    DPRINTF(SMMUv3, "    CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
    DPRINTF(SMMUv3, "    CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);


    if (!cd.dw0.valid)
        panic("CD @ %#x not valid\n", cd_addr);

    smmu.cdFetches++;
}
1458
// Read configuration data (STE/CD/level-1 descriptors) from memory.
// Currently a plain doRead(); sid/ssid are unused here — presumably
// kept so a configuration-cache lookup keyed on them can hook in
// (TODO confirm against the rest of the model).
void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    doRead(yield, addr, ptr, size);
}
1466
1467 void
1468 SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
1469 void *ptr, unsigned stage,
1470 unsigned level)
1471 {
1472 size_t pte_size = sizeof(PageTableOps::pte_t);
1473
1474 Addr mask = pte_size - 1;
1475 Addr base = addr & ~mask;
1476
1477 doRead(yield, base, ptr, pte_size);
1478 }