/*
 * Copyright (c) 2013, 2018-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stan Czerniawski
 */

#include "dev/arm/smmu_v3_transl.hh"

#include "debug/SMMUv3.hh"
#include "debug/SMMUv3Hazard.hh"
#include "dev/arm/amba.hh"
#include "dev/arm/smmu_v3.hh"
#include "sim/system.hh"

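// Build a translation request from an incoming packet. The stream ID
// (and optional substream ID) identify the translation context; ATS
// translation requests are flagged so the response can be formed
// differently on completion.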
SMMUTranslRequest
SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
{
    SMMUTranslRequest req;
    req.addr = pkt->getAddr();
    req.size = pkt->getSize();
    req.sid = pkt->req->streamId();
    req.ssid = pkt->req->hasSubstreamId() ?
        pkt->req->substreamId() : 0;
    req.isWrite = pkt->isWrite();
    req.isPrefetch = false;
    req.isAtsRequest = ats;
    req.pkt = pkt;

    return req;
}

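// Build a synthetic, zero-size request used to warm the TLBs. It carries
// no packet, so it never produces a bus response of its own.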
SMMUTranslRequest
SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
{
    SMMUTranslRequest req;
    req.addr = addr;
    req.size = 0;
    req.sid = sid;
    req.ssid = ssid;
    req.isWrite = false;
    req.isPrefetch = true;
    req.isAtsRequest = false;
    req.pkt = NULL;

    return req;
}

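// A translation process holds one of the slave interface's translation
// slots for its entire lifetime: the slot is claimed in the constructor
// and returned in the destructor, which also signals the interface as
// drained once every slot is free again.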
SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
    SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
  :
    SMMUProcess(name, _smmu),
    ifc(_ifc)
{
    // Decrease number of pending translation slots on the slave interface
    assert(ifc.xlateSlotsRemaining > 0);
    ifc.xlateSlotsRemaining--;
    reinit();
}

SMMUTranslationProcess::~SMMUTranslationProcess()
{
    // Increase number of pending translation slots on the slave interface
    ifc.xlateSlotsRemaining++;
    // If no more SMMU translations are pending (all slots available),
    // signal SMMU Slave Interface as drained
    if (ifc.xlateSlotsRemaining == ifc.params()->xlate_slots) {
        ifc.signalDrainDone();
    }
}

void
SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
{
    request = req;

    reinit();
}

void
SMMUTranslationProcess::resumeTransaction()
{
    assert(smmu.system.isTimingMode());

    assert(!"Stalls are broken");

    Tick resumeTick = curTick();

    (void) resumeTick;
    DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
        resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);

    beginTransaction(request);

    smmu.runProcessTiming(this, request.pkt);
}

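// Coroutine body implementing one translation, start to finish:
//  1. charge the slave port for the data beats of the transaction,
//  2. bypass translation entirely if the SMMU is disabled (CR0[0]),
//  3. prefetches: bail out if the 4k page is already being looked up or
//     is already cached, otherwise translate and fill the interface TLB,
//  4. demand requests: try the micro TLB, then the interface TLB (waiting
//     out any in-flight walk of the same 4k page), then a full SMMU
//     translation, honouring AMBA ordering hazards before completing.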
void
SMMUTranslationProcess::main(Yield &yield)
{
    // Hack:
    // The coroutine starts running as soon as it's created.
    // But we need to wait for request data esp. in atomic mode.
    SMMUAction a;
    a.type = ACTION_INITIAL_NOP;
    a.pkt = NULL;
    yield(a);

    const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;

    if ((request.addr + request.size) > next4k)
        panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
              request.addr, request.size);


    unsigned numSlaveBeats = request.isWrite ?
        (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;

    doSemaphoreDown(yield, ifc.slavePortSem);
    doDelay(yield, Cycles(numSlaveBeats));
    doSemaphoreUp(ifc.slavePortSem);


    recvTick = curTick();


    if (!(smmu.regs.cr0 & 0x1)) {
        // SMMU disabled
        doDelay(yield, Cycles(1));
        completeTransaction(yield, bypass(request.addr));
        return;
    }

    TranslResult tr;
    bool wasPrefetched = false;

    if (request.isPrefetch) {
        // Abort prefetch if:
        // - there's already a transaction looking up the same 4k page, OR
        // - requested address is already in the TLB.
        if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
            completePrefetch(yield); // this never returns

        hazard4kRegister();

        tr = smmuTranslation(yield);

        if (tr.fault == FAULT_NONE)
            ifcTLBUpdate(yield, tr);

        hazard4kRelease();

        completePrefetch(yield);
    } else {
        hazardIdRegister();

        if (!microTLBLookup(yield, tr)) {
            bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
            if (!hit) {
                while (!hit && hazard4kCheck()) {
                    hazard4kHold(yield);
                    hit = ifcTLBLookup(yield, tr, wasPrefetched);
                }
            }

            // Issue prefetch if:
            // - there was a TLB hit and the entry was prefetched, OR
            // - TLB miss was successfully serviced
            if (hit) {
                if (wasPrefetched)
                    issuePrefetch(next4k);
            } else {
                hazard4kRegister();

                tr = smmuTranslation(yield);

                if (tr.fault == FAULT_NONE) {
                    ifcTLBUpdate(yield, tr);

                    issuePrefetch(next4k);
                }

                hazard4kRelease();
            }

            if (tr.fault == FAULT_NONE)
                microTLBUpdate(yield, tr);
        }

        hazardIdHold(yield);
        hazardIdRelease();

        if (tr.fault != FAULT_NONE)
            panic("fault\n");

        completeTransaction(yield, tr);
    }
}

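// Identity translation used when the SMMU is disabled or the stream is
// configured for bypass.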
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::bypass(Addr addr) const
{
    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addr = addr;
    tr.addrMask = 0;
    tr.writable = 1;

    return tr;
}

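// Full translation through the shared SMMU: acquire a translation credit,
// model the pipelined interface-to-SMMU link, establish the configuration
// (config cache first, then the stream table / context descriptors) and
// probe the main TLB, falling back to a page table walk on a miss.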
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::smmuTranslation(Yield &yield)
{
    TranslResult tr;

    // Need SMMU credit to proceed
    doSemaphoreDown(yield, smmu.transSem);

    // Simulate pipelined IFC->SMMU link
    doSemaphoreDown(yield, smmu.ifcSmmuSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.ifcSmmuSem);
    doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay

    bool haveConfig = true;
    if (!configCacheLookup(yield, context)) {
        if (findConfig(yield, context, tr)) {
            configCacheUpdate(yield, context);
        } else {
            haveConfig = false;
        }
    }

    if (haveConfig && !smmuTLBLookup(yield, tr)) {
        // SMMU main TLB miss

        // Need PTW slot to proceed
        doSemaphoreDown(yield, smmu.ptwSem);

        // Page table walk
        Tick ptwStartTick = curTick();

        if (context.stage1Enable) {
            tr = translateStage1And2(yield, request.addr);
        } else if (context.stage2Enable) {
            tr = translateStage2(yield, request.addr, true);
        } else {
            tr = bypass(request.addr);
        }

        if (context.stage1Enable || context.stage2Enable)
            smmu.ptwTimeDist.sample(curTick() - ptwStartTick);

        // Free PTW slot
        doSemaphoreUp(smmu.ptwSem);

        if (tr.fault == FAULT_NONE)
            smmuTLBUpdate(yield, tr);
    }

    // Simulate pipelined SMMU->SLAVE INTERFACE link
    doSemaphoreDown(yield, smmu.smmuIfcSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.smmuIfcSem);
    doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay

    // return SMMU credit
    doSemaphoreUp(smmu.transSem);

    return tr;
}

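// The three lookup helpers below share a pattern: take the semaphore
// guarding the structure, pay the lookup latency, and on a hit convert
// the cached page mapping into a TranslResult by splicing the page-offset
// bits of the virtual address onto the cached physical page address.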
bool
SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!ifc.microTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.microTLBSem);
    doDelay(yield, ifc.microTLBLat);
    const SMMUTLB::Entry *e =
        ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.microTLBSem);

    if (!e) {
        DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
            request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
        "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
        request.addr, e->vaMask, request.sid, request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

bool
SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
                                     bool &wasPrefetched)
{
    if (!ifc.mainTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.mainTLBSem);
    doDelay(yield, ifc.mainTLBLat);
    const SMMUTLB::Entry *e =
        ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.mainTLBSem);

    if (!e) {
        DPRINTF(SMMUv3,
            "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
            request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
        "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
        "paddr=%#x\n", request.addr, e->vaMask, request.sid,
        request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;
    wasPrefetched = e->prefetched;

    return true;
}

bool
SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!smmu.tlbEnable)
        return false;

    doSemaphoreDown(yield, smmu.tlbSem);
    doDelay(yield, smmu.tlbLat);
    const ARMArchTLB::Entry *e =
        smmu.tlb.lookup(request.addr, context.asid, context.vmid);
    doSemaphoreUp(smmu.tlbSem);

    if (!e) {
        DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
            request.addr, context.asid, context.vmid);

        return false;
    }

    DPRINTF(SMMUv3,
        "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
        request.addr, e->vaMask, context.asid, context.vmid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

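// TLB fill helpers. Each builds an entry from a successful translation,
// masking the virtual and physical addresses down to the page boundary
// implied by tr.addrMask.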
void
SMMUTranslationProcess::microTLBUpdate(Yield &yield,
                                       const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.microTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = false;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    doSemaphoreDown(yield, ifc.microTLBSem);

    DPRINTF(SMMUv3,
        "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
        e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);

    doSemaphoreUp(ifc.microTLBSem);
}

void
SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
                                     const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.mainTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = request.isPrefetch;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
    if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
        alloc = request.isPrefetch ?
            SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;

    doSemaphoreDown(yield, ifc.mainTLBSem);

    DPRINTF(SMMUv3,
        "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
        "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.mainTLB->store(e, alloc);

    doSemaphoreUp(ifc.mainTLBSem);
}

void
SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
                                      const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!smmu.tlbEnable)
        return;

    ARMArchTLB::Entry e;
    e.valid = true;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.asid = context.asid;
    e.vmid = context.vmid;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;

    doSemaphoreDown(yield, smmu.tlbSem);

    DPRINTF(SMMUv3,
        "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
        e.va, e.vaMask, e.pa, e.asid, e.vmid);

    smmu.tlb.store(e);

    doSemaphoreUp(smmu.tlbSem);
}

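// The config cache short-circuits the stream table and context descriptor
// fetches by caching the fully decoded translation context per
// (sid, ssid) pair.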
bool
SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return false;

    doSemaphoreDown(yield, smmu.configSem);
    doDelay(yield, smmu.configLat);
    const ConfigCache::Entry *e =
        smmu.configCache.lookup(request.sid, request.ssid);
    doSemaphoreUp(smmu.configSem);

    if (!e) {
        DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
            request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
        request.sid, request.ssid, e->ttb0, e->asid);

    tc.stage1Enable = e->stage1_en;
    tc.stage2Enable = e->stage2_en;

    tc.ttb0 = e->ttb0;
    tc.ttb1 = e->ttb1;
    tc.asid = e->asid;
    tc.httb = e->httb;
    tc.vmid = e->vmid;

    tc.stage1TranslGranule = e->stage1_tg;
    tc.stage2TranslGranule = e->stage2_tg;

    tc.t0sz = e->t0sz;
    tc.s2t0sz = e->s2t0sz;

    return true;
}

void
SMMUTranslationProcess::configCacheUpdate(Yield &yield,
                                          const TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return;

    ConfigCache::Entry e;
    e.valid = true;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.stage1_en = tc.stage1Enable;
    e.stage2_en = tc.stage2Enable;
    e.ttb0 = tc.ttb0;
    e.ttb1 = tc.ttb1;
    e.asid = tc.asid;
    e.httb = tc.httb;
    e.vmid = tc.vmid;
    e.stage1_tg = tc.stage1TranslGranule;
    e.stage2_tg = tc.stage2TranslGranule;
    e.t0sz = tc.t0sz;
    e.s2t0sz = tc.s2t0sz;

    doSemaphoreDown(yield, smmu.configSem);

    DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);

    smmu.configCache.store(e);

    doSemaphoreUp(smmu.configSem);
}

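// Slow path for establishing the translation context: fetch the stream
// table entry to determine which stages are enabled, then (if stage 1 is
// in use) fetch the context descriptor for the stage 1 tables. Stage 2
// state is filled in first because the context descriptor itself may live
// in IPA space.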
bool
SMMUTranslationProcess::findConfig(Yield &yield,
                                   TranslContext &tc,
                                   TranslResult &tr)
{
    tc.stage1Enable = false;
    tc.stage2Enable = false;

    StreamTableEntry ste;
    doReadSTE(yield, ste, request.sid);

    switch (ste.dw0.config) {
      case STE_CONFIG_BYPASS:
        break;

      case STE_CONFIG_STAGE1_ONLY:
        tc.stage1Enable = true;
        break;

      case STE_CONFIG_STAGE2_ONLY:
        tc.stage2Enable = true;
        break;

      case STE_CONFIG_STAGE1_AND_2:
        tc.stage1Enable = true;
        tc.stage2Enable = true;
        break;

      default:
        panic("Bad or unimplemented STE config %d\n",
            ste.dw0.config);
    }


    // Establish stage 2 context first since
    // Context Descriptors can be in IPA space.
    if (tc.stage2Enable) {
        tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
        tc.vmid = ste.dw2.s2vmid;
        tc.stage2TranslGranule = ste.dw2.s2tg;
        tc.s2t0sz = ste.dw2.s2t0sz;
    } else {
        tc.httb = 0xdeadbeef;
        tc.vmid = 0;
        tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
        tc.s2t0sz = 0;
    }


    // Now fetch stage 1 config.
    if (tc.stage1Enable) {
        ContextDescriptor cd;
        doReadCD(yield, cd, ste, request.sid, request.ssid);

        tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
        tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
        tc.asid = cd.dw0.asid;
        tc.stage1TranslGranule = cd.dw0.tg0;
        tc.t0sz = cd.dw0.t0sz;
    } else {
        tc.ttb0 = 0xcafebabe;
        tc.ttb1 = 0xcafed00d;
        tc.asid = 0;
        tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
        tc.t0sz = 0;
    }

    return true;
}

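// Probe the walk cache for an intermediate (or leaf) table pointer at the
// given stage and level. Only levels enabled in the per-stage level mask
// are cached; other levels bypass the cache entirely.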
void
SMMUTranslationProcess::walkCacheLookup(
    Yield &yield,
    const WalkCache::Entry *&walkEntry,
    Addr addr, uint16_t asid, uint16_t vmid,
    unsigned stage, unsigned level)
{
    const char *indent = stage == 2 ? "  " : "";
    (void) indent; // this is only used in DPRINTFs

    const PageTableOps *pt_ops =
        stage == 1 ?
            smmu.getPageTableOps(context.stage1TranslGranule) :
            smmu.getPageTableOps(context.stage2TranslGranule);

    unsigned walkCacheLevels =
        smmu.walkCacheEnable ?
            (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
            0;

    if ((1 << level) & walkCacheLevels) {
        doSemaphoreDown(yield, smmu.walkSem);
        doDelay(yield, smmu.walkLat);

        walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
                                          asid, vmid, stage, level);

        if (walkEntry) {
            DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
                "base=%#x (S%d, L%d)\n",
                indent, addr, asid, vmid, walkEntry->pa, stage, level);
        } else {
            DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
                "(S%d, L%d)\n",
                indent, addr, asid, vmid, stage, level);
        }

        doSemaphoreUp(smmu.walkSem);
    }
}

void
SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
                                        Addr vaMask, Addr pa,
                                        unsigned stage, unsigned level,
                                        bool leaf, uint8_t permissions)
{
    unsigned walkCacheLevels =
        stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;

    if (smmu.walkCacheEnable && ((1 << level) & walkCacheLevels)) {
        WalkCache::Entry e;
        e.valid = true;
        e.va = va;
        e.vaMask = vaMask;
        e.asid = stage == 1 ? context.asid : 0;
        e.vmid = context.vmid;
        e.stage = stage;
        e.level = level;
        e.leaf = leaf;
        e.pa = pa;
        e.permissions = permissions;

        doSemaphoreDown(yield, smmu.walkSem);

        DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
            "tpa=%#x leaf=%s (S%d, L%d)\n",
            e.stage == 2 ? "  " : "",
            e.va, e.vaMask, e.asid, e.vmid,
            e.pa, e.leaf, e.stage, e.level);

        smmu.walkCache.store(e);

        doSemaphoreUp(smmu.walkSem);
    }
}

/*
 * Please note:
 * This does not deal with the case where stage 1 page size
 * is larger than stage 2 page size.
 */
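// Combined stage 1 walk: when stage 2 is enabled, every table pointer
// produced by a stage 1 PTE is an IPA, so it is pushed through
// translateStage2() before the next level is fetched, and the final leaf
// address is translated and merged via combineTranslations().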
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
                                       const PageTableOps *pt_ops,
                                       unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
            level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 1, level);

        DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
            level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, false))
        {
            DPRINTF(SMMUv3, "S1 page not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (leaf)
            break;

        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, walkPtr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            walkPtr = s2tr.addr;
        }

        walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                        1, level, leaf, 0);
    }

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, false);

    if (context.stage2Enable) {
        TranslResult s2tr = translateStage2(yield, tr.addr, true);
        if (s2tr.fault != FAULT_NONE)
            return s2tr;

        tr = combineTranslations(tr, s2tr);
    }

    walkCacheUpdate(yield, addr, tr.addrMask, tr.addr,
                    1, level, true, tr.writable);

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
                                   const PageTableOps *pt_ops,
                                   unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "  Fetching S2 L%d PTE from pa=%#08x\n",
            level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 2, level);

        DPRINTF(SMMUv3, "  Got S2 L%d PTE=%#x from pa=%#08x\n",
            level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "  S2 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, true))
        {
            DPRINTF(SMMUv3, "  S2 PTE not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (final_tr || smmu.walkCacheNonfinalEnable)
            walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                            2, level, leaf,
                            leaf ? pt_ops->isWritable(pte, level, true) : 0);
        if (leaf)
            break;
    }

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, true);

    return tr;
}

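// Stage 1 (+ nested stage 2) translation. The walk cache is scanned from
// the last level upwards so the deepest cached table pointer wins and the
// walk resumes as close to the leaf as possible.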
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage1TranslGranule);

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level;

    // Level here is actually (level+1) so we can count down
    // to 0 using unsigned int.
    for (level = pt_ops->lastLevel() + 1;
        level > pt_ops->firstLevel(context.t0sz);
        level--)
    {
        walkCacheLookup(yield, walk_ep, addr,
                        context.asid, context.vmid, 1, level-1);

        if (walk_ep)
            break;
    }

    // Correct level (see above).
    level -= 1;

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault = FAULT_NONE;
            tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
        }
    } else {
        Addr table_addr = context.ttb0;
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, table_addr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            table_addr = s2tr.addr;
        }

        tr = walkStage1And2(yield, addr, pt_ops,
                            pt_ops->firstLevel(context.t0sz),
                            table_addr);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage2TranslGranule);

    const IPACache::Entry *ipa_ep = NULL;
    if (smmu.ipaCacheEnable) {
        doSemaphoreDown(yield, smmu.ipaSem);
        doDelay(yield, smmu.ipaLat);
        ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
        doSemaphoreUp(smmu.ipaSem);
    }

    if (ipa_ep) {
        TranslResult tr;
        tr.fault = FAULT_NONE;
        tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
        tr.addrMask = ipa_ep->ipaMask;
        tr.writable = ipa_ep->permissions;

        DPRINTF(SMMUv3, "  IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
            addr, context.vmid, tr.addr);

        return tr;
    } else if (smmu.ipaCacheEnable) {
        DPRINTF(SMMUv3, "  IPACache miss ipa=%#x vmid=%#x\n",
            addr, context.vmid);
    }

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level = pt_ops->firstLevel(context.s2t0sz);

    if (final_tr || smmu.walkCacheNonfinalEnable) {
        // Level here is actually (level+1) so we can count down
        // to 0 using unsigned int.
        for (level = pt_ops->lastLevel() + 1;
            level > pt_ops->firstLevel(context.s2t0sz);
            level--)
        {
            walkCacheLookup(yield, walk_ep, addr,
                            0, context.vmid, 2, level-1);

            if (walk_ep)
                break;
        }

        // Correct level (see above).
        level -= 1;
    }

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault = FAULT_NONE;
            tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage2(yield, addr, final_tr, pt_ops,
                            level + 1, walk_ep->pa);
        }
    } else {
        tr = walkStage2(yield, addr, final_tr, pt_ops,
                        pt_ops->firstLevel(context.s2t0sz),
                        context.httb);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "  Translated %saddr %#x to paddr %#x\n",
            context.stage1Enable ? "ip" : "v", addr, tr.addr);

    if (smmu.ipaCacheEnable) {
        IPACache::Entry e;
        e.valid = true;
        e.ipaMask = tr.addrMask;
        e.ipa = addr & e.ipaMask;
        e.pa = tr.addr & tr.addrMask;
        e.permissions = tr.writable;
        e.vmid = context.vmid;

        doSemaphoreDown(yield, smmu.ipaSem);
        smmu.ipaCache.store(e);
        doSemaphoreUp(smmu.ipaSem);
    }

    return tr;
}

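// Merge a stage 1 and a stage 2 result: the output address comes from
// stage 2, the effective page size is the smaller of the two (the address
// masks are ORed), and a page is writable only if both stages allow
// writes.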
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
                                            const TranslResult &s2tr) const
{
    if (s2tr.fault != FAULT_NONE)
        return s2tr;

    assert(s1tr.fault == FAULT_NONE);

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addr = s2tr.addr;
    tr.addrMask = s1tr.addrMask | s2tr.addrMask;
    tr.writable = s1tr.writable & s2tr.writable;

    return tr;
}

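// 4k-page hazard tracking: at most one process should walk a given 4k
// page at a time. Processes register in ifc.duplicateReqs, later arrivals
// for the same page wait in hazard4kHold(), and hazard4kRelease()
// broadcasts a signal so the waiters re-check.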
bool
SMMUTranslationProcess::hazard4kCheck()
{
    Addr addr4k = request.addr & ~0xfffULL;

    for (auto it = ifc.duplicateReqs.begin();
         it != ifc.duplicateReqs.end();
         ++it)
    {
        Addr other4k = (*it)->request.addr & ~0xfffULL;
        if (addr4k == other4k)
            return true;
    }

    return false;
}

void
SMMUTranslationProcess::hazard4kRegister()
{
    DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
        this, request.addr & ~0xfffULL);

    ifc.duplicateReqs.push_back(this);
}

void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = ifc.duplicateReqs.begin();
             it != ifc.duplicateReqs.end() && *it != this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                    "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                    this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}

void
SMMUTranslationProcess::hazard4kRelease()
{
    DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
        this, request.addr & ~0xfffULL);

    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
        if (*it == this)
            break;

    if (it == ifc.duplicateReqs.end())
        panic("hazard4kRelease: request not found");

    ifc.duplicateReqs.erase(it);

    doBroadcastSignal(ifc.duplicateReqRemoved);
}

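// AMBA ordering-ID hazards: transactions that share an order ID must
// complete in arrival order, tracked separately for reads and writes. A
// process queues itself here and hazardIdHold() blocks until it reaches
// the front of the queue for its ID.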
void
SMMUTranslationProcess::hazardIdRegister()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);

    assert(orderId < SMMU_MAX_TRANS_ID);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    depReqs.push_back(this);
}

void
SMMUTranslationProcess::hazardIdHold(Yield &yield)
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = depReqs.begin(); it != depReqs.end() && *it != this;
             ++it)
        {
            DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
                this, orderId, *it);

            if (AMBA::orderId((*it)->request.pkt) == orderId) {
                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
                    this, orderId, *it);

                doWaitForSignal(yield, ifc.dependentReqRemoved);

                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
                    this, orderId);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}

void
SMMUTranslationProcess::hazardIdRelease()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = depReqs.begin(); it != depReqs.end(); ++it) {
        if (*it == this)
            break;
    }

    if (it == depReqs.end())
        panic("hazardIdRelease: request not found");

    depReqs.erase(it);

    doBroadcastSignal(ifc.dependentReqRemoved);
}

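// Speculatively translate the next 4k page in timing mode, provided
// prefetching is enabled and a translation slot is free to host the new
// process.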
void
SMMUTranslationProcess::issuePrefetch(Addr addr)
{
    if (!smmu.system.isTimingMode())
        return;

    if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
        return;

    std::string proc_name = csprintf("%sprf", name());
    SMMUTranslationProcess *proc =
        new SMMUTranslationProcess(proc_name, smmu, ifc);

    proc->beginTransaction(
        SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
    proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
}

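// Finish a successful translation: charge the master port for the data
// beats, return any write-buffer slots, then send the translated request
// downstream and (for non-ATS requests) turn the returned packet into the
// response sent back to the master.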
void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    unsigned numMasterBeats = request.isWrite ?
        (request.size + (smmu.masterPortWidth-1))
            / smmu.masterPortWidth :
        1;

    doSemaphoreDown(yield, smmu.masterPortSem);
    doDelay(yield, Cycles(numMasterBeats));
    doSemaphoreUp(smmu.masterPortSem);


    smmu.translationTimeDist.sample(curTick() - recvTick);
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    smmu.scheduleSlaveRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    yield(a);

    if (!request.isAtsRequest) {
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}

void
SMMUTranslationProcess::completePrefetch(Yield &yield)
{
    SMMUAction a;
    a.type = ACTION_TERMINATE;
    a.pkt = NULL;
    a.ifc = &ifc;
    a.delay = 0;
    yield(a);
}

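// Append an event record to the in-memory event queue and raise the
// associated MSI. Note this model panics rather than handling queue
// overflow or a disabled MSI.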
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);

    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
        "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
        event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
        ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    doWrite(yield, event_addr, &ev, sizeof(ev));

    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}

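// Fetch the stream table entry for a stream ID, handling both the linear
// and the two-level stream table formats; in the two-level format the SID
// is split into a level 1 index and a level 2 index at the configured
// split point.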
void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK) >> ST_CFG_SPLIT_SHIFT;

        if (split != 7 && split != 8 && split != 16)
            panic("Invalid stream table split %d", split);

        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);

        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                sid, 1<<span);

        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_LINEAR) {
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, "    STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, "    STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, "    STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, "    STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, "    STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, "    STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, "    STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}

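// Fetch the context descriptor selected by the substream ID, handling
// single-level and two-level CD tables; every table pointer is translated
// through stage 2 first when it is enabled.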
void
SMMUTranslationProcess::doReadCD(Yield &yield,
                                 ContextDescriptor &cd,
                                 const StreamTableEntry &ste,
                                 uint32_t sid, uint32_t ssid)
{
    Addr cd_addr;

    if (ste.dw0.s1cdmax == 0) {
        cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
    } else {
        unsigned max_ssid = 1 << ste.dw0.s1cdmax;
        if (ssid >= max_ssid)
            panic("SSID %#x out of range, max=%#x", ssid, max_ssid);

        if (ste.dw0.s1fmt == STAGE1_CFG_2L_4K ||
            ste.dw0.s1fmt == STAGE1_CFG_2L_64K)
        {
            unsigned split = ste.dw0.s1fmt == STAGE1_CFG_2L_4K ? 7 : 11;

            uint64_t l2_ptr;
            uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
                bits(ssid, 24, split) * sizeof(l2_ptr);

            if (context.stage2Enable)
                l2_addr = translateStage2(yield, l2_addr, false).addr;

            DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);

            doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);

            DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);

            cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);

            smmu.cdL1Fetches++;
        } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
            cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid * sizeof(cd);
        }
    }

    if (context.stage2Enable)
        cd_addr = translateStage2(yield, cd_addr, false).addr;

    DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);

    doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);

    DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
    DPRINTF(SMMUv3, "    CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
    DPRINTF(SMMUv3, "    CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
    DPRINTF(SMMUv3, "    CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
    DPRINTF(SMMUv3, "    CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
    DPRINTF(SMMUv3, "    CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
    DPRINTF(SMMUv3, "    CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
    DPRINTF(SMMUv3, "    CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);


    if (!cd.dw0.valid)
        panic("CD @ %#x not valid\n", cd_addr);

    smmu.cdFetches++;
}

void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    doRead(yield, addr, ptr, size);
}

void
SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
                                  void *ptr, unsigned stage,
                                  unsigned level)
{
    size_t pte_size = sizeof(PageTableOps::pte_t);

    Addr mask = pte_size - 1;
    Addr base = addr & ~mask;

    doRead(yield, base, ptr, pte_size);
}