arch-arm: Do not use _flushMva for TLBI IPA
[gem5.git] / src / arch / arm / utility.hh
/*
 * Copyright (c) 2010, 2012-2013, 2016-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * Copyright (c) 2007-2008 The Florida State University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ARCH_ARM_UTILITY_HH__
#define __ARCH_ARM_UTILITY_HH__

#include "arch/arm/isa_traits.hh"
#include "arch/arm/miscregs.hh"
#include "arch/arm/types.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "cpu/static_inst.hh"
#include "cpu/thread_context.hh"

class ArmSystem;

namespace ArmISA {

inline PCState
buildRetPC(const PCState &curPC, const PCState &callPC)
{
    PCState retPC = callPC;
    retPC.uEnd();
    return retPC;
}

inline bool
testPredicate(uint32_t nz, uint32_t c, uint32_t v, ConditionCode code)
{
    bool n = (nz & 0x2);
    bool z = (nz & 0x1);

    switch (code)
    {
      case COND_EQ: return z;
      case COND_NE: return !z;
      case COND_CS: return c;
      case COND_CC: return !c;
      case COND_MI: return n;
      case COND_PL: return !n;
      case COND_VS: return v;
      case COND_VC: return !v;
      case COND_HI: return (c && !z);
      case COND_LS: return !(c && !z);
      case COND_GE: return !(n ^ v);
      case COND_LT: return (n ^ v);
      case COND_GT: return !(n ^ v || z);
      case COND_LE: return (n ^ v || z);
      case COND_AL: return true;
      case COND_UC: return true;
      default:
        panic("Unhandled predicate condition: %d\n", code);
    }
}
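
// For illustration (comment added for clarity, not part of the original
// header): `nz` packs the N flag in bit 1 and the Z flag in bit 0, so for
// example:
//
//   testPredicate(0x2, 0, 0, COND_MI);  // N=1, Z=0 -> true
//   testPredicate(0x2, 0, 0, COND_GE);  // N=1, V=0 -> N != V -> false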

void copyRegs(ThreadContext *src, ThreadContext *dest);

static inline void
copyMiscRegs(ThreadContext *src, ThreadContext *dest)
{
    panic("Copy Misc. Regs Not Implemented Yet\n");
}

/** Send an event (SEV) to a specific PE if there isn't
 * already a pending event */
void sendEvent(ThreadContext *tc);

static inline bool
inUserMode(CPSR cpsr)
{
    return cpsr.mode == MODE_USER || cpsr.mode == MODE_EL0T;
}

static inline bool
inUserMode(ThreadContext *tc)
{
    return inUserMode(tc->readMiscRegNoEffect(MISCREG_CPSR));
}

static inline bool
inPrivilegedMode(CPSR cpsr)
{
    return !inUserMode(cpsr);
}

static inline bool
inPrivilegedMode(ThreadContext *tc)
{
    return !inUserMode(tc);
}

bool isSecure(ThreadContext *tc);

bool inAArch64(ThreadContext *tc);

static inline OperatingMode
currOpMode(const ThreadContext *tc)
{
    CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
    return (OperatingMode) (uint8_t) cpsr.mode;
}

static inline ExceptionLevel
currEL(const ThreadContext *tc)
{
    return opModeToEL(currOpMode(tc));
}

inline ExceptionLevel
currEL(CPSR cpsr)
{
    return opModeToEL((OperatingMode) (uint8_t)cpsr.mode);
}

bool HavePACExt(ThreadContext *tc);
bool HaveVirtHostExt(ThreadContext *tc);
bool HaveLVA(ThreadContext *tc);
bool HaveSecureEL2Ext(ThreadContext *tc);
bool IsSecureEL2Enabled(ThreadContext *tc);
bool EL2Enabled(ThreadContext *tc);

/**
 * This function checks whether the EL provided as an argument is using the
 * AArch32 ISA. This information might be unavailable at the current EL
 * status: it therefore returns a pair of booleans: the first is true if the
 * information is available (known), and the second is true if the EL is
 * using AArch32, false for AArch64.
 *
 * @param tc The thread context.
 * @param el The target exception level.
 * @retval known is FALSE for EL0 if the current Exception level
 *         is not EL0 and EL1 is using AArch64, since it cannot
 *         determine the state of EL0; TRUE otherwise.
 * @retval aarch32 is TRUE if the specified Exception level is using AArch32;
 *         FALSE otherwise.
 */
std::pair<bool, bool>
ELUsingAArch32K(ThreadContext *tc, ExceptionLevel el);
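
// Usage sketch (illustrative, not part of the original header): the second
// element of the pair is only meaningful when the first one reports the
// state as known, e.g.:
//
//   std::pair<bool, bool> known_aarch32 = ELUsingAArch32K(tc, EL1);
//   if (known_aarch32.first && known_aarch32.second) {
//       // EL1 is known to be executing in AArch32
//   }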

std::pair<bool, bool>
ELStateUsingAArch32K(ThreadContext *tc, ExceptionLevel el, bool secure);

bool
ELStateUsingAArch32(ThreadContext *tc, ExceptionLevel el, bool secure);

bool ELIs32(ThreadContext *tc, ExceptionLevel el);

bool ELIs64(ThreadContext *tc, ExceptionLevel el);

/**
 * Returns true if the current exception level `el` is executing a Host OS or
 * an application of a Host OS (Armv8.1 Virtualization Host Extensions).
 */
bool ELIsInHost(ThreadContext *tc, ExceptionLevel el);

ExceptionLevel debugTargetFrom(ThreadContext *tc, bool secure);

bool isBigEndian64(const ThreadContext *tc);


/**
 * badMode32 checks whether the execution mode provided as an argument is
 * valid and implemented for AArch32.
 *
 * @param tc ThreadContext
 * @param mode OperatingMode to check
 * @return false if mode is valid and implemented, true otherwise
 */
bool badMode32(ThreadContext *tc, OperatingMode mode);

/**
 * badMode checks whether the execution mode provided as an argument is
 * valid and implemented.
 *
 * @param tc ThreadContext
 * @param mode OperatingMode to check
 * @return false if mode is valid and implemented, true otherwise
 */
bool badMode(ThreadContext *tc, OperatingMode mode);

static inline uint8_t
itState(CPSR psr)
{
    ITSTATE it = 0;
    it.top6 = psr.it2;
    it.bottom2 = psr.it1;

    return (uint8_t)it;
}
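
// For illustration (comment added for clarity): this reassembles the
// architectural IT[7:0] value from the two CPSR fields, with CPSR.IT2
// providing IT[7:2] and CPSR.IT1 providing IT[1:0]. E.g. psr.it2 == 0b000110
// and psr.it1 == 0b10 yield (0b000110 << 2) | 0b10 == 0x1a.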

ExceptionLevel s1TranslationRegime(ThreadContext* tc, ExceptionLevel el);

/**
 * Removes the tag from tagged addresses if that mode is enabled.
 * @param addr The address to be purified.
 * @param tc The thread context.
 * @param el The controlled exception level.
 * @return The purified address.
 */
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el,
                      TCR tcr, bool isInstr);
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el,
                      bool isInstr);
int computeAddrTop(ThreadContext *tc, bool selbit, bool isInstr,
                   TCR tcr, ExceptionLevel el);
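
// Illustrative sketch (assuming tagged addressing, i.e. TCR.TBI, is enabled
// for the regime in question; not part of the original header): a data
// access through a tagged pointer would have its top byte stripped by
// purifyTaggedAddr before translation, e.g.:
//
//   Addr va = 0x5a00000012345678ULL;
//   Addr clean = purifyTaggedAddr(va, tc, EL1, false /* isInstr */);
//   // clean is expected to be 0x0000000012345678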

static inline bool
inSecureState(SCR scr, CPSR cpsr)
{
    switch ((OperatingMode) (uint8_t) cpsr.mode) {
      case MODE_MON:
      case MODE_EL3T:
      case MODE_EL3H:
        return true;
      case MODE_HYP:
      case MODE_EL2T:
      case MODE_EL2H:
        return false;
      default:
        return !scr.ns;
    }
}

bool isSecureBelowEL3(ThreadContext *tc);

bool longDescFormatInUse(ThreadContext *tc);

/** This helper function either returns the value of
 * MPIDR_EL1 (by calling getMPIDR), or it issues a read
 * of VMPIDR_EL2 (as happens in virtualized systems) */
RegVal readMPIDR(ArmSystem *arm_sys, ThreadContext *tc);

/** This helper function returns the value of MPIDR_EL1 */
RegVal getMPIDR(ArmSystem *arm_sys, ThreadContext *tc);

/** Retrieves MPIDR_EL1.{Aff2,Aff1,Aff0} affinity numbers */
RegVal getAffinity(ArmSystem *arm_sys, ThreadContext *tc);

static inline uint32_t
mcrMrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, uint32_t crn,
               uint32_t opc1, uint32_t opc2)
{
    return (isRead << 0) |
           (crm << 1) |
           (rt << 5) |
           (crn << 10) |
           (opc1 << 14) |
           (opc2 << 17);
}

static inline void
mcrMrcIssExtract(uint32_t iss, bool &isRead, uint32_t &crm, IntRegIndex &rt,
                 uint32_t &crn, uint32_t &opc1, uint32_t &opc2)
{
    isRead = (iss >> 0) & 0x1;
    crm = (iss >> 1) & 0xF;
    rt = (IntRegIndex) ((iss >> 5) & 0xF);
    crn = (iss >> 10) & 0xF;
    opc1 = (iss >> 14) & 0x7;
    opc2 = (iss >> 17) & 0x7;
}
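
// Worked example for the two helpers above (added for clarity): the packed
// layout is isRead[0], crm[4:1], rt[8:5], crn[13:10], opc1[16:14],
// opc2[19:17]. An MRC read of CNTFRQ (CRn=14, CRm=0, opc1=0, opc2=0) into
// Rt=1 would therefore encode as
//
//   uint32_t iss = mcrMrcIssBuild(true, 0, INTREG_R1, 14, 0, 0);
//   // iss == (1 << 0) | (1 << 5) | (14 << 10) == 0x3821
//
// and mcrMrcIssExtract() recovers the same fields from that value.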

static inline uint32_t
mcrrMrrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, IntRegIndex rt2,
                 uint32_t opc1)
{
    return (isRead << 0) |
           (crm << 1) |
           (rt << 5) |
           (rt2 << 10) |
           (opc1 << 16);
}

static inline uint32_t
msrMrs64IssBuild(bool isRead, uint32_t op0, uint32_t op1, uint32_t crn,
                 uint32_t crm, uint32_t op2, IntRegIndex rt)
{
    return isRead |
           (crm << 1) |
           (rt << 5) |
           (crn << 10) |
           (op1 << 14) |
           (op2 << 17) |
           (op0 << 20);
}

Fault
mcrMrc15Trap(const MiscRegIndex miscReg, ExtMachInst machInst,
             ThreadContext *tc, uint32_t imm);
bool
mcrMrc15TrapToHyp(const MiscRegIndex miscReg, ThreadContext *tc, uint32_t iss,
                  ExceptionClass *ec = nullptr);

bool
mcrMrc14TrapToHyp(const MiscRegIndex miscReg, HCR hcr, CPSR cpsr, SCR scr,
                  HDCR hdcr, HSTR hstr, HCPTR hcptr, uint32_t iss);

Fault
mcrrMrrc15Trap(const MiscRegIndex miscReg, ExtMachInst machInst,
               ThreadContext *tc, uint32_t imm);
bool
mcrrMrrc15TrapToHyp(const MiscRegIndex miscReg, ThreadContext *tc,
                    uint32_t iss, ExceptionClass *ec = nullptr);

Fault
AArch64AArch32SystemAccessTrap(const MiscRegIndex miscReg,
                               ExtMachInst machInst, ThreadContext *tc,
                               uint32_t imm, ExceptionClass ec);
bool
isAArch64AArch32SystemAccessTrapEL1(const MiscRegIndex miscReg,
                                    ThreadContext *tc);
bool
isAArch64AArch32SystemAccessTrapEL2(const MiscRegIndex miscReg,
                                    ThreadContext *tc);
bool
isGenericTimerHypTrap(const MiscRegIndex miscReg, ThreadContext *tc,
                      ExceptionClass *ec);
bool condGenericTimerPhysHypTrap(const MiscRegIndex miscReg,
                                 ThreadContext *tc);
bool
isGenericTimerCommonEL0HypTrap(const MiscRegIndex miscReg, ThreadContext *tc,
                               ExceptionClass *ec);
bool
isGenericTimerPhysHypTrap(const MiscRegIndex miscReg, ThreadContext *tc,
                          ExceptionClass *ec);
bool
condGenericTimerPhysHypTrap(const MiscRegIndex miscReg, ThreadContext *tc);
bool
isGenericTimerSystemAccessTrapEL1(const MiscRegIndex miscReg,
                                  ThreadContext *tc);
bool
condGenericTimerSystemAccessTrapEL1(const MiscRegIndex miscReg,
                                    ThreadContext *tc);
bool
isGenericTimerSystemAccessTrapEL2(const MiscRegIndex miscReg,
                                  ThreadContext *tc);
bool
isGenericTimerCommonEL0SystemAccessTrapEL2(const MiscRegIndex miscReg,
                                           ThreadContext *tc);
bool
isGenericTimerPhysEL0SystemAccessTrapEL2(const MiscRegIndex miscReg,
                                         ThreadContext *tc);
bool
isGenericTimerPhysEL1SystemAccessTrapEL2(const MiscRegIndex miscReg,
                                         ThreadContext *tc);
bool
isGenericTimerVirtSystemAccessTrapEL2(const MiscRegIndex miscReg,
                                      ThreadContext *tc);
bool
condGenericTimerCommonEL0SystemAccessTrapEL2(const MiscRegIndex miscReg,
                                             ThreadContext *tc);
bool
condGenericTimerCommonEL1SystemAccessTrapEL2(const MiscRegIndex miscReg,
                                             ThreadContext *tc);
bool
condGenericTimerPhysEL1SystemAccessTrapEL2(const MiscRegIndex miscReg,
                                           ThreadContext *tc);
bool
isGenericTimerSystemAccessTrapEL3(const MiscRegIndex miscReg,
                                  ThreadContext *tc);

bool SPAlignmentCheckEnabled(ThreadContext* tc);

uint64_t getArgument(ThreadContext *tc, int &number, uint16_t size, bool fp);

inline void
advancePC(PCState &pc, const StaticInstPtr &inst)
{
    inst->advancePC(pc);
}

Addr truncPage(Addr addr);
Addr roundPage(Addr addr);

inline uint64_t
getExecutingAsid(ThreadContext *tc)
{
    return tc->readMiscReg(MISCREG_CONTEXTIDR);
}

// Decodes the register index to access based on the fields used in an MSR
// or MRS instruction
bool
decodeMrsMsrBankedReg(uint8_t sysM, bool r, bool &isIntReg, int &regIdx,
                      CPSR cpsr, SCR scr, NSACR nsacr,
                      bool checkSecurity = true);

// This wrapper function is used to turn the register index into a source
// parameter for the instruction. See Operands.isa
static inline int
decodeMrsMsrBankedIntRegIndex(uint8_t sysM, bool r)
{
    int regIdx;
    bool isIntReg;
    bool validReg;

    validReg = decodeMrsMsrBankedReg(sysM, r, isIntReg, regIdx, 0, 0, 0, false);
    return (validReg && isIntReg) ? regIdx : INTREG_DUMMY;
}

/**
 * Returns the number of PA bits corresponding to the specified encoding.
 */
int decodePhysAddrRange64(uint8_t pa_enc);

/**
 * Returns the encoding corresponding to the specified number of PA bits.
 */
uint8_t encodePhysAddrRange64(int pa_size);

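// Note (assumed mapping, mirroring the Armv8 ID_AA64MMFR0_EL1.PARange
// encoding; not part of the original header): 0x0 -> 32 bits, 0x1 -> 36,
// 0x2 -> 40, 0x3 -> 42, 0x4 -> 44, 0x5 -> 48 and 0x6 -> 52 bits, so
// decodePhysAddrRange64(0x5) is expected to return 48 and
// encodePhysAddrRange64(48) to return 0x5.
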
inline ByteOrder byteOrder(const ThreadContext *tc)
{
    return isBigEndian64(tc) ? ByteOrder::big : ByteOrder::little;
}

bool isUnpriviledgeAccess(ThreadContext * tc);

}
#endif