kvm,arm: Update the KVM ARM v8 CPU to use vector regs.
[gem5.git] / src / arch / arm / utility.hh
/*
 * Copyright (c) 2010, 2012-2013, 2016-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * Copyright (c) 2007-2008 The Florida State University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *          Stephen Hines
 */

#ifndef __ARCH_ARM_UTILITY_HH__
#define __ARCH_ARM_UTILITY_HH__

#include "arch/arm/isa_traits.hh"
#include "arch/arm/miscregs.hh"
#include "arch/arm/types.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "cpu/static_inst.hh"
#include "cpu/thread_context.hh"

class ArmSystem;

namespace ArmISA {

inline PCState
buildRetPC(const PCState &curPC, const PCState &callPC)
{
    PCState retPC = callPC;
    retPC.uEnd();
    return retPC;
}

inline bool
testPredicate(uint32_t nz, uint32_t c, uint32_t v, ConditionCode code)
{
    bool n = (nz & 0x2);
    bool z = (nz & 0x1);

    switch (code)
    {
      case COND_EQ: return z;
      case COND_NE: return !z;
      case COND_CS: return c;
      case COND_CC: return !c;
      case COND_MI: return n;
      case COND_PL: return !n;
      case COND_VS: return v;
      case COND_VC: return !v;
      case COND_HI: return (c && !z);
      case COND_LS: return !(c && !z);
      case COND_GE: return !(n ^ v);
      case COND_LT: return (n ^ v);
      case COND_GT: return !(n ^ v || z);
      case COND_LE: return (n ^ v || z);
      case COND_AL: return true;
      case COND_UC: return true;
      default:
        panic("Unhandled predicate condition: %d\n", code);
    }
}

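// Illustrative use of testPredicate() (an editor's sketch, not part of the
// original header): the two-bit nz argument packs N in bit 1 and Z in bit 0,
// so, for example:
//
//     testPredicate(0x2 /* N=1, Z=0 */, 0 /* C */, 0 /* V */, COND_MI); // true
//     testPredicate(0x1 /* N=0, Z=1 */, 0, 0, COND_NE);                 // false
//     testPredicate(0x0, 1, 0, COND_HI);                 // C && !Z -> true
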
/**
 * Function to ensure ISA semantics about 0 registers.
 * @param tc The thread context.
 */
template <class TC>
void zeroRegisters(TC *tc);

inline void startupCPU(ThreadContext *tc, int cpuId)
{
    tc->activate();
}

void copyRegs(ThreadContext *src, ThreadContext *dest);

static inline void
copyMiscRegs(ThreadContext *src, ThreadContext *dest)
{
    panic("Copy Misc. Regs Not Implemented Yet\n");
}

void initCPU(ThreadContext *tc, int cpuId);

/** Send an event (SEV) to a specific PE if there isn't
 * already a pending event */
void sendEvent(ThreadContext *tc);

static inline bool
inUserMode(CPSR cpsr)
{
    return cpsr.mode == MODE_USER || cpsr.mode == MODE_EL0T;
}

static inline bool
inUserMode(ThreadContext *tc)
{
    return inUserMode(tc->readMiscRegNoEffect(MISCREG_CPSR));
}

static inline bool
inPrivilegedMode(CPSR cpsr)
{
    return !inUserMode(cpsr);
}

static inline bool
inPrivilegedMode(ThreadContext *tc)
{
    return !inUserMode(tc);
}

bool inAArch64(ThreadContext *tc);

static inline OperatingMode
currOpMode(ThreadContext *tc)
{
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    return (OperatingMode) (uint8_t) cpsr.mode;
}

static inline ExceptionLevel
currEL(ThreadContext *tc)
{
    return opModeToEL(currOpMode(tc));
}

inline ExceptionLevel
currEL(CPSR cpsr)
{
    return opModeToEL((OperatingMode) (uint8_t) cpsr.mode);
}

/**
 * This function checks whether the selected EL provided as an argument
 * is using the AArch32 ISA. This information might not be available
 * at the current EL, so it returns a pair of booleans: the first is
 * true if the information is available (known), and the second is true
 * if the EL is using AArch32, false for AArch64.
 *
 * @param tc The thread context.
 * @param el The target exception level.
 * @retval known is FALSE for EL0 if the current Exception level
 *         is not EL0 and EL1 is using AArch64, since it cannot
 *         determine the state of EL0; TRUE otherwise.
 * @retval aarch32 is TRUE if the specified Exception level is using AArch32;
 *         FALSE otherwise.
 */
std::pair<bool, bool>
ELUsingAArch32K(ThreadContext *tc, ExceptionLevel el);

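// A minimal usage sketch (illustrative, based only on the signature above):
// callers typically unpack the pair and only trust the second value when the
// first says the state is known, e.g.:
//
//     bool known, aarch32;
//     std::tie(known, aarch32) = ELUsingAArch32K(tc, EL0);
//     if (known && aarch32) {
//         // EL0 is known to be executing in AArch32
//     }
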
bool ELIs32(ThreadContext *tc, ExceptionLevel el);

bool ELIs64(ThreadContext *tc, ExceptionLevel el);

/**
 * Returns true if the current exception level `el` is executing a Host OS or
 * an application of a Host OS (Armv8.1 Virtualization Host Extensions).
 */
bool ELIsInHost(ThreadContext *tc, ExceptionLevel el);

bool isBigEndian64(ThreadContext *tc);

/**
 * badMode32 checks whether the execution mode provided as an argument is
 * valid and implemented for AArch32.
 *
 * @param tc ThreadContext
 * @param mode OperatingMode to check
 * @return false if mode is valid and implemented, true otherwise
 */
bool badMode32(ThreadContext *tc, OperatingMode mode);

/**
 * badMode checks whether the execution mode provided as an argument is
 * valid and implemented.
 *
 * @param tc ThreadContext
 * @param mode OperatingMode to check
 * @return false if mode is valid and implemented, true otherwise
 */
bool badMode(ThreadContext *tc, OperatingMode mode);

static inline uint8_t
itState(CPSR psr)
{
    ITSTATE it = 0;
    it.top6 = psr.it2;
    it.bottom2 = psr.it1;

    return (uint8_t)it;
}

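// Note on itState() (editor's explanation, hedged): the CPSR stores the
// Thumb IT-block state in two separate fields, and the helper above simply
// reassembles them. Assuming top6 maps to ITSTATE[7:2] and bottom2 to
// ITSTATE[1:0], CPSR.it2 = 0b000111 and CPSR.it1 = 0b01 would yield
// itState() == 0b00011101 (0x1d).
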
/**
 * Removes the tag from tagged addresses if that mode is enabled.
 * @param addr The address to be purified.
 * @param tc The thread context.
 * @param el The controlled exception level.
 * @return The purified address.
 */
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el,
                      TTBCR tcr);
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el);

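// Hedged note (not from the original header): for AArch64, "tagged" here
// refers to the top-byte-ignore feature controlled by TCR_ELx.TBI0/TBI1;
// when enabled, purifyTaggedAddr() is expected to strip the tag held in
// address bits [63:56] before the address is used for translation. See the
// corresponding definition for the exact behaviour.
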
static inline bool
inSecureState(SCR scr, CPSR cpsr)
{
    switch ((OperatingMode) (uint8_t) cpsr.mode) {
      case MODE_MON:
      case MODE_EL3T:
      case MODE_EL3H:
        return true;
      case MODE_HYP:
      case MODE_EL2T:
      case MODE_EL2H:
        return false;
      default:
        return !scr.ns;
    }
}

bool inSecureState(ThreadContext *tc);

/**
 * Return TRUE if an Exception level below EL3 is in Secure state.
 * Differs from inSecureState in that it ignores the current EL
 * or Mode in considering security state.
 */
inline bool isSecureBelowEL3(ThreadContext *tc);

bool longDescFormatInUse(ThreadContext *tc);

/** This helper function either returns the value of
 * MPIDR_EL1 (by calling getMPIDR), or issues a read
 * of VMPIDR_EL2 (as happens in virtualized systems) */
RegVal readMPIDR(ArmSystem *arm_sys, ThreadContext *tc);

/** This helper function returns the value of MPIDR_EL1 */
RegVal getMPIDR(ArmSystem *arm_sys, ThreadContext *tc);

static inline uint32_t
mcrMrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, uint32_t crn,
               uint32_t opc1, uint32_t opc2)
{
    return (isRead << 0) |
           (crm << 1) |
           (rt << 5) |
           (crn << 10) |
           (opc1 << 14) |
           (opc2 << 17);
}

static inline void
mcrMrcIssExtract(uint32_t iss, bool &isRead, uint32_t &crm, IntRegIndex &rt,
                 uint32_t &crn, uint32_t &opc1, uint32_t &opc2)
{
    isRead = (iss >> 0) & 0x1;
    crm = (iss >> 1) & 0xF;
    rt = (IntRegIndex) ((iss >> 5) & 0xF);
    crn = (iss >> 10) & 0xF;
    opc1 = (iss >> 14) & 0x7;
    opc2 = (iss >> 17) & 0x7;
}

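// Round-trip sketch (illustrative only; INTREG_R1 is assumed to be the usual
// gem5 index for r1): mcrMrcIssExtract() is the inverse of mcrMrcIssBuild():
//
//     uint32_t iss = mcrMrcIssBuild(true, 5, INTREG_R1, 7, 0, 2);
//     bool is_read;
//     uint32_t crm, crn, opc1, opc2;
//     IntRegIndex rt;
//     mcrMrcIssExtract(iss, is_read, crm, rt, crn, opc1, opc2);
//     // is_read == true, crm == 5, rt == INTREG_R1, crn == 7,
//     // opc1 == 0, opc2 == 2
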
static inline uint32_t
mcrrMrrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, IntRegIndex rt2,
                 uint32_t opc1)
{
    return (isRead << 0) |
           (crm << 1) |
           (rt << 5) |
           (rt2 << 10) |
           (opc1 << 16);
}

static inline uint32_t
msrMrs64IssBuild(bool isRead, uint32_t op0, uint32_t op1, uint32_t crn,
                 uint32_t crm, uint32_t op2, IntRegIndex rt)
{
    return isRead |
           (crm << 1) |
           (rt << 5) |
           (crn << 10) |
           (op1 << 14) |
           (op2 << 17) |
           (op0 << 20);
}

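// Layout note (editor's observation, hedged): the shifts in
// msrMrs64IssBuild() above follow the ISS field layout that ESR_ELx uses for
// trapped AArch64 MSR/MRS/system-register accesses: direction (read) in
// bit 0, then CRm[4:1], Rt[9:5], CRn[13:10], Op1[16:14], Op2[19:17] and
// Op0[21:20].
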
bool
mcrMrc15TrapToHyp(const MiscRegIndex miscReg, ThreadContext *tc, uint32_t iss);

bool
mcrMrc14TrapToHyp(const MiscRegIndex miscReg, HCR hcr, CPSR cpsr, SCR scr,
                  HDCR hdcr, HSTR hstr, HCPTR hcptr, uint32_t iss);
bool
mcrrMrrc15TrapToHyp(const MiscRegIndex miscReg, CPSR cpsr, SCR scr, HSTR hstr,
                    HCR hcr, uint32_t iss);

bool SPAlignmentCheckEnabled(ThreadContext* tc);

uint64_t getArgument(ThreadContext *tc, int &number, uint16_t size, bool fp);

void skipFunction(ThreadContext *tc);

inline void
advancePC(PCState &pc, const StaticInstPtr &inst)
{
    inst->advancePC(pc);
}

Addr truncPage(Addr addr);
Addr roundPage(Addr addr);

inline uint64_t
getExecutingAsid(ThreadContext *tc)
{
    return tc->readMiscReg(MISCREG_CONTEXTIDR);
}

// Decodes the register index to access based on the fields used in an MSR
// or MRS instruction
bool
decodeMrsMsrBankedReg(uint8_t sysM, bool r, bool &isIntReg, int &regIdx,
                      CPSR cpsr, SCR scr, NSACR nsacr,
                      bool checkSecurity = true);

// This wrapper function is used to turn the register index into a source
// parameter for the instruction. See Operands.isa
static inline int
decodeMrsMsrBankedIntRegIndex(uint8_t sysM, bool r)
{
    int regIdx;
    bool isIntReg;
    bool validReg;

    validReg = decodeMrsMsrBankedReg(sysM, r, isIntReg, regIdx, 0, 0, 0, false);
    return (validReg && isIntReg) ? regIdx : INTREG_DUMMY;
}

/**
 * Returns the number of PA bits corresponding to the specified encoding.
 */
int decodePhysAddrRange64(uint8_t pa_enc);

/**
 * Returns the encoding corresponding to the specified number of PA bits.
 */
uint8_t encodePhysAddrRange64(int pa_size);

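// For reference (hedged; the authoritative values are in the implementation
// of decodePhysAddrRange64()): the Armv8 PARange encoding maps
// 0 -> 32 bits, 1 -> 36, 2 -> 40, 3 -> 42, 4 -> 44, 5 -> 48 and,
// with Armv8.2-LPA, 6 -> 52 bits.
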
inline ByteOrder byteOrder(ThreadContext *tc)
{
    return isBigEndian64(tc) ? BigEndianByteOrder : LittleEndianByteOrder;
}

} // namespace ArmISA

#endif // __ARCH_ARM_UTILITY_HH__