arch-arm,cpu: Add initial support for Arm SVE
[gem5.git] / src / arch / arm / utility.hh
1 /*
2 * Copyright (c) 2010, 2012-2013, 2016-2018 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * Copyright (c) 2007-2008 The Florida State University
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Korey Sewell
42 * Stephen Hines
43 */
44
45 #ifndef __ARCH_ARM_UTILITY_HH__
46 #define __ARCH_ARM_UTILITY_HH__
47
48 #include "arch/arm/isa_traits.hh"
49 #include "arch/arm/miscregs.hh"
50 #include "arch/arm/types.hh"
51 #include "base/logging.hh"
52 #include "base/trace.hh"
53 #include "base/types.hh"
54 #include "cpu/static_inst.hh"
55 #include "cpu/thread_context.hh"
56
57 class ArmSystem;
58
59 namespace ArmISA {
60
61 inline PCState
62 buildRetPC(const PCState &curPC, const PCState &callPC)
63 {
64 PCState retPC = callPC;
65 retPC.uEnd();
66 return retPC;
67 }
68
69 inline bool
70 testPredicate(uint32_t nz, uint32_t c, uint32_t v, ConditionCode code)
71 {
72 bool n = (nz & 0x2);
73 bool z = (nz & 0x1);
74
75 switch (code)
76 {
77 case COND_EQ: return z;
78 case COND_NE: return !z;
79 case COND_CS: return c;
80 case COND_CC: return !c;
81 case COND_MI: return n;
82 case COND_PL: return !n;
83 case COND_VS: return v;
84 case COND_VC: return !v;
85 case COND_HI: return (c && !z);
86 case COND_LS: return !(c && !z);
87 case COND_GE: return !(n ^ v);
88 case COND_LT: return (n ^ v);
89 case COND_GT: return !(n ^ v || z);
90 case COND_LE: return (n ^ v || z);
91 case COND_AL: return true;
92 case COND_UC: return true;
93 default:
94 panic("Unhandled predicate condition: %d\n", code);
95 }
96 }
97
98 /**
99 * Function to insure ISA semantics about 0 registers.
100 * @param tc The thread context.
101 */
102 template <class TC>
103 void zeroRegisters(TC *tc);
104
/**
 * Start a CPU by activating its thread context.
 * cpuId is unused on Arm; it is kept for the cross-ISA interface.
 */
inline void startupCPU(ThreadContext *tc, int cpuId)
{
    tc->activate();
}
109
110 void copyRegs(ThreadContext *src, ThreadContext *dest);
111
/**
 * Copy the miscellaneous registers from one thread context to another.
 * Unimplemented stub: always panics if called.
 */
static inline void
copyMiscRegs(ThreadContext *src, ThreadContext *dest)
{
    panic("Copy Misc. Regs Not Implemented Yet\n");
}
117
118 void initCPU(ThreadContext *tc, int cpuId);
119
120 static inline bool
121 inUserMode(CPSR cpsr)
122 {
123 return cpsr.mode == MODE_USER || cpsr.mode == MODE_EL0T;
124 }
125
126 static inline bool
127 inUserMode(ThreadContext *tc)
128 {
129 return inUserMode(tc->readMiscRegNoEffect(MISCREG_CPSR));
130 }
131
/** Check whether a CPSR value denotes privileged (non-user) execution. */
static inline bool
inPrivilegedMode(CPSR cpsr)
{
    return !inUserMode(cpsr);
}
137
/** Check whether the given thread is executing in a privileged mode. */
static inline bool
inPrivilegedMode(ThreadContext *tc)
{
    return !inUserMode(tc);
}
143
144 bool inAArch64(ThreadContext *tc);
145
146 static inline OperatingMode
147 currOpMode(ThreadContext *tc)
148 {
149 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
150 return (OperatingMode) (uint8_t) cpsr.mode;
151 }
152
153 static inline ExceptionLevel
154 currEL(ThreadContext *tc)
155 {
156 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
157 return (ExceptionLevel) (uint8_t) cpsr.el;
158 }
159
160 /**
161 * This function checks whether selected EL provided as an argument
162 * is using the AArch32 ISA. This information might be unavailable
163 * at the current EL status: it hence returns a pair of boolean values:
164 * a first boolean, true if information is available (known),
165 * and a second one, true if EL is using AArch32, false for AArch64.
166 *
167 * @param tc The thread context.
168 * @param el The target exception level.
169 * @retval known is FALSE for EL0 if the current Exception level
170 * is not EL0 and EL1 is using AArch64, since it cannot
171 * determine the state of EL0; TRUE otherwise.
172 * @retval aarch32 is TRUE if the specified Exception level is using AArch32;
173 * FALSE otherwise.
174 */
175 std::pair<bool, bool>
176 ELUsingAArch32K(ThreadContext *tc, ExceptionLevel el);
177
178 bool ELIs32(ThreadContext *tc, ExceptionLevel el);
179
180 bool ELIs64(ThreadContext *tc, ExceptionLevel el);
181
182 /**
183 * Returns true if the current exception level `el` is executing a Host OS or
184 * an application of a Host OS (Armv8.1 Virtualization Host Extensions).
185 */
186 bool ELIsInHost(ThreadContext *tc, ExceptionLevel el);
187
188 bool isBigEndian64(ThreadContext *tc);
189
190 /**
191 * badMode is checking if the execution mode provided as an argument is
192 * valid and implemented for AArch32
193 *
194 * @param tc ThreadContext
195 * @param mode OperatingMode to check
196 * @return false if mode is valid and implemented, true otherwise
197 */
198 bool badMode32(ThreadContext *tc, OperatingMode mode);
199
200 /**
201 * badMode is checking if the execution mode provided as an argument is
202 * valid and implemented.
203 *
204 * @param tc ThreadContext
205 * @param mode OperatingMode to check
206 * @return false if mode is valid and implemented, true otherwise
207 */
208 bool badMode(ThreadContext *tc, OperatingMode mode);
209
210 static inline uint8_t
211 itState(CPSR psr)
212 {
213 ITSTATE it = 0;
214 it.top6 = psr.it2;
215 it.bottom2 = psr.it1;
216
217 return (uint8_t)it;
218 }
219
220 /**
221 * Removes the tag from tagged addresses if that mode is enabled.
222 * @param addr The address to be purified.
223 * @param tc The thread context.
224 * @param el The controlled exception level.
225 * @return The purified address.
226 */
227 Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el,
228 TTBCR tcr);
229 Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el);
230
231 static inline bool
232 inSecureState(SCR scr, CPSR cpsr)
233 {
234 switch ((OperatingMode) (uint8_t) cpsr.mode) {
235 case MODE_MON:
236 case MODE_EL3T:
237 case MODE_EL3H:
238 return true;
239 case MODE_HYP:
240 case MODE_EL2T:
241 case MODE_EL2H:
242 return false;
243 default:
244 return !scr.ns;
245 }
246 }
247
248 bool inSecureState(ThreadContext *tc);
249
250 /**
251 * Return TRUE if an Exception level below EL3 is in Secure state.
252 * Differs from inSecureState in that it ignores the current EL
253 * or Mode in considering security state.
254 */
255 inline bool isSecureBelowEL3(ThreadContext *tc);
256
257 bool longDescFormatInUse(ThreadContext *tc);
258
259 /** This helper function is either returing the value of
260 * MPIDR_EL1 (by calling getMPIDR), or it is issuing a read
261 * to VMPIDR_EL2 (as it happens in virtualized systems) */
262 RegVal readMPIDR(ArmSystem *arm_sys, ThreadContext *tc);
263
264 /** This helper function is returing the value of MPIDR_EL1 */
265 RegVal getMPIDR(ArmSystem *arm_sys, ThreadContext *tc);
266
267 static inline uint32_t
268 mcrMrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, uint32_t crn,
269 uint32_t opc1, uint32_t opc2)
270 {
271 return (isRead << 0) |
272 (crm << 1) |
273 (rt << 5) |
274 (crn << 10) |
275 (opc1 << 14) |
276 (opc2 << 17);
277 }
278
279 static inline void
280 mcrMrcIssExtract(uint32_t iss, bool &isRead, uint32_t &crm, IntRegIndex &rt,
281 uint32_t &crn, uint32_t &opc1, uint32_t &opc2)
282 {
283 isRead = (iss >> 0) & 0x1;
284 crm = (iss >> 1) & 0xF;
285 rt = (IntRegIndex) ((iss >> 5) & 0xF);
286 crn = (iss >> 10) & 0xF;
287 opc1 = (iss >> 14) & 0x7;
288 opc2 = (iss >> 17) & 0x7;
289 }
290
291 static inline uint32_t
292 mcrrMrrcIssBuild(bool isRead, uint32_t crm, IntRegIndex rt, IntRegIndex rt2,
293 uint32_t opc1)
294 {
295 return (isRead << 0) |
296 (crm << 1) |
297 (rt << 5) |
298 (rt2 << 10) |
299 (opc1 << 16);
300 }
301
302 static inline uint32_t
303 msrMrs64IssBuild(bool isRead, uint32_t op0, uint32_t op1, uint32_t crn,
304 uint32_t crm, uint32_t op2, IntRegIndex rt)
305 {
306 return isRead |
307 (crm << 1) |
308 (rt << 5) |
309 (crn << 10) |
310 (op1 << 14) |
311 (op2 << 17) |
312 (op0 << 20);
313 }
314
315 bool
316 mcrMrc15TrapToHyp(const MiscRegIndex miscReg, HCR hcr, CPSR cpsr, SCR scr,
317 HDCR hdcr, HSTR hstr, HCPTR hcptr, uint32_t iss);
318 bool
319 mcrMrc14TrapToHyp(const MiscRegIndex miscReg, HCR hcr, CPSR cpsr, SCR scr,
320 HDCR hdcr, HSTR hstr, HCPTR hcptr, uint32_t iss);
321 bool
322 mcrrMrrc15TrapToHyp(const MiscRegIndex miscReg, CPSR cpsr, SCR scr, HSTR hstr,
323 HCR hcr, uint32_t iss);
324
325 bool SPAlignmentCheckEnabled(ThreadContext* tc);
326
327 uint64_t getArgument(ThreadContext *tc, int &number, uint16_t size, bool fp);
328
329 void skipFunction(ThreadContext *tc);
330
/**
 * Advance the PC state past the given instruction by delegating to
 * the instruction's own advancePC method.
 */
inline void
advancePC(PCState &pc, const StaticInstPtr &inst)
{
    inst->advancePC(pc);
}
336
337 Addr truncPage(Addr addr);
338 Addr roundPage(Addr addr);
339
/**
 * Return the address-space identifier of the executing context, read
 * from CONTEXTIDR.
 * NOTE(review): this returns the full CONTEXTIDR register value, not
 * just an extracted ASID field — confirm callers expect that.
 */
inline uint64_t
getExecutingAsid(ThreadContext *tc)
{
    return tc->readMiscReg(MISCREG_CONTEXTIDR);
}
345
346 // Decodes the register index to access based on the fields used in a MSR
347 // or MRS instruction
348 bool
349 decodeMrsMsrBankedReg(uint8_t sysM, bool r, bool &isIntReg, int &regIdx,
350 CPSR cpsr, SCR scr, NSACR nsacr,
351 bool checkSecurity = true);
352
353 // This wrapper function is used to turn the register index into a source
354 // parameter for the instruction. See Operands.isa
355 static inline int
356 decodeMrsMsrBankedIntRegIndex(uint8_t sysM, bool r)
357 {
358 int regIdx;
359 bool isIntReg;
360 bool validReg;
361
362 validReg = decodeMrsMsrBankedReg(sysM, r, isIntReg, regIdx, 0, 0, 0, false);
363 return (validReg && isIntReg) ? regIdx : INTREG_DUMMY;
364 }
365
366 /**
367 * Returns the n. of PA bits corresponding to the specified encoding.
368 */
369 int decodePhysAddrRange64(uint8_t pa_enc);
370
371 /**
372 * Returns the encoding corresponding to the specified n. of PA bits.
373 */
374 uint8_t encodePhysAddrRange64(int pa_size);
375
376 inline ByteOrder byteOrder(ThreadContext *tc)
377 {
378 return isBigEndian64(tc) ? BigEndianByteOrder : LittleEndianByteOrder;
379 };
380
381 }
382
383 #endif