/*
 * Copyright (c) 2010-2013,2016-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2007-2008 The Florida State University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ARCH_ARM_INSTS_STATICINST_HH__
#define __ARCH_ARM_INSTS_STATICINST_HH__

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/isa.hh"
#include "arch/arm/self_debug.hh"
#include "arch/arm/system.hh"
#include "arch/arm/utility.hh"
#include "base/trace.hh"
#include "cpu/exec_context.hh"
#include "cpu/static_inst.hh"
#include "sim/byteswap.hh"
#include "sim/full_system.hh"

namespace ArmISA
{

class ArmStaticInst : public StaticInst
{
  protected:
    bool aarch64;
    uint8_t intWidth;

    int32_t shift_rm_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    int32_t shift_rm_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    bool shift_carry_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    bool shift_carry_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    int64_t shiftReg64(uint64_t base, uint64_t shiftAmt,
                       ArmShiftType type, uint8_t width) const;
    int64_t extendReg64(uint64_t base, ArmExtendType type,
                        uint64_t shiftAmt, uint8_t width) const;

    template<int width>
    static inline bool
    saturateOp(int32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (bits(midRes, width) != bits(midRes, width - 1)) {
            if (midRes > 0)
                res = (LL(1) << (width - 1)) - 1;
            else
                res = -(LL(1) << (width - 1));
            return true;
        } else {
            res = midRes;
            return false;
        }
    }
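
    // For example (illustrative values, not from the original source):
    // with width=8 the result is clamped to the signed 8-bit range
    // [-128, 127]:
    //
    //     int32_t r;
    //     saturateOp<8>(r, 100, 100);  // r == 127, returns true (saturated)
    //     saturateOp<8>(r, 50, 20);    // r == 70, returns false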

    static inline bool
    satInt(int32_t &res, int64_t op, int width)
    {
        width--;
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < -(LL(1) << width)) {
            res = -(LL(1) << width);
            return true;
        } else {
            res = op;
            return false;
        }
    }

    template<int width>
    static inline bool
    uSaturateOp(uint32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (midRes >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (midRes < 0) {
            res = 0;
            return true;
        } else {
            res = midRes;
            return false;
        }
    }

    static inline bool
    uSatInt(int32_t &res, int64_t op, int width)
    {
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < 0) {
            res = 0;
            return true;
        } else {
            res = op;
            return false;
        }
    }
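
    // Likewise for the unsigned helpers (illustrative values): with
    // width=8 the result is clamped to [0, 255]:
    //
    //     uint32_t r;
    //     uSaturateOp<8>(r, 200, 100);              // r == 255, returns true
    //     uSaturateOp<8>(r, 10, 20, true /*sub*/);  // r == 0, returns true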

    // Constructor
    ArmStaticInst(const char *mnem, ExtMachInst _machInst,
                  OpClass __opClass)
        : StaticInst(mnem, _machInst, __opClass)
    {
        aarch64 = machInst.aarch64;
        if (bits(machInst, 28, 24) == 0x10)
            intWidth = 64;  // Force 64-bit width for ADR/ADRP
        else
            intWidth = (aarch64 && bits(machInst, 31)) ? 64 : 32;
    }
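
    // For example, an AArch64 "ADD X0, X1, #1" has the sf bit (bit 31)
    // set, so intWidth becomes 64; the W-register form clears sf and
    // yields 32. ADR/ADRP match bits(machInst, 28, 24) == 0x10 and are
    // always treated as 64 bit.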

    /// Print a register name for disassembly given the unique
    /// dependence tag number (FP or int).
    void printIntReg(std::ostream &os, RegIndex reg_idx,
                     uint8_t opWidth = 0) const;
    void printFloatReg(std::ostream &os, RegIndex reg_idx) const;
    void printVecReg(std::ostream &os, RegIndex reg_idx,
                     bool isSveVecReg = false) const;
    void printVecPredReg(std::ostream &os, RegIndex reg_idx) const;
    void printCCReg(std::ostream &os, RegIndex reg_idx) const;
    void printMiscReg(std::ostream &os, RegIndex reg_idx) const;
    void printMnemonic(std::ostream &os,
                       const std::string &suffix = "",
                       bool withPred = true,
                       bool withCond64 = false,
                       ConditionCode cond64 = COND_UC) const;
    void printTarget(std::ostream &os, Addr target,
                     const Loader::SymbolTable *symtab) const;
    void printCondition(std::ostream &os, unsigned code,
                        bool noImplicit=false) const;
    void printMemSymbol(std::ostream &os, const Loader::SymbolTable *symtab,
                        const std::string &prefix, const Addr addr,
                        const std::string &suffix) const;
    void printShiftOperand(std::ostream &os, IntRegIndex rm,
                           bool immShift, uint32_t shiftAmt,
                           IntRegIndex rs, ArmShiftType type) const;
    void printExtendOperand(bool firstOperand, std::ostream &os,
                            IntRegIndex rm, ArmExtendType type,
                            int64_t shiftAmt) const;
    void printPFflags(std::ostream &os, int flag) const;

    void printDataInst(std::ostream &os, bool withImm) const;
    void printDataInst(std::ostream &os, bool withImm, bool immShift, bool s,
                       IntRegIndex rd, IntRegIndex rn, IntRegIndex rm,
                       IntRegIndex rs, uint32_t shiftAmt, ArmShiftType type,
                       uint64_t imm) const;

    void
    advancePC(PCState &pcState) const override
    {
        pcState.advance();
    }

    std::string generateDisassembly(
            Addr pc, const Loader::SymbolTable *symtab) const override;

    static void
    activateBreakpoint(ThreadContext *tc)
    {
        auto *isa = static_cast<ArmISA::ISA *>(tc->getIsaPtr());
        SelfDebug *sd = isa->getSelfDebug();
        sd->activateDebug();
    }

    static inline uint32_t
    cpsrWriteByInstr(CPSR cpsr, uint32_t val, SCR scr, NSACR nsacr,
                     uint8_t byteMask, bool affectState, bool nmfi,
                     ThreadContext *tc)
    {
        bool privileged = (cpsr.mode != MODE_USER);
        bool haveVirt = ArmSystem::haveVirtualization(tc);
        bool haveSecurity = ArmSystem::haveSecurity(tc);
        bool isSecure = inSecureState(scr, cpsr) || !haveSecurity;

        uint32_t bitMask = 0;

        if (affectState && byteMask == 0xF) {
            activateBreakpoint(tc);
        }
        if (bits(byteMask, 3)) {
            unsigned lowIdx = affectState ? 24 : 27;
            bitMask = bitMask | mask(31, lowIdx);
        }
        if (bits(byteMask, 2)) {
            bitMask = bitMask | mask(19, 16);
        }
        if (bits(byteMask, 1)) {
            unsigned highIdx = affectState ? 15 : 9;
            unsigned lowIdx = (privileged && (isSecure || scr.aw || haveVirt))
                            ? 8 : 9;
            bitMask = bitMask | mask(highIdx, lowIdx);
        }
        if (bits(byteMask, 0)) {
            if (privileged) {
                bitMask |= 1 << 7;
                if ((!nmfi || !((val >> 6) & 0x1)) &&
                    (isSecure || scr.fw || haveVirt)) {
                    bitMask |= 1 << 6;
                }
                // Now check that the new mode is allowed
                OperatingMode newMode = (OperatingMode)(val & mask(5));
                OperatingMode oldMode = (OperatingMode)(uint32_t)cpsr.mode;
                if (!badMode(tc, newMode)) {
                    bool validModeChange = true;
                    // Check for attempts to enter modes only permitted in
                    // Secure state from Non-secure state. These are Monitor
                    // mode ('10110'), and FIQ mode ('10001') if the Security
                    // Extensions have reserved it.
                    if (!isSecure && newMode == MODE_MON)
                        validModeChange = false;
                    if (!isSecure && newMode == MODE_FIQ && nsacr.rfr == 1)
                        validModeChange = false;
                    // There is no Hyp mode ('11010') in Secure state, so that
                    // is UNPREDICTABLE
                    if (scr.ns == 0 && newMode == MODE_HYP)
                        validModeChange = false;
                    // Cannot move into Hyp mode directly from a Non-secure
                    // PL1 mode
                    if (!isSecure && oldMode != MODE_HYP &&
                        newMode == MODE_HYP)
                        validModeChange = false;
                    // Cannot move out of Hyp mode with this function except
                    // on an exception return
                    if (oldMode == MODE_HYP && newMode != MODE_HYP &&
                        !affectState)
                        validModeChange = false;
                    // Must not change to 64 bit when running in 32 bit mode
                    if (!opModeIs64(oldMode) && opModeIs64(newMode))
                        validModeChange = false;

                    // If we passed all of the above then set the bit mask to
                    // copy the mode across
                    if (validModeChange) {
                        bitMask = bitMask | mask(5);
                    } else {
                        warn_once("Illegal change to CPSR mode attempted\n");
                    }
                } else {
                    warn_once("Ignoring write of bad mode to CPSR.\n");
                }
            }
            if (affectState)
                bitMask = bitMask | (1 << 5);
        }

        return ((uint32_t)cpsr & ~bitMask) | (val & bitMask);
    }
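
    // For example (illustrative use): an A32 "MSR CPSR_f, Rm" encodes
    // byteMask = 0x8, so with affectState == false the mask above is
    // mask(31, 27) = 0xf8000000 and only the N, Z, C, V and Q flags can
    // change:
    //
    //     uint32_t next = cpsrWriteByInstr(cpsr, val, scr, nsacr,
    //                                      0x8, false, nmfi, tc);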

    static inline uint32_t
    spsrWriteByInstr(uint32_t spsr, uint32_t val,
                     uint8_t byteMask, bool affectState)
    {
        uint32_t bitMask = 0;

        if (bits(byteMask, 3))
            bitMask = bitMask | mask(31, 24);
        if (bits(byteMask, 2))
            bitMask = bitMask | mask(19, 16);
        if (bits(byteMask, 1))
            bitMask = bitMask | mask(15, 8);
        if (bits(byteMask, 0))
            bitMask = bitMask | mask(7, 0);

        return ((spsr & ~bitMask) | (val & bitMask));
    }
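
    // For example, byteMask = 0x9 selects the flags and control bytes,
    // giving bitMask = 0xff0000ff: bits 31:24 and 7:0 are taken from
    // val, and everything else is preserved from spsr.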

    static inline Addr
    readPC(ExecContext *xc)
    {
        return xc->pcState().instPC();
    }

    static inline void
    setNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instNPC(val);
        xc->pcState(pc);
    }

    template<class T>
    static inline T
    cSwap(T val, bool big)
    {
        if (big) {
            return letobe(val);
        } else {
            return val;
        }
    }

    template<class T, class E>
    static inline T
    cSwap(T val, bool big)
    {
        const unsigned count = sizeof(T) / sizeof(E);
        union {
            T tVal;
            E eVals[count];
        } conv;
        conv.tVal = htole(val);
        if (big) {
            for (unsigned i = 0; i < count; i++) {
                conv.eVals[i] = letobe(conv.eVals[i]);
            }
        }
        return letoh(conv.tVal);
    }
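
    // The two-parameter form swaps each element of a larger value
    // independently rather than the value as a whole. For example
    // (illustrative), a big-endian pair of 16-bit elements held in a
    // uint32_t could be converted with:
    //
    //     uint32_t out = cSwap<uint32_t, uint16_t>(in, true);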

    // Perform an interworking branch.
    static inline void
    setIWNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instIWNPC(val);
        xc->pcState(pc);
    }

    // Perform an interworking branch in ARM mode, a regular branch
    // otherwise.
    static inline void
    setAIWNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instAIWNPC(val);
        xc->pcState(pc);
    }

    inline Fault
    disabledFault() const
    {
        return std::make_shared<UndefinedInstruction>(machInst, false,
                                                      mnemonic, true);
    }

    // Utility function used by checkForWFxTrap32 and checkForWFxTrap64.
    // Returns true if the processor has to trap a WFI/WFE instruction.
    bool isWFxTrapping(ThreadContext *tc,
                       ExceptionLevel targetEL, bool isWfe) const;

    /**
     * Trigger a Software Breakpoint.
     *
     * See aarch32/exceptions/debug/AArch32.SoftwareBreakpoint in the
     * ARM ARM pseudocode library.
     */
    Fault softwareBreakpoint32(ExecContext *xc, uint16_t imm) const;

    /**
     * Trap an access to Advanced SIMD or FP registers due to access
     * control bits.
     *
     * See aarch64/exceptions/traps/AArch64.AdvSIMDFPAccessTrap in the
     * ARM ARM pseudocode library.
     *
     * @param el Target EL for the trap
     */
    Fault advSIMDFPAccessTrap64(ExceptionLevel el) const;

    /**
     * Check an Advanced SIMD access against CPTR_EL2 and CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const;

    /**
     * Check an Advanced SIMD access against CPACR_EL1, CPTR_EL2, and
     * CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDEnabled64(ThreadContext *tc,
                                  CPSR cpsr, CPACR cpacr) const;

    /**
     * Check if a VFP/SIMD access from aarch32 should be allowed.
     *
     * See aarch32/exceptions/traps/AArch32.CheckAdvSIMDOrFPEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
                                    CPSR cpsr, CPACR cpacr,
                                    NSACR nsacr, FPEXC fpexc,
                                    bool fpexc_check, bool advsimd) const;

    /**
     * Check if WFE/WFI instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap32(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * Check if WFE/WFI instruction execution in aarch64 should be trapped.
     *
     * See aarch64/exceptions/traps/AArch64.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap64(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * WFE/WFI trapping helper function.
     */
    Fault trapWFx(ThreadContext *tc, CPSR cpsr, SCR scr, bool isWfe) const;

    /**
     * Check if SETEND instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.CheckSETENDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const;

    /**
     * UNDEFINED behaviour in AArch32.
     *
     * See aarch32/exceptions/traps/AArch32.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault32(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * UNDEFINED behaviour in AArch64.
     *
     * See aarch64/exceptions/traps/AArch64.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault64(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * Trap an access to SVE registers due to access control bits.
     *
     * @param el Target EL for the trap.
     */
    Fault sveAccessTrap(ExceptionLevel el) const;

    /**
     * Check an SVE access against CPACR_EL1, CPTR_EL2, and CPTR_EL3.
     */
    Fault checkSveEnabled(ThreadContext *tc, CPSR cpsr, CPACR cpacr) const;

    /**
     * Get the new PSTATE from an SPSR register in preparation for an
     * exception return.
     *
     * See shared/functions/system/SetPSTATEFromPSR in the ARM ARM
     * pseudocode library.
     */
    CPSR getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const;

    /**
     * Return true if exceptions normally routed to EL1 are being handled
     * at an Exception level using AArch64, because either EL1 is using
     * AArch64 or TGE is in force and EL2 is using AArch64.
     *
     * See aarch32/exceptions/exceptions/AArch32.GeneralExceptionsToAArch64
     * in the ARM ARM pseudocode library.
     */
    bool generalExceptionsToAArch64(ThreadContext *tc,
                                    ExceptionLevel pstateEL) const;

  public:
    virtual void
    annotateFault(ArmFault *fault) {}

    uint8_t
    getIntWidth() const
    {
        return intWidth;
    }

    /** Returns the size of the current instruction in bytes. */
    ssize_t
    instSize() const
    {
        return (!machInst.thumb || machInst.bigThumb) ? 4 : 2;
    }
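
    // For example, a 16-bit Thumb encoding (machInst.thumb set,
    // machInst.bigThumb clear) reports 2 bytes; 32-bit "big Thumb" and
    // all ARM/A64 encodings report 4.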

    /**
     * Returns the real encoding of the instruction: the machInst field
     * is in fact always 64 bits wide and carries some decoder metadata
     * in its upper bits, so it differs from the actual opcode.
     */
    MachInst
    encoding() const
    {
        return static_cast<MachInst>(machInst & (mask(instSize() * 8)));
    }
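
    // For example, for a 16-bit Thumb instruction encoding() returns
    // machInst & mask(16), discarding the metadata held in the upper
    // bits of the 64-bit machInst field.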

    size_t
    asBytes(void *buf, size_t max_size) override
    {
        return simpleAsBytes(buf, max_size, machInst);
    }

    static unsigned getCurSveVecLenInBits(ThreadContext *tc);

    static unsigned
    getCurSveVecLenInQWords(ThreadContext *tc)
    {
        return getCurSveVecLenInBits(tc) >> 6;
    }

    template<typename T>
    static unsigned
    getCurSveVecLen(ThreadContext *tc)
    {
        return getCurSveVecLenInBits(tc) / (8 * sizeof(T));
    }
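
    // For example, with a 256-bit SVE vector length,
    // getCurSveVecLenInQWords() returns 4 (256 >> 6) and
    // getCurSveVecLen<uint32_t>() returns 8 (256 / 32) elements.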
};

} // namespace ArmISA

#endif // __ARCH_ARM_INSTS_STATICINST_HH__