/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
46 #include "aarch64-tdep.h"
49 #include "elf/aarch64.h"
54 #include "record-full.h"
56 #include "features/aarch64.c"
/* Pseudo register base numbers.  Each vector view (Q/D/S/H/B) gets a
   bank of 32 consecutive pseudo register numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
65 /* The standard register names, and all the valid aliases for them. */
68 const char *const name
;
70 } aarch64_register_aliases
[] =
72 /* 64-bit register names. */
73 {"fp", AARCH64_FP_REGNUM
},
74 {"lr", AARCH64_LR_REGNUM
},
75 {"sp", AARCH64_SP_REGNUM
},
77 /* 32-bit register names. */
78 {"w0", AARCH64_X0_REGNUM
+ 0},
79 {"w1", AARCH64_X0_REGNUM
+ 1},
80 {"w2", AARCH64_X0_REGNUM
+ 2},
81 {"w3", AARCH64_X0_REGNUM
+ 3},
82 {"w4", AARCH64_X0_REGNUM
+ 4},
83 {"w5", AARCH64_X0_REGNUM
+ 5},
84 {"w6", AARCH64_X0_REGNUM
+ 6},
85 {"w7", AARCH64_X0_REGNUM
+ 7},
86 {"w8", AARCH64_X0_REGNUM
+ 8},
87 {"w9", AARCH64_X0_REGNUM
+ 9},
88 {"w10", AARCH64_X0_REGNUM
+ 10},
89 {"w11", AARCH64_X0_REGNUM
+ 11},
90 {"w12", AARCH64_X0_REGNUM
+ 12},
91 {"w13", AARCH64_X0_REGNUM
+ 13},
92 {"w14", AARCH64_X0_REGNUM
+ 14},
93 {"w15", AARCH64_X0_REGNUM
+ 15},
94 {"w16", AARCH64_X0_REGNUM
+ 16},
95 {"w17", AARCH64_X0_REGNUM
+ 17},
96 {"w18", AARCH64_X0_REGNUM
+ 18},
97 {"w19", AARCH64_X0_REGNUM
+ 19},
98 {"w20", AARCH64_X0_REGNUM
+ 20},
99 {"w21", AARCH64_X0_REGNUM
+ 21},
100 {"w22", AARCH64_X0_REGNUM
+ 22},
101 {"w23", AARCH64_X0_REGNUM
+ 23},
102 {"w24", AARCH64_X0_REGNUM
+ 24},
103 {"w25", AARCH64_X0_REGNUM
+ 25},
104 {"w26", AARCH64_X0_REGNUM
+ 26},
105 {"w27", AARCH64_X0_REGNUM
+ 27},
106 {"w28", AARCH64_X0_REGNUM
+ 28},
107 {"w29", AARCH64_X0_REGNUM
+ 29},
108 {"w30", AARCH64_X0_REGNUM
+ 30},
111 {"ip0", AARCH64_X0_REGNUM
+ 16},
112 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  /* NOTE(review): the two trailing entries were reconstructed from the
     upstream source; the lines were missing from the extracted view.  */
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  /* NOTE(review): the two trailing status-register entries were
     reconstructed from the upstream source; the lines were missing from
     the extracted view.  */
  "fpsr", "fpcr"
};
148 /* AArch64 prologue cache structure. */
149 struct aarch64_prologue_cache
151 /* The program counter at the start of the function. It is used to
152 identify this frame as a prologue frame. */
155 /* The program counter at the time this frame was created; i.e. where
156 this function was called from. It is used to identify this frame as a
160 /* The stack pointer at the time this frame was created; i.e. the
161 caller's stack pointer when this function was called. It is used
162 to identify this frame. */
165 /* The frame base for this frame is just prev_sp - frame size.
166 FRAMESIZE is the distance from the frame pointer to the
167 initial stack pointer. */
170 /* The register used to hold the frame pointer for this frame. */
173 /* Saved register offsets. */
174 struct trad_frame_saved_reg
*saved_regs
;
/* Toggle this file's internal debugging dump.  Zero (off) by default;
   controlled by the "set debug aarch64" command.  */
static int aarch64_debug;
/* Implement the "show" callback for the "set debug aarch64" command:
   report the current state of the aarch64 debug flag to FILE.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  /* Perform the left shift in the unsigned domain: left-shifting a
     signed value into or through the sign bit is undefined behavior in
     C (the original code cast to int32_t before shifting).  Converting
     the shifted value to int32_t and then arithmetic-shifting right
     sign-extends the field, as before.  */
  return ((int32_t) (insn << shift_l)) >> shift_r;
}
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  /* Keep only the bits selected by MASK, then compare.  */
  uint32_t selected = insn & mask;

  return selected == pattern;
}
220 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
222 ADDR specifies the address of the opcode.
223 INSN specifies the opcode to test.
224 RD receives the 'rd' field from the decoded instruction.
225 RN receives the 'rn' field from the decoded instruction.
227 Return 1 if the opcodes matches and is decoded, otherwise 0. */
229 decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
, unsigned *rn
,
232 if ((insn
& 0x9f000000) == 0x91000000)
237 *rd
= (insn
>> 0) & 0x1f;
238 *rn
= (insn
>> 5) & 0x1f;
239 *imm
= (insn
>> 10) & 0xfff;
240 shift
= (insn
>> 22) & 0x3;
241 op_is_sub
= (insn
>> 30) & 0x1;
259 fprintf_unfiltered (gdb_stdlog
,
260 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
261 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
268 /* Decode an opcode if it represents an ADRP instruction.
270 ADDR specifies the address of the opcode.
271 INSN specifies the opcode to test.
272 RD receives the 'rd' field from the decoded instruction.
274 Return 1 if the opcodes matches and is decoded, otherwise 0. */
277 decode_adrp (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
279 if (decode_masked_match (insn
, 0x9f000000, 0x90000000))
281 *rd
= (insn
>> 0) & 0x1f;
284 fprintf_unfiltered (gdb_stdlog
,
285 "decode: 0x%s 0x%x adrp x%u, #?\n",
286 core_addr_to_string_nz (addr
), insn
, *rd
);
292 /* Decode an opcode if it represents an branch immediate or branch
293 and link immediate instruction.
295 ADDR specifies the address of the opcode.
296 INSN specifies the opcode to test.
297 LINK receives the 'link' bit from the decoded instruction.
298 OFFSET receives the immediate offset from the decoded instruction.
300 Return 1 if the opcodes matches and is decoded, otherwise 0. */
303 decode_b (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, int32_t *offset
)
305 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
306 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
307 if (decode_masked_match (insn
, 0x7c000000, 0x14000000))
310 *offset
= extract_signed_bitfield (insn
, 26, 0) << 2;
313 fprintf_unfiltered (gdb_stdlog
,
314 "decode: 0x%s 0x%x %s 0x%s\n",
315 core_addr_to_string_nz (addr
), insn
,
317 core_addr_to_string_nz (addr
+ *offset
));
324 /* Decode an opcode if it represents a conditional branch instruction.
326 ADDR specifies the address of the opcode.
327 INSN specifies the opcode to test.
328 COND receives the branch condition field from the decoded
330 OFFSET receives the immediate offset from the decoded instruction.
332 Return 1 if the opcodes matches and is decoded, otherwise 0. */
335 decode_bcond (CORE_ADDR addr
, uint32_t insn
, unsigned *cond
, int32_t *offset
)
337 if (decode_masked_match (insn
, 0xfe000000, 0x54000000))
339 *cond
= (insn
>> 0) & 0xf;
340 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
343 fprintf_unfiltered (gdb_stdlog
,
344 "decode: 0x%s 0x%x b<%u> 0x%s\n",
345 core_addr_to_string_nz (addr
), insn
, *cond
,
346 core_addr_to_string_nz (addr
+ *offset
));
352 /* Decode an opcode if it represents a branch via register instruction.
354 ADDR specifies the address of the opcode.
355 INSN specifies the opcode to test.
356 LINK receives the 'link' bit from the decoded instruction.
357 RN receives the 'rn' field from the decoded instruction.
359 Return 1 if the opcodes matches and is decoded, otherwise 0. */
362 decode_br (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, unsigned *rn
)
364 /* 8 4 0 6 2 8 4 0 */
365 /* blr 110101100011111100000000000rrrrr */
366 /* br 110101100001111100000000000rrrrr */
367 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
369 *link
= (insn
>> 21) & 1;
370 *rn
= (insn
>> 5) & 0x1f;
373 fprintf_unfiltered (gdb_stdlog
,
374 "decode: 0x%s 0x%x %s 0x%x\n",
375 core_addr_to_string_nz (addr
), insn
,
376 *link
? "blr" : "br", *rn
);
383 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
385 ADDR specifies the address of the opcode.
386 INSN specifies the opcode to test.
387 IS64 receives the 'sf' field from the decoded instruction.
388 OP receives the 'op' field from the decoded instruction.
389 RN receives the 'rn' field from the decoded instruction.
390 OFFSET receives the 'imm19' field from the decoded instruction.
392 Return 1 if the opcodes matches and is decoded, otherwise 0. */
395 decode_cb (CORE_ADDR addr
,
396 uint32_t insn
, int *is64
, unsigned *op
, unsigned *rn
,
399 if (decode_masked_match (insn
, 0x7e000000, 0x34000000))
401 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
402 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
404 *rn
= (insn
>> 0) & 0x1f;
405 *is64
= (insn
>> 31) & 0x1;
406 *op
= (insn
>> 24) & 0x1;
407 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
410 fprintf_unfiltered (gdb_stdlog
,
411 "decode: 0x%s 0x%x %s 0x%s\n",
412 core_addr_to_string_nz (addr
), insn
,
413 *op
? "cbnz" : "cbz",
414 core_addr_to_string_nz (addr
+ *offset
));
420 /* Decode an opcode if it represents a ERET instruction.
422 ADDR specifies the address of the opcode.
423 INSN specifies the opcode to test.
425 Return 1 if the opcodes matches and is decoded, otherwise 0. */
428 decode_eret (CORE_ADDR addr
, uint32_t insn
)
430 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
431 if (insn
== 0xd69f03e0)
434 fprintf_unfiltered (gdb_stdlog
, "decode: 0x%s 0x%x eret\n",
435 core_addr_to_string_nz (addr
), insn
);
441 /* Decode an opcode if it represents a MOVZ instruction.
443 ADDR specifies the address of the opcode.
444 INSN specifies the opcode to test.
445 RD receives the 'rd' field from the decoded instruction.
447 Return 1 if the opcodes matches and is decoded, otherwise 0. */
450 decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
452 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
454 *rd
= (insn
>> 0) & 0x1f;
457 fprintf_unfiltered (gdb_stdlog
,
458 "decode: 0x%s 0x%x movz x%u, #?\n",
459 core_addr_to_string_nz (addr
), insn
, *rd
);
465 /* Decode an opcode if it represents a ORR (shifted register)
468 ADDR specifies the address of the opcode.
469 INSN specifies the opcode to test.
470 RD receives the 'rd' field from the decoded instruction.
471 RN receives the 'rn' field from the decoded instruction.
472 RM receives the 'rm' field from the decoded instruction.
473 IMM receives the 'imm6' field from the decoded instruction.
475 Return 1 if the opcodes matches and is decoded, otherwise 0. */
478 decode_orr_shifted_register_x (CORE_ADDR addr
,
479 uint32_t insn
, unsigned *rd
, unsigned *rn
,
480 unsigned *rm
, int32_t *imm
)
482 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
484 *rd
= (insn
>> 0) & 0x1f;
485 *rn
= (insn
>> 5) & 0x1f;
486 *rm
= (insn
>> 16) & 0x1f;
487 *imm
= (insn
>> 10) & 0x3f;
490 fprintf_unfiltered (gdb_stdlog
,
491 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
492 core_addr_to_string_nz (addr
), insn
, *rd
,
499 /* Decode an opcode if it represents a RET instruction.
501 ADDR specifies the address of the opcode.
502 INSN specifies the opcode to test.
503 RN receives the 'rn' field from the decoded instruction.
505 Return 1 if the opcodes matches and is decoded, otherwise 0. */
508 decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
510 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
512 *rn
= (insn
>> 5) & 0x1f;
514 fprintf_unfiltered (gdb_stdlog
,
515 "decode: 0x%s 0x%x ret x%u\n",
516 core_addr_to_string_nz (addr
), insn
, *rn
);
522 /* Decode an opcode if it represents the following instruction:
523 STP rt, rt2, [rn, #imm]
525 ADDR specifies the address of the opcode.
526 INSN specifies the opcode to test.
527 RT1 receives the 'rt' field from the decoded instruction.
528 RT2 receives the 'rt2' field from the decoded instruction.
529 RN receives the 'rn' field from the decoded instruction.
530 IMM receives the 'imm' field from the decoded instruction.
532 Return 1 if the opcodes matches and is decoded, otherwise 0. */
535 decode_stp_offset (CORE_ADDR addr
,
537 unsigned *rt1
, unsigned *rt2
, unsigned *rn
, int32_t *imm
)
539 if (decode_masked_match (insn
, 0xffc00000, 0xa9000000))
541 *rt1
= (insn
>> 0) & 0x1f;
542 *rn
= (insn
>> 5) & 0x1f;
543 *rt2
= (insn
>> 10) & 0x1f;
544 *imm
= extract_signed_bitfield (insn
, 7, 15);
548 fprintf_unfiltered (gdb_stdlog
,
549 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
550 core_addr_to_string_nz (addr
), insn
,
551 *rt1
, *rt2
, *rn
, *imm
);
557 /* Decode an opcode if it represents the following instruction:
558 STP rt, rt2, [rn, #imm]!
560 ADDR specifies the address of the opcode.
561 INSN specifies the opcode to test.
562 RT1 receives the 'rt' field from the decoded instruction.
563 RT2 receives the 'rt2' field from the decoded instruction.
564 RN receives the 'rn' field from the decoded instruction.
565 IMM receives the 'imm' field from the decoded instruction.
567 Return 1 if the opcodes matches and is decoded, otherwise 0. */
570 decode_stp_offset_wb (CORE_ADDR addr
,
572 unsigned *rt1
, unsigned *rt2
, unsigned *rn
,
575 if (decode_masked_match (insn
, 0xffc00000, 0xa9800000))
577 *rt1
= (insn
>> 0) & 0x1f;
578 *rn
= (insn
>> 5) & 0x1f;
579 *rt2
= (insn
>> 10) & 0x1f;
580 *imm
= extract_signed_bitfield (insn
, 7, 15);
584 fprintf_unfiltered (gdb_stdlog
,
585 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
586 core_addr_to_string_nz (addr
), insn
,
587 *rt1
, *rt2
, *rn
, *imm
);
593 /* Decode an opcode if it represents the following instruction:
596 ADDR specifies the address of the opcode.
597 INSN specifies the opcode to test.
598 IS64 receives size field from the decoded instruction.
599 RT receives the 'rt' field from the decoded instruction.
600 RN receives the 'rn' field from the decoded instruction.
601 IMM receives the 'imm' field from the decoded instruction.
603 Return 1 if the opcodes matches and is decoded, otherwise 0. */
606 decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
, unsigned *rt
,
607 unsigned *rn
, int32_t *imm
)
609 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
611 *is64
= (insn
>> 30) & 1;
612 *rt
= (insn
>> 0) & 0x1f;
613 *rn
= (insn
>> 5) & 0x1f;
614 *imm
= extract_signed_bitfield (insn
, 9, 12);
617 fprintf_unfiltered (gdb_stdlog
,
618 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
619 core_addr_to_string_nz (addr
), insn
,
620 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
626 /* Decode an opcode if it represents a TB or TBNZ instruction.
628 ADDR specifies the address of the opcode.
629 INSN specifies the opcode to test.
630 OP receives the 'op' field from the decoded instruction.
631 BIT receives the bit position field from the decoded instruction.
632 RT receives 'rt' field from the decoded instruction.
633 IMM receives 'imm' field from the decoded instruction.
635 Return 1 if the opcodes matches and is decoded, otherwise 0. */
638 decode_tb (CORE_ADDR addr
,
639 uint32_t insn
, unsigned *op
, unsigned *bit
, unsigned *rt
,
642 if (decode_masked_match (insn
, 0x7e000000, 0x36000000))
644 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
645 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
647 *rt
= (insn
>> 0) & 0x1f;
648 *op
= insn
& (1 << 24);
649 *bit
= ((insn
>> (31 - 4)) & 0x20) | ((insn
>> 19) & 0x1f);
650 *imm
= extract_signed_bitfield (insn
, 14, 5) << 2;
653 fprintf_unfiltered (gdb_stdlog
,
654 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
655 core_addr_to_string_nz (addr
), insn
,
656 *op
? "tbnz" : "tbz", *rt
, *bit
,
657 core_addr_to_string_nz (addr
+ *imm
));
663 /* Analyze a prologue, looking for a recognizable stack frame
664 and frame pointer. Scan until we encounter a store that could
665 clobber the stack frame unexpectedly, or an unknown instruction. */
668 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
669 CORE_ADDR start
, CORE_ADDR limit
,
670 struct aarch64_prologue_cache
*cache
)
672 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
674 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
675 struct pv_area
*stack
;
676 struct cleanup
*back_to
;
678 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
679 regs
[i
] = pv_register (i
, 0);
680 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
681 back_to
= make_cleanup_free_pv_area (stack
);
683 for (; start
< limit
; start
+= 4)
701 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
703 if (decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
704 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
705 else if (decode_adrp (start
, insn
, &rd
))
706 regs
[rd
] = pv_unknown ();
707 else if (decode_b (start
, insn
, &is_link
, &offset
))
709 /* Stop analysis on branch. */
712 else if (decode_bcond (start
, insn
, &cond
, &offset
))
714 /* Stop analysis on branch. */
717 else if (decode_br (start
, insn
, &is_link
, &rn
))
719 /* Stop analysis on branch. */
722 else if (decode_cb (start
, insn
, &is64
, &op
, &rn
, &offset
))
724 /* Stop analysis on branch. */
727 else if (decode_eret (start
, insn
))
729 /* Stop analysis on branch. */
732 else if (decode_movz (start
, insn
, &rd
))
733 regs
[rd
] = pv_unknown ();
735 if (decode_orr_shifted_register_x (start
, insn
, &rd
, &rn
, &rm
, &imm
))
737 if (imm
== 0 && rn
== 31)
744 "aarch64: prologue analysis gave up addr=0x%s "
745 "opcode=0x%x (orr x register)\n",
746 core_addr_to_string_nz (start
),
751 else if (decode_ret (start
, insn
, &rn
))
753 /* Stop analysis on branch. */
756 else if (decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
758 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
759 is64
? 8 : 4, regs
[rt
]);
761 else if (decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
763 /* If recording this store would invalidate the store area
764 (perhaps because rn is not known) then we should abandon
765 further prologue analysis. */
766 if (pv_area_store_would_trash (stack
,
767 pv_add_constant (regs
[rn
], imm
)))
770 if (pv_area_store_would_trash (stack
,
771 pv_add_constant (regs
[rn
], imm
+ 8)))
774 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
776 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
779 else if (decode_stp_offset_wb (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
781 /* If recording this store would invalidate the store area
782 (perhaps because rn is not known) then we should abandon
783 further prologue analysis. */
784 if (pv_area_store_would_trash (stack
,
785 pv_add_constant (regs
[rn
], imm
)))
788 if (pv_area_store_would_trash (stack
,
789 pv_add_constant (regs
[rn
], imm
+ 8)))
792 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
794 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
796 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
798 else if (decode_tb (start
, insn
, &op
, &bit
, &rn
, &offset
))
800 /* Stop analysis on branch. */
806 fprintf_unfiltered (gdb_stdlog
,
807 "aarch64: prologue analysis gave up addr=0x%s"
809 core_addr_to_string_nz (start
), insn
);
816 do_cleanups (back_to
);
820 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
822 /* Frame pointer is fp. Frame size is constant. */
823 cache
->framereg
= AARCH64_FP_REGNUM
;
824 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
826 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
828 /* Try the stack pointer. */
829 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
830 cache
->framereg
= AARCH64_SP_REGNUM
;
834 /* We're just out of luck. We don't know where the frame is. */
835 cache
->framereg
= -1;
836 cache
->framesize
= 0;
839 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
843 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
844 cache
->saved_regs
[i
].addr
= offset
;
847 do_cleanups (back_to
);
851 /* Implement the "skip_prologue" gdbarch method. */
854 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
858 CORE_ADDR func_addr
, limit_pc
;
859 struct symtab_and_line sal
;
861 /* See if we can determine the end of the prologue via the symbol
862 table. If so, then return either PC, or the PC after the
863 prologue, whichever is greater. */
864 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
866 CORE_ADDR post_prologue_pc
867 = skip_prologue_using_sal (gdbarch
, func_addr
);
869 if (post_prologue_pc
!= 0)
870 return max (pc
, post_prologue_pc
);
873 /* Can't determine prologue from the symbol table, need to examine
876 /* Find an upper limit on the function prologue using the debug
877 information. If the debug information could not be used to
878 provide that bound, then use an arbitrary large number as the
880 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
882 limit_pc
= pc
+ 128; /* Magic. */
884 /* Try disassembling prologue. */
885 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
888 /* Scan the function prologue for THIS_FRAME and populate the prologue
892 aarch64_scan_prologue (struct frame_info
*this_frame
,
893 struct aarch64_prologue_cache
*cache
)
895 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
896 CORE_ADDR prologue_start
;
897 CORE_ADDR prologue_end
;
898 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
899 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
901 cache
->prev_pc
= prev_pc
;
903 /* Assume we do not find a frame. */
904 cache
->framereg
= -1;
905 cache
->framesize
= 0;
907 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
910 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
914 /* No line info so use the current PC. */
915 prologue_end
= prev_pc
;
917 else if (sal
.end
< prologue_end
)
919 /* The next line begins after the function end. */
920 prologue_end
= sal
.end
;
923 prologue_end
= min (prologue_end
, prev_pc
);
924 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
931 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
933 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
937 cache
->framereg
= AARCH64_FP_REGNUM
;
938 cache
->framesize
= 16;
939 cache
->saved_regs
[29].addr
= 0;
940 cache
->saved_regs
[30].addr
= 8;
944 /* Allocate and fill in *THIS_CACHE with information about the prologue of
945 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
946 Return a pointer to the current aarch64_prologue_cache in
949 static struct aarch64_prologue_cache
*
950 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
952 struct aarch64_prologue_cache
*cache
;
953 CORE_ADDR unwound_fp
;
956 if (*this_cache
!= NULL
)
959 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
960 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
963 aarch64_scan_prologue (this_frame
, cache
);
965 if (cache
->framereg
== -1)
968 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
972 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
974 /* Calculate actual addresses of saved registers using offsets
975 determined by aarch64_analyze_prologue. */
976 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
977 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
978 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
980 cache
->func
= get_frame_func (this_frame
);
985 /* Our frame ID for a normal frame is the current function's starting
986 PC and the caller's SP when we were called. */
989 aarch64_prologue_this_id (struct frame_info
*this_frame
,
990 void **this_cache
, struct frame_id
*this_id
)
992 struct aarch64_prologue_cache
*cache
993 = aarch64_make_prologue_cache (this_frame
, this_cache
);
995 /* This is meant to halt the backtrace at "_start". */
996 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
999 /* If we've hit a wall, stop. */
1000 if (cache
->prev_sp
== 0)
1003 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1006 /* Implement the "prev_register" frame_unwind method. */
1008 static struct value
*
1009 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1010 void **this_cache
, int prev_regnum
)
1012 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1013 struct aarch64_prologue_cache
*cache
1014 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1016 /* If we are asked to unwind the PC, then we need to return the LR
1017 instead. The prologue may save PC, but it will point into this
1018 frame's prologue, not the next frame's resume location. */
1019 if (prev_regnum
== AARCH64_PC_REGNUM
)
1023 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1024 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1027 /* SP is generally not saved to the stack, but this frame is
1028 identified by the next frame's stack pointer at the time of the
1029 call. The value was already reconstructed into PREV_SP. */
1035 | | | <- Previous SP
1038 +--| saved fp |<- FP
1042 if (prev_regnum
== AARCH64_SP_REGNUM
)
1043 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1046 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1050 /* AArch64 prologue unwinder. */
1051 struct frame_unwind aarch64_prologue_unwind
=
1054 default_frame_unwind_stop_reason
,
1055 aarch64_prologue_this_id
,
1056 aarch64_prologue_prev_register
,
1058 default_frame_sniffer
1061 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1062 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1063 Return a pointer to the current aarch64_prologue_cache in
1066 static struct aarch64_prologue_cache
*
1067 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1069 struct aarch64_prologue_cache
*cache
;
1071 if (*this_cache
!= NULL
)
1074 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1075 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1076 *this_cache
= cache
;
1079 = get_frame_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1080 cache
->prev_pc
= get_frame_pc (this_frame
);
1085 /* Our frame ID for a stub frame is the current SP and LR. */
1088 aarch64_stub_this_id (struct frame_info
*this_frame
,
1089 void **this_cache
, struct frame_id
*this_id
)
1091 struct aarch64_prologue_cache
*cache
1092 = aarch64_make_stub_cache (this_frame
, this_cache
);
1094 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1097 /* Implement the "sniffer" frame_unwind method. */
1100 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1101 struct frame_info
*this_frame
,
1102 void **this_prologue_cache
)
1104 CORE_ADDR addr_in_block
;
1107 addr_in_block
= get_frame_address_in_block (this_frame
);
1108 if (in_plt_section (addr_in_block
)
1109 /* We also use the stub winder if the target memory is unreadable
1110 to avoid having the prologue unwinder trying to read it. */
1111 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1117 /* AArch64 stub unwinder. */
1118 struct frame_unwind aarch64_stub_unwind
=
1121 default_frame_unwind_stop_reason
,
1122 aarch64_stub_this_id
,
1123 aarch64_prologue_prev_register
,
1125 aarch64_stub_unwind_sniffer
1128 /* Return the frame base address of *THIS_FRAME. */
1131 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1133 struct aarch64_prologue_cache
*cache
1134 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1136 return cache
->prev_sp
- cache
->framesize
;
1139 /* AArch64 default frame base information. */
1140 struct frame_base aarch64_normal_base
=
1142 &aarch64_prologue_unwind
,
1143 aarch64_normal_frame_base
,
1144 aarch64_normal_frame_base
,
1145 aarch64_normal_frame_base
1148 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1149 dummy frame. The frame ID's base needs to match the TOS value
1150 saved by save_dummy_frame_tos () and returned from
1151 aarch64_push_dummy_call, and the PC needs to match the dummy
1152 frame's breakpoint. */
1154 static struct frame_id
1155 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1157 return frame_id_build (get_frame_register_unsigned (this_frame
,
1159 get_frame_pc (this_frame
));
1162 /* Implement the "unwind_pc" gdbarch method. */
1165 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1168 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1173 /* Implement the "unwind_sp" gdbarch method. */
1176 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1178 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1181 /* Return the value of the REGNUM register in the previous frame of
1184 static struct value
*
1185 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1186 void **this_cache
, int regnum
)
1188 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1193 case AARCH64_PC_REGNUM
:
1194 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1195 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1198 internal_error (__FILE__
, __LINE__
,
1199 _("Unexpected register %d"), regnum
);
1203 /* Implement the "init_reg" dwarf2_frame_ops method. */
1206 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1207 struct dwarf2_frame_state_reg
*reg
,
1208 struct frame_info
*this_frame
)
1212 case AARCH64_PC_REGNUM
:
1213 reg
->how
= DWARF2_FRAME_REG_FN
;
1214 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1216 case AARCH64_SP_REGNUM
:
1217 reg
->how
= DWARF2_FRAME_REG_CFA
;
1222 /* When arguments must be pushed onto the stack, they go on in reverse
1223 order. The code below implements a FILO (stack) to do this. */
1227 /* Value to pass on stack. */
1230 /* Size in bytes of value to pass on stack. */
1234 DEF_VEC_O (stack_item_t
);
1236 /* Return the alignment (in bytes) of the given type. */
1239 aarch64_type_align (struct type
*t
)
1245 t
= check_typedef (t
);
1246 switch (TYPE_CODE (t
))
1249 /* Should never happen. */
1250 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1254 case TYPE_CODE_ENUM
:
1258 case TYPE_CODE_RANGE
:
1259 case TYPE_CODE_BITSTRING
:
1261 case TYPE_CODE_CHAR
:
1262 case TYPE_CODE_BOOL
:
1263 return TYPE_LENGTH (t
);
1265 case TYPE_CODE_ARRAY
:
1266 case TYPE_CODE_COMPLEX
:
1267 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1269 case TYPE_CODE_STRUCT
:
1270 case TYPE_CODE_UNION
:
1272 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1274 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1282 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1283 defined in the AAPCS64 ABI document; otherwise return 0. */
1286 is_hfa (struct type
*ty
)
1288 switch (TYPE_CODE (ty
))
1290 case TYPE_CODE_ARRAY
:
1292 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1293 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1298 case TYPE_CODE_UNION
:
1299 case TYPE_CODE_STRUCT
:
1301 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1303 struct type
*member0_type
;
1305 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1306 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1310 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1312 struct type
*member1_type
;
1314 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1315 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1316 || (TYPE_LENGTH (member0_type
)
1317 != TYPE_LENGTH (member1_type
)))
1333 /* AArch64 function call information structure. */
1334 struct aarch64_call_info
1336 /* the current argument number. */
1339 /* The next general purpose register number, equivalent to NGRN as
1340 described in the AArch64 Procedure Call Standard. */
1343 /* The next SIMD and floating point register number, equivalent to
1344 NSRN as described in the AArch64 Procedure Call Standard. */
1347 /* The next stacked argument address, equivalent to NSAA as
1348 described in the AArch64 Procedure Call Standard. */
1351 /* Stack item vector. */
1352 VEC(stack_item_t
) *si
;
1355 /* Pass a value in a sequence of consecutive X registers. The caller
1356 is responsbile for ensuring sufficient registers are available. */
1359 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1360 struct aarch64_call_info
*info
, struct type
*type
,
1361 const bfd_byte
*buf
)
1363 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1364 int len
= TYPE_LENGTH (type
);
1365 enum type_code typecode
= TYPE_CODE (type
);
1366 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1372 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1373 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1377 /* Adjust sub-word struct/union args when big-endian. */
1378 if (byte_order
== BFD_ENDIAN_BIG
1379 && partial_len
< X_REGISTER_SIZE
1380 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1381 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1384 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
1386 gdbarch_register_name (gdbarch
, regnum
),
1387 phex (regval
, X_REGISTER_SIZE
));
1388 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1395 /* Attempt to marshall a value in a V register. Return 1 if
1396 successful, or 0 if insufficient registers are available. This
1397 function, unlike the equivalent pass_in_x() function does not
1398 handle arguments spread across multiple registers. */
1401 pass_in_v (struct gdbarch
*gdbarch
,
1402 struct regcache
*regcache
,
1403 struct aarch64_call_info
*info
,
1404 const bfd_byte
*buf
)
1408 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1409 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1414 regcache_cooked_write (regcache
, regnum
, buf
);
1416 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s\n",
1418 gdbarch_register_name (gdbarch
, regnum
));
1425 /* Marshall an argument onto the stack. */
1428 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1429 const bfd_byte
*buf
)
1431 int len
= TYPE_LENGTH (type
);
1437 align
= aarch64_type_align (type
);
1439 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1440 Natural alignment of the argument's type. */
1441 align
= align_up (align
, 8);
1443 /* The AArch64 PCS requires at most doubleword alignment. */
1448 fprintf_unfiltered (gdb_stdlog
, "arg %d len=%d @ sp + %d\n",
1449 info
->argnum
, len
, info
->nsaa
);
1453 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1456 if (info
->nsaa
& (align
- 1))
1458 /* Push stack alignment padding. */
1459 int pad
= align
- (info
->nsaa
& (align
- 1));
1464 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1469 /* Marshall an argument into a sequence of one or more consecutive X
1470 registers or, if insufficient X registers are available then onto
1474 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1475 struct aarch64_call_info
*info
, struct type
*type
,
1476 const bfd_byte
*buf
)
1478 int len
= TYPE_LENGTH (type
);
1479 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1481 /* PCS C.13 - Pass in registers if we have enough spare */
1482 if (info
->ngrn
+ nregs
<= 8)
1484 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1485 info
->ngrn
+= nregs
;
1490 pass_on_stack (info
, type
, buf
);
1494 /* Pass a value in a V register, or on the stack if insufficient are
1498 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1499 struct regcache
*regcache
,
1500 struct aarch64_call_info
*info
,
1502 const bfd_byte
*buf
)
1504 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1505 pass_on_stack (info
, type
, buf
);
1508 /* Implement the "push_dummy_call" gdbarch method. */
1511 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1512 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1514 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1515 CORE_ADDR struct_addr
)
1521 struct aarch64_call_info info
;
1522 struct type
*func_type
;
1523 struct type
*return_type
;
1524 int lang_struct_return
;
1526 memset (&info
, 0, sizeof (info
));
1528 /* We need to know what the type of the called function is in order
1529 to determine the number of named/anonymous arguments for the
1530 actual argument placement, and the return type in order to handle
1531 return value correctly.
1533 The generic code above us views the decision of return in memory
1534 or return in registers as a two stage processes. The language
1535 handler is consulted first and may decide to return in memory (eg
1536 class with copy constructor returned by value), this will cause
1537 the generic code to allocate space AND insert an initial leading
1540 If the language code does not decide to pass in memory then the
1541 target code is consulted.
1543 If the language code decides to pass in memory we want to move
1544 the pointer inserted as the initial argument from the argument
1545 list and into X8, the conventional AArch64 struct return pointer
1548 This is slightly awkward, ideally the flag "lang_struct_return"
1549 would be passed to the targets implementation of push_dummy_call.
1550 Rather that change the target interface we call the language code
1551 directly ourselves. */
1553 func_type
= check_typedef (value_type (function
));
1555 /* Dereference function pointer types. */
1556 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1557 func_type
= TYPE_TARGET_TYPE (func_type
);
1559 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1560 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1562 /* If language_pass_by_reference () returned true we will have been
1563 given an additional initial argument, a hidden pointer to the
1564 return slot in memory. */
1565 return_type
= TYPE_TARGET_TYPE (func_type
);
1566 lang_struct_return
= language_pass_by_reference (return_type
);
1568 /* Set the return address. For the AArch64, the return breakpoint
1569 is always at BP_ADDR. */
1570 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1572 /* If we were given an initial argument for the return slot because
1573 lang_struct_return was true, lose it. */
1574 if (lang_struct_return
)
1580 /* The struct_return pointer occupies X8. */
1581 if (struct_return
|| lang_struct_return
)
1584 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = 0x%s\n",
1585 gdbarch_register_name
1587 AARCH64_STRUCT_RETURN_REGNUM
),
1588 paddress (gdbarch
, struct_addr
));
1589 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1593 for (argnum
= 0; argnum
< nargs
; argnum
++)
1595 struct value
*arg
= args
[argnum
];
1596 struct type
*arg_type
;
1599 arg_type
= check_typedef (value_type (arg
));
1600 len
= TYPE_LENGTH (arg_type
);
1602 switch (TYPE_CODE (arg_type
))
1605 case TYPE_CODE_BOOL
:
1606 case TYPE_CODE_CHAR
:
1607 case TYPE_CODE_RANGE
:
1608 case TYPE_CODE_ENUM
:
1611 /* Promote to 32 bit integer. */
1612 if (TYPE_UNSIGNED (arg_type
))
1613 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1615 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1616 arg
= value_cast (arg_type
, arg
);
1618 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1619 value_contents (arg
));
1622 case TYPE_CODE_COMPLEX
:
1625 const bfd_byte
*buf
= value_contents (arg
);
1626 struct type
*target_type
=
1627 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1629 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1630 pass_in_v (gdbarch
, regcache
, &info
,
1631 buf
+ TYPE_LENGTH (target_type
));
1636 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1640 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1641 value_contents (arg
));
1644 case TYPE_CODE_STRUCT
:
1645 case TYPE_CODE_ARRAY
:
1646 case TYPE_CODE_UNION
:
1647 if (is_hfa (arg_type
))
1649 int elements
= TYPE_NFIELDS (arg_type
);
1651 /* Homogeneous Aggregates */
1652 if (info
.nsrn
+ elements
< 8)
1656 for (i
= 0; i
< elements
; i
++)
1658 /* We know that we have sufficient registers
1659 available therefore this will never fallback
1661 struct value
*field
=
1662 value_primitive_field (arg
, 0, i
, arg_type
);
1663 struct type
*field_type
=
1664 check_typedef (value_type (field
));
1666 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1667 value_contents_writeable (field
));
1673 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1678 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1679 invisible reference. */
1681 /* Allocate aligned storage. */
1682 sp
= align_down (sp
- len
, 16);
1684 /* Write the real data into the stack. */
1685 write_memory (sp
, value_contents (arg
), len
);
1687 /* Construct the indirection. */
1688 arg_type
= lookup_pointer_type (arg_type
);
1689 arg
= value_from_pointer (arg_type
, sp
);
1690 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1691 value_contents (arg
));
1694 /* PCS C.15 / C.18 multiple values pass. */
1695 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1696 value_contents (arg
));
1700 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1701 value_contents (arg
));
1706 /* Make sure stack retains 16 byte alignment. */
1708 sp
-= 16 - (info
.nsaa
& 15);
1710 while (!VEC_empty (stack_item_t
, info
.si
))
1712 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1715 write_memory (sp
, si
->data
, si
->len
);
1716 VEC_pop (stack_item_t
, info
.si
);
1719 VEC_free (stack_item_t
, info
.si
);
1721 /* Finally, update the SP register. */
1722 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1727 /* Implement the "frame_align" gdbarch method. */
1730 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1732 /* Align the stack to sixteen bytes. */
1733 return sp
& ~(CORE_ADDR
) 15;
1736 /* Return the type for an AdvSISD Q register. */
1738 static struct type
*
1739 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1741 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1743 if (tdep
->vnq_type
== NULL
)
1748 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1751 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1752 append_composite_type_field (t
, "u", elem
);
1754 elem
= builtin_type (gdbarch
)->builtin_int128
;
1755 append_composite_type_field (t
, "s", elem
);
1760 return tdep
->vnq_type
;
1763 /* Return the type for an AdvSISD D register. */
1765 static struct type
*
1766 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1768 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1770 if (tdep
->vnd_type
== NULL
)
1775 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1778 elem
= builtin_type (gdbarch
)->builtin_double
;
1779 append_composite_type_field (t
, "f", elem
);
1781 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1782 append_composite_type_field (t
, "u", elem
);
1784 elem
= builtin_type (gdbarch
)->builtin_int64
;
1785 append_composite_type_field (t
, "s", elem
);
1790 return tdep
->vnd_type
;
1793 /* Return the type for an AdvSISD S register. */
1795 static struct type
*
1796 aarch64_vns_type (struct gdbarch
*gdbarch
)
1798 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1800 if (tdep
->vns_type
== NULL
)
1805 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1808 elem
= builtin_type (gdbarch
)->builtin_float
;
1809 append_composite_type_field (t
, "f", elem
);
1811 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1812 append_composite_type_field (t
, "u", elem
);
1814 elem
= builtin_type (gdbarch
)->builtin_int32
;
1815 append_composite_type_field (t
, "s", elem
);
1820 return tdep
->vns_type
;
1823 /* Return the type for an AdvSISD H register. */
1825 static struct type
*
1826 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1828 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1830 if (tdep
->vnh_type
== NULL
)
1835 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1838 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1839 append_composite_type_field (t
, "u", elem
);
1841 elem
= builtin_type (gdbarch
)->builtin_int16
;
1842 append_composite_type_field (t
, "s", elem
);
1847 return tdep
->vnh_type
;
1850 /* Return the type for an AdvSISD B register. */
1852 static struct type
*
1853 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1855 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1857 if (tdep
->vnb_type
== NULL
)
1862 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1865 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1866 append_composite_type_field (t
, "u", elem
);
1868 elem
= builtin_type (gdbarch
)->builtin_int8
;
1869 append_composite_type_field (t
, "s", elem
);
1874 return tdep
->vnb_type
;
1877 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1880 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1882 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1883 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1885 if (reg
== AARCH64_DWARF_SP
)
1886 return AARCH64_SP_REGNUM
;
1888 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1889 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1895 /* Implement the "print_insn" gdbarch method. */
1898 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1900 info
->symbols
= NULL
;
1901 return print_insn_aarch64 (memaddr
, info
);
1904 /* AArch64 BRK software debug mode instruction.
1905 Note that AArch64 code is always little-endian.
1906 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1907 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1909 /* Implement the "breakpoint_from_pc" gdbarch method. */
1911 static const gdb_byte
*
1912 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1915 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1917 *lenptr
= sizeof (aarch64_default_breakpoint
);
1918 return aarch64_default_breakpoint
;
1921 /* Extract from an array REGS containing the (raw) register state a
1922 function return value of type TYPE, and copy that, in virtual
1923 format, into VALBUF. */
1926 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1929 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1930 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1932 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1934 bfd_byte buf
[V_REGISTER_SIZE
];
1935 int len
= TYPE_LENGTH (type
);
1937 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1938 memcpy (valbuf
, buf
, len
);
1940 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1941 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1942 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1943 || TYPE_CODE (type
) == TYPE_CODE_PTR
1944 || TYPE_CODE (type
) == TYPE_CODE_REF
1945 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1947 /* If the the type is a plain integer, then the access is
1948 straight-forward. Otherwise we have to play around a bit
1950 int len
= TYPE_LENGTH (type
);
1951 int regno
= AARCH64_X0_REGNUM
;
1956 /* By using store_unsigned_integer we avoid having to do
1957 anything special for small big-endian values. */
1958 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1959 store_unsigned_integer (valbuf
,
1960 (len
> X_REGISTER_SIZE
1961 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1962 len
-= X_REGISTER_SIZE
;
1963 valbuf
+= X_REGISTER_SIZE
;
1966 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
1968 int regno
= AARCH64_V0_REGNUM
;
1969 bfd_byte buf
[V_REGISTER_SIZE
];
1970 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1971 int len
= TYPE_LENGTH (target_type
);
1973 regcache_cooked_read (regs
, regno
, buf
);
1974 memcpy (valbuf
, buf
, len
);
1976 regcache_cooked_read (regs
, regno
+ 1, buf
);
1977 memcpy (valbuf
, buf
, len
);
1980 else if (is_hfa (type
))
1982 int elements
= TYPE_NFIELDS (type
);
1983 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1984 int len
= TYPE_LENGTH (member_type
);
1987 for (i
= 0; i
< elements
; i
++)
1989 int regno
= AARCH64_V0_REGNUM
+ i
;
1990 bfd_byte buf
[X_REGISTER_SIZE
];
1993 fprintf_unfiltered (gdb_stdlog
,
1994 "read HFA return value element %d from %s\n",
1996 gdbarch_register_name (gdbarch
, regno
));
1997 regcache_cooked_read (regs
, regno
, buf
);
1999 memcpy (valbuf
, buf
, len
);
2005 /* For a structure or union the behaviour is as if the value had
2006 been stored to word-aligned memory and then loaded into
2007 registers with 64-bit load instruction(s). */
2008 int len
= TYPE_LENGTH (type
);
2009 int regno
= AARCH64_X0_REGNUM
;
2010 bfd_byte buf
[X_REGISTER_SIZE
];
2014 regcache_cooked_read (regs
, regno
++, buf
);
2015 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2016 len
-= X_REGISTER_SIZE
;
2017 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
         used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
2057 /* Write into appropriate registers a function return value of type
2058 TYPE, given in virtual format. */
2061 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2062 const gdb_byte
*valbuf
)
2064 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2065 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2067 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2069 bfd_byte buf
[V_REGISTER_SIZE
];
2070 int len
= TYPE_LENGTH (type
);
2072 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2073 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2075 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2076 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2077 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2078 || TYPE_CODE (type
) == TYPE_CODE_PTR
2079 || TYPE_CODE (type
) == TYPE_CODE_REF
2080 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2082 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2084 /* Values of one word or less are zero/sign-extended and
2086 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2087 LONGEST val
= unpack_long (type
, valbuf
);
2089 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2090 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
2094 /* Integral values greater than one word are stored in
2095 consecutive registers starting with r0. This will always
2096 be a multiple of the regiser size. */
2097 int len
= TYPE_LENGTH (type
);
2098 int regno
= AARCH64_X0_REGNUM
;
2102 regcache_cooked_write (regs
, regno
++, valbuf
);
2103 len
-= X_REGISTER_SIZE
;
2104 valbuf
+= X_REGISTER_SIZE
;
2108 else if (is_hfa (type
))
2110 int elements
= TYPE_NFIELDS (type
);
2111 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2112 int len
= TYPE_LENGTH (member_type
);
2115 for (i
= 0; i
< elements
; i
++)
2117 int regno
= AARCH64_V0_REGNUM
+ i
;
2118 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2121 fprintf_unfiltered (gdb_stdlog
,
2122 "write HFA return value element %d to %s\n",
2124 gdbarch_register_name (gdbarch
, regno
));
2126 memcpy (tmpbuf
, valbuf
, len
);
2127 regcache_cooked_write (regs
, regno
, tmpbuf
);
2133 /* For a structure or union the behaviour is as if the value had
2134 been stored to word-aligned memory and then loaded into
2135 registers with 64-bit load instruction(s). */
2136 int len
= TYPE_LENGTH (type
);
2137 int regno
= AARCH64_X0_REGNUM
;
2138 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2142 memcpy (tmpbuf
, valbuf
,
2143 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2144 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2145 len
-= X_REGISTER_SIZE
;
2146 valbuf
+= X_REGISTER_SIZE
;
2151 /* Implement the "return_value" gdbarch method. */
2153 static enum return_value_convention
2154 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2155 struct type
*valtype
, struct regcache
*regcache
,
2156 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2158 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2160 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2161 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2162 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2164 if (aarch64_return_in_memory (gdbarch
, valtype
))
2167 fprintf_unfiltered (gdb_stdlog
, "return value in memory\n");
2168 return RETURN_VALUE_STRUCT_CONVENTION
;
2173 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2176 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2179 fprintf_unfiltered (gdb_stdlog
, "return value in registers\n");
2181 return RETURN_VALUE_REGISTER_CONVENTION
;
2184 /* Implement the "get_longjmp_target" gdbarch method. */
2187 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2190 gdb_byte buf
[X_REGISTER_SIZE
];
2191 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2192 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2193 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2195 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2197 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2201 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2206 /* Return the pseudo register name corresponding to register regnum. */
2209 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2211 static const char *const q_name
[] =
2213 "q0", "q1", "q2", "q3",
2214 "q4", "q5", "q6", "q7",
2215 "q8", "q9", "q10", "q11",
2216 "q12", "q13", "q14", "q15",
2217 "q16", "q17", "q18", "q19",
2218 "q20", "q21", "q22", "q23",
2219 "q24", "q25", "q26", "q27",
2220 "q28", "q29", "q30", "q31",
2223 static const char *const d_name
[] =
2225 "d0", "d1", "d2", "d3",
2226 "d4", "d5", "d6", "d7",
2227 "d8", "d9", "d10", "d11",
2228 "d12", "d13", "d14", "d15",
2229 "d16", "d17", "d18", "d19",
2230 "d20", "d21", "d22", "d23",
2231 "d24", "d25", "d26", "d27",
2232 "d28", "d29", "d30", "d31",
2235 static const char *const s_name
[] =
2237 "s0", "s1", "s2", "s3",
2238 "s4", "s5", "s6", "s7",
2239 "s8", "s9", "s10", "s11",
2240 "s12", "s13", "s14", "s15",
2241 "s16", "s17", "s18", "s19",
2242 "s20", "s21", "s22", "s23",
2243 "s24", "s25", "s26", "s27",
2244 "s28", "s29", "s30", "s31",
2247 static const char *const h_name
[] =
2249 "h0", "h1", "h2", "h3",
2250 "h4", "h5", "h6", "h7",
2251 "h8", "h9", "h10", "h11",
2252 "h12", "h13", "h14", "h15",
2253 "h16", "h17", "h18", "h19",
2254 "h20", "h21", "h22", "h23",
2255 "h24", "h25", "h26", "h27",
2256 "h28", "h29", "h30", "h31",
2259 static const char *const b_name
[] =
2261 "b0", "b1", "b2", "b3",
2262 "b4", "b5", "b6", "b7",
2263 "b8", "b9", "b10", "b11",
2264 "b12", "b13", "b14", "b15",
2265 "b16", "b17", "b18", "b19",
2266 "b20", "b21", "b22", "b23",
2267 "b24", "b25", "b26", "b27",
2268 "b28", "b29", "b30", "b31",
2271 regnum
-= gdbarch_num_regs (gdbarch
);
2273 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2274 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2276 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2277 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2279 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2280 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2282 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2283 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2285 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2286 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2288 internal_error (__FILE__
, __LINE__
,
2289 _("aarch64_pseudo_register_name: bad register number %d"),
2293 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2295 static struct type
*
2296 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2298 regnum
-= gdbarch_num_regs (gdbarch
);
2300 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2301 return aarch64_vnq_type (gdbarch
);
2303 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2304 return aarch64_vnd_type (gdbarch
);
2306 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2307 return aarch64_vns_type (gdbarch
);
2309 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2310 return aarch64_vnh_type (gdbarch
);
2312 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2313 return aarch64_vnb_type (gdbarch
);
2315 internal_error (__FILE__
, __LINE__
,
2316 _("aarch64_pseudo_register_type: bad register number %d"),
2320 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2323 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2324 struct reggroup
*group
)
2326 regnum
-= gdbarch_num_regs (gdbarch
);
2328 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2329 return group
== all_reggroup
|| group
== vector_reggroup
;
2330 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2331 return (group
== all_reggroup
|| group
== vector_reggroup
2332 || group
== float_reggroup
);
2333 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2334 return (group
== all_reggroup
|| group
== vector_reggroup
2335 || group
== float_reggroup
);
2336 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2337 return group
== all_reggroup
|| group
== vector_reggroup
;
2338 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2339 return group
== all_reggroup
|| group
== vector_reggroup
;
2341 return group
== all_reggroup
;
2344 /* Implement the "pseudo_register_read_value" gdbarch method. */
2346 static struct value
*
2347 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2348 struct regcache
*regcache
,
2351 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2352 struct value
*result_value
;
2355 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2356 VALUE_LVAL (result_value
) = lval_register
;
2357 VALUE_REGNUM (result_value
) = regnum
;
2358 buf
= value_contents_raw (result_value
);
2360 regnum
-= gdbarch_num_regs (gdbarch
);
2362 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2364 enum register_status status
;
2367 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2368 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2369 if (status
!= REG_VALID
)
2370 mark_value_bytes_unavailable (result_value
, 0,
2371 TYPE_LENGTH (value_type (result_value
)));
2373 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2374 return result_value
;
2377 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2379 enum register_status status
;
2382 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2383 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2384 if (status
!= REG_VALID
)
2385 mark_value_bytes_unavailable (result_value
, 0,
2386 TYPE_LENGTH (value_type (result_value
)));
2388 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2389 return result_value
;
2392 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2394 enum register_status status
;
2397 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2398 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2399 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2400 return result_value
;
2403 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2405 enum register_status status
;
2408 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2409 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2410 if (status
!= REG_VALID
)
2411 mark_value_bytes_unavailable (result_value
, 0,
2412 TYPE_LENGTH (value_type (result_value
)));
2414 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2415 return result_value
;
2418 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2420 enum register_status status
;
2423 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2424 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2425 if (status
!= REG_VALID
)
2426 mark_value_bytes_unavailable (result_value
, 0,
2427 TYPE_LENGTH (value_type (result_value
)));
2429 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2430 return result_value
;
2433 gdb_assert_not_reached ("regnum out of bound");
2436 /* Implement the "pseudo_register_write" gdbarch method. */
2439 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2440 int regnum
, const gdb_byte
*buf
)
2442 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2444 /* Ensure the register buffer is zero, we want gdb writes of the
2445 various 'scalar' pseudo registers to behavior like architectural
2446 writes, register width bytes are written the remainder are set to
2448 memset (reg_buf
, 0, sizeof (reg_buf
));
2450 regnum
-= gdbarch_num_regs (gdbarch
);
2452 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2454 /* pseudo Q registers */
2457 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2458 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2459 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2463 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2465 /* pseudo D registers */
2468 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2469 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2470 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2474 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2478 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2479 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2480 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2484 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2486 /* pseudo H registers */
2489 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2490 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2491 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2495 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2497 /* pseudo B registers */
2500 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2501 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2502 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2506 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.  BATON points at the GDB
   register number the user-register alias resolves to.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
2520 /* Implement the "software_single_step" gdbarch method, needed to
2521 single step through atomic sequences on AArch64. */
2524 aarch64_software_single_step (struct frame_info
*frame
)
2526 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2527 struct address_space
*aspace
= get_frame_address_space (frame
);
2528 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2529 const int insn_size
= 4;
2530 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2531 CORE_ADDR pc
= get_frame_pc (frame
);
2532 CORE_ADDR breaks
[2] = { -1, -1 };
2534 CORE_ADDR closing_insn
= 0;
2535 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2536 byte_order_for_code
);
2539 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2540 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2542 /* Look for a Load Exclusive instruction which begins the sequence. */
2543 if (!decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2546 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2552 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2553 byte_order_for_code
);
2555 /* Check if the instruction is a conditional branch. */
2556 if (decode_bcond (loc
, insn
, &cond
, &offset
))
2558 if (bc_insn_count
>= 1)
2561 /* It is, so we'll try to set a breakpoint at the destination. */
2562 breaks
[1] = loc
+ offset
;
2568 /* Look for the Store Exclusive which closes the atomic sequence. */
2569 if (decode_masked_match (insn
, 0x3fc00000, 0x08000000))
2576 /* We didn't find a closing Store Exclusive instruction, fall back. */
2580 /* Insert breakpoint after the end of the atomic sequence. */
2581 breaks
[0] = loc
+ insn_size
;
2583 /* Check for duplicated breakpoints, and also check that the second
2584 breakpoint is not within the atomic sequence. */
2586 && (breaks
[1] == breaks
[0]
2587 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2588 last_breakpoint
= 0;
2590 /* Insert the breakpoint at the end of the sequence, and one at the
2591 destination of the conditional branch, if it exists. */
2592 for (index
= 0; index
<= last_breakpoint
; index
++)
2593 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2598 /* Initialize the current architecture based on INFO. If possible,
2599 re-use an architecture from ARCHES, which is a list of
2600 architectures already created during this debugging session.
2602 Called e.g. at program startup, when reading a core file, and when
2603 reading a binary file. */
2605 static struct gdbarch
*
2606 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2608 struct gdbarch_tdep
*tdep
;
2609 struct gdbarch
*gdbarch
;
2610 struct gdbarch_list
*best_arch
;
2611 struct tdesc_arch_data
*tdesc_data
= NULL
;
2612 const struct target_desc
*tdesc
= info
.target_desc
;
2614 int have_fpa_registers
= 1;
2616 const struct tdesc_feature
*feature
;
2618 int num_pseudo_regs
= 0;
2620 /* Ensure we always have a target descriptor. */
2621 if (!tdesc_has_registers (tdesc
))
2622 tdesc
= tdesc_aarch64
;
2626 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2628 if (feature
== NULL
)
2631 tdesc_data
= tdesc_data_alloc ();
2633 /* Validate the descriptor provides the mandatory core R registers
2634 and allocate their numbers. */
2635 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2637 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2638 aarch64_r_register_names
[i
]);
2640 num_regs
= AARCH64_X0_REGNUM
+ i
;
2642 /* Look for the V registers. */
2643 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2646 /* Validate the descriptor provides the mandatory V registers
2647 and allocate their numbers. */
2648 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2650 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2651 aarch64_v_register_names
[i
]);
2653 num_regs
= AARCH64_V0_REGNUM
+ i
;
2655 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2656 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2657 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2658 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2659 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2664 tdesc_data_cleanup (tdesc_data
);
2668 /* AArch64 code is always little-endian. */
2669 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2671 /* If there is already a candidate, use it. */
2672 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2674 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2676 /* Found a match. */
2680 if (best_arch
!= NULL
)
2682 if (tdesc_data
!= NULL
)
2683 tdesc_data_cleanup (tdesc_data
);
2684 return best_arch
->gdbarch
;
2687 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
2688 gdbarch
= gdbarch_alloc (&info
, tdep
);
2690 /* This should be low enough for everything. */
2691 tdep
->lowest_pc
= 0x20;
2692 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2693 tdep
->jb_elt_size
= 8;
2695 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2696 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2698 /* Frame handling. */
2699 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2700 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2701 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2703 /* Advance PC across function entry code. */
2704 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2706 /* The stack grows downward. */
2707 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2709 /* Breakpoint manipulation. */
2710 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2711 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2712 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2714 /* Information about registers, etc. */
2715 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2716 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2717 set_gdbarch_num_regs (gdbarch
, num_regs
);
2719 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2720 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2721 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2722 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2723 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2724 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2725 aarch64_pseudo_register_reggroup_p
);
2728 set_gdbarch_short_bit (gdbarch
, 16);
2729 set_gdbarch_int_bit (gdbarch
, 32);
2730 set_gdbarch_float_bit (gdbarch
, 32);
2731 set_gdbarch_double_bit (gdbarch
, 64);
2732 set_gdbarch_long_double_bit (gdbarch
, 128);
2733 set_gdbarch_long_bit (gdbarch
, 64);
2734 set_gdbarch_long_long_bit (gdbarch
, 64);
2735 set_gdbarch_ptr_bit (gdbarch
, 64);
2736 set_gdbarch_char_signed (gdbarch
, 0);
2737 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2738 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2739 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2741 /* Internal <-> external register number maps. */
2742 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2744 /* Returning results. */
2745 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2748 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2750 /* Virtual tables. */
2751 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2753 /* Hook in the ABI-specific overrides, if they have been registered. */
2754 info
.target_desc
= tdesc
;
2755 info
.tdep_info
= (void *) tdesc_data
;
2756 gdbarch_init_osabi (info
, gdbarch
);
2758 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2760 /* Add some default predicates. */
2761 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2762 dwarf2_append_unwinders (gdbarch
);
2763 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2765 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2767 /* Now we have tuned the configuration, set a few final things,
2768 based on what the OS ABI has told us. */
2770 if (tdep
->jb_pc
>= 0)
2771 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2773 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2775 /* Add standard register aliases. */
2776 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2777 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2778 value_of_aarch64_user_reg
,
2779 &aarch64_register_aliases
[i
].regnum
);
2785 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2787 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2792 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2793 paddress (gdbarch
, tdep
->lowest_pc
));
2796 /* Suppress warning from -Wmissing-prototypes. */
2797 extern initialize_file_ftype _initialize_aarch64_tdep
;
2800 _initialize_aarch64_tdep (void)
2802 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
2805 initialize_tdesc_aarch64 ();
2807 /* Debug this file's internals. */
2808 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2809 Set AArch64 debugging."), _("\
2810 Show AArch64 debugging."), _("\
2811 When on, AArch64 specific debugging is enabled."),
2814 &setdebuglist
, &showdebuglist
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Bit-twiddling helpers for instruction decoding: submask(x) is a
   mask of bits [0, x]; bit/bits extract a single bit or a bit-field
   [st, fn] from OBJ.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate and fill the register-record array REGS from RECORD_BUF;
   no-op when LENGTH is zero.  Caller owns the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate and fill the memory-record array MEMS from RECORD_BUF;
   no-op when LENGTH is zero.  Caller owns the allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
2848 /* AArch64 record/replay structures and enumerations. */
2850 struct aarch64_mem_r
2852 uint64_t len
; /* Record length. */
2853 uint64_t addr
; /* Memory address. */
2856 enum aarch64_record_result
2858 AARCH64_RECORD_SUCCESS
,
2859 AARCH64_RECORD_FAILURE
,
2860 AARCH64_RECORD_UNSUPPORTED
,
2861 AARCH64_RECORD_UNKNOWN
2864 typedef struct insn_decode_record_t
2866 struct gdbarch
*gdbarch
;
2867 struct regcache
*regcache
;
2868 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2869 uint32_t aarch64_insn
; /* Insn to be recorded. */
2870 uint32_t mem_rec_count
; /* Count of memory records. */
2871 uint32_t reg_rec_count
; /* Count of register records. */
2872 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2873 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2874 } insn_decode_record
;
2876 /* Record handler for data processing - register instructions. */
2879 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2881 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2882 uint32_t record_buf
[4];
2884 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2885 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2886 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
2888 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
2892 /* Logical (shifted register). */
2893 if (insn_bits24_27
== 0x0a)
2894 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
2896 else if (insn_bits24_27
== 0x0b)
2897 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2899 return AARCH64_RECORD_UNKNOWN
;
2901 record_buf
[0] = reg_rd
;
2902 aarch64_insn_r
->reg_rec_count
= 1;
2904 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2908 if (insn_bits24_27
== 0x0b)
2910 /* Data-processing (3 source). */
2911 record_buf
[0] = reg_rd
;
2912 aarch64_insn_r
->reg_rec_count
= 1;
2914 else if (insn_bits24_27
== 0x0a)
2916 if (insn_bits21_23
== 0x00)
2918 /* Add/subtract (with carry). */
2919 record_buf
[0] = reg_rd
;
2920 aarch64_insn_r
->reg_rec_count
= 1;
2921 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
2923 record_buf
[1] = AARCH64_CPSR_REGNUM
;
2924 aarch64_insn_r
->reg_rec_count
= 2;
2927 else if (insn_bits21_23
== 0x02)
2929 /* Conditional compare (register) and conditional compare
2930 (immediate) instructions. */
2931 record_buf
[0] = AARCH64_CPSR_REGNUM
;
2932 aarch64_insn_r
->reg_rec_count
= 1;
2934 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
2936 /* CConditional select. */
2937 /* Data-processing (2 source). */
2938 /* Data-processing (1 source). */
2939 record_buf
[0] = reg_rd
;
2940 aarch64_insn_r
->reg_rec_count
= 1;
2943 return AARCH64_RECORD_UNKNOWN
;
2947 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
2949 return AARCH64_RECORD_SUCCESS
;
2952 /* Record handler for data processing - immediate instructions. */
2955 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
2957 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
2958 uint32_t record_buf
[4];
2960 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2961 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
2962 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
2963 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2965 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
2966 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
2967 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
2969 record_buf
[0] = reg_rd
;
2970 aarch64_insn_r
->reg_rec_count
= 1;
2972 else if (insn_bits24_27
== 0x01)
2974 /* Add/Subtract (immediate). */
2975 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2976 record_buf
[0] = reg_rd
;
2977 aarch64_insn_r
->reg_rec_count
= 1;
2979 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2981 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
2983 /* Logical (immediate). */
2984 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
2985 record_buf
[0] = reg_rd
;
2986 aarch64_insn_r
->reg_rec_count
= 1;
2988 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2991 return AARCH64_RECORD_UNKNOWN
;
2993 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
2995 return AARCH64_RECORD_SUCCESS
;
2998 /* Record handler for branch, exception generation and system instructions. */
3001 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3003 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3004 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3005 uint32_t record_buf
[4];
3007 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3008 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3009 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3011 if (insn_bits28_31
== 0x0d)
3013 /* Exception generation instructions. */
3014 if (insn_bits24_27
== 0x04)
3016 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3017 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3018 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3020 ULONGEST svc_number
;
3022 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3024 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3028 return AARCH64_RECORD_UNSUPPORTED
;
3030 /* System instructions. */
3031 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3033 uint32_t reg_rt
, reg_crn
;
3035 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3036 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3038 /* Record rt in case of sysl and mrs instructions. */
3039 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3041 record_buf
[0] = reg_rt
;
3042 aarch64_insn_r
->reg_rec_count
= 1;
3044 /* Record cpsr for hint and msr(immediate) instructions. */
3045 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3047 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3048 aarch64_insn_r
->reg_rec_count
= 1;
3051 /* Unconditional branch (register). */
3052 else if((insn_bits24_27
& 0x0e) == 0x06)
3054 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3055 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3056 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3059 return AARCH64_RECORD_UNKNOWN
;
3061 /* Unconditional branch (immediate). */
3062 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3064 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3065 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3066 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3069 /* Compare & branch (immediate), Test & branch (immediate) and
3070 Conditional branch (immediate). */
3071 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3073 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3075 return AARCH64_RECORD_SUCCESS
;
3078 /* Record handler for advanced SIMD load and store instructions. */
3081 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3084 uint64_t addr_offset
= 0;
3085 uint32_t record_buf
[24];
3086 uint64_t record_buf_mem
[24];
3087 uint32_t reg_rn
, reg_rt
;
3088 uint32_t reg_index
= 0, mem_index
= 0;
3089 uint8_t opcode_bits
, size_bits
;
3091 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3092 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3093 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3094 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3095 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3099 fprintf_unfiltered (gdb_stdlog
,
3100 "Process record: Advanced SIMD load/store\n");
3103 /* Load/store single structure. */
3104 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3106 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3107 scale
= opcode_bits
>> 2;
3108 selem
= ((opcode_bits
& 0x02) |
3109 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3113 if (size_bits
& 0x01)
3114 return AARCH64_RECORD_UNKNOWN
;
3117 if ((size_bits
>> 1) & 0x01)
3118 return AARCH64_RECORD_UNKNOWN
;
3119 if (size_bits
& 0x01)
3121 if (!((opcode_bits
>> 1) & 0x01))
3124 return AARCH64_RECORD_UNKNOWN
;
3128 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3135 return AARCH64_RECORD_UNKNOWN
;
3141 for (sindex
= 0; sindex
< selem
; sindex
++)
3143 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3144 reg_rt
= (reg_rt
+ 1) % 32;
3148 for (sindex
= 0; sindex
< selem
; sindex
++)
3149 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3150 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3153 record_buf_mem
[mem_index
++] = esize
/ 8;
3154 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3156 addr_offset
= addr_offset
+ (esize
/ 8);
3157 reg_rt
= (reg_rt
+ 1) % 32;
3160 /* Load/store multiple structure. */
3163 uint8_t selem
, esize
, rpt
, elements
;
3164 uint8_t eindex
, rindex
;
3166 esize
= 8 << size_bits
;
3167 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3168 elements
= 128 / esize
;
3170 elements
= 64 / esize
;
3172 switch (opcode_bits
)
3174 /*LD/ST4 (4 Registers). */
3179 /*LD/ST1 (4 Registers). */
3184 /*LD/ST3 (3 Registers). */
3189 /*LD/ST1 (3 Registers). */
3194 /*LD/ST1 (1 Register). */
3199 /*LD/ST2 (2 Registers). */
3204 /*LD/ST1 (2 Registers). */
3210 return AARCH64_RECORD_UNSUPPORTED
;
3213 for (rindex
= 0; rindex
< rpt
; rindex
++)
3214 for (eindex
= 0; eindex
< elements
; eindex
++)
3216 uint8_t reg_tt
, sindex
;
3217 reg_tt
= (reg_rt
+ rindex
) % 32;
3218 for (sindex
= 0; sindex
< selem
; sindex
++)
3220 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3221 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3224 record_buf_mem
[mem_index
++] = esize
/ 8;
3225 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3227 addr_offset
= addr_offset
+ (esize
/ 8);
3228 reg_tt
= (reg_tt
+ 1) % 32;
3233 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3234 record_buf
[reg_index
++] = reg_rn
;
3236 aarch64_insn_r
->reg_rec_count
= reg_index
;
3237 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3238 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3240 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3242 return AARCH64_RECORD_SUCCESS
;
3245 /* Record handler for load and store instructions. */
3248 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3250 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3251 uint8_t insn_bit23
, insn_bit21
;
3252 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3253 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3254 uint64_t datasize
, offset
;
3255 uint32_t record_buf
[8];
3256 uint64_t record_buf_mem
[8];
3259 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3260 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3261 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3262 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3263 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3264 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3265 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3266 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3267 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3268 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3269 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3271 /* Load/store exclusive. */
3272 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3276 fprintf_unfiltered (gdb_stdlog
,
3277 "Process record: load/store exclusive\n");
3282 record_buf
[0] = reg_rt
;
3283 aarch64_insn_r
->reg_rec_count
= 1;
3286 record_buf
[1] = reg_rt2
;
3287 aarch64_insn_r
->reg_rec_count
= 2;
3293 datasize
= (8 << size_bits
) * 2;
3295 datasize
= (8 << size_bits
);
3296 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3298 record_buf_mem
[0] = datasize
/ 8;
3299 record_buf_mem
[1] = address
;
3300 aarch64_insn_r
->mem_rec_count
= 1;
3303 /* Save register rs. */
3304 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3305 aarch64_insn_r
->reg_rec_count
= 1;
3309 /* Load register (literal) instructions decoding. */
3310 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3314 fprintf_unfiltered (gdb_stdlog
,
3315 "Process record: load register (literal)\n");
3318 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3320 record_buf
[0] = reg_rt
;
3321 aarch64_insn_r
->reg_rec_count
= 1;
3323 /* All types of load/store pair instructions decoding. */
3324 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3328 fprintf_unfiltered (gdb_stdlog
,
3329 "Process record: load/store pair\n");
3336 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3337 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3341 record_buf
[0] = reg_rt
;
3342 record_buf
[1] = reg_rt2
;
3344 aarch64_insn_r
->reg_rec_count
= 2;
3349 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3351 size_bits
= size_bits
>> 1;
3352 datasize
= 8 << (2 + size_bits
);
3353 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3354 offset
= offset
<< (2 + size_bits
);
3355 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3357 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3359 if (imm7_off
& 0x40)
3360 address
= address
- offset
;
3362 address
= address
+ offset
;
3365 record_buf_mem
[0] = datasize
/ 8;
3366 record_buf_mem
[1] = address
;
3367 record_buf_mem
[2] = datasize
/ 8;
3368 record_buf_mem
[3] = address
+ (datasize
/ 8);
3369 aarch64_insn_r
->mem_rec_count
= 2;
3371 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3372 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3374 /* Load/store register (unsigned immediate) instructions. */
3375 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3377 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3384 if (size_bits
!= 0x03)
3387 return AARCH64_RECORD_UNKNOWN
;
3391 fprintf_unfiltered (gdb_stdlog
,
3392 "Process record: load/store (unsigned immediate):"
3393 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3399 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3400 datasize
= 8 << size_bits
;
3401 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3403 offset
= offset
<< size_bits
;
3404 address
= address
+ offset
;
3406 record_buf_mem
[0] = datasize
>> 3;
3407 record_buf_mem
[1] = address
;
3408 aarch64_insn_r
->mem_rec_count
= 1;
3413 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3415 record_buf
[0] = reg_rt
;
3416 aarch64_insn_r
->reg_rec_count
= 1;
3419 /* Load/store register (register offset) instructions. */
3420 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3421 && insn_bits10_11
== 0x02 && insn_bit21
)
3425 fprintf_unfiltered (gdb_stdlog
,
3426 "Process record: load/store (register offset)\n");
3428 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3435 if (size_bits
!= 0x03)
3438 return AARCH64_RECORD_UNKNOWN
;
3442 uint64_t reg_rm_val
;
3443 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3444 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3445 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3446 offset
= reg_rm_val
<< size_bits
;
3448 offset
= reg_rm_val
;
3449 datasize
= 8 << size_bits
;
3450 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3452 address
= address
+ offset
;
3453 record_buf_mem
[0] = datasize
>> 3;
3454 record_buf_mem
[1] = address
;
3455 aarch64_insn_r
->mem_rec_count
= 1;
3460 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3462 record_buf
[0] = reg_rt
;
3463 aarch64_insn_r
->reg_rec_count
= 1;
3466 /* Load/store register (immediate and unprivileged) instructions. */
3467 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3472 fprintf_unfiltered (gdb_stdlog
,
3473 "Process record: load/store (immediate and unprivileged)\n");
3475 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3482 if (size_bits
!= 0x03)
3485 return AARCH64_RECORD_UNKNOWN
;
3490 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3491 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3492 datasize
= 8 << size_bits
;
3493 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3495 if (insn_bits10_11
!= 0x01)
3497 if (imm9_off
& 0x0100)
3498 address
= address
- offset
;
3500 address
= address
+ offset
;
3502 record_buf_mem
[0] = datasize
>> 3;
3503 record_buf_mem
[1] = address
;
3504 aarch64_insn_r
->mem_rec_count
= 1;
3509 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3511 record_buf
[0] = reg_rt
;
3512 aarch64_insn_r
->reg_rec_count
= 1;
3514 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3515 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3517 /* Advanced SIMD load/store instructions. */
3519 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3521 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3523 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3525 return AARCH64_RECORD_SUCCESS
;
3528 /* Record handler for data processing SIMD and floating point instructions. */
3531 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3533 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3534 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3535 uint8_t insn_bits11_14
;
3536 uint32_t record_buf
[2];
3538 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3539 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3540 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3541 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3542 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3543 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3544 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3545 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3546 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3550 fprintf_unfiltered (gdb_stdlog
,
3551 "Process record: data processing SIMD/FP: ");
3554 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3556 /* Floating point - fixed point conversion instructions. */
3560 fprintf_unfiltered (gdb_stdlog
, "FP - fixed point conversion");
3562 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3563 record_buf
[0] = reg_rd
;
3565 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3567 /* Floating point - conditional compare instructions. */
3568 else if (insn_bits10_11
== 0x01)
3571 fprintf_unfiltered (gdb_stdlog
, "FP - conditional compare");
3573 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3575 /* Floating point - data processing (2-source) and
3576 conditional select instructions. */
3577 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3580 fprintf_unfiltered (gdb_stdlog
, "FP - DP (2-source)");
3582 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3584 else if (insn_bits10_11
== 0x00)
3586 /* Floating point - immediate instructions. */
3587 if ((insn_bits12_15
& 0x01) == 0x01
3588 || (insn_bits12_15
& 0x07) == 0x04)
3591 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3592 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3594 /* Floating point - compare instructions. */
3595 else if ((insn_bits12_15
& 0x03) == 0x02)
3598 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3599 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3601 /* Floating point - integer conversions instructions. */
3602 else if (insn_bits12_15
== 0x00)
3604 /* Convert float to integer instruction. */
3605 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3608 fprintf_unfiltered (gdb_stdlog
, "float to int conversion");
3610 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3612 /* Convert integer to float instruction. */
3613 else if ((opcode
>> 1) == 0x01 && !rmode
)
3616 fprintf_unfiltered (gdb_stdlog
, "int to float conversion");
3618 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3620 /* Move float to integer instruction. */
3621 else if ((opcode
>> 1) == 0x03)
3624 fprintf_unfiltered (gdb_stdlog
, "move float to int");
3626 if (!(opcode
& 0x01))
3627 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3629 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3632 return AARCH64_RECORD_UNKNOWN
;
3635 return AARCH64_RECORD_UNKNOWN
;
3638 return AARCH64_RECORD_UNKNOWN
;
3640 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3643 fprintf_unfiltered (gdb_stdlog
, "SIMD copy");
3645 /* Advanced SIMD copy instructions. */
3646 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3647 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3648 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3650 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3651 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3653 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3656 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3658 /* All remaining floating point or advanced SIMD instructions. */
3662 fprintf_unfiltered (gdb_stdlog
, "all remain");
3664 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3668 fprintf_unfiltered (gdb_stdlog
, "\n");
3670 aarch64_insn_r
->reg_rec_count
++;
3671 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3672 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3674 return AARCH64_RECORD_SUCCESS
;
3677 /* Decodes insns type and invokes its record handler. */
3680 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3682 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3684 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3685 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3686 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3687 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3689 /* Data processing - immediate instructions. */
3690 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3691 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3693 /* Branch, exception generation and system instructions. */
3694 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3695 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3697 /* Load and store instructions. */
3698 if (!ins_bit25
&& ins_bit27
)
3699 return aarch64_record_load_store (aarch64_insn_r
);
3701 /* Data processing - register instructions. */
3702 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3703 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3705 /* Data processing - SIMD and floating point instructions. */
3706 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3707 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3709 return AARCH64_RECORD_UNSUPPORTED
;
3712 /* Cleans up local record registers and memory allocations. */
3715 deallocate_reg_mem (insn_decode_record
*record
)
3717 xfree (record
->aarch64_regs
);
3718 xfree (record
->aarch64_mems
);
3721 /* Parse the current instruction and record the values of the registers and
3722 memory that will be changed in current instruction to record_arch_list
3723 return -1 if something is wrong. */
3726 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3727 CORE_ADDR insn_addr
)
3729 uint32_t rec_no
= 0;
3730 uint8_t insn_size
= 4;
3732 ULONGEST t_bit
= 0, insn_id
= 0;
3733 gdb_byte buf
[insn_size
];
3734 insn_decode_record aarch64_record
;
3736 memset (&buf
[0], 0, insn_size
);
3737 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3738 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3739 aarch64_record
.aarch64_insn
3740 = (uint32_t) extract_unsigned_integer (&buf
[0],
3742 gdbarch_byte_order (gdbarch
));
3743 aarch64_record
.regcache
= regcache
;
3744 aarch64_record
.this_addr
= insn_addr
;
3745 aarch64_record
.gdbarch
= gdbarch
;
3747 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3748 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3750 printf_unfiltered (_("Process record does not support instruction "
3751 "0x%0x at address %s.\n"),
3752 aarch64_record
.aarch64_insn
,
3753 paddress (gdbarch
, insn_addr
));
3759 /* Record registers. */
3760 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3762 /* Always record register CPSR. */
3763 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3764 AARCH64_CPSR_REGNUM
);
3765 if (aarch64_record
.aarch64_regs
)
3766 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3767 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3768 aarch64_record
.aarch64_regs
[rec_no
]))
3771 /* Record memories. */
3772 if (aarch64_record
.aarch64_mems
)
3773 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3774 if (record_full_arch_list_add_mem
3775 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3776 aarch64_record
.aarch64_mems
[rec_no
].len
))
3779 if (record_full_arch_list_add_end ())
3783 deallocate_reg_mem (&aarch64_record
);