1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
46 #include "aarch64-tdep.h"
49 #include "elf/aarch64.h"
54 #include "record-full.h"
56 #include "features/aarch64.c"
/* Pseudo register base numbers.  Pseudo Q/D/S/H/B registers each get
   32 consecutive numbers, laid out back to back starting at 0.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
65 /* The standard register names, and all the valid aliases for them. */
68 const char *const name
;
70 } aarch64_register_aliases
[] =
72 /* 64-bit register names. */
73 {"fp", AARCH64_FP_REGNUM
},
74 {"lr", AARCH64_LR_REGNUM
},
75 {"sp", AARCH64_SP_REGNUM
},
77 /* 32-bit register names. */
78 {"w0", AARCH64_X0_REGNUM
+ 0},
79 {"w1", AARCH64_X0_REGNUM
+ 1},
80 {"w2", AARCH64_X0_REGNUM
+ 2},
81 {"w3", AARCH64_X0_REGNUM
+ 3},
82 {"w4", AARCH64_X0_REGNUM
+ 4},
83 {"w5", AARCH64_X0_REGNUM
+ 5},
84 {"w6", AARCH64_X0_REGNUM
+ 6},
85 {"w7", AARCH64_X0_REGNUM
+ 7},
86 {"w8", AARCH64_X0_REGNUM
+ 8},
87 {"w9", AARCH64_X0_REGNUM
+ 9},
88 {"w10", AARCH64_X0_REGNUM
+ 10},
89 {"w11", AARCH64_X0_REGNUM
+ 11},
90 {"w12", AARCH64_X0_REGNUM
+ 12},
91 {"w13", AARCH64_X0_REGNUM
+ 13},
92 {"w14", AARCH64_X0_REGNUM
+ 14},
93 {"w15", AARCH64_X0_REGNUM
+ 15},
94 {"w16", AARCH64_X0_REGNUM
+ 16},
95 {"w17", AARCH64_X0_REGNUM
+ 17},
96 {"w18", AARCH64_X0_REGNUM
+ 18},
97 {"w19", AARCH64_X0_REGNUM
+ 19},
98 {"w20", AARCH64_X0_REGNUM
+ 20},
99 {"w21", AARCH64_X0_REGNUM
+ 21},
100 {"w22", AARCH64_X0_REGNUM
+ 22},
101 {"w23", AARCH64_X0_REGNUM
+ 23},
102 {"w24", AARCH64_X0_REGNUM
+ 24},
103 {"w25", AARCH64_X0_REGNUM
+ 25},
104 {"w26", AARCH64_X0_REGNUM
+ 26},
105 {"w27", AARCH64_X0_REGNUM
+ 27},
106 {"w28", AARCH64_X0_REGNUM
+ 28},
107 {"w29", AARCH64_X0_REGNUM
+ 29},
108 {"w30", AARCH64_X0_REGNUM
+ 30},
111 {"ip0", AARCH64_X0_REGNUM
+ 16},
112 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
148 /* AArch64 prologue cache structure. */
149 struct aarch64_prologue_cache
151 /* The stack pointer at the time this frame was created; i.e. the
152 caller's stack pointer when this function was called. It is used
153 to identify this frame. */
156 /* The frame base for this frame is just prev_sp - frame size.
157 FRAMESIZE is the distance from the frame pointer to the
158 initial stack pointer. */
161 /* The register used to hold the frame pointer for this frame. */
164 /* Saved register offsets. */
165 struct trad_frame_saved_reg
*saved_regs
;
/* Toggle this file's internal debugging dump.  Non-zero enables the
   "decode: ..." traces emitted by the instruction decoders below.  */
static int aarch64_debug;
/* Implement the "show" callback for the "show debug aarch64"
   command; print the current state of AARCH64_DEBUG.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  /* Shift while still unsigned (left-shifting a negative signed value
     is undefined behavior), then cast and arithmetic-shift right to
     sign-extend the field.  */
  return ((int32_t) (insn << shift_l)) >> shift_r;
}
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}
211 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
213 ADDR specifies the address of the opcode.
214 INSN specifies the opcode to test.
215 RD receives the 'rd' field from the decoded instruction.
216 RN receives the 'rn' field from the decoded instruction.
218 Return 1 if the opcodes matches and is decoded, otherwise 0. */
220 decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
, unsigned *rn
,
223 if ((insn
& 0x9f000000) == 0x91000000)
228 *rd
= (insn
>> 0) & 0x1f;
229 *rn
= (insn
>> 5) & 0x1f;
230 *imm
= (insn
>> 10) & 0xfff;
231 shift
= (insn
>> 22) & 0x3;
232 op_is_sub
= (insn
>> 30) & 0x1;
250 fprintf_unfiltered (gdb_stdlog
,
251 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
252 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
259 /* Decode an opcode if it represents an ADRP instruction.
261 ADDR specifies the address of the opcode.
262 INSN specifies the opcode to test.
263 RD receives the 'rd' field from the decoded instruction.
265 Return 1 if the opcodes matches and is decoded, otherwise 0. */
268 decode_adrp (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
270 if (decode_masked_match (insn
, 0x9f000000, 0x90000000))
272 *rd
= (insn
>> 0) & 0x1f;
275 fprintf_unfiltered (gdb_stdlog
,
276 "decode: 0x%s 0x%x adrp x%u, #?\n",
277 core_addr_to_string_nz (addr
), insn
, *rd
);
283 /* Decode an opcode if it represents an branch immediate or branch
284 and link immediate instruction.
286 ADDR specifies the address of the opcode.
287 INSN specifies the opcode to test.
288 LINK receives the 'link' bit from the decoded instruction.
289 OFFSET receives the immediate offset from the decoded instruction.
291 Return 1 if the opcodes matches and is decoded, otherwise 0. */
294 decode_b (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, int32_t *offset
)
296 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
297 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
298 if (decode_masked_match (insn
, 0x7c000000, 0x14000000))
301 *offset
= extract_signed_bitfield (insn
, 26, 0) << 2;
304 fprintf_unfiltered (gdb_stdlog
,
305 "decode: 0x%s 0x%x %s 0x%s\n",
306 core_addr_to_string_nz (addr
), insn
,
308 core_addr_to_string_nz (addr
+ *offset
));
315 /* Decode an opcode if it represents a conditional branch instruction.
317 ADDR specifies the address of the opcode.
318 INSN specifies the opcode to test.
319 COND receives the branch condition field from the decoded
321 OFFSET receives the immediate offset from the decoded instruction.
323 Return 1 if the opcodes matches and is decoded, otherwise 0. */
326 decode_bcond (CORE_ADDR addr
, uint32_t insn
, unsigned *cond
, int32_t *offset
)
328 if (decode_masked_match (insn
, 0xfe000000, 0x54000000))
330 *cond
= (insn
>> 0) & 0xf;
331 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
334 fprintf_unfiltered (gdb_stdlog
,
335 "decode: 0x%s 0x%x b<%u> 0x%s\n",
336 core_addr_to_string_nz (addr
), insn
, *cond
,
337 core_addr_to_string_nz (addr
+ *offset
));
343 /* Decode an opcode if it represents a branch via register instruction.
345 ADDR specifies the address of the opcode.
346 INSN specifies the opcode to test.
347 LINK receives the 'link' bit from the decoded instruction.
348 RN receives the 'rn' field from the decoded instruction.
350 Return 1 if the opcodes matches and is decoded, otherwise 0. */
353 decode_br (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, unsigned *rn
)
355 /* 8 4 0 6 2 8 4 0 */
356 /* blr 110101100011111100000000000rrrrr */
357 /* br 110101100001111100000000000rrrrr */
358 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
360 *link
= (insn
>> 21) & 1;
361 *rn
= (insn
>> 5) & 0x1f;
364 fprintf_unfiltered (gdb_stdlog
,
365 "decode: 0x%s 0x%x %s 0x%x\n",
366 core_addr_to_string_nz (addr
), insn
,
367 *link
? "blr" : "br", *rn
);
374 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
376 ADDR specifies the address of the opcode.
377 INSN specifies the opcode to test.
378 IS64 receives the 'sf' field from the decoded instruction.
379 OP receives the 'op' field from the decoded instruction.
380 RN receives the 'rn' field from the decoded instruction.
381 OFFSET receives the 'imm19' field from the decoded instruction.
383 Return 1 if the opcodes matches and is decoded, otherwise 0. */
386 decode_cb (CORE_ADDR addr
,
387 uint32_t insn
, int *is64
, unsigned *op
, unsigned *rn
,
390 if (decode_masked_match (insn
, 0x7e000000, 0x34000000))
392 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
393 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
395 *rn
= (insn
>> 0) & 0x1f;
396 *is64
= (insn
>> 31) & 0x1;
397 *op
= (insn
>> 24) & 0x1;
398 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
401 fprintf_unfiltered (gdb_stdlog
,
402 "decode: 0x%s 0x%x %s 0x%s\n",
403 core_addr_to_string_nz (addr
), insn
,
404 *op
? "cbnz" : "cbz",
405 core_addr_to_string_nz (addr
+ *offset
));
411 /* Decode an opcode if it represents a ERET instruction.
413 ADDR specifies the address of the opcode.
414 INSN specifies the opcode to test.
416 Return 1 if the opcodes matches and is decoded, otherwise 0. */
419 decode_eret (CORE_ADDR addr
, uint32_t insn
)
421 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
422 if (insn
== 0xd69f03e0)
425 fprintf_unfiltered (gdb_stdlog
, "decode: 0x%s 0x%x eret\n",
426 core_addr_to_string_nz (addr
), insn
);
432 /* Decode an opcode if it represents a MOVZ instruction.
434 ADDR specifies the address of the opcode.
435 INSN specifies the opcode to test.
436 RD receives the 'rd' field from the decoded instruction.
438 Return 1 if the opcodes matches and is decoded, otherwise 0. */
441 decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
443 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
445 *rd
= (insn
>> 0) & 0x1f;
448 fprintf_unfiltered (gdb_stdlog
,
449 "decode: 0x%s 0x%x movz x%u, #?\n",
450 core_addr_to_string_nz (addr
), insn
, *rd
);
456 /* Decode an opcode if it represents a ORR (shifted register)
459 ADDR specifies the address of the opcode.
460 INSN specifies the opcode to test.
461 RD receives the 'rd' field from the decoded instruction.
462 RN receives the 'rn' field from the decoded instruction.
463 RM receives the 'rm' field from the decoded instruction.
464 IMM receives the 'imm6' field from the decoded instruction.
466 Return 1 if the opcodes matches and is decoded, otherwise 0. */
469 decode_orr_shifted_register_x (CORE_ADDR addr
,
470 uint32_t insn
, unsigned *rd
, unsigned *rn
,
471 unsigned *rm
, int32_t *imm
)
473 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
475 *rd
= (insn
>> 0) & 0x1f;
476 *rn
= (insn
>> 5) & 0x1f;
477 *rm
= (insn
>> 16) & 0x1f;
478 *imm
= (insn
>> 10) & 0x3f;
481 fprintf_unfiltered (gdb_stdlog
,
482 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
483 core_addr_to_string_nz (addr
), insn
, *rd
,
490 /* Decode an opcode if it represents a RET instruction.
492 ADDR specifies the address of the opcode.
493 INSN specifies the opcode to test.
494 RN receives the 'rn' field from the decoded instruction.
496 Return 1 if the opcodes matches and is decoded, otherwise 0. */
499 decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
501 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
503 *rn
= (insn
>> 5) & 0x1f;
505 fprintf_unfiltered (gdb_stdlog
,
506 "decode: 0x%s 0x%x ret x%u\n",
507 core_addr_to_string_nz (addr
), insn
, *rn
);
513 /* Decode an opcode if it represents the following instruction:
514 STP rt, rt2, [rn, #imm]
516 ADDR specifies the address of the opcode.
517 INSN specifies the opcode to test.
518 RT1 receives the 'rt' field from the decoded instruction.
519 RT2 receives the 'rt2' field from the decoded instruction.
520 RN receives the 'rn' field from the decoded instruction.
521 IMM receives the 'imm' field from the decoded instruction.
523 Return 1 if the opcodes matches and is decoded, otherwise 0. */
526 decode_stp_offset (CORE_ADDR addr
,
528 unsigned *rt1
, unsigned *rt2
, unsigned *rn
, int32_t *imm
)
530 if (decode_masked_match (insn
, 0xffc00000, 0xa9000000))
532 *rt1
= (insn
>> 0) & 0x1f;
533 *rn
= (insn
>> 5) & 0x1f;
534 *rt2
= (insn
>> 10) & 0x1f;
535 *imm
= extract_signed_bitfield (insn
, 7, 15);
539 fprintf_unfiltered (gdb_stdlog
,
540 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
541 core_addr_to_string_nz (addr
), insn
,
542 *rt1
, *rt2
, *rn
, *imm
);
548 /* Decode an opcode if it represents the following instruction:
549 STP rt, rt2, [rn, #imm]!
551 ADDR specifies the address of the opcode.
552 INSN specifies the opcode to test.
553 RT1 receives the 'rt' field from the decoded instruction.
554 RT2 receives the 'rt2' field from the decoded instruction.
555 RN receives the 'rn' field from the decoded instruction.
556 IMM receives the 'imm' field from the decoded instruction.
558 Return 1 if the opcodes matches and is decoded, otherwise 0. */
561 decode_stp_offset_wb (CORE_ADDR addr
,
563 unsigned *rt1
, unsigned *rt2
, unsigned *rn
,
566 if (decode_masked_match (insn
, 0xffc00000, 0xa9800000))
568 *rt1
= (insn
>> 0) & 0x1f;
569 *rn
= (insn
>> 5) & 0x1f;
570 *rt2
= (insn
>> 10) & 0x1f;
571 *imm
= extract_signed_bitfield (insn
, 7, 15);
575 fprintf_unfiltered (gdb_stdlog
,
576 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
577 core_addr_to_string_nz (addr
), insn
,
578 *rt1
, *rt2
, *rn
, *imm
);
584 /* Decode an opcode if it represents the following instruction:
587 ADDR specifies the address of the opcode.
588 INSN specifies the opcode to test.
589 IS64 receives size field from the decoded instruction.
590 RT receives the 'rt' field from the decoded instruction.
591 RN receives the 'rn' field from the decoded instruction.
592 IMM receives the 'imm' field from the decoded instruction.
594 Return 1 if the opcodes matches and is decoded, otherwise 0. */
597 decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
, unsigned *rt
,
598 unsigned *rn
, int32_t *imm
)
600 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
602 *is64
= (insn
>> 30) & 1;
603 *rt
= (insn
>> 0) & 0x1f;
604 *rn
= (insn
>> 5) & 0x1f;
605 *imm
= extract_signed_bitfield (insn
, 9, 12);
608 fprintf_unfiltered (gdb_stdlog
,
609 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
610 core_addr_to_string_nz (addr
), insn
,
611 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
617 /* Decode an opcode if it represents a TB or TBNZ instruction.
619 ADDR specifies the address of the opcode.
620 INSN specifies the opcode to test.
621 OP receives the 'op' field from the decoded instruction.
622 BIT receives the bit position field from the decoded instruction.
623 RT receives 'rt' field from the decoded instruction.
624 IMM receives 'imm' field from the decoded instruction.
626 Return 1 if the opcodes matches and is decoded, otherwise 0. */
629 decode_tb (CORE_ADDR addr
,
630 uint32_t insn
, unsigned *op
, unsigned *bit
, unsigned *rt
,
633 if (decode_masked_match (insn
, 0x7e000000, 0x36000000))
635 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
636 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
638 *rt
= (insn
>> 0) & 0x1f;
639 *op
= insn
& (1 << 24);
640 *bit
= ((insn
>> (31 - 4)) & 0x20) | ((insn
>> 19) & 0x1f);
641 *imm
= extract_signed_bitfield (insn
, 14, 5) << 2;
644 fprintf_unfiltered (gdb_stdlog
,
645 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
646 core_addr_to_string_nz (addr
), insn
,
647 *op
? "tbnz" : "tbz", *rt
, *bit
,
648 core_addr_to_string_nz (addr
+ *imm
));
654 /* Analyze a prologue, looking for a recognizable stack frame
655 and frame pointer. Scan until we encounter a store that could
656 clobber the stack frame unexpectedly, or an unknown instruction. */
659 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
660 CORE_ADDR start
, CORE_ADDR limit
,
661 struct aarch64_prologue_cache
*cache
)
663 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
665 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
666 struct pv_area
*stack
;
667 struct cleanup
*back_to
;
669 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
670 regs
[i
] = pv_register (i
, 0);
671 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
672 back_to
= make_cleanup_free_pv_area (stack
);
674 for (; start
< limit
; start
+= 4)
692 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
694 if (decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
695 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
696 else if (decode_adrp (start
, insn
, &rd
))
697 regs
[rd
] = pv_unknown ();
698 else if (decode_b (start
, insn
, &is_link
, &offset
))
700 /* Stop analysis on branch. */
703 else if (decode_bcond (start
, insn
, &cond
, &offset
))
705 /* Stop analysis on branch. */
708 else if (decode_br (start
, insn
, &is_link
, &rn
))
710 /* Stop analysis on branch. */
713 else if (decode_cb (start
, insn
, &is64
, &op
, &rn
, &offset
))
715 /* Stop analysis on branch. */
718 else if (decode_eret (start
, insn
))
720 /* Stop analysis on branch. */
723 else if (decode_movz (start
, insn
, &rd
))
724 regs
[rd
] = pv_unknown ();
726 if (decode_orr_shifted_register_x (start
, insn
, &rd
, &rn
, &rm
, &imm
))
728 if (imm
== 0 && rn
== 31)
735 "aarch64: prologue analysis gave up addr=0x%s "
736 "opcode=0x%x (orr x register)\n",
737 core_addr_to_string_nz (start
),
742 else if (decode_ret (start
, insn
, &rn
))
744 /* Stop analysis on branch. */
747 else if (decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
749 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
750 is64
? 8 : 4, regs
[rt
]);
752 else if (decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
754 /* If recording this store would invalidate the store area
755 (perhaps because rn is not known) then we should abandon
756 further prologue analysis. */
757 if (pv_area_store_would_trash (stack
,
758 pv_add_constant (regs
[rn
], imm
)))
761 if (pv_area_store_would_trash (stack
,
762 pv_add_constant (regs
[rn
], imm
+ 8)))
765 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
767 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
770 else if (decode_stp_offset_wb (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
772 /* If recording this store would invalidate the store area
773 (perhaps because rn is not known) then we should abandon
774 further prologue analysis. */
775 if (pv_area_store_would_trash (stack
,
776 pv_add_constant (regs
[rn
], imm
)))
779 if (pv_area_store_would_trash (stack
,
780 pv_add_constant (regs
[rn
], imm
+ 8)))
783 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
785 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
787 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
789 else if (decode_tb (start
, insn
, &op
, &bit
, &rn
, &offset
))
791 /* Stop analysis on branch. */
797 fprintf_unfiltered (gdb_stdlog
,
798 "aarch64: prologue analysis gave up addr=0x%s"
800 core_addr_to_string_nz (start
), insn
);
807 do_cleanups (back_to
);
811 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
813 /* Frame pointer is fp. Frame size is constant. */
814 cache
->framereg
= AARCH64_FP_REGNUM
;
815 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
817 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
819 /* Try the stack pointer. */
820 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
821 cache
->framereg
= AARCH64_SP_REGNUM
;
825 /* We're just out of luck. We don't know where the frame is. */
826 cache
->framereg
= -1;
827 cache
->framesize
= 0;
830 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
834 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
835 cache
->saved_regs
[i
].addr
= offset
;
838 do_cleanups (back_to
);
842 /* Implement the "skip_prologue" gdbarch method. */
845 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
849 CORE_ADDR func_addr
, limit_pc
;
850 struct symtab_and_line sal
;
852 /* See if we can determine the end of the prologue via the symbol
853 table. If so, then return either PC, or the PC after the
854 prologue, whichever is greater. */
855 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
857 CORE_ADDR post_prologue_pc
858 = skip_prologue_using_sal (gdbarch
, func_addr
);
860 if (post_prologue_pc
!= 0)
861 return max (pc
, post_prologue_pc
);
864 /* Can't determine prologue from the symbol table, need to examine
867 /* Find an upper limit on the function prologue using the debug
868 information. If the debug information could not be used to
869 provide that bound, then use an arbitrary large number as the
871 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
873 limit_pc
= pc
+ 128; /* Magic. */
875 /* Try disassembling prologue. */
876 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
879 /* Scan the function prologue for THIS_FRAME and populate the prologue
883 aarch64_scan_prologue (struct frame_info
*this_frame
,
884 struct aarch64_prologue_cache
*cache
)
886 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
887 CORE_ADDR prologue_start
;
888 CORE_ADDR prologue_end
;
889 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
890 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
892 /* Assume we do not find a frame. */
893 cache
->framereg
= -1;
894 cache
->framesize
= 0;
896 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
899 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
903 /* No line info so use the current PC. */
904 prologue_end
= prev_pc
;
906 else if (sal
.end
< prologue_end
)
908 /* The next line begins after the function end. */
909 prologue_end
= sal
.end
;
912 prologue_end
= min (prologue_end
, prev_pc
);
913 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
920 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
922 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
926 cache
->framereg
= AARCH64_FP_REGNUM
;
927 cache
->framesize
= 16;
928 cache
->saved_regs
[29].addr
= 0;
929 cache
->saved_regs
[30].addr
= 8;
933 /* Allocate and fill in *THIS_CACHE with information about the prologue of
934 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
935 Return a pointer to the current aarch64_prologue_cache in
938 static struct aarch64_prologue_cache
*
939 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
941 struct aarch64_prologue_cache
*cache
;
942 CORE_ADDR unwound_fp
;
945 if (*this_cache
!= NULL
)
948 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
949 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
952 aarch64_scan_prologue (this_frame
, cache
);
954 if (cache
->framereg
== -1)
957 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
961 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
963 /* Calculate actual addresses of saved registers using offsets
964 determined by aarch64_analyze_prologue. */
965 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
966 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
967 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
972 /* Our frame ID for a normal frame is the current function's starting
973 PC and the caller's SP when we were called. */
976 aarch64_prologue_this_id (struct frame_info
*this_frame
,
977 void **this_cache
, struct frame_id
*this_id
)
979 struct aarch64_prologue_cache
*cache
980 = aarch64_make_prologue_cache (this_frame
, this_cache
);
984 /* This is meant to halt the backtrace at "_start". */
985 pc
= get_frame_pc (this_frame
);
986 if (pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
989 /* If we've hit a wall, stop. */
990 if (cache
->prev_sp
== 0)
993 func
= get_frame_func (this_frame
);
994 id
= frame_id_build (cache
->prev_sp
, func
);
998 /* Implement the "prev_register" frame_unwind method. */
1000 static struct value
*
1001 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1002 void **this_cache
, int prev_regnum
)
1004 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1005 struct aarch64_prologue_cache
*cache
1006 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1008 /* If we are asked to unwind the PC, then we need to return the LR
1009 instead. The prologue may save PC, but it will point into this
1010 frame's prologue, not the next frame's resume location. */
1011 if (prev_regnum
== AARCH64_PC_REGNUM
)
1015 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1016 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1019 /* SP is generally not saved to the stack, but this frame is
1020 identified by the next frame's stack pointer at the time of the
1021 call. The value was already reconstructed into PREV_SP. */
1027 | | | <- Previous SP
1030 +--| saved fp |<- FP
1034 if (prev_regnum
== AARCH64_SP_REGNUM
)
1035 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1038 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1042 /* AArch64 prologue unwinder. */
1043 struct frame_unwind aarch64_prologue_unwind
=
1046 default_frame_unwind_stop_reason
,
1047 aarch64_prologue_this_id
,
1048 aarch64_prologue_prev_register
,
1050 default_frame_sniffer
1053 /* Allocate an aarch64_prologue_cache and fill it with information
1054 about the prologue of *THIS_FRAME. */
1056 static struct aarch64_prologue_cache
*
1057 aarch64_make_stub_cache (struct frame_info
*this_frame
)
1060 struct aarch64_prologue_cache
*cache
;
1061 CORE_ADDR unwound_fp
;
1063 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1064 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1067 = get_frame_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1072 /* Our frame ID for a stub frame is the current SP and LR. */
1075 aarch64_stub_this_id (struct frame_info
*this_frame
,
1076 void **this_cache
, struct frame_id
*this_id
)
1078 struct aarch64_prologue_cache
*cache
;
1080 if (*this_cache
== NULL
)
1081 *this_cache
= aarch64_make_stub_cache (this_frame
);
1082 cache
= *this_cache
;
1084 *this_id
= frame_id_build (cache
->prev_sp
, get_frame_pc (this_frame
));
1087 /* Implement the "sniffer" frame_unwind method. */
1090 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1091 struct frame_info
*this_frame
,
1092 void **this_prologue_cache
)
1094 CORE_ADDR addr_in_block
;
1097 addr_in_block
= get_frame_address_in_block (this_frame
);
1098 if (in_plt_section (addr_in_block
)
1099 /* We also use the stub winder if the target memory is unreadable
1100 to avoid having the prologue unwinder trying to read it. */
1101 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1107 /* AArch64 stub unwinder. */
1108 struct frame_unwind aarch64_stub_unwind
=
1111 default_frame_unwind_stop_reason
,
1112 aarch64_stub_this_id
,
1113 aarch64_prologue_prev_register
,
1115 aarch64_stub_unwind_sniffer
1118 /* Return the frame base address of *THIS_FRAME. */
1121 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1123 struct aarch64_prologue_cache
*cache
1124 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1126 return cache
->prev_sp
- cache
->framesize
;
1129 /* AArch64 default frame base information. */
1130 struct frame_base aarch64_normal_base
=
1132 &aarch64_prologue_unwind
,
1133 aarch64_normal_frame_base
,
1134 aarch64_normal_frame_base
,
1135 aarch64_normal_frame_base
1138 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1139 dummy frame. The frame ID's base needs to match the TOS value
1140 saved by save_dummy_frame_tos () and returned from
1141 aarch64_push_dummy_call, and the PC needs to match the dummy
1142 frame's breakpoint. */
1144 static struct frame_id
1145 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1147 return frame_id_build (get_frame_register_unsigned (this_frame
,
1149 get_frame_pc (this_frame
));
1152 /* Implement the "unwind_pc" gdbarch method. */
1155 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1158 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1163 /* Implement the "unwind_sp" gdbarch method. */
1166 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1168 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1171 /* Return the value of the REGNUM register in the previous frame of
1174 static struct value
*
1175 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1176 void **this_cache
, int regnum
)
1178 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1183 case AARCH64_PC_REGNUM
:
1184 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1185 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1188 internal_error (__FILE__
, __LINE__
,
1189 _("Unexpected register %d"), regnum
);
1193 /* Implement the "init_reg" dwarf2_frame_ops method. */
1196 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1197 struct dwarf2_frame_state_reg
*reg
,
1198 struct frame_info
*this_frame
)
1202 case AARCH64_PC_REGNUM
:
1203 reg
->how
= DWARF2_FRAME_REG_FN
;
1204 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1206 case AARCH64_SP_REGNUM
:
1207 reg
->how
= DWARF2_FRAME_REG_CFA
;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1226 /* Return the alignment (in bytes) of the given type. */
1229 aarch64_type_align (struct type
*t
)
1235 t
= check_typedef (t
);
1236 switch (TYPE_CODE (t
))
1239 /* Should never happen. */
1240 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1244 case TYPE_CODE_ENUM
:
1248 case TYPE_CODE_RANGE
:
1249 case TYPE_CODE_BITSTRING
:
1251 case TYPE_CODE_CHAR
:
1252 case TYPE_CODE_BOOL
:
1253 return TYPE_LENGTH (t
);
1255 case TYPE_CODE_ARRAY
:
1256 case TYPE_CODE_COMPLEX
:
1257 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1259 case TYPE_CODE_STRUCT
:
1260 case TYPE_CODE_UNION
:
1262 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1264 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1272 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1273 defined in the AAPCS64 ABI document; otherwise return 0. */
1276 is_hfa (struct type
*ty
)
1278 switch (TYPE_CODE (ty
))
1280 case TYPE_CODE_ARRAY
:
1282 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1283 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1288 case TYPE_CODE_UNION
:
1289 case TYPE_CODE_STRUCT
:
1291 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1293 struct type
*member0_type
;
1295 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1296 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1300 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1302 struct type
*member1_type
;
1304 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1305 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1306 || (TYPE_LENGTH (member0_type
)
1307 != TYPE_LENGTH (member1_type
)))
1323 /* AArch64 function call information structure. */
1324 struct aarch64_call_info
1326 /* the current argument number. */
1329 /* The next general purpose register number, equivalent to NGRN as
1330 described in the AArch64 Procedure Call Standard. */
1333 /* The next SIMD and floating point register number, equivalent to
1334 NSRN as described in the AArch64 Procedure Call Standard. */
1337 /* The next stacked argument address, equivalent to NSAA as
1338 described in the AArch64 Procedure Call Standard. */
1341 /* Stack item vector. */
1342 VEC(stack_item_t
) *si
;
1345 /* Pass a value in a sequence of consecutive X registers. The caller
1346 is responsbile for ensuring sufficient registers are available. */
1349 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1350 struct aarch64_call_info
*info
, struct type
*type
,
1351 const bfd_byte
*buf
)
1353 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1354 int len
= TYPE_LENGTH (type
);
1355 enum type_code typecode
= TYPE_CODE (type
);
1356 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1362 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1363 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1367 /* Adjust sub-word struct/union args when big-endian. */
1368 if (byte_order
== BFD_ENDIAN_BIG
1369 && partial_len
< X_REGISTER_SIZE
1370 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1371 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1374 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
1376 gdbarch_register_name (gdbarch
, regnum
),
1377 phex (regval
, X_REGISTER_SIZE
));
1378 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1385 /* Attempt to marshall a value in a V register. Return 1 if
1386 successful, or 0 if insufficient registers are available. This
1387 function, unlike the equivalent pass_in_x() function does not
1388 handle arguments spread across multiple registers. */
1391 pass_in_v (struct gdbarch
*gdbarch
,
1392 struct regcache
*regcache
,
1393 struct aarch64_call_info
*info
,
1394 const bfd_byte
*buf
)
1398 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1399 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1404 regcache_cooked_write (regcache
, regnum
, buf
);
1406 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s\n",
1408 gdbarch_register_name (gdbarch
, regnum
));
1415 /* Marshall an argument onto the stack. */
1418 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1419 const bfd_byte
*buf
)
1421 int len
= TYPE_LENGTH (type
);
1427 align
= aarch64_type_align (type
);
1429 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1430 Natural alignment of the argument's type. */
1431 align
= align_up (align
, 8);
1433 /* The AArch64 PCS requires at most doubleword alignment. */
1438 fprintf_unfiltered (gdb_stdlog
, "arg %d len=%d @ sp + %d\n",
1439 info
->argnum
, len
, info
->nsaa
);
1443 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1446 if (info
->nsaa
& (align
- 1))
1448 /* Push stack alignment padding. */
1449 int pad
= align
- (info
->nsaa
& (align
- 1));
1454 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1459 /* Marshall an argument into a sequence of one or more consecutive X
1460 registers or, if insufficient X registers are available then onto
1464 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1465 struct aarch64_call_info
*info
, struct type
*type
,
1466 const bfd_byte
*buf
)
1468 int len
= TYPE_LENGTH (type
);
1469 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1471 /* PCS C.13 - Pass in registers if we have enough spare */
1472 if (info
->ngrn
+ nregs
<= 8)
1474 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1475 info
->ngrn
+= nregs
;
1480 pass_on_stack (info
, type
, buf
);
1484 /* Pass a value in a V register, or on the stack if insufficient are
1488 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1489 struct regcache
*regcache
,
1490 struct aarch64_call_info
*info
,
1492 const bfd_byte
*buf
)
1494 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1495 pass_on_stack (info
, type
, buf
);
1498 /* Implement the "push_dummy_call" gdbarch method. */
1501 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1502 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1504 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1505 CORE_ADDR struct_addr
)
1511 struct aarch64_call_info info
;
1512 struct type
*func_type
;
1513 struct type
*return_type
;
1514 int lang_struct_return
;
1516 memset (&info
, 0, sizeof (info
));
1518 /* We need to know what the type of the called function is in order
1519 to determine the number of named/anonymous arguments for the
1520 actual argument placement, and the return type in order to handle
1521 return value correctly.
1523 The generic code above us views the decision of return in memory
1524 or return in registers as a two stage processes. The language
1525 handler is consulted first and may decide to return in memory (eg
1526 class with copy constructor returned by value), this will cause
1527 the generic code to allocate space AND insert an initial leading
1530 If the language code does not decide to pass in memory then the
1531 target code is consulted.
1533 If the language code decides to pass in memory we want to move
1534 the pointer inserted as the initial argument from the argument
1535 list and into X8, the conventional AArch64 struct return pointer
1538 This is slightly awkward, ideally the flag "lang_struct_return"
1539 would be passed to the targets implementation of push_dummy_call.
1540 Rather that change the target interface we call the language code
1541 directly ourselves. */
1543 func_type
= check_typedef (value_type (function
));
1545 /* Dereference function pointer types. */
1546 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1547 func_type
= TYPE_TARGET_TYPE (func_type
);
1549 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1550 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1552 /* If language_pass_by_reference () returned true we will have been
1553 given an additional initial argument, a hidden pointer to the
1554 return slot in memory. */
1555 return_type
= TYPE_TARGET_TYPE (func_type
);
1556 lang_struct_return
= language_pass_by_reference (return_type
);
1558 /* Set the return address. For the AArch64, the return breakpoint
1559 is always at BP_ADDR. */
1560 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1562 /* If we were given an initial argument for the return slot because
1563 lang_struct_return was true, lose it. */
1564 if (lang_struct_return
)
1570 /* The struct_return pointer occupies X8. */
1571 if (struct_return
|| lang_struct_return
)
1574 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = 0x%s\n",
1575 gdbarch_register_name
1577 AARCH64_STRUCT_RETURN_REGNUM
),
1578 paddress (gdbarch
, struct_addr
));
1579 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1583 for (argnum
= 0; argnum
< nargs
; argnum
++)
1585 struct value
*arg
= args
[argnum
];
1586 struct type
*arg_type
;
1589 arg_type
= check_typedef (value_type (arg
));
1590 len
= TYPE_LENGTH (arg_type
);
1592 switch (TYPE_CODE (arg_type
))
1595 case TYPE_CODE_BOOL
:
1596 case TYPE_CODE_CHAR
:
1597 case TYPE_CODE_RANGE
:
1598 case TYPE_CODE_ENUM
:
1601 /* Promote to 32 bit integer. */
1602 if (TYPE_UNSIGNED (arg_type
))
1603 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1605 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1606 arg
= value_cast (arg_type
, arg
);
1608 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1609 value_contents (arg
));
1612 case TYPE_CODE_COMPLEX
:
1615 const bfd_byte
*buf
= value_contents (arg
);
1616 struct type
*target_type
=
1617 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1619 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1620 pass_in_v (gdbarch
, regcache
, &info
,
1621 buf
+ TYPE_LENGTH (target_type
));
1626 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1630 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1631 value_contents (arg
));
1634 case TYPE_CODE_STRUCT
:
1635 case TYPE_CODE_ARRAY
:
1636 case TYPE_CODE_UNION
:
1637 if (is_hfa (arg_type
))
1639 int elements
= TYPE_NFIELDS (arg_type
);
1641 /* Homogeneous Aggregates */
1642 if (info
.nsrn
+ elements
< 8)
1646 for (i
= 0; i
< elements
; i
++)
1648 /* We know that we have sufficient registers
1649 available therefore this will never fallback
1651 struct value
*field
=
1652 value_primitive_field (arg
, 0, i
, arg_type
);
1653 struct type
*field_type
=
1654 check_typedef (value_type (field
));
1656 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1657 value_contents_writeable (field
));
1663 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1668 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1669 invisible reference. */
1671 /* Allocate aligned storage. */
1672 sp
= align_down (sp
- len
, 16);
1674 /* Write the real data into the stack. */
1675 write_memory (sp
, value_contents (arg
), len
);
1677 /* Construct the indirection. */
1678 arg_type
= lookup_pointer_type (arg_type
);
1679 arg
= value_from_pointer (arg_type
, sp
);
1680 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1681 value_contents (arg
));
1684 /* PCS C.15 / C.18 multiple values pass. */
1685 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1686 value_contents (arg
));
1690 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1691 value_contents (arg
));
1696 /* Make sure stack retains 16 byte alignment. */
1698 sp
-= 16 - (info
.nsaa
& 15);
1700 while (!VEC_empty (stack_item_t
, info
.si
))
1702 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1705 write_memory (sp
, si
->data
, si
->len
);
1706 VEC_pop (stack_item_t
, info
.si
);
1709 VEC_free (stack_item_t
, info
.si
);
1711 /* Finally, update the SP register. */
1712 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1717 /* Implement the "frame_align" gdbarch method. */
1720 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1722 /* Align the stack to sixteen bytes. */
1723 return sp
& ~(CORE_ADDR
) 15;
1726 /* Return the type for an AdvSISD Q register. */
1728 static struct type
*
1729 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1731 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1733 if (tdep
->vnq_type
== NULL
)
1738 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1741 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1742 append_composite_type_field (t
, "u", elem
);
1744 elem
= builtin_type (gdbarch
)->builtin_int128
;
1745 append_composite_type_field (t
, "s", elem
);
1750 return tdep
->vnq_type
;
1753 /* Return the type for an AdvSISD D register. */
1755 static struct type
*
1756 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1758 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1760 if (tdep
->vnd_type
== NULL
)
1765 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1768 elem
= builtin_type (gdbarch
)->builtin_double
;
1769 append_composite_type_field (t
, "f", elem
);
1771 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1772 append_composite_type_field (t
, "u", elem
);
1774 elem
= builtin_type (gdbarch
)->builtin_int64
;
1775 append_composite_type_field (t
, "s", elem
);
1780 return tdep
->vnd_type
;
1783 /* Return the type for an AdvSISD S register. */
1785 static struct type
*
1786 aarch64_vns_type (struct gdbarch
*gdbarch
)
1788 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1790 if (tdep
->vns_type
== NULL
)
1795 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1798 elem
= builtin_type (gdbarch
)->builtin_float
;
1799 append_composite_type_field (t
, "f", elem
);
1801 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1802 append_composite_type_field (t
, "u", elem
);
1804 elem
= builtin_type (gdbarch
)->builtin_int32
;
1805 append_composite_type_field (t
, "s", elem
);
1810 return tdep
->vns_type
;
1813 /* Return the type for an AdvSISD H register. */
1815 static struct type
*
1816 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1818 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1820 if (tdep
->vnh_type
== NULL
)
1825 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1828 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1829 append_composite_type_field (t
, "u", elem
);
1831 elem
= builtin_type (gdbarch
)->builtin_int16
;
1832 append_composite_type_field (t
, "s", elem
);
1837 return tdep
->vnh_type
;
1840 /* Return the type for an AdvSISD B register. */
1842 static struct type
*
1843 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1845 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1847 if (tdep
->vnb_type
== NULL
)
1852 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1855 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1856 append_composite_type_field (t
, "u", elem
);
1858 elem
= builtin_type (gdbarch
)->builtin_int8
;
1859 append_composite_type_field (t
, "s", elem
);
1864 return tdep
->vnb_type
;
1867 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1870 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1872 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1873 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1875 if (reg
== AARCH64_DWARF_SP
)
1876 return AARCH64_SP_REGNUM
;
1878 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1879 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1885 /* Implement the "print_insn" gdbarch method. */
1888 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1890 info
->symbols
= NULL
;
1891 return print_insn_aarch64 (memaddr
, info
);
1894 /* AArch64 BRK software debug mode instruction.
1895 Note that AArch64 code is always little-endian.
1896 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1897 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1899 /* Implement the "breakpoint_from_pc" gdbarch method. */
1901 static const gdb_byte
*
1902 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1905 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1907 *lenptr
= sizeof (aarch64_default_breakpoint
);
1908 return aarch64_default_breakpoint
;
1911 /* Extract from an array REGS containing the (raw) register state a
1912 function return value of type TYPE, and copy that, in virtual
1913 format, into VALBUF. */
1916 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1919 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1920 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1922 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1924 bfd_byte buf
[V_REGISTER_SIZE
];
1925 int len
= TYPE_LENGTH (type
);
1927 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1928 memcpy (valbuf
, buf
, len
);
1930 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1931 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1932 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1933 || TYPE_CODE (type
) == TYPE_CODE_PTR
1934 || TYPE_CODE (type
) == TYPE_CODE_REF
1935 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1937 /* If the the type is a plain integer, then the access is
1938 straight-forward. Otherwise we have to play around a bit
1940 int len
= TYPE_LENGTH (type
);
1941 int regno
= AARCH64_X0_REGNUM
;
1946 /* By using store_unsigned_integer we avoid having to do
1947 anything special for small big-endian values. */
1948 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1949 store_unsigned_integer (valbuf
,
1950 (len
> X_REGISTER_SIZE
1951 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1952 len
-= X_REGISTER_SIZE
;
1953 valbuf
+= X_REGISTER_SIZE
;
1956 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
1958 int regno
= AARCH64_V0_REGNUM
;
1959 bfd_byte buf
[V_REGISTER_SIZE
];
1960 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1961 int len
= TYPE_LENGTH (target_type
);
1963 regcache_cooked_read (regs
, regno
, buf
);
1964 memcpy (valbuf
, buf
, len
);
1966 regcache_cooked_read (regs
, regno
+ 1, buf
);
1967 memcpy (valbuf
, buf
, len
);
1970 else if (is_hfa (type
))
1972 int elements
= TYPE_NFIELDS (type
);
1973 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1974 int len
= TYPE_LENGTH (member_type
);
1977 for (i
= 0; i
< elements
; i
++)
1979 int regno
= AARCH64_V0_REGNUM
+ i
;
1980 bfd_byte buf
[X_REGISTER_SIZE
];
1983 fprintf_unfiltered (gdb_stdlog
,
1984 "read HFA return value element %d from %s\n",
1986 gdbarch_register_name (gdbarch
, regno
));
1987 regcache_cooked_read (regs
, regno
, buf
);
1989 memcpy (valbuf
, buf
, len
);
1995 /* For a structure or union the behaviour is as if the value had
1996 been stored to word-aligned memory and then loaded into
1997 registers with 64-bit load instruction(s). */
1998 int len
= TYPE_LENGTH (type
);
1999 int regno
= AARCH64_X0_REGNUM
;
2000 bfd_byte buf
[X_REGISTER_SIZE
];
2004 regcache_cooked_read (regs
, regno
++, buf
);
2005 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2006 len
-= X_REGISTER_SIZE
;
2007 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  /* Dropped the unused locals the old version declared here.  */
  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
         used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
2047 /* Write into appropriate registers a function return value of type
2048 TYPE, given in virtual format. */
2051 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2052 const gdb_byte
*valbuf
)
2054 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2055 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2057 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2059 bfd_byte buf
[V_REGISTER_SIZE
];
2060 int len
= TYPE_LENGTH (type
);
2062 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2063 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2065 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2066 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2067 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2068 || TYPE_CODE (type
) == TYPE_CODE_PTR
2069 || TYPE_CODE (type
) == TYPE_CODE_REF
2070 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2072 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2074 /* Values of one word or less are zero/sign-extended and
2076 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2077 LONGEST val
= unpack_long (type
, valbuf
);
2079 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2080 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
2084 /* Integral values greater than one word are stored in
2085 consecutive registers starting with r0. This will always
2086 be a multiple of the regiser size. */
2087 int len
= TYPE_LENGTH (type
);
2088 int regno
= AARCH64_X0_REGNUM
;
2092 regcache_cooked_write (regs
, regno
++, valbuf
);
2093 len
-= X_REGISTER_SIZE
;
2094 valbuf
+= X_REGISTER_SIZE
;
2098 else if (is_hfa (type
))
2100 int elements
= TYPE_NFIELDS (type
);
2101 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2102 int len
= TYPE_LENGTH (member_type
);
2105 for (i
= 0; i
< elements
; i
++)
2107 int regno
= AARCH64_V0_REGNUM
+ i
;
2108 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2111 fprintf_unfiltered (gdb_stdlog
,
2112 "write HFA return value element %d to %s\n",
2114 gdbarch_register_name (gdbarch
, regno
));
2116 memcpy (tmpbuf
, valbuf
, len
);
2117 regcache_cooked_write (regs
, regno
, tmpbuf
);
2123 /* For a structure or union the behaviour is as if the value had
2124 been stored to word-aligned memory and then loaded into
2125 registers with 64-bit load instruction(s). */
2126 int len
= TYPE_LENGTH (type
);
2127 int regno
= AARCH64_X0_REGNUM
;
2128 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2132 memcpy (tmpbuf
, valbuf
,
2133 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2134 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2135 len
-= X_REGISTER_SIZE
;
2136 valbuf
+= X_REGISTER_SIZE
;
2141 /* Implement the "return_value" gdbarch method. */
2143 static enum return_value_convention
2144 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2145 struct type
*valtype
, struct regcache
*regcache
,
2146 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2148 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2150 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2151 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2152 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2154 if (aarch64_return_in_memory (gdbarch
, valtype
))
2157 fprintf_unfiltered (gdb_stdlog
, "return value in memory\n");
2158 return RETURN_VALUE_STRUCT_CONVENTION
;
2163 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2166 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2169 fprintf_unfiltered (gdb_stdlog
, "return value in registers\n");
2171 return RETURN_VALUE_REGISTER_CONVENTION
;
2174 /* Implement the "get_longjmp_target" gdbarch method. */
2177 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2180 gdb_byte buf
[X_REGISTER_SIZE
];
2181 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2182 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2183 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2185 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2187 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2191 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2196 /* Return the pseudo register name corresponding to register regnum. */
2199 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2201 static const char *const q_name
[] =
2203 "q0", "q1", "q2", "q3",
2204 "q4", "q5", "q6", "q7",
2205 "q8", "q9", "q10", "q11",
2206 "q12", "q13", "q14", "q15",
2207 "q16", "q17", "q18", "q19",
2208 "q20", "q21", "q22", "q23",
2209 "q24", "q25", "q26", "q27",
2210 "q28", "q29", "q30", "q31",
2213 static const char *const d_name
[] =
2215 "d0", "d1", "d2", "d3",
2216 "d4", "d5", "d6", "d7",
2217 "d8", "d9", "d10", "d11",
2218 "d12", "d13", "d14", "d15",
2219 "d16", "d17", "d18", "d19",
2220 "d20", "d21", "d22", "d23",
2221 "d24", "d25", "d26", "d27",
2222 "d28", "d29", "d30", "d31",
2225 static const char *const s_name
[] =
2227 "s0", "s1", "s2", "s3",
2228 "s4", "s5", "s6", "s7",
2229 "s8", "s9", "s10", "s11",
2230 "s12", "s13", "s14", "s15",
2231 "s16", "s17", "s18", "s19",
2232 "s20", "s21", "s22", "s23",
2233 "s24", "s25", "s26", "s27",
2234 "s28", "s29", "s30", "s31",
2237 static const char *const h_name
[] =
2239 "h0", "h1", "h2", "h3",
2240 "h4", "h5", "h6", "h7",
2241 "h8", "h9", "h10", "h11",
2242 "h12", "h13", "h14", "h15",
2243 "h16", "h17", "h18", "h19",
2244 "h20", "h21", "h22", "h23",
2245 "h24", "h25", "h26", "h27",
2246 "h28", "h29", "h30", "h31",
2249 static const char *const b_name
[] =
2251 "b0", "b1", "b2", "b3",
2252 "b4", "b5", "b6", "b7",
2253 "b8", "b9", "b10", "b11",
2254 "b12", "b13", "b14", "b15",
2255 "b16", "b17", "b18", "b19",
2256 "b20", "b21", "b22", "b23",
2257 "b24", "b25", "b26", "b27",
2258 "b28", "b29", "b30", "b31",
2261 regnum
-= gdbarch_num_regs (gdbarch
);
2263 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2264 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2266 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2267 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2269 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2270 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2272 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2273 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2275 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2276 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2278 internal_error (__FILE__
, __LINE__
,
2279 _("aarch64_pseudo_register_name: bad register number %d"),
2283 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2285 static struct type
*
2286 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2288 regnum
-= gdbarch_num_regs (gdbarch
);
2290 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2291 return aarch64_vnq_type (gdbarch
);
2293 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2294 return aarch64_vnd_type (gdbarch
);
2296 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2297 return aarch64_vns_type (gdbarch
);
2299 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2300 return aarch64_vnh_type (gdbarch
);
2302 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2303 return aarch64_vnb_type (gdbarch
);
2305 internal_error (__FILE__
, __LINE__
,
2306 _("aarch64_pseudo_register_type: bad register number %d"),
2310 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2313 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2314 struct reggroup
*group
)
2316 regnum
-= gdbarch_num_regs (gdbarch
);
2318 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2319 return group
== all_reggroup
|| group
== vector_reggroup
;
2320 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2321 return (group
== all_reggroup
|| group
== vector_reggroup
2322 || group
== float_reggroup
);
2323 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2324 return (group
== all_reggroup
|| group
== vector_reggroup
2325 || group
== float_reggroup
);
2326 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2327 return group
== all_reggroup
|| group
== vector_reggroup
;
2328 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2329 return group
== all_reggroup
|| group
== vector_reggroup
;
2331 return group
== all_reggroup
;
2334 /* Implement the "pseudo_register_read_value" gdbarch method. */
2336 static struct value
*
2337 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2338 struct regcache
*regcache
,
2341 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2342 struct value
*result_value
;
2345 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2346 VALUE_LVAL (result_value
) = lval_register
;
2347 VALUE_REGNUM (result_value
) = regnum
;
2348 buf
= value_contents_raw (result_value
);
2350 regnum
-= gdbarch_num_regs (gdbarch
);
2352 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2354 enum register_status status
;
2357 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2358 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2359 if (status
!= REG_VALID
)
2360 mark_value_bytes_unavailable (result_value
, 0,
2361 TYPE_LENGTH (value_type (result_value
)));
2363 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2364 return result_value
;
2367 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2369 enum register_status status
;
2372 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2373 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2374 if (status
!= REG_VALID
)
2375 mark_value_bytes_unavailable (result_value
, 0,
2376 TYPE_LENGTH (value_type (result_value
)));
2378 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2379 return result_value
;
2382 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2384 enum register_status status
;
2387 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2388 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2389 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2390 return result_value
;
2393 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2395 enum register_status status
;
2398 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2399 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2400 if (status
!= REG_VALID
)
2401 mark_value_bytes_unavailable (result_value
, 0,
2402 TYPE_LENGTH (value_type (result_value
)));
2404 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2405 return result_value
;
2408 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2410 enum register_status status
;
2413 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2414 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2415 if (status
!= REG_VALID
)
2416 mark_value_bytes_unavailable (result_value
, 0,
2417 TYPE_LENGTH (value_type (result_value
)));
2419 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2420 return result_value
;
2423 gdb_assert_not_reached ("regnum out of bound");
2426 /* Implement the "pseudo_register_write" gdbarch method. */
2429 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2430 int regnum
, const gdb_byte
*buf
)
2432 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2434 /* Ensure the register buffer is zero, we want gdb writes of the
2435 various 'scalar' pseudo registers to behavior like architectural
2436 writes, register width bytes are written the remainder are set to
2438 memset (reg_buf
, 0, sizeof (reg_buf
));
2440 regnum
-= gdbarch_num_regs (gdbarch
);
2442 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2444 /* pseudo Q registers */
2447 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2448 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2449 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2453 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2455 /* pseudo D registers */
2458 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2459 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2460 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2464 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2468 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2469 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2470 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2474 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2476 /* pseudo H registers */
2479 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2480 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2481 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2485 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2487 /* pseudo B registers */
2490 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2491 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2492 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2496 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.  BATON is a pointer to the GDB
   register number to read.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
2510 /* Implement the "software_single_step" gdbarch method, needed to
2511 single step through atomic sequences on AArch64. */
/* Scans forward from the frame PC looking for a Load-Exclusive
   (mask 0x3fc00000 / pattern 0x08400000) that opens an atomic sequence,
   then for the Store-Exclusive (pattern 0x08000000) that closes it,
   within at most atomic_sequence_length instructions.  Plants a
   single-step breakpoint after the sequence, plus one at the target of
   at most one conditional branch seen inside it (suppressed when it
   duplicates the first breakpoint or lands inside the sequence).
   NOTE(review): declarations of several locals used below (loc,
   insn_count, cond, offset, index) are not visible in this extracted
   chunk — the file appears to have lines dropped by extraction.  */
2514 aarch64_software_single_step (struct frame_info
*frame
)
2516 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2517 struct address_space
*aspace
= get_frame_address_space (frame
);
2518 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2519 const int insn_size
= 4;
2520 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2521 CORE_ADDR pc
= get_frame_pc (frame
);
2522 CORE_ADDR breaks
[2] = { -1, -1 };
2524 CORE_ADDR closing_insn
= 0;
2525 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2526 byte_order_for_code
);
2529 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2530 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2532 /* Look for a Load Exclusive instruction which begins the sequence. */
2533 if (!decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2536 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2542 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2543 byte_order_for_code
);
2545 /* Check if the instruction is a conditional branch. */
2546 if (decode_bcond (loc
, insn
, &cond
, &offset
))
2548 if (bc_insn_count
>= 1)
2551 /* It is, so we'll try to set a breakpoint at the destination. */
2552 breaks
[1] = loc
+ offset
;
2558 /* Look for the Store Exclusive which closes the atomic sequence. */
2559 if (decode_masked_match (insn
, 0x3fc00000, 0x08000000))
2566 /* We didn't find a closing Store Exclusive instruction, fall back. */
2570 /* Insert breakpoint after the end of the atomic sequence. */
2571 breaks
[0] = loc
+ insn_size
;
2573 /* Check for duplicated breakpoints, and also check that the second
2574 breakpoint is not within the atomic sequence. */
2576 && (breaks
[1] == breaks
[0]
2577 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2578 last_breakpoint
= 0;
2580 /* Insert the breakpoint at the end of the sequence, and one at the
2581 destination of the conditional branch, if it exists. */
2582 for (index
= 0; index
<= last_breakpoint
; index
++)
2583 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2588 /* Initialize the current architecture based on INFO. If possible,
2589 re-use an architecture from ARCHES, which is a list of
2590 architectures already created during this debugging session.
2592 Called e.g. at program startup, when reading a core file, and when
2593 reading a binary file. */
/* Builds and registers the AArch64 gdbarch: validates the target
   description's "org.gnu.gdb.aarch64.core" (X registers) and
   "org.gnu.gdb.aarch64.fpu" (V registers) features and allocates their
   register numbers, adds the Q/D/S/H/B scalar pseudo-register banks
   (32 each), installs frame/breakpoint/prologue/type-size hooks, hooks
   in the OS ABI overrides, and registers the user register aliases via
   value_of_aarch64_user_reg.
   NOTE(review): several lines are missing from this extracted copy
   (e.g. the `valid_p` accumulation around tdesc_numbered_register, the
   error returns after failed feature lookups, and the final
   `return gdbarch;`) — verify against upstream before compiling.  */
2595 static struct gdbarch
*
2596 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2598 struct gdbarch_tdep
*tdep
;
2599 struct gdbarch
*gdbarch
;
2600 struct gdbarch_list
*best_arch
;
2601 struct tdesc_arch_data
*tdesc_data
= NULL
;
2602 const struct target_desc
*tdesc
= info
.target_desc
;
2604 int have_fpa_registers
= 1;
2606 const struct tdesc_feature
*feature
;
2608 int num_pseudo_regs
= 0;
2610 /* Ensure we always have a target descriptor. */
2611 if (!tdesc_has_registers (tdesc
))
2612 tdesc
= tdesc_aarch64
;
2616 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2618 if (feature
== NULL
)
2621 tdesc_data
= tdesc_data_alloc ();
2623 /* Validate the descriptor provides the mandatory core R registers
2624 and allocate their numbers. */
2625 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2627 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2628 aarch64_r_register_names
[i
]);
2630 num_regs
= AARCH64_X0_REGNUM
+ i
;
2632 /* Look for the V registers. */
2633 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2636 /* Validate the descriptor provides the mandatory V registers
2637 and allocate their numbers. */
2638 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2640 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2641 aarch64_v_register_names
[i
]);
2643 num_regs
= AARCH64_V0_REGNUM
+ i
;
2645 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2646 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2647 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2648 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2649 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2654 tdesc_data_cleanup (tdesc_data
);
2658 /* AArch64 code is always little-endian. */
2659 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2661 /* If there is already a candidate, use it. */
2662 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2664 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2666 /* Found a match. */
2670 if (best_arch
!= NULL
)
2672 if (tdesc_data
!= NULL
)
2673 tdesc_data_cleanup (tdesc_data
);
2674 return best_arch
->gdbarch
;
2677 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
2678 gdbarch
= gdbarch_alloc (&info
, tdep
);
2680 /* This should be low enough for everything. */
2681 tdep
->lowest_pc
= 0x20;
2682 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2683 tdep
->jb_elt_size
= 8;
2685 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2686 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2688 /* Frame handling. */
2689 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2690 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2691 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2693 /* Advance PC across function entry code. */
2694 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2696 /* The stack grows downward. */
2697 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2699 /* Breakpoint manipulation. */
2700 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2701 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2702 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2704 /* Information about registers, etc. */
2705 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2706 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2707 set_gdbarch_num_regs (gdbarch
, num_regs
);
2709 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2710 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2711 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2712 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2713 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2714 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2715 aarch64_pseudo_register_reggroup_p
);
/* ILP32 is not supported here: long/ptr are 64-bit, long double is the
   128-bit IEEE quad format.  */
2718 set_gdbarch_short_bit (gdbarch
, 16);
2719 set_gdbarch_int_bit (gdbarch
, 32);
2720 set_gdbarch_float_bit (gdbarch
, 32);
2721 set_gdbarch_double_bit (gdbarch
, 64);
2722 set_gdbarch_long_double_bit (gdbarch
, 128);
2723 set_gdbarch_long_bit (gdbarch
, 64);
2724 set_gdbarch_long_long_bit (gdbarch
, 64);
2725 set_gdbarch_ptr_bit (gdbarch
, 64);
2726 set_gdbarch_char_signed (gdbarch
, 0);
2727 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2728 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2729 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2731 /* Internal <-> external register number maps. */
2732 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2734 /* Returning results. */
2735 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2738 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2740 /* Virtual tables. */
2741 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2743 /* Hook in the ABI-specific overrides, if they have been registered. */
2744 info
.target_desc
= tdesc
;
2745 info
.tdep_info
= (void *) tdesc_data
;
2746 gdbarch_init_osabi (info
, gdbarch
);
2748 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2750 /* Add some default predicates. */
2751 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2752 dwarf2_append_unwinders (gdbarch
);
2753 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2755 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2757 /* Now we have tuned the configuration, set a few final things,
2758 based on what the OS ABI has told us. */
2760 if (tdep
->jb_pc
>= 0)
2761 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2763 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2765 /* Add standard register aliases. */
2766 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2767 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2768 value_of_aarch64_user_reg
,
2769 &aarch64_register_aliases
[i
].regnum
);
2775 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2777 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2782 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2783 paddress (gdbarch
, tdep
->lowest_pc
));
2786 /* Suppress warning from -Wmissing-prototypes. */
2787 extern initialize_file_ftype _initialize_aarch64_tdep
;
/* Module initializer: registers the AArch64 architecture with
   aarch64_gdbarch_init as its init routine, initializes the built-in
   target description, and adds the "set/show debug aarch64"
   maintenance command backed by aarch64_debug.
   NOTE(review): the trailing arguments of gdbarch_register and of
   add_setshow_boolean_cmd are missing from this extracted copy.  */
2790 _initialize_aarch64_tdep (void)
2792 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
2795 initialize_tdesc_aarch64 ();
2797 /* Debug this file's internals. */
2798 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2799 Set AArch64 debugging."), _("\
2800 Show AArch64 debugging."), _("\
2801 When on, AArch64 specific debugging is enabled."),
2804 &setdebuglist
, &showdebuglist
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Bit-field helpers: submask(x) is a mask of bits [0, x]; bit(obj, st)
   extracts a single bit; bits(obj, st, fn) extracts bits [st, fn].
   Note submask uses 1L, so fn - st must stay below the width of long.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Copy LENGTH register numbers out of RECORD_BUF into a freshly
   allocated uint32_t array assigned to REGS.  No-op when LENGTH is 0.
   (Fixed: "&REGS[0]" had been mis-encoded as a registered-sign glyph.)  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
              } \
          } \
        while (0)

/* Copy LENGTH aarch64_mem_r entries out of RECORD_BUF into a freshly
   allocated array assigned to MEMS.  No-op when LENGTH is 0.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy (&MEMS->len, &RECORD_BUF[0], \
                        sizeof (struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
2838 /* AArch64 record/replay structures and enumerations. */
2840 struct aarch64_mem_r
2842 uint64_t len
; /* Record length. */
2843 uint64_t addr
; /* Memory address. */
2846 enum aarch64_record_result
2848 AARCH64_RECORD_SUCCESS
,
2849 AARCH64_RECORD_FAILURE
,
2850 AARCH64_RECORD_UNSUPPORTED
,
2851 AARCH64_RECORD_UNKNOWN
2854 typedef struct insn_decode_record_t
2856 struct gdbarch
*gdbarch
;
2857 struct regcache
*regcache
;
2858 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2859 uint32_t aarch64_insn
; /* Insn to be recorded. */
2860 uint32_t mem_rec_count
; /* Count of memory records. */
2861 uint32_t reg_rec_count
; /* Count of register records. */
2862 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2863 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2864 } insn_decode_record
;
2866 /* Record handler for data processing - register instructions. */
2869 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2871 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2872 uint32_t record_buf
[4];
2874 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2875 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2876 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
2878 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
2882 /* Logical (shifted register). */
2883 if (insn_bits24_27
== 0x0a)
2884 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
2886 else if (insn_bits24_27
== 0x0b)
2887 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2889 return AARCH64_RECORD_UNKNOWN
;
2891 record_buf
[0] = reg_rd
;
2892 aarch64_insn_r
->reg_rec_count
= 1;
2894 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2898 if (insn_bits24_27
== 0x0b)
2900 /* Data-processing (3 source). */
2901 record_buf
[0] = reg_rd
;
2902 aarch64_insn_r
->reg_rec_count
= 1;
2904 else if (insn_bits24_27
== 0x0a)
2906 if (insn_bits21_23
== 0x00)
2908 /* Add/subtract (with carry). */
2909 record_buf
[0] = reg_rd
;
2910 aarch64_insn_r
->reg_rec_count
= 1;
2911 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
2913 record_buf
[1] = AARCH64_CPSR_REGNUM
;
2914 aarch64_insn_r
->reg_rec_count
= 2;
2917 else if (insn_bits21_23
== 0x02)
2919 /* Conditional compare (register) and conditional compare
2920 (immediate) instructions. */
2921 record_buf
[0] = AARCH64_CPSR_REGNUM
;
2922 aarch64_insn_r
->reg_rec_count
= 1;
2924 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
2926 /* CConditional select. */
2927 /* Data-processing (2 source). */
2928 /* Data-processing (1 source). */
2929 record_buf
[0] = reg_rd
;
2930 aarch64_insn_r
->reg_rec_count
= 1;
2933 return AARCH64_RECORD_UNKNOWN
;
2937 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
2939 return AARCH64_RECORD_SUCCESS
;
2942 /* Record handler for data processing - immediate instructions. */
2945 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
2947 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
2948 uint32_t record_buf
[4];
2950 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2951 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
2952 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
2953 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2955 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
2956 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
2957 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
2959 record_buf
[0] = reg_rd
;
2960 aarch64_insn_r
->reg_rec_count
= 1;
2962 else if (insn_bits24_27
== 0x01)
2964 /* Add/Subtract (immediate). */
2965 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2966 record_buf
[0] = reg_rd
;
2967 aarch64_insn_r
->reg_rec_count
= 1;
2969 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2971 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
2973 /* Logical (immediate). */
2974 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
2975 record_buf
[0] = reg_rd
;
2976 aarch64_insn_r
->reg_rec_count
= 1;
2978 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2981 return AARCH64_RECORD_UNKNOWN
;
2983 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
2985 return AARCH64_RECORD_SUCCESS
;
2988 /* Record handler for branch, exception generation and system instructions. */
/* Records AARCH64_PC_REGNUM for all branch forms (plus AARCH64_LR_REGNUM
   for branch-and-link), CPSR for hint/msr-immediate, Rt for mrs/sysl,
   and defers SVC to the OS-specific tdep->aarch64_syscall_record hook.
   NOTE(review): the trailing arguments of the regcache read, the
   syscall hook call, and REG_ALLOC are missing from this extracted
   copy, as are the brace lines.  */
2991 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
2993 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
2994 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
2995 uint32_t record_buf
[4];
2997 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2998 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
2999 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3001 if (insn_bits28_31
== 0x0d)
3003 /* Exception generation instructions. */
3004 if (insn_bits24_27
== 0x04)
3006 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3007 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3008 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3010 ULONGEST svc_number
;
3012 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3014 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3018 return AARCH64_RECORD_UNSUPPORTED
;
3020 /* System instructions. */
3021 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3023 uint32_t reg_rt
, reg_crn
;
3025 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3026 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3028 /* Record rt in case of sysl and mrs instructions. */
3029 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3031 record_buf
[0] = reg_rt
;
3032 aarch64_insn_r
->reg_rec_count
= 1;
3034 /* Record cpsr for hint and msr(immediate) instructions. */
3035 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3037 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3038 aarch64_insn_r
->reg_rec_count
= 1;
3041 /* Unconditional branch (register). */
3042 else if((insn_bits24_27
& 0x0e) == 0x06)
3044 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3045 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3046 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3049 return AARCH64_RECORD_UNKNOWN
;
3051 /* Unconditional branch (immediate). */
3052 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3054 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3055 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3056 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3059 /* Compare & branch (immediate), Test & branch (immediate) and
3060 Conditional branch (immediate). */
3061 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3063 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3065 return AARCH64_RECORD_SUCCESS
;
3068 /* Record handler for advanced SIMD load and store instructions. */
/* Handles both the single-structure (bit 24 set) and multiple-structure
   forms.  For loads (bit 22) it records the V registers written; for
   stores it records element-sized memory writes at successive offsets
   from the base register's value; with writeback (bit 23) it also
   records the base register Rn.
   NOTE(review): declaration of `address` and the esize/rpt/selem
   assignments inside the switch arms are missing from this extracted
   copy, as are the brace lines.  */
3071 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3074 uint64_t addr_offset
= 0;
3075 uint32_t record_buf
[24];
3076 uint64_t record_buf_mem
[24];
3077 uint32_t reg_rn
, reg_rt
;
3078 uint32_t reg_index
= 0, mem_index
= 0;
3079 uint8_t opcode_bits
, size_bits
;
3081 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3082 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3083 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3084 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3085 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3089 fprintf_unfiltered (gdb_stdlog
,
3090 "Process record: Advanced SIMD load/store\n");
3093 /* Load/store single structure. */
3094 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3096 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3097 scale
= opcode_bits
>> 2;
3098 selem
= ((opcode_bits
& 0x02) |
3099 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3103 if (size_bits
& 0x01)
3104 return AARCH64_RECORD_UNKNOWN
;
3107 if ((size_bits
>> 1) & 0x01)
3108 return AARCH64_RECORD_UNKNOWN
;
3109 if (size_bits
& 0x01)
3111 if (!((opcode_bits
>> 1) & 0x01))
3114 return AARCH64_RECORD_UNKNOWN
;
3118 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3125 return AARCH64_RECORD_UNKNOWN
;
3131 for (sindex
= 0; sindex
< selem
; sindex
++)
3133 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3134 reg_rt
= (reg_rt
+ 1) % 32;
3138 for (sindex
= 0; sindex
< selem
; sindex
++)
3139 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3140 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3143 record_buf_mem
[mem_index
++] = esize
/ 8;
3144 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3146 addr_offset
= addr_offset
+ (esize
/ 8);
3147 reg_rt
= (reg_rt
+ 1) % 32;
3150 /* Load/store multiple structure. */
3153 uint8_t selem
, esize
, rpt
, elements
;
3154 uint8_t eindex
, rindex
;
3156 esize
= 8 << size_bits
;
3157 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3158 elements
= 128 / esize
;
3160 elements
= 64 / esize
;
3162 switch (opcode_bits
)
3164 /*LD/ST4 (4 Registers). */
3169 /*LD/ST1 (4 Registers). */
3174 /*LD/ST3 (3 Registers). */
3179 /*LD/ST1 (3 Registers). */
3184 /*LD/ST1 (1 Register). */
3189 /*LD/ST2 (2 Registers). */
3194 /*LD/ST1 (2 Registers). */
3200 return AARCH64_RECORD_UNSUPPORTED
;
3203 for (rindex
= 0; rindex
< rpt
; rindex
++)
3204 for (eindex
= 0; eindex
< elements
; eindex
++)
3206 uint8_t reg_tt
, sindex
;
3207 reg_tt
= (reg_rt
+ rindex
) % 32;
3208 for (sindex
= 0; sindex
< selem
; sindex
++)
3210 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3211 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3214 record_buf_mem
[mem_index
++] = esize
/ 8;
3215 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3217 addr_offset
= addr_offset
+ (esize
/ 8);
3218 reg_tt
= (reg_tt
+ 1) % 32;
/* Writeback: the base register is also modified.  */
3223 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3224 record_buf
[reg_index
++] = reg_rn
;
3226 aarch64_insn_r
->reg_rec_count
= reg_index
;
3227 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3228 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3230 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3232 return AARCH64_RECORD_SUCCESS
;
3235 /* Record handler for load and store instructions. */
/* Dispatches on bits 24-29 to the load/store sub-classes: exclusive,
   literal, pair, unsigned-immediate, register-offset, and
   immediate/unprivileged; Advanced SIMD forms are delegated to
   aarch64_record_asimd_load_store.  Loads record the destination
   register(s) (V registers when bit 26, the vector flag, is set);
   stores record the target address/size computed from the base
   register plus the decoded offset; writeback forms also record Rn.
   NOTE(review): the declarations of `address`, `imm7_off` and
   `imm9_off`, several brace lines, and the trailing arguments of some
   calls are missing from this extracted copy.  */
3238 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3240 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3241 uint8_t insn_bit23
, insn_bit21
;
3242 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3243 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3244 uint64_t datasize
, offset
;
3245 uint32_t record_buf
[8];
3246 uint64_t record_buf_mem
[8];
3249 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3250 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3251 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3252 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3253 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3254 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3255 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3256 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3257 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3258 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3259 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3261 /* Load/store exclusive. */
3262 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3266 fprintf_unfiltered (gdb_stdlog
,
3267 "Process record: load/store exclusive\n");
3272 record_buf
[0] = reg_rt
;
3273 aarch64_insn_r
->reg_rec_count
= 1;
3276 record_buf
[1] = reg_rt2
;
3277 aarch64_insn_r
->reg_rec_count
= 2;
3283 datasize
= (8 << size_bits
) * 2;
3285 datasize
= (8 << size_bits
);
3286 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3288 record_buf_mem
[0] = datasize
/ 8;
3289 record_buf_mem
[1] = address
;
3290 aarch64_insn_r
->mem_rec_count
= 1;
3293 /* Save register rs. */
3294 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3295 aarch64_insn_r
->reg_rec_count
= 1;
3299 /* Load register (literal) instructions decoding. */
3300 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3304 fprintf_unfiltered (gdb_stdlog
,
3305 "Process record: load register (literal)\n");
3308 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3310 record_buf
[0] = reg_rt
;
3311 aarch64_insn_r
->reg_rec_count
= 1;
3313 /* All types of load/store pair instructions decoding. */
3314 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3318 fprintf_unfiltered (gdb_stdlog
,
3319 "Process record: load/store pair\n");
3326 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3327 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3331 record_buf
[0] = reg_rt
;
3332 record_buf
[1] = reg_rt2
;
3334 aarch64_insn_r
->reg_rec_count
= 2;
/* imm7 is a signed scaled offset; convert two's complement by hand.  */
3339 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3341 size_bits
= size_bits
>> 1;
3342 datasize
= 8 << (2 + size_bits
);
3343 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3344 offset
= offset
<< (2 + size_bits
);
3345 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3347 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3349 if (imm7_off
& 0x40)
3350 address
= address
- offset
;
3352 address
= address
+ offset
;
3355 record_buf_mem
[0] = datasize
/ 8;
3356 record_buf_mem
[1] = address
;
3357 record_buf_mem
[2] = datasize
/ 8;
3358 record_buf_mem
[3] = address
+ (datasize
/ 8);
3359 aarch64_insn_r
->mem_rec_count
= 2;
3361 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3362 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3364 /* Load/store register (unsigned immediate) instructions. */
3365 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3367 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3374 if (size_bits
!= 0x03)
3377 return AARCH64_RECORD_UNKNOWN
;
3381 fprintf_unfiltered (gdb_stdlog
,
3382 "Process record: load/store (unsigned immediate):"
3383 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3389 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3390 datasize
= 8 << size_bits
;
3391 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3393 offset
= offset
<< size_bits
;
3394 address
= address
+ offset
;
3396 record_buf_mem
[0] = datasize
>> 3;
3397 record_buf_mem
[1] = address
;
3398 aarch64_insn_r
->mem_rec_count
= 1;
3403 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3405 record_buf
[0] = reg_rt
;
3406 aarch64_insn_r
->reg_rec_count
= 1;
3409 /* Load/store register (register offset) instructions. */
3410 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3411 && insn_bits10_11
== 0x02 && insn_bit21
)
3415 fprintf_unfiltered (gdb_stdlog
,
3416 "Process record: load/store (register offset)\n");
3418 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3425 if (size_bits
!= 0x03)
3428 return AARCH64_RECORD_UNKNOWN
;
3432 uint64_t reg_rm_val
;
3433 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3434 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
/* NOTE(review): "®_rm_val" above is a mis-encoded "&reg_rm_val".  */
3435 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3436 offset
= reg_rm_val
<< size_bits
;
3438 offset
= reg_rm_val
;
3439 datasize
= 8 << size_bits
;
3440 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3442 address
= address
+ offset
;
3443 record_buf_mem
[0] = datasize
>> 3;
3444 record_buf_mem
[1] = address
;
3445 aarch64_insn_r
->mem_rec_count
= 1;
3450 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3452 record_buf
[0] = reg_rt
;
3453 aarch64_insn_r
->reg_rec_count
= 1;
3456 /* Load/store register (immediate and unprivileged) instructions. */
3457 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3462 fprintf_unfiltered (gdb_stdlog
,
3463 "Process record: load/store (immediate and unprivileged)\n");
3465 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3472 if (size_bits
!= 0x03)
3475 return AARCH64_RECORD_UNKNOWN
;
/* imm9 is a signed offset; convert two's complement by hand.  */
3480 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3481 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3482 datasize
= 8 << size_bits
;
3483 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3485 if (insn_bits10_11
!= 0x01)
3487 if (imm9_off
& 0x0100)
3488 address
= address
- offset
;
3490 address
= address
+ offset
;
3492 record_buf_mem
[0] = datasize
>> 3;
3493 record_buf_mem
[1] = address
;
3494 aarch64_insn_r
->mem_rec_count
= 1;
3499 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3501 record_buf
[0] = reg_rt
;
3502 aarch64_insn_r
->reg_rec_count
= 1;
3504 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3505 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3507 /* Advanced SIMD load/store instructions. */
3509 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3511 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3513 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3515 return AARCH64_RECORD_SUCCESS
;
3518 /* Record handler for data processing SIMD and floating point instructions. */
/* Decodes the FP/SIMD encoding groups (fixed-point conversion,
   conditional compare, 2-source/conditional select, immediate,
   compare, integer conversion, SIMD copy, and a catch-all) and records
   exactly one destination: an X register, a V register, or CPSR — the
   gdb_assert at the end checks reg_rec_count == 1.
   NOTE(review): brace/else lines and the trailing REG_ALLOC argument
   are missing from this extracted copy.  */
3521 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3523 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3524 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3525 uint8_t insn_bits11_14
;
3526 uint32_t record_buf
[2];
3528 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3529 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3530 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3531 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3532 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3533 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3534 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3535 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3536 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3540 fprintf_unfiltered (gdb_stdlog
,
3541 "Process record: data processing SIMD/FP: ");
3544 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3546 /* Floating point - fixed point conversion instructions. */
3550 fprintf_unfiltered (gdb_stdlog
, "FP - fixed point conversion");
3552 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3553 record_buf
[0] = reg_rd
;
3555 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3557 /* Floating point - conditional compare instructions. */
3558 else if (insn_bits10_11
== 0x01)
3561 fprintf_unfiltered (gdb_stdlog
, "FP - conditional compare");
3563 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3565 /* Floating point - data processing (2-source) and
3566 conditional select instructions. */
3567 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3570 fprintf_unfiltered (gdb_stdlog
, "FP - DP (2-source)");
3572 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3574 else if (insn_bits10_11
== 0x00)
3576 /* Floating point - immediate instructions. */
3577 if ((insn_bits12_15
& 0x01) == 0x01
3578 || (insn_bits12_15
& 0x07) == 0x04)
3581 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3582 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3584 /* Floating point - compare instructions. */
3585 else if ((insn_bits12_15
& 0x03) == 0x02)
3588 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3589 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3591 /* Floating point - integer conversions instructions. */
3592 else if (insn_bits12_15
== 0x00)
3594 /* Convert float to integer instruction. */
3595 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3598 fprintf_unfiltered (gdb_stdlog
, "float to int conversion");
3600 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3602 /* Convert integer to float instruction. */
3603 else if ((opcode
>> 1) == 0x01 && !rmode
)
3606 fprintf_unfiltered (gdb_stdlog
, "int to float conversion");
3608 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3610 /* Move float to integer instruction. */
3611 else if ((opcode
>> 1) == 0x03)
3614 fprintf_unfiltered (gdb_stdlog
, "move float to int");
3616 if (!(opcode
& 0x01))
3617 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3619 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3622 return AARCH64_RECORD_UNKNOWN
;
3625 return AARCH64_RECORD_UNKNOWN
;
3628 return AARCH64_RECORD_UNKNOWN
;
3630 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3633 fprintf_unfiltered (gdb_stdlog
, "SIMD copy");
3635 /* Advanced SIMD copy instructions. */
3636 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3637 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3638 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3640 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3641 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3643 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3646 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3648 /* All remaining floating point or advanced SIMD instructions. */
3652 fprintf_unfiltered (gdb_stdlog
, "all remain");
3654 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3658 fprintf_unfiltered (gdb_stdlog
, "\n");
3660 aarch64_insn_r
->reg_rec_count
++;
3661 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3662 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3664 return AARCH64_RECORD_SUCCESS
;
3667 /* Decodes insns type and invokes its record handler. */
3670 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3672 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3674 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3675 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3676 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3677 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3679 /* Data processing - immediate instructions. */
3680 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3681 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3683 /* Branch, exception generation and system instructions. */
3684 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3685 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3687 /* Load and store instructions. */
3688 if (!ins_bit25
&& ins_bit27
)
3689 return aarch64_record_load_store (aarch64_insn_r
);
3691 /* Data processing - register instructions. */
3692 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3693 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3695 /* Data processing - SIMD and floating point instructions. */
3696 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3697 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3699 return AARCH64_RECORD_UNSUPPORTED
;
3702 /* Cleans up local record registers and memory allocations. */
3705 deallocate_reg_mem (insn_decode_record
*record
)
3707 xfree (record
->aarch64_regs
);
3708 xfree (record
->aarch64_mems
);
3711 /* Parse the current instruction and record the values of the registers and
3712 memory that will be changed in current instruction to record_arch_list
3713 return -1 if something is wrong. */
3716 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3717 CORE_ADDR insn_addr
)
3719 uint32_t rec_no
= 0;
3720 uint8_t insn_size
= 4;
3722 ULONGEST t_bit
= 0, insn_id
= 0;
3723 gdb_byte buf
[insn_size
];
3724 insn_decode_record aarch64_record
;
3726 memset (&buf
[0], 0, insn_size
);
3727 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3728 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3729 aarch64_record
.aarch64_insn
3730 = (uint32_t) extract_unsigned_integer (&buf
[0],
3732 gdbarch_byte_order (gdbarch
));
3733 aarch64_record
.regcache
= regcache
;
3734 aarch64_record
.this_addr
= insn_addr
;
3735 aarch64_record
.gdbarch
= gdbarch
;
3737 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3738 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3740 printf_unfiltered (_("Process record does not support instruction "
3741 "0x%0x at address %s.\n"),
3742 aarch64_record
.aarch64_insn
,
3743 paddress (gdbarch
, insn_addr
));
3749 /* Record registers. */
3750 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3752 /* Always record register CPSR. */
3753 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3754 AARCH64_CPSR_REGNUM
);
3755 if (aarch64_record
.aarch64_regs
)
3756 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3757 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3758 aarch64_record
.aarch64_regs
[rec_no
]))
3761 /* Record memories. */
3762 if (aarch64_record
.aarch64_mems
)
3763 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3764 if (record_full_arch_list_add_mem
3765 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3766 aarch64_record
.aarch64_mems
[rec_no
].len
))
3769 if (record_full_arch_list_add_end ())
3773 deallocate_reg_mem (&aarch64_record
);