1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 #include "arch/aarch64-insn.h"
62 #include "opcode/aarch64.h"
/* Bit-field extraction helpers.  SUBMASK(X) yields a mask covering
   bits [0, X]; BIT extracts a single bit; BITS extracts the inclusive
   bit range [ST, FN].  Use an unsigned 64-bit constant so the shift is
   well defined for field widths up to 32 even on ILP32 hosts (the old
   "1L" overflowed for (x) + 1 == 32 when long is 32 bits).  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Each bank of 32 pseudo registers
   aliases a view of the V registers at a different width (Q = 128-bit,
   D = 64-bit, S = 32-bit, H = 16-bit, B = 8-bit).  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
75 /* The standard register names, and all the valid aliases for them. */
78 const char *const name
;
80 } aarch64_register_aliases
[] =
82 /* 64-bit register names. */
83 {"fp", AARCH64_FP_REGNUM
},
84 {"lr", AARCH64_LR_REGNUM
},
85 {"sp", AARCH64_SP_REGNUM
},
87 /* 32-bit register names. */
88 {"w0", AARCH64_X0_REGNUM
+ 0},
89 {"w1", AARCH64_X0_REGNUM
+ 1},
90 {"w2", AARCH64_X0_REGNUM
+ 2},
91 {"w3", AARCH64_X0_REGNUM
+ 3},
92 {"w4", AARCH64_X0_REGNUM
+ 4},
93 {"w5", AARCH64_X0_REGNUM
+ 5},
94 {"w6", AARCH64_X0_REGNUM
+ 6},
95 {"w7", AARCH64_X0_REGNUM
+ 7},
96 {"w8", AARCH64_X0_REGNUM
+ 8},
97 {"w9", AARCH64_X0_REGNUM
+ 9},
98 {"w10", AARCH64_X0_REGNUM
+ 10},
99 {"w11", AARCH64_X0_REGNUM
+ 11},
100 {"w12", AARCH64_X0_REGNUM
+ 12},
101 {"w13", AARCH64_X0_REGNUM
+ 13},
102 {"w14", AARCH64_X0_REGNUM
+ 14},
103 {"w15", AARCH64_X0_REGNUM
+ 15},
104 {"w16", AARCH64_X0_REGNUM
+ 16},
105 {"w17", AARCH64_X0_REGNUM
+ 17},
106 {"w18", AARCH64_X0_REGNUM
+ 18},
107 {"w19", AARCH64_X0_REGNUM
+ 19},
108 {"w20", AARCH64_X0_REGNUM
+ 20},
109 {"w21", AARCH64_X0_REGNUM
+ 21},
110 {"w22", AARCH64_X0_REGNUM
+ 22},
111 {"w23", AARCH64_X0_REGNUM
+ 23},
112 {"w24", AARCH64_X0_REGNUM
+ 24},
113 {"w25", AARCH64_X0_REGNUM
+ 25},
114 {"w26", AARCH64_X0_REGNUM
+ 26},
115 {"w27", AARCH64_X0_REGNUM
+ 27},
116 {"w28", AARCH64_X0_REGNUM
+ 28},
117 {"w29", AARCH64_X0_REGNUM
+ 29},
118 {"w30", AARCH64_X0_REGNUM
+ 30},
121 {"ip0", AARCH64_X0_REGNUM
+ 16},
122 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
158 /* AArch64 prologue cache structure. */
159 struct aarch64_prologue_cache
161 /* The program counter at the start of the function. It is used to
162 identify this frame as a prologue frame. */
165 /* The program counter at the time this frame was created; i.e. where
166 this function was called from. It is used to identify this frame as a
170 /* The stack pointer at the time this frame was created; i.e. the
171 caller's stack pointer when this function was called. It is used
172 to identify this frame. */
175 /* Is the target available to read from? */
178 /* The frame base for this frame is just prev_sp - frame size.
179 FRAMESIZE is the distance from the frame pointer to the
180 initial stack pointer. */
183 /* The register used to hold the frame pointer for this frame. */
186 /* Saved register offsets. */
187 struct trad_frame_saved_reg
*saved_regs
;
/* Callback for "show debug aarch64": print the current setting of the
   aarch64 debug flag.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Isolate the field, then sign-extend it using unsigned arithmetic.
     The previous "((int32_t) insn << shift_l) >> shift_r" relied on
     left-shifting into the sign bit (undefined behavior in C) and on
     implementation-defined signed right shift.  */
  uint32_t field = (insn >> offset) & (uint32_t) (((uint64_t) 1 << width) - 1);
  uint32_t sign = (uint32_t) 1 << (width - 1);

  return (int32_t) ((field ^ sign) - sign);
}
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}
230 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
232 ADDR specifies the address of the opcode.
233 INSN specifies the opcode to test.
234 RD receives the 'rd' field from the decoded instruction.
235 RN receives the 'rn' field from the decoded instruction.
237 Return 1 if the opcodes matches and is decoded, otherwise 0. */
239 aarch64_decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
,
240 unsigned *rn
, int32_t *imm
)
242 if ((insn
& 0x9f000000) == 0x91000000)
247 *rd
= (insn
>> 0) & 0x1f;
248 *rn
= (insn
>> 5) & 0x1f;
249 *imm
= (insn
>> 10) & 0xfff;
250 shift
= (insn
>> 22) & 0x3;
251 op_is_sub
= (insn
>> 30) & 0x1;
270 debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
271 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
279 /* Decode an opcode if it represents a branch via register instruction.
281 ADDR specifies the address of the opcode.
282 INSN specifies the opcode to test.
283 IS_BLR receives the 'op' bit from the decoded instruction.
284 RN receives the 'rn' field from the decoded instruction.
286 Return 1 if the opcodes matches and is decoded, otherwise 0. */
289 aarch64_decode_br (CORE_ADDR addr
, uint32_t insn
, int *is_blr
,
292 /* 8 4 0 6 2 8 4 0 */
293 /* blr 110101100011111100000000000rrrrr */
294 /* br 110101100001111100000000000rrrrr */
295 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
297 *is_blr
= (insn
>> 21) & 1;
298 *rn
= (insn
>> 5) & 0x1f;
302 debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
303 core_addr_to_string_nz (addr
), insn
,
304 *is_blr
? "blr" : "br", *rn
);
312 /* Decode an opcode if it represents a ERET instruction.
314 ADDR specifies the address of the opcode.
315 INSN specifies the opcode to test.
317 Return 1 if the opcodes matches and is decoded, otherwise 0. */
320 aarch64_decode_eret (CORE_ADDR addr
, uint32_t insn
)
322 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
323 if (insn
== 0xd69f03e0)
327 debug_printf ("decode: 0x%s 0x%x eret\n",
328 core_addr_to_string_nz (addr
), insn
);
335 /* Decode an opcode if it represents a MOVZ instruction.
337 ADDR specifies the address of the opcode.
338 INSN specifies the opcode to test.
339 RD receives the 'rd' field from the decoded instruction.
341 Return 1 if the opcodes matches and is decoded, otherwise 0. */
344 aarch64_decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
346 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
348 *rd
= (insn
>> 0) & 0x1f;
352 debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
353 core_addr_to_string_nz (addr
), insn
, *rd
);
360 /* Decode an opcode if it represents a ORR (shifted register)
363 ADDR specifies the address of the opcode.
364 INSN specifies the opcode to test.
365 RD receives the 'rd' field from the decoded instruction.
366 RN receives the 'rn' field from the decoded instruction.
367 RM receives the 'rm' field from the decoded instruction.
368 IMM receives the 'imm6' field from the decoded instruction.
370 Return 1 if the opcodes matches and is decoded, otherwise 0. */
373 aarch64_decode_orr_shifted_register_x (CORE_ADDR addr
, uint32_t insn
,
374 unsigned *rd
, unsigned *rn
,
375 unsigned *rm
, int32_t *imm
)
377 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
379 *rd
= (insn
>> 0) & 0x1f;
380 *rn
= (insn
>> 5) & 0x1f;
381 *rm
= (insn
>> 16) & 0x1f;
382 *imm
= (insn
>> 10) & 0x3f;
386 debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
387 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
395 /* Decode an opcode if it represents a RET instruction.
397 ADDR specifies the address of the opcode.
398 INSN specifies the opcode to test.
399 RN receives the 'rn' field from the decoded instruction.
401 Return 1 if the opcodes matches and is decoded, otherwise 0. */
404 aarch64_decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
406 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
408 *rn
= (insn
>> 5) & 0x1f;
411 debug_printf ("decode: 0x%s 0x%x ret x%u\n",
412 core_addr_to_string_nz (addr
), insn
, *rn
);
419 /* Decode an opcode if it represents the following instructions:
421 STP rt, rt2, [rn, #imm]
422 STP rt, rt2, [rn, #imm]!
424 ADDR specifies the address of the opcode.
425 INSN specifies the opcode to test.
426 RT1 receives the 'rt' field from the decoded instruction.
427 RT2 receives the 'rt2' field from the decoded instruction.
428 RN receives the 'rn' field from the decoded instruction.
429 IMM receives the 'imm' field from the decoded instruction.
430 *WBACK receives the bit 23 from the decoded instruction.
432 Return 1 if the opcodes matches and is decoded, otherwise 0. */
435 aarch64_decode_stp_offset (CORE_ADDR addr
, uint32_t insn
, unsigned *rt1
,
436 unsigned *rt2
, unsigned *rn
, int32_t *imm
,
439 if (decode_masked_match (insn
, 0xff400000, 0xa9000000))
441 *rt1
= (insn
>> 0) & 0x1f;
442 *rn
= (insn
>> 5) & 0x1f;
443 *rt2
= (insn
>> 10) & 0x1f;
444 *imm
= extract_signed_bitfield (insn
, 7, 15);
446 *wback
= bit (insn
, 23);
450 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]%s\n",
451 core_addr_to_string_nz (addr
), insn
, *rt1
, *rt2
,
452 *rn
, *imm
, *wback
? "" : "!");
459 /* Decode an opcode if it represents the following instruction:
462 ADDR specifies the address of the opcode.
463 INSN specifies the opcode to test.
464 IS64 receives size field from the decoded instruction.
465 RT receives the 'rt' field from the decoded instruction.
466 RN receives the 'rn' field from the decoded instruction.
467 IMM receives the 'imm' field from the decoded instruction.
469 Return 1 if the opcodes matches and is decoded, otherwise 0. */
472 aarch64_decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
,
473 unsigned *rt
, unsigned *rn
, int32_t *imm
)
475 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
477 *is64
= (insn
>> 30) & 1;
478 *rt
= (insn
>> 0) & 0x1f;
479 *rn
= (insn
>> 5) & 0x1f;
480 *imm
= extract_signed_bitfield (insn
, 9, 12);
484 debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
485 core_addr_to_string_nz (addr
), insn
,
486 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
493 /* Analyze a prologue, looking for a recognizable stack frame
494 and frame pointer. Scan until we encounter a store that could
495 clobber the stack frame unexpectedly, or an unknown instruction. */
498 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
499 CORE_ADDR start
, CORE_ADDR limit
,
500 struct aarch64_prologue_cache
*cache
)
502 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
504 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
505 struct pv_area
*stack
;
506 struct cleanup
*back_to
;
508 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
509 regs
[i
] = pv_register (i
, 0);
510 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
511 back_to
= make_cleanup_free_pv_area (stack
);
513 for (; start
< limit
; start
+= 4)
534 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
536 if (aarch64_decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
537 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
538 else if (aarch64_decode_adr (start
, insn
, &is_adrp
, &rd
, &offset
)
540 regs
[rd
] = pv_unknown ();
541 else if (aarch64_decode_b (start
, insn
, &is_link
, &offset
))
543 /* Stop analysis on branch. */
546 else if (aarch64_decode_bcond (start
, insn
, &cond
, &offset
))
548 /* Stop analysis on branch. */
551 else if (aarch64_decode_br (start
, insn
, &is_link
, &rn
))
553 /* Stop analysis on branch. */
556 else if (aarch64_decode_cb (start
, insn
, &is64
, &is_cbnz
, &rn
,
559 /* Stop analysis on branch. */
562 else if (aarch64_decode_eret (start
, insn
))
564 /* Stop analysis on branch. */
567 else if (aarch64_decode_movz (start
, insn
, &rd
))
568 regs
[rd
] = pv_unknown ();
569 else if (aarch64_decode_orr_shifted_register_x (start
, insn
, &rd
,
572 if (imm
== 0 && rn
== 31)
578 debug_printf ("aarch64: prologue analysis gave up "
579 "addr=0x%s opcode=0x%x (orr x register)\n",
580 core_addr_to_string_nz (start
), insn
);
585 else if (aarch64_decode_ret (start
, insn
, &rn
))
587 /* Stop analysis on branch. */
590 else if (aarch64_decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
592 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
593 is64
? 8 : 4, regs
[rt
]);
595 else if (aarch64_decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
,
598 /* If recording this store would invalidate the store area
599 (perhaps because rn is not known) then we should abandon
600 further prologue analysis. */
601 if (pv_area_store_would_trash (stack
,
602 pv_add_constant (regs
[rn
], imm
)))
605 if (pv_area_store_would_trash (stack
,
606 pv_add_constant (regs
[rn
], imm
+ 8)))
609 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
611 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
615 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
618 else if (aarch64_decode_tb (start
, insn
, &is_tbnz
, &bit
, &rn
,
621 /* Stop analysis on branch. */
628 debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
630 core_addr_to_string_nz (start
), insn
);
638 do_cleanups (back_to
);
642 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
644 /* Frame pointer is fp. Frame size is constant. */
645 cache
->framereg
= AARCH64_FP_REGNUM
;
646 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
648 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
650 /* Try the stack pointer. */
651 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
652 cache
->framereg
= AARCH64_SP_REGNUM
;
656 /* We're just out of luck. We don't know where the frame is. */
657 cache
->framereg
= -1;
658 cache
->framesize
= 0;
661 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
665 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
666 cache
->saved_regs
[i
].addr
= offset
;
669 do_cleanups (back_to
);
673 /* Implement the "skip_prologue" gdbarch method. */
676 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
680 CORE_ADDR func_addr
, limit_pc
;
681 struct symtab_and_line sal
;
683 /* See if we can determine the end of the prologue via the symbol
684 table. If so, then return either PC, or the PC after the
685 prologue, whichever is greater. */
686 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
688 CORE_ADDR post_prologue_pc
689 = skip_prologue_using_sal (gdbarch
, func_addr
);
691 if (post_prologue_pc
!= 0)
692 return max (pc
, post_prologue_pc
);
695 /* Can't determine prologue from the symbol table, need to examine
698 /* Find an upper limit on the function prologue using the debug
699 information. If the debug information could not be used to
700 provide that bound, then use an arbitrary large number as the
702 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
704 limit_pc
= pc
+ 128; /* Magic. */
706 /* Try disassembling prologue. */
707 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
710 /* Scan the function prologue for THIS_FRAME and populate the prologue
714 aarch64_scan_prologue (struct frame_info
*this_frame
,
715 struct aarch64_prologue_cache
*cache
)
717 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
718 CORE_ADDR prologue_start
;
719 CORE_ADDR prologue_end
;
720 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
721 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
723 cache
->prev_pc
= prev_pc
;
725 /* Assume we do not find a frame. */
726 cache
->framereg
= -1;
727 cache
->framesize
= 0;
729 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
732 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
736 /* No line info so use the current PC. */
737 prologue_end
= prev_pc
;
739 else if (sal
.end
< prologue_end
)
741 /* The next line begins after the function end. */
742 prologue_end
= sal
.end
;
745 prologue_end
= min (prologue_end
, prev_pc
);
746 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
753 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
755 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
759 cache
->framereg
= AARCH64_FP_REGNUM
;
760 cache
->framesize
= 16;
761 cache
->saved_regs
[29].addr
= 0;
762 cache
->saved_regs
[30].addr
= 8;
766 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
767 function may throw an exception if the inferior's registers or memory is
771 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
772 struct aarch64_prologue_cache
*cache
)
774 CORE_ADDR unwound_fp
;
777 aarch64_scan_prologue (this_frame
, cache
);
779 if (cache
->framereg
== -1)
782 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
786 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
788 /* Calculate actual addresses of saved registers using offsets
789 determined by aarch64_analyze_prologue. */
790 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
791 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
792 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
794 cache
->func
= get_frame_func (this_frame
);
796 cache
->available_p
= 1;
799 /* Allocate and fill in *THIS_CACHE with information about the prologue of
800 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
801 Return a pointer to the current aarch64_prologue_cache in
804 static struct aarch64_prologue_cache
*
805 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
807 struct aarch64_prologue_cache
*cache
;
809 if (*this_cache
!= NULL
)
810 return (struct aarch64_prologue_cache
*) *this_cache
;
812 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
813 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
818 aarch64_make_prologue_cache_1 (this_frame
, cache
);
820 CATCH (ex
, RETURN_MASK_ERROR
)
822 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
823 throw_exception (ex
);
830 /* Implement the "stop_reason" frame_unwind method. */
832 static enum unwind_stop_reason
833 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
836 struct aarch64_prologue_cache
*cache
837 = aarch64_make_prologue_cache (this_frame
, this_cache
);
839 if (!cache
->available_p
)
840 return UNWIND_UNAVAILABLE
;
842 /* Halt the backtrace at "_start". */
843 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
844 return UNWIND_OUTERMOST
;
846 /* We've hit a wall, stop. */
847 if (cache
->prev_sp
== 0)
848 return UNWIND_OUTERMOST
;
850 return UNWIND_NO_REASON
;
853 /* Our frame ID for a normal frame is the current function's starting
854 PC and the caller's SP when we were called. */
857 aarch64_prologue_this_id (struct frame_info
*this_frame
,
858 void **this_cache
, struct frame_id
*this_id
)
860 struct aarch64_prologue_cache
*cache
861 = aarch64_make_prologue_cache (this_frame
, this_cache
);
863 if (!cache
->available_p
)
864 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
866 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
869 /* Implement the "prev_register" frame_unwind method. */
871 static struct value
*
872 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
873 void **this_cache
, int prev_regnum
)
875 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
876 struct aarch64_prologue_cache
*cache
877 = aarch64_make_prologue_cache (this_frame
, this_cache
);
879 /* If we are asked to unwind the PC, then we need to return the LR
880 instead. The prologue may save PC, but it will point into this
881 frame's prologue, not the next frame's resume location. */
882 if (prev_regnum
== AARCH64_PC_REGNUM
)
886 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
887 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
890 /* SP is generally not saved to the stack, but this frame is
891 identified by the next frame's stack pointer at the time of the
892 call. The value was already reconstructed into PREV_SP. */
905 if (prev_regnum
== AARCH64_SP_REGNUM
)
906 return frame_unwind_got_constant (this_frame
, prev_regnum
,
909 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
913 /* AArch64 prologue unwinder. */
914 struct frame_unwind aarch64_prologue_unwind
=
917 aarch64_prologue_frame_unwind_stop_reason
,
918 aarch64_prologue_this_id
,
919 aarch64_prologue_prev_register
,
921 default_frame_sniffer
924 /* Allocate and fill in *THIS_CACHE with information about the prologue of
925 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
926 Return a pointer to the current aarch64_prologue_cache in
929 static struct aarch64_prologue_cache
*
930 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
932 struct aarch64_prologue_cache
*cache
;
934 if (*this_cache
!= NULL
)
935 return (struct aarch64_prologue_cache
*) *this_cache
;
937 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
938 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
943 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
945 cache
->prev_pc
= get_frame_pc (this_frame
);
946 cache
->available_p
= 1;
948 CATCH (ex
, RETURN_MASK_ERROR
)
950 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
951 throw_exception (ex
);
958 /* Implement the "stop_reason" frame_unwind method. */
960 static enum unwind_stop_reason
961 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
964 struct aarch64_prologue_cache
*cache
965 = aarch64_make_stub_cache (this_frame
, this_cache
);
967 if (!cache
->available_p
)
968 return UNWIND_UNAVAILABLE
;
970 return UNWIND_NO_REASON
;
973 /* Our frame ID for a stub frame is the current SP and LR. */
976 aarch64_stub_this_id (struct frame_info
*this_frame
,
977 void **this_cache
, struct frame_id
*this_id
)
979 struct aarch64_prologue_cache
*cache
980 = aarch64_make_stub_cache (this_frame
, this_cache
);
982 if (cache
->available_p
)
983 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
985 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
988 /* Implement the "sniffer" frame_unwind method. */
991 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
992 struct frame_info
*this_frame
,
993 void **this_prologue_cache
)
995 CORE_ADDR addr_in_block
;
998 addr_in_block
= get_frame_address_in_block (this_frame
);
999 if (in_plt_section (addr_in_block
)
1000 /* We also use the stub winder if the target memory is unreadable
1001 to avoid having the prologue unwinder trying to read it. */
1002 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1008 /* AArch64 stub unwinder. */
1009 struct frame_unwind aarch64_stub_unwind
=
1012 aarch64_stub_frame_unwind_stop_reason
,
1013 aarch64_stub_this_id
,
1014 aarch64_prologue_prev_register
,
1016 aarch64_stub_unwind_sniffer
1019 /* Return the frame base address of *THIS_FRAME. */
1022 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1024 struct aarch64_prologue_cache
*cache
1025 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1027 return cache
->prev_sp
- cache
->framesize
;
1030 /* AArch64 default frame base information. */
1031 struct frame_base aarch64_normal_base
=
1033 &aarch64_prologue_unwind
,
1034 aarch64_normal_frame_base
,
1035 aarch64_normal_frame_base
,
1036 aarch64_normal_frame_base
1039 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1040 dummy frame. The frame ID's base needs to match the TOS value
1041 saved by save_dummy_frame_tos () and returned from
1042 aarch64_push_dummy_call, and the PC needs to match the dummy
1043 frame's breakpoint. */
1045 static struct frame_id
1046 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1048 return frame_id_build (get_frame_register_unsigned (this_frame
,
1050 get_frame_pc (this_frame
));
1053 /* Implement the "unwind_pc" gdbarch method. */
1056 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1059 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1064 /* Implement the "unwind_sp" gdbarch method. */
1067 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1069 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1072 /* Return the value of the REGNUM register in the previous frame of
1075 static struct value
*
1076 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1077 void **this_cache
, int regnum
)
1079 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1084 case AARCH64_PC_REGNUM
:
1085 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1086 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1089 internal_error (__FILE__
, __LINE__
,
1090 _("Unexpected register %d"), regnum
);
1094 /* Implement the "init_reg" dwarf2_frame_ops method. */
1097 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1098 struct dwarf2_frame_state_reg
*reg
,
1099 struct frame_info
*this_frame
)
1103 case AARCH64_PC_REGNUM
:
1104 reg
->how
= DWARF2_FRAME_REG_FN
;
1105 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1107 case AARCH64_SP_REGNUM
:
1108 reg
->how
= DWARF2_FRAME_REG_CFA
;
1113 /* When arguments must be pushed onto the stack, they go on in reverse
1114 order. The code below implements a FILO (stack) to do this. */
1118 /* Value to pass on stack. */
1119 const gdb_byte
*data
;
1121 /* Size in bytes of value to pass on stack. */
1125 DEF_VEC_O (stack_item_t
);
1127 /* Return the alignment (in bytes) of the given type. */
1130 aarch64_type_align (struct type
*t
)
1136 t
= check_typedef (t
);
1137 switch (TYPE_CODE (t
))
1140 /* Should never happen. */
1141 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1145 case TYPE_CODE_ENUM
:
1149 case TYPE_CODE_RANGE
:
1150 case TYPE_CODE_BITSTRING
:
1152 case TYPE_CODE_CHAR
:
1153 case TYPE_CODE_BOOL
:
1154 return TYPE_LENGTH (t
);
1156 case TYPE_CODE_ARRAY
:
1157 case TYPE_CODE_COMPLEX
:
1158 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1160 case TYPE_CODE_STRUCT
:
1161 case TYPE_CODE_UNION
:
1163 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1165 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1173 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1174 defined in the AAPCS64 ABI document; otherwise return 0. */
1177 is_hfa (struct type
*ty
)
1179 switch (TYPE_CODE (ty
))
1181 case TYPE_CODE_ARRAY
:
1183 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1184 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1189 case TYPE_CODE_UNION
:
1190 case TYPE_CODE_STRUCT
:
1192 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1194 struct type
*member0_type
;
1196 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1197 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1201 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1203 struct type
*member1_type
;
1205 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1206 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1207 || (TYPE_LENGTH (member0_type
)
1208 != TYPE_LENGTH (member1_type
)))
1224 /* AArch64 function call information structure. */
1225 struct aarch64_call_info
1227 /* the current argument number. */
1230 /* The next general purpose register number, equivalent to NGRN as
1231 described in the AArch64 Procedure Call Standard. */
1234 /* The next SIMD and floating point register number, equivalent to
1235 NSRN as described in the AArch64 Procedure Call Standard. */
1238 /* The next stacked argument address, equivalent to NSAA as
1239 described in the AArch64 Procedure Call Standard. */
1242 /* Stack item vector. */
1243 VEC(stack_item_t
) *si
;
1246 /* Pass a value in a sequence of consecutive X registers. The caller
1247 is responsbile for ensuring sufficient registers are available. */
1250 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1251 struct aarch64_call_info
*info
, struct type
*type
,
1252 const bfd_byte
*buf
)
1254 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1255 int len
= TYPE_LENGTH (type
);
1256 enum type_code typecode
= TYPE_CODE (type
);
1257 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1263 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1264 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1268 /* Adjust sub-word struct/union args when big-endian. */
1269 if (byte_order
== BFD_ENDIAN_BIG
1270 && partial_len
< X_REGISTER_SIZE
1271 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1272 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1276 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1277 gdbarch_register_name (gdbarch
, regnum
),
1278 phex (regval
, X_REGISTER_SIZE
));
1280 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1287 /* Attempt to marshall a value in a V register. Return 1 if
1288 successful, or 0 if insufficient registers are available. This
1289 function, unlike the equivalent pass_in_x() function does not
1290 handle arguments spread across multiple registers. */
1293 pass_in_v (struct gdbarch
*gdbarch
,
1294 struct regcache
*regcache
,
1295 struct aarch64_call_info
*info
,
1296 const bfd_byte
*buf
)
1300 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1301 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1306 regcache_cooked_write (regcache
, regnum
, buf
);
1309 debug_printf ("arg %d in %s\n", info
->argnum
,
1310 gdbarch_register_name (gdbarch
, regnum
));
1318 /* Marshall an argument onto the stack. */
1321 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1322 const bfd_byte
*buf
)
1324 int len
= TYPE_LENGTH (type
);
1330 align
= aarch64_type_align (type
);
1332 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1333 Natural alignment of the argument's type. */
1334 align
= align_up (align
, 8);
1336 /* The AArch64 PCS requires at most doubleword alignment. */
1342 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1348 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1351 if (info
->nsaa
& (align
- 1))
1353 /* Push stack alignment padding. */
1354 int pad
= align
- (info
->nsaa
& (align
- 1));
1359 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1364 /* Marshall an argument into a sequence of one or more consecutive X
1365 registers or, if insufficient X registers are available then onto
1369 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1370 struct aarch64_call_info
*info
, struct type
*type
,
1371 const bfd_byte
*buf
)
1373 int len
= TYPE_LENGTH (type
);
1374 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1376 /* PCS C.13 - Pass in registers if we have enough spare */
1377 if (info
->ngrn
+ nregs
<= 8)
1379 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1380 info
->ngrn
+= nregs
;
1385 pass_on_stack (info
, type
, buf
);
1389 /* Pass a value in a V register, or on the stack if insufficient are
1393 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1394 struct regcache
*regcache
,
1395 struct aarch64_call_info
*info
,
1397 const bfd_byte
*buf
)
1399 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1400 pass_on_stack (info
, type
, buf
);
1403 /* Implement the "push_dummy_call" gdbarch method. */
1406 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1407 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1409 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1410 CORE_ADDR struct_addr
)
1416 struct aarch64_call_info info
;
1417 struct type
*func_type
;
1418 struct type
*return_type
;
1419 int lang_struct_return
;
1421 memset (&info
, 0, sizeof (info
));
1423 /* We need to know what the type of the called function is in order
1424 to determine the number of named/anonymous arguments for the
1425 actual argument placement, and the return type in order to handle
1426 return value correctly.
1428 The generic code above us views the decision of return in memory
1429 or return in registers as a two stage processes. The language
1430 handler is consulted first and may decide to return in memory (eg
1431 class with copy constructor returned by value), this will cause
1432 the generic code to allocate space AND insert an initial leading
1435 If the language code does not decide to pass in memory then the
1436 target code is consulted.
1438 If the language code decides to pass in memory we want to move
1439 the pointer inserted as the initial argument from the argument
1440 list and into X8, the conventional AArch64 struct return pointer
1443 This is slightly awkward, ideally the flag "lang_struct_return"
1444 would be passed to the targets implementation of push_dummy_call.
1445 Rather that change the target interface we call the language code
1446 directly ourselves. */
1448 func_type
= check_typedef (value_type (function
));
1450 /* Dereference function pointer types. */
1451 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1452 func_type
= TYPE_TARGET_TYPE (func_type
);
1454 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1455 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1457 /* If language_pass_by_reference () returned true we will have been
1458 given an additional initial argument, a hidden pointer to the
1459 return slot in memory. */
1460 return_type
= TYPE_TARGET_TYPE (func_type
);
1461 lang_struct_return
= language_pass_by_reference (return_type
);
1463 /* Set the return address. For the AArch64, the return breakpoint
1464 is always at BP_ADDR. */
1465 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1467 /* If we were given an initial argument for the return slot because
1468 lang_struct_return was true, lose it. */
1469 if (lang_struct_return
)
1475 /* The struct_return pointer occupies X8. */
1476 if (struct_return
|| lang_struct_return
)
1480 debug_printf ("struct return in %s = 0x%s\n",
1481 gdbarch_register_name (gdbarch
,
1482 AARCH64_STRUCT_RETURN_REGNUM
),
1483 paddress (gdbarch
, struct_addr
));
1485 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1489 for (argnum
= 0; argnum
< nargs
; argnum
++)
1491 struct value
*arg
= args
[argnum
];
1492 struct type
*arg_type
;
1495 arg_type
= check_typedef (value_type (arg
));
1496 len
= TYPE_LENGTH (arg_type
);
1498 switch (TYPE_CODE (arg_type
))
1501 case TYPE_CODE_BOOL
:
1502 case TYPE_CODE_CHAR
:
1503 case TYPE_CODE_RANGE
:
1504 case TYPE_CODE_ENUM
:
1507 /* Promote to 32 bit integer. */
1508 if (TYPE_UNSIGNED (arg_type
))
1509 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1511 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1512 arg
= value_cast (arg_type
, arg
);
1514 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1515 value_contents (arg
));
1518 case TYPE_CODE_COMPLEX
:
1521 const bfd_byte
*buf
= value_contents (arg
);
1522 struct type
*target_type
=
1523 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1525 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1526 pass_in_v (gdbarch
, regcache
, &info
,
1527 buf
+ TYPE_LENGTH (target_type
));
1532 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1536 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1537 value_contents (arg
));
1540 case TYPE_CODE_STRUCT
:
1541 case TYPE_CODE_ARRAY
:
1542 case TYPE_CODE_UNION
:
1543 if (is_hfa (arg_type
))
1545 int elements
= TYPE_NFIELDS (arg_type
);
1547 /* Homogeneous Aggregates */
1548 if (info
.nsrn
+ elements
< 8)
1552 for (i
= 0; i
< elements
; i
++)
1554 /* We know that we have sufficient registers
1555 available therefore this will never fallback
1557 struct value
*field
=
1558 value_primitive_field (arg
, 0, i
, arg_type
);
1559 struct type
*field_type
=
1560 check_typedef (value_type (field
));
1562 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1563 value_contents_writeable (field
));
1569 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1574 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1575 invisible reference. */
1577 /* Allocate aligned storage. */
1578 sp
= align_down (sp
- len
, 16);
1580 /* Write the real data into the stack. */
1581 write_memory (sp
, value_contents (arg
), len
);
1583 /* Construct the indirection. */
1584 arg_type
= lookup_pointer_type (arg_type
);
1585 arg
= value_from_pointer (arg_type
, sp
);
1586 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1587 value_contents (arg
));
1590 /* PCS C.15 / C.18 multiple values pass. */
1591 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1592 value_contents (arg
));
1596 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1597 value_contents (arg
));
1602 /* Make sure stack retains 16 byte alignment. */
1604 sp
-= 16 - (info
.nsaa
& 15);
1606 while (!VEC_empty (stack_item_t
, info
.si
))
1608 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1611 write_memory (sp
, si
->data
, si
->len
);
1612 VEC_pop (stack_item_t
, info
.si
);
1615 VEC_free (stack_item_t
, info
.si
);
1617 /* Finally, update the SP register. */
1618 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1623 /* Implement the "frame_align" gdbarch method. */
1626 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1628 /* Align the stack to sixteen bytes. */
1629 return sp
& ~(CORE_ADDR
) 15;
1632 /* Return the type for an AdvSISD Q register. */
1634 static struct type
*
1635 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1637 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1639 if (tdep
->vnq_type
== NULL
)
1644 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1647 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1648 append_composite_type_field (t
, "u", elem
);
1650 elem
= builtin_type (gdbarch
)->builtin_int128
;
1651 append_composite_type_field (t
, "s", elem
);
1656 return tdep
->vnq_type
;
1659 /* Return the type for an AdvSISD D register. */
1661 static struct type
*
1662 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1664 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1666 if (tdep
->vnd_type
== NULL
)
1671 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1674 elem
= builtin_type (gdbarch
)->builtin_double
;
1675 append_composite_type_field (t
, "f", elem
);
1677 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1678 append_composite_type_field (t
, "u", elem
);
1680 elem
= builtin_type (gdbarch
)->builtin_int64
;
1681 append_composite_type_field (t
, "s", elem
);
1686 return tdep
->vnd_type
;
1689 /* Return the type for an AdvSISD S register. */
1691 static struct type
*
1692 aarch64_vns_type (struct gdbarch
*gdbarch
)
1694 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1696 if (tdep
->vns_type
== NULL
)
1701 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1704 elem
= builtin_type (gdbarch
)->builtin_float
;
1705 append_composite_type_field (t
, "f", elem
);
1707 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1708 append_composite_type_field (t
, "u", elem
);
1710 elem
= builtin_type (gdbarch
)->builtin_int32
;
1711 append_composite_type_field (t
, "s", elem
);
1716 return tdep
->vns_type
;
1719 /* Return the type for an AdvSISD H register. */
1721 static struct type
*
1722 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1724 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1726 if (tdep
->vnh_type
== NULL
)
1731 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1734 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1735 append_composite_type_field (t
, "u", elem
);
1737 elem
= builtin_type (gdbarch
)->builtin_int16
;
1738 append_composite_type_field (t
, "s", elem
);
1743 return tdep
->vnh_type
;
1746 /* Return the type for an AdvSISD B register. */
1748 static struct type
*
1749 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1751 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1753 if (tdep
->vnb_type
== NULL
)
1758 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1761 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1762 append_composite_type_field (t
, "u", elem
);
1764 elem
= builtin_type (gdbarch
)->builtin_int8
;
1765 append_composite_type_field (t
, "s", elem
);
1770 return tdep
->vnb_type
;
1773 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1776 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1778 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1779 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1781 if (reg
== AARCH64_DWARF_SP
)
1782 return AARCH64_SP_REGNUM
;
1784 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1785 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1791 /* Implement the "print_insn" gdbarch method. */
1794 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1796 info
->symbols
= NULL
;
1797 return print_insn_aarch64 (memaddr
, info
);
1800 /* AArch64 BRK software debug mode instruction.
1801 Note that AArch64 code is always little-endian.
1802 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1803 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1805 /* Implement the "breakpoint_from_pc" gdbarch method. */
1807 static const gdb_byte
*
1808 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1811 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1813 *lenptr
= sizeof (aarch64_default_breakpoint
);
1814 return aarch64_default_breakpoint
;
1817 /* Extract from an array REGS containing the (raw) register state a
1818 function return value of type TYPE, and copy that, in virtual
1819 format, into VALBUF. */
1822 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1825 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1826 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1828 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1830 bfd_byte buf
[V_REGISTER_SIZE
];
1831 int len
= TYPE_LENGTH (type
);
1833 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1834 memcpy (valbuf
, buf
, len
);
1836 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1837 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1838 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1839 || TYPE_CODE (type
) == TYPE_CODE_PTR
1840 || TYPE_CODE (type
) == TYPE_CODE_REF
1841 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1843 /* If the the type is a plain integer, then the access is
1844 straight-forward. Otherwise we have to play around a bit
1846 int len
= TYPE_LENGTH (type
);
1847 int regno
= AARCH64_X0_REGNUM
;
1852 /* By using store_unsigned_integer we avoid having to do
1853 anything special for small big-endian values. */
1854 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1855 store_unsigned_integer (valbuf
,
1856 (len
> X_REGISTER_SIZE
1857 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1858 len
-= X_REGISTER_SIZE
;
1859 valbuf
+= X_REGISTER_SIZE
;
1862 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
1864 int regno
= AARCH64_V0_REGNUM
;
1865 bfd_byte buf
[V_REGISTER_SIZE
];
1866 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1867 int len
= TYPE_LENGTH (target_type
);
1869 regcache_cooked_read (regs
, regno
, buf
);
1870 memcpy (valbuf
, buf
, len
);
1872 regcache_cooked_read (regs
, regno
+ 1, buf
);
1873 memcpy (valbuf
, buf
, len
);
1876 else if (is_hfa (type
))
1878 int elements
= TYPE_NFIELDS (type
);
1879 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
1880 int len
= TYPE_LENGTH (member_type
);
1883 for (i
= 0; i
< elements
; i
++)
1885 int regno
= AARCH64_V0_REGNUM
+ i
;
1886 bfd_byte buf
[X_REGISTER_SIZE
];
1890 debug_printf ("read HFA return value element %d from %s\n",
1892 gdbarch_register_name (gdbarch
, regno
));
1894 regcache_cooked_read (regs
, regno
, buf
);
1896 memcpy (valbuf
, buf
, len
);
1902 /* For a structure or union the behaviour is as if the value had
1903 been stored to word-aligned memory and then loaded into
1904 registers with 64-bit load instruction(s). */
1905 int len
= TYPE_LENGTH (type
);
1906 int regno
= AARCH64_X0_REGNUM
;
1907 bfd_byte buf
[X_REGISTER_SIZE
];
1911 regcache_cooked_read (regs
, regno
++, buf
);
1912 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1913 len
-= X_REGISTER_SIZE
;
1914 valbuf
+= X_REGISTER_SIZE
;
1920 /* Will a function return an aggregate type in memory or in a
1921 register? Return 0 if an aggregate type can be returned in a
1922 register, 1 if it must be returned in memory. */
1925 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
1928 enum type_code code
;
1930 type
= check_typedef (type
);
1932 /* In the AArch64 ABI, "integer" like aggregate types are returned
1933 in registers. For an aggregate type to be integer like, its size
1934 must be less than or equal to 4 * X_REGISTER_SIZE. */
1938 /* PCS B.5 If the argument is a Named HFA, then the argument is
1943 if (TYPE_LENGTH (type
) > 16)
1945 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1946 invisible reference. */
1954 /* Write into appropriate registers a function return value of type
1955 TYPE, given in virtual format. */
1958 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
1959 const gdb_byte
*valbuf
)
1961 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1962 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1964 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1966 bfd_byte buf
[V_REGISTER_SIZE
];
1967 int len
= TYPE_LENGTH (type
);
1969 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
1970 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
1972 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1973 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1974 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1975 || TYPE_CODE (type
) == TYPE_CODE_PTR
1976 || TYPE_CODE (type
) == TYPE_CODE_REF
1977 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1979 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
1981 /* Values of one word or less are zero/sign-extended and
1983 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
1984 LONGEST val
= unpack_long (type
, valbuf
);
1986 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
1987 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
1991 /* Integral values greater than one word are stored in
1992 consecutive registers starting with r0. This will always
1993 be a multiple of the regiser size. */
1994 int len
= TYPE_LENGTH (type
);
1995 int regno
= AARCH64_X0_REGNUM
;
1999 regcache_cooked_write (regs
, regno
++, valbuf
);
2000 len
-= X_REGISTER_SIZE
;
2001 valbuf
+= X_REGISTER_SIZE
;
2005 else if (is_hfa (type
))
2007 int elements
= TYPE_NFIELDS (type
);
2008 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2009 int len
= TYPE_LENGTH (member_type
);
2012 for (i
= 0; i
< elements
; i
++)
2014 int regno
= AARCH64_V0_REGNUM
+ i
;
2015 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2019 debug_printf ("write HFA return value element %d to %s\n",
2021 gdbarch_register_name (gdbarch
, regno
));
2024 memcpy (tmpbuf
, valbuf
, len
);
2025 regcache_cooked_write (regs
, regno
, tmpbuf
);
2031 /* For a structure or union the behaviour is as if the value had
2032 been stored to word-aligned memory and then loaded into
2033 registers with 64-bit load instruction(s). */
2034 int len
= TYPE_LENGTH (type
);
2035 int regno
= AARCH64_X0_REGNUM
;
2036 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2040 memcpy (tmpbuf
, valbuf
,
2041 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2042 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2043 len
-= X_REGISTER_SIZE
;
2044 valbuf
+= X_REGISTER_SIZE
;
2049 /* Implement the "return_value" gdbarch method. */
2051 static enum return_value_convention
2052 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2053 struct type
*valtype
, struct regcache
*regcache
,
2054 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2056 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2058 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2059 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2060 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2062 if (aarch64_return_in_memory (gdbarch
, valtype
))
2065 debug_printf ("return value in memory\n");
2066 return RETURN_VALUE_STRUCT_CONVENTION
;
2071 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2074 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2077 debug_printf ("return value in registers\n");
2079 return RETURN_VALUE_REGISTER_CONVENTION
;
2082 /* Implement the "get_longjmp_target" gdbarch method. */
2085 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2088 gdb_byte buf
[X_REGISTER_SIZE
];
2089 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2090 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2091 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2093 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2095 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2099 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2103 /* Implement the "gen_return_address" gdbarch method. */
2106 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2107 struct agent_expr
*ax
, struct axs_value
*value
,
2110 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2111 value
->kind
= axs_lvalue_register
;
2112 value
->u
.reg
= AARCH64_LR_REGNUM
;
2116 /* Return the pseudo register name corresponding to register regnum. */
2119 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2121 static const char *const q_name
[] =
2123 "q0", "q1", "q2", "q3",
2124 "q4", "q5", "q6", "q7",
2125 "q8", "q9", "q10", "q11",
2126 "q12", "q13", "q14", "q15",
2127 "q16", "q17", "q18", "q19",
2128 "q20", "q21", "q22", "q23",
2129 "q24", "q25", "q26", "q27",
2130 "q28", "q29", "q30", "q31",
2133 static const char *const d_name
[] =
2135 "d0", "d1", "d2", "d3",
2136 "d4", "d5", "d6", "d7",
2137 "d8", "d9", "d10", "d11",
2138 "d12", "d13", "d14", "d15",
2139 "d16", "d17", "d18", "d19",
2140 "d20", "d21", "d22", "d23",
2141 "d24", "d25", "d26", "d27",
2142 "d28", "d29", "d30", "d31",
2145 static const char *const s_name
[] =
2147 "s0", "s1", "s2", "s3",
2148 "s4", "s5", "s6", "s7",
2149 "s8", "s9", "s10", "s11",
2150 "s12", "s13", "s14", "s15",
2151 "s16", "s17", "s18", "s19",
2152 "s20", "s21", "s22", "s23",
2153 "s24", "s25", "s26", "s27",
2154 "s28", "s29", "s30", "s31",
2157 static const char *const h_name
[] =
2159 "h0", "h1", "h2", "h3",
2160 "h4", "h5", "h6", "h7",
2161 "h8", "h9", "h10", "h11",
2162 "h12", "h13", "h14", "h15",
2163 "h16", "h17", "h18", "h19",
2164 "h20", "h21", "h22", "h23",
2165 "h24", "h25", "h26", "h27",
2166 "h28", "h29", "h30", "h31",
2169 static const char *const b_name
[] =
2171 "b0", "b1", "b2", "b3",
2172 "b4", "b5", "b6", "b7",
2173 "b8", "b9", "b10", "b11",
2174 "b12", "b13", "b14", "b15",
2175 "b16", "b17", "b18", "b19",
2176 "b20", "b21", "b22", "b23",
2177 "b24", "b25", "b26", "b27",
2178 "b28", "b29", "b30", "b31",
2181 regnum
-= gdbarch_num_regs (gdbarch
);
2183 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2184 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2186 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2187 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2189 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2190 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2192 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2193 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2195 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2196 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2198 internal_error (__FILE__
, __LINE__
,
2199 _("aarch64_pseudo_register_name: bad register number %d"),
2203 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2205 static struct type
*
2206 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2208 regnum
-= gdbarch_num_regs (gdbarch
);
2210 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2211 return aarch64_vnq_type (gdbarch
);
2213 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2214 return aarch64_vnd_type (gdbarch
);
2216 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2217 return aarch64_vns_type (gdbarch
);
2219 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2220 return aarch64_vnh_type (gdbarch
);
2222 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2223 return aarch64_vnb_type (gdbarch
);
2225 internal_error (__FILE__
, __LINE__
,
2226 _("aarch64_pseudo_register_type: bad register number %d"),
2230 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2233 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2234 struct reggroup
*group
)
2236 regnum
-= gdbarch_num_regs (gdbarch
);
2238 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2239 return group
== all_reggroup
|| group
== vector_reggroup
;
2240 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2241 return (group
== all_reggroup
|| group
== vector_reggroup
2242 || group
== float_reggroup
);
2243 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2244 return (group
== all_reggroup
|| group
== vector_reggroup
2245 || group
== float_reggroup
);
2246 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2247 return group
== all_reggroup
|| group
== vector_reggroup
;
2248 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2249 return group
== all_reggroup
|| group
== vector_reggroup
;
2251 return group
== all_reggroup
;
2254 /* Implement the "pseudo_register_read_value" gdbarch method. */
2256 static struct value
*
2257 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2258 struct regcache
*regcache
,
2261 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2262 struct value
*result_value
;
2265 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2266 VALUE_LVAL (result_value
) = lval_register
;
2267 VALUE_REGNUM (result_value
) = regnum
;
2268 buf
= value_contents_raw (result_value
);
2270 regnum
-= gdbarch_num_regs (gdbarch
);
2272 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2274 enum register_status status
;
2277 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2278 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2279 if (status
!= REG_VALID
)
2280 mark_value_bytes_unavailable (result_value
, 0,
2281 TYPE_LENGTH (value_type (result_value
)));
2283 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2284 return result_value
;
2287 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2289 enum register_status status
;
2292 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2293 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2294 if (status
!= REG_VALID
)
2295 mark_value_bytes_unavailable (result_value
, 0,
2296 TYPE_LENGTH (value_type (result_value
)));
2298 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2299 return result_value
;
2302 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2304 enum register_status status
;
2307 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2308 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2309 if (status
!= REG_VALID
)
2310 mark_value_bytes_unavailable (result_value
, 0,
2311 TYPE_LENGTH (value_type (result_value
)));
2313 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2314 return result_value
;
2317 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2319 enum register_status status
;
2322 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2323 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2324 if (status
!= REG_VALID
)
2325 mark_value_bytes_unavailable (result_value
, 0,
2326 TYPE_LENGTH (value_type (result_value
)));
2328 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2329 return result_value
;
2332 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2334 enum register_status status
;
2337 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2338 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2339 if (status
!= REG_VALID
)
2340 mark_value_bytes_unavailable (result_value
, 0,
2341 TYPE_LENGTH (value_type (result_value
)));
2343 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2344 return result_value
;
2347 gdb_assert_not_reached ("regnum out of bound");
2350 /* Implement the "pseudo_register_write" gdbarch method. */
2353 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2354 int regnum
, const gdb_byte
*buf
)
2356 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2358 /* Ensure the register buffer is zero, we want gdb writes of the
2359 various 'scalar' pseudo registers to behavior like architectural
2360 writes, register width bytes are written the remainder are set to
2362 memset (reg_buf
, 0, sizeof (reg_buf
));
2364 regnum
-= gdbarch_num_regs (gdbarch
);
2366 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2368 /* pseudo Q registers */
2371 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2372 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2373 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2377 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2379 /* pseudo D registers */
2382 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2383 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2384 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2388 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2392 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2393 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2394 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2398 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2400 /* pseudo H registers */
2403 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2404 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2405 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2409 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2411 /* pseudo B registers */
2414 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2415 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2416 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2420 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.  BATON points at the GDB
   register number to read.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = (const int *) baton;

  return value_of_register (*reg_p, frame);
}
2434 /* Implement the "software_single_step" gdbarch method, needed to
2435 single step through atomic sequences on AArch64. */
2438 aarch64_software_single_step (struct frame_info
*frame
)
2440 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2441 struct address_space
*aspace
= get_frame_address_space (frame
);
2442 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2443 const int insn_size
= 4;
2444 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2445 CORE_ADDR pc
= get_frame_pc (frame
);
2446 CORE_ADDR breaks
[2] = { -1, -1 };
2448 CORE_ADDR closing_insn
= 0;
2449 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2450 byte_order_for_code
);
2453 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2454 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2457 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2460 /* Look for a Load Exclusive instruction which begins the sequence. */
2461 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2464 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2467 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2468 byte_order_for_code
);
2470 if (aarch64_decode_insn (insn
, &inst
, 1) != 0)
2472 /* Check if the instruction is a conditional branch. */
2473 if (inst
.opcode
->iclass
== condbranch
)
2475 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2477 if (bc_insn_count
>= 1)
2480 /* It is, so we'll try to set a breakpoint at the destination. */
2481 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2487 /* Look for the Store Exclusive which closes the atomic sequence. */
2488 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2495 /* We didn't find a closing Store Exclusive instruction, fall back. */
2499 /* Insert breakpoint after the end of the atomic sequence. */
2500 breaks
[0] = loc
+ insn_size
;
2502 /* Check for duplicated breakpoints, and also check that the second
2503 breakpoint is not within the atomic sequence. */
2505 && (breaks
[1] == breaks
[0]
2506 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2507 last_breakpoint
= 0;
2509 /* Insert the breakpoint at the end of the sequence, and one at the
2510 destination of the conditional branch, if it exists. */
2511 for (index
= 0; index
<= last_breakpoint
; index
++)
2512 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2517 struct displaced_step_closure
2519 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2520 is being displaced stepping. */
2523 /* PC adjustment offset after displaced stepping. */
2527 /* Data when visiting instructions for displaced stepping. */
2529 struct aarch64_displaced_step_data
2531 struct aarch64_insn_data base
;
2533 /* The address where the instruction will be executed at. */
2535 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2536 uint32_t insn_buf
[DISPLACED_MODIFIED_INSNS
];
2537 /* Number of instructions in INSN_BUF. */
2538 unsigned insn_count
;
2539 /* Registers when doing displaced stepping. */
2540 struct regcache
*regs
;
2542 struct displaced_step_closure
*dsc
;
2545 /* Implementation of aarch64_insn_visitor method "b". */
2548 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2549 struct aarch64_insn_data
*data
)
2551 struct aarch64_displaced_step_data
*dsd
2552 = (struct aarch64_displaced_step_data
*) data
;
2553 int32_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2555 if (can_encode_int32 (new_offset
, 28))
2557 /* Emit B rather than BL, because executing BL on a new address
2558 will get the wrong address into LR. In order to avoid this,
2559 we emit B, and update LR if the instruction is BL. */
2560 emit_b (dsd
->insn_buf
, 0, new_offset
);
2566 emit_nop (dsd
->insn_buf
);
2568 dsd
->dsc
->pc_adjust
= offset
;
2574 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2575 data
->insn_addr
+ 4);
2579 /* Implementation of aarch64_insn_visitor method "b_cond". */
2582 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2583 struct aarch64_insn_data
*data
)
2585 struct aarch64_displaced_step_data
*dsd
2586 = (struct aarch64_displaced_step_data
*) data
;
2587 int32_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2589 /* GDB has to fix up PC after displaced step this instruction
2590 differently according to the condition is true or false. Instead
2591 of checking COND against conditional flags, we can use
2592 the following instructions, and GDB can tell how to fix up PC
2593 according to the PC value.
2595 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2601 emit_bcond (dsd
->insn_buf
, cond
, 8);
2603 dsd
->dsc
->pc_adjust
= offset
;
2604 dsd
->insn_count
= 1;
2607 /* Dynamically allocate a new register. If we know the register
2608 statically, we should make it a global as above instead of using this
2611 static struct aarch64_register
2612 aarch64_register (unsigned num
, int is64
)
2614 return (struct aarch64_register
) { num
, is64
};
2617 /* Implementation of aarch64_insn_visitor method "cb". */
2620 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2621 const unsigned rn
, int is64
,
2622 struct aarch64_insn_data
*data
)
2624 struct aarch64_displaced_step_data
*dsd
2625 = (struct aarch64_displaced_step_data
*) data
;
2626 int32_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2628 /* The offset is out of range for a compare and branch
2629 instruction. We can use the following instructions instead:
2631 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2636 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2637 dsd
->insn_count
= 1;
2639 dsd
->dsc
->pc_adjust
= offset
;
2642 /* Implementation of aarch64_insn_visitor method "tb". */
2645 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2646 const unsigned rt
, unsigned bit
,
2647 struct aarch64_insn_data
*data
)
2649 struct aarch64_displaced_step_data
*dsd
2650 = (struct aarch64_displaced_step_data
*) data
;
2651 int32_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2653 /* The offset is out of range for a test bit and branch
2654 instruction We can use the following instructions instead:
2656 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2662 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2663 dsd
->insn_count
= 1;
2665 dsd
->dsc
->pc_adjust
= offset
;
2668 /* Implementation of aarch64_insn_visitor method "adr". */
2671 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2672 const int is_adrp
, struct aarch64_insn_data
*data
)
2674 struct aarch64_displaced_step_data
*dsd
2675 = (struct aarch64_displaced_step_data
*) data
;
2676 /* We know exactly the address the ADR{P,} instruction will compute.
2677 We can just write it to the destination register. */
2678 CORE_ADDR address
= data
->insn_addr
+ offset
;
2682 /* Clear the lower 12 bits of the offset to get the 4K page. */
2683 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2687 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2690 dsd
->dsc
->pc_adjust
= 4;
2691 emit_nop (dsd
->insn_buf
);
2692 dsd
->insn_count
= 1;
2695 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2698 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2699 const unsigned rt
, const int is64
,
2700 struct aarch64_insn_data
*data
)
2702 struct aarch64_displaced_step_data
*dsd
2703 = (struct aarch64_displaced_step_data
*) data
;
2704 CORE_ADDR address
= data
->insn_addr
+ offset
;
2705 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2707 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2711 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2712 aarch64_register (rt
, 1), zero
);
2714 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2715 aarch64_register (rt
, 1), zero
);
2717 dsd
->dsc
->pc_adjust
= 4;
2720 /* Implementation of aarch64_insn_visitor method "others". */
2723 aarch64_displaced_step_others (const uint32_t insn
,
2724 struct aarch64_insn_data
*data
)
2726 struct aarch64_displaced_step_data
*dsd
2727 = (struct aarch64_displaced_step_data
*) data
;
2729 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2730 dsd
->insn_count
= 1;
2732 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2735 dsd
->dsc
->pc_adjust
= 0;
2738 dsd
->dsc
->pc_adjust
= 4;
2741 static const struct aarch64_insn_visitor visitor
=
2743 aarch64_displaced_step_b
,
2744 aarch64_displaced_step_b_cond
,
2745 aarch64_displaced_step_cb
,
2746 aarch64_displaced_step_tb
,
2747 aarch64_displaced_step_adr
,
2748 aarch64_displaced_step_ldr_literal
,
2749 aarch64_displaced_step_others
,
2752 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2754 struct displaced_step_closure
*
2755 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2756 CORE_ADDR from
, CORE_ADDR to
,
2757 struct regcache
*regs
)
2759 struct displaced_step_closure
*dsc
= NULL
;
2760 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2761 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2762 struct aarch64_displaced_step_data dsd
;
2764 /* Look for a Load Exclusive instruction which begins the sequence. */
2765 if (decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2767 /* We can't displaced step atomic sequences. */
2771 dsc
= XCNEW (struct displaced_step_closure
);
2772 dsd
.base
.insn_addr
= from
;
2777 aarch64_relocate_instruction (insn
, &visitor
,
2778 (struct aarch64_insn_data
*) &dsd
);
2779 gdb_assert (dsd
.insn_count
<= DISPLACED_MODIFIED_INSNS
);
2781 if (dsd
.insn_count
!= 0)
2785 /* Instruction can be relocated to scratch pad. Copy
2786 relocated instruction(s) there. */
2787 for (i
= 0; i
< dsd
.insn_count
; i
++)
2789 if (debug_displaced
)
2791 debug_printf ("displaced: writing insn ");
2792 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
2793 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
2795 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
2796 (ULONGEST
) dsd
.insn_buf
[i
]);
2808 /* Implement the "displaced_step_fixup" gdbarch method. */
2811 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
2812 struct displaced_step_closure
*dsc
,
2813 CORE_ADDR from
, CORE_ADDR to
,
2814 struct regcache
*regs
)
2820 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
2823 /* Condition is true. */
2825 else if (pc
- to
== 4)
2827 /* Condition is false. */
2831 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2834 if (dsc
->pc_adjust
!= 0)
2836 if (debug_displaced
)
2838 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2839 paddress (gdbarch
, from
), dsc
->pc_adjust
);
2841 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
2842 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   AArch64 always uses hardware single-stepping over the relocated
   instruction, so unconditionally return true.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2855 /* Initialize the current architecture based on INFO. If possible,
2856 re-use an architecture from ARCHES, which is a list of
2857 architectures already created during this debugging session.
2859 Called e.g. at program startup, when reading a core file, and when
2860 reading a binary file. */
2862 static struct gdbarch
*
2863 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2865 struct gdbarch_tdep
*tdep
;
2866 struct gdbarch
*gdbarch
;
2867 struct gdbarch_list
*best_arch
;
2868 struct tdesc_arch_data
*tdesc_data
= NULL
;
2869 const struct target_desc
*tdesc
= info
.target_desc
;
2871 int have_fpa_registers
= 1;
2873 const struct tdesc_feature
*feature
;
2875 int num_pseudo_regs
= 0;
2877 /* Ensure we always have a target descriptor. */
2878 if (!tdesc_has_registers (tdesc
))
2879 tdesc
= tdesc_aarch64
;
2883 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2885 if (feature
== NULL
)
2888 tdesc_data
= tdesc_data_alloc ();
2890 /* Validate the descriptor provides the mandatory core R registers
2891 and allocate their numbers. */
2892 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2894 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2895 aarch64_r_register_names
[i
]);
2897 num_regs
= AARCH64_X0_REGNUM
+ i
;
2899 /* Look for the V registers. */
2900 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2903 /* Validate the descriptor provides the mandatory V registers
2904 and allocate their numbers. */
2905 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2907 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2908 aarch64_v_register_names
[i
]);
2910 num_regs
= AARCH64_V0_REGNUM
+ i
;
2912 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2913 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2914 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2915 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2916 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2921 tdesc_data_cleanup (tdesc_data
);
2925 /* AArch64 code is always little-endian. */
2926 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2928 /* If there is already a candidate, use it. */
2929 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2931 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2933 /* Found a match. */
2937 if (best_arch
!= NULL
)
2939 if (tdesc_data
!= NULL
)
2940 tdesc_data_cleanup (tdesc_data
);
2941 return best_arch
->gdbarch
;
2944 tdep
= XCNEW (struct gdbarch_tdep
);
2945 gdbarch
= gdbarch_alloc (&info
, tdep
);
2947 /* This should be low enough for everything. */
2948 tdep
->lowest_pc
= 0x20;
2949 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2950 tdep
->jb_elt_size
= 8;
2952 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2953 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2955 /* Frame handling. */
2956 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2957 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2958 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2960 /* Advance PC across function entry code. */
2961 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2963 /* The stack grows downward. */
2964 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2966 /* Breakpoint manipulation. */
2967 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2968 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2969 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2971 /* Information about registers, etc. */
2972 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2973 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2974 set_gdbarch_num_regs (gdbarch
, num_regs
);
2976 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2977 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2978 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2979 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2980 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2981 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2982 aarch64_pseudo_register_reggroup_p
);
2985 set_gdbarch_short_bit (gdbarch
, 16);
2986 set_gdbarch_int_bit (gdbarch
, 32);
2987 set_gdbarch_float_bit (gdbarch
, 32);
2988 set_gdbarch_double_bit (gdbarch
, 64);
2989 set_gdbarch_long_double_bit (gdbarch
, 128);
2990 set_gdbarch_long_bit (gdbarch
, 64);
2991 set_gdbarch_long_long_bit (gdbarch
, 64);
2992 set_gdbarch_ptr_bit (gdbarch
, 64);
2993 set_gdbarch_char_signed (gdbarch
, 0);
2994 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2995 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2996 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2998 /* Internal <-> external register number maps. */
2999 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3001 /* Returning results. */
3002 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3005 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3007 /* Virtual tables. */
3008 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3010 /* Hook in the ABI-specific overrides, if they have been registered. */
3011 info
.target_desc
= tdesc
;
3012 info
.tdep_info
= (void *) tdesc_data
;
3013 gdbarch_init_osabi (info
, gdbarch
);
3015 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3017 /* Add some default predicates. */
3018 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3019 dwarf2_append_unwinders (gdbarch
);
3020 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3022 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3024 /* Now we have tuned the configuration, set a few final things,
3025 based on what the OS ABI has told us. */
3027 if (tdep
->jb_pc
>= 0)
3028 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3030 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3032 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
3034 /* Add standard register aliases. */
3035 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3036 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3037 value_of_aarch64_user_reg
,
3038 &aarch64_register_aliases
[i
].regnum
);
3044 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3046 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3051 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3052 paddress (gdbarch
, tdep
->lowest_pc
));
3055 /* Suppress warning from -Wmissing-prototypes. */
3056 extern initialize_file_ftype _initialize_aarch64_tdep
;
3059 _initialize_aarch64_tdep (void)
3061 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3064 initialize_tdesc_aarch64 ();
3066 /* Debug this file's internals. */
3067 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3068 Set AArch64 debugging."), _("\
3069 Show AArch64 debugging."), _("\
3070 When on, AArch64 specific debugging is enabled."),
3073 &setdebuglist
, &showdebuglist
);
3076 /* AArch64 process record-replay related structures, defines etc. */
3078 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3081 unsigned int reg_len = LENGTH; \
3084 REGS = XNEWVEC (uint32_t, reg_len); \
3085 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3090 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3093 unsigned int mem_len = LENGTH; \
3096 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3097 memcpy(&MEMS->len, &RECORD_BUF[0], \
3098 sizeof(struct aarch64_mem_r) * LENGTH); \
3103 /* AArch64 record/replay structures and enumerations. */
3105 struct aarch64_mem_r
3107 uint64_t len
; /* Record length. */
3108 uint64_t addr
; /* Memory address. */
3111 enum aarch64_record_result
3113 AARCH64_RECORD_SUCCESS
,
3114 AARCH64_RECORD_FAILURE
,
3115 AARCH64_RECORD_UNSUPPORTED
,
3116 AARCH64_RECORD_UNKNOWN
3119 typedef struct insn_decode_record_t
3121 struct gdbarch
*gdbarch
;
3122 struct regcache
*regcache
;
3123 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3124 uint32_t aarch64_insn
; /* Insn to be recorded. */
3125 uint32_t mem_rec_count
; /* Count of memory records. */
3126 uint32_t reg_rec_count
; /* Count of register records. */
3127 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3128 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3129 } insn_decode_record
;
3131 /* Record handler for data processing - register instructions. */
3134 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3136 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3137 uint32_t record_buf
[4];
3139 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3140 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3141 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3143 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3147 /* Logical (shifted register). */
3148 if (insn_bits24_27
== 0x0a)
3149 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3151 else if (insn_bits24_27
== 0x0b)
3152 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3154 return AARCH64_RECORD_UNKNOWN
;
3156 record_buf
[0] = reg_rd
;
3157 aarch64_insn_r
->reg_rec_count
= 1;
3159 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3163 if (insn_bits24_27
== 0x0b)
3165 /* Data-processing (3 source). */
3166 record_buf
[0] = reg_rd
;
3167 aarch64_insn_r
->reg_rec_count
= 1;
3169 else if (insn_bits24_27
== 0x0a)
3171 if (insn_bits21_23
== 0x00)
3173 /* Add/subtract (with carry). */
3174 record_buf
[0] = reg_rd
;
3175 aarch64_insn_r
->reg_rec_count
= 1;
3176 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3178 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3179 aarch64_insn_r
->reg_rec_count
= 2;
3182 else if (insn_bits21_23
== 0x02)
3184 /* Conditional compare (register) and conditional compare
3185 (immediate) instructions. */
3186 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3187 aarch64_insn_r
->reg_rec_count
= 1;
3189 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3191 /* CConditional select. */
3192 /* Data-processing (2 source). */
3193 /* Data-processing (1 source). */
3194 record_buf
[0] = reg_rd
;
3195 aarch64_insn_r
->reg_rec_count
= 1;
3198 return AARCH64_RECORD_UNKNOWN
;
3202 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3204 return AARCH64_RECORD_SUCCESS
;
3207 /* Record handler for data processing - immediate instructions. */
3210 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3212 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
3213 uint32_t record_buf
[4];
3215 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3216 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3217 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3218 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3220 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3221 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3222 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3224 record_buf
[0] = reg_rd
;
3225 aarch64_insn_r
->reg_rec_count
= 1;
3227 else if (insn_bits24_27
== 0x01)
3229 /* Add/Subtract (immediate). */
3230 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3231 record_buf
[0] = reg_rd
;
3232 aarch64_insn_r
->reg_rec_count
= 1;
3234 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3236 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3238 /* Logical (immediate). */
3239 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3240 record_buf
[0] = reg_rd
;
3241 aarch64_insn_r
->reg_rec_count
= 1;
3243 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3246 return AARCH64_RECORD_UNKNOWN
;
3248 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3250 return AARCH64_RECORD_SUCCESS
;
3253 /* Record handler for branch, exception generation and system instructions. */
3256 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3258 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3259 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3260 uint32_t record_buf
[4];
3262 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3263 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3264 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3266 if (insn_bits28_31
== 0x0d)
3268 /* Exception generation instructions. */
3269 if (insn_bits24_27
== 0x04)
3271 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3272 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3273 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3275 ULONGEST svc_number
;
3277 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3279 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3283 return AARCH64_RECORD_UNSUPPORTED
;
3285 /* System instructions. */
3286 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3288 uint32_t reg_rt
, reg_crn
;
3290 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3291 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3293 /* Record rt in case of sysl and mrs instructions. */
3294 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3296 record_buf
[0] = reg_rt
;
3297 aarch64_insn_r
->reg_rec_count
= 1;
3299 /* Record cpsr for hint and msr(immediate) instructions. */
3300 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3302 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3303 aarch64_insn_r
->reg_rec_count
= 1;
3306 /* Unconditional branch (register). */
3307 else if((insn_bits24_27
& 0x0e) == 0x06)
3309 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3310 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3311 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3314 return AARCH64_RECORD_UNKNOWN
;
3316 /* Unconditional branch (immediate). */
3317 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3319 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3320 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3321 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3324 /* Compare & branch (immediate), Test & branch (immediate) and
3325 Conditional branch (immediate). */
3326 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3328 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3330 return AARCH64_RECORD_SUCCESS
;
3333 /* Record handler for advanced SIMD load and store instructions. */
3336 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3339 uint64_t addr_offset
= 0;
3340 uint32_t record_buf
[24];
3341 uint64_t record_buf_mem
[24];
3342 uint32_t reg_rn
, reg_rt
;
3343 uint32_t reg_index
= 0, mem_index
= 0;
3344 uint8_t opcode_bits
, size_bits
;
3346 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3347 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3348 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3349 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3350 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3353 debug_printf ("Process record: Advanced SIMD load/store\n");
3355 /* Load/store single structure. */
3356 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3358 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3359 scale
= opcode_bits
>> 2;
3360 selem
= ((opcode_bits
& 0x02) |
3361 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3365 if (size_bits
& 0x01)
3366 return AARCH64_RECORD_UNKNOWN
;
3369 if ((size_bits
>> 1) & 0x01)
3370 return AARCH64_RECORD_UNKNOWN
;
3371 if (size_bits
& 0x01)
3373 if (!((opcode_bits
>> 1) & 0x01))
3376 return AARCH64_RECORD_UNKNOWN
;
3380 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3387 return AARCH64_RECORD_UNKNOWN
;
3393 for (sindex
= 0; sindex
< selem
; sindex
++)
3395 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3396 reg_rt
= (reg_rt
+ 1) % 32;
3400 for (sindex
= 0; sindex
< selem
; sindex
++)
3401 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3402 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3405 record_buf_mem
[mem_index
++] = esize
/ 8;
3406 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3408 addr_offset
= addr_offset
+ (esize
/ 8);
3409 reg_rt
= (reg_rt
+ 1) % 32;
3412 /* Load/store multiple structure. */
3415 uint8_t selem
, esize
, rpt
, elements
;
3416 uint8_t eindex
, rindex
;
3418 esize
= 8 << size_bits
;
3419 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3420 elements
= 128 / esize
;
3422 elements
= 64 / esize
;
3424 switch (opcode_bits
)
3426 /*LD/ST4 (4 Registers). */
3431 /*LD/ST1 (4 Registers). */
3436 /*LD/ST3 (3 Registers). */
3441 /*LD/ST1 (3 Registers). */
3446 /*LD/ST1 (1 Register). */
3451 /*LD/ST2 (2 Registers). */
3456 /*LD/ST1 (2 Registers). */
3462 return AARCH64_RECORD_UNSUPPORTED
;
3465 for (rindex
= 0; rindex
< rpt
; rindex
++)
3466 for (eindex
= 0; eindex
< elements
; eindex
++)
3468 uint8_t reg_tt
, sindex
;
3469 reg_tt
= (reg_rt
+ rindex
) % 32;
3470 for (sindex
= 0; sindex
< selem
; sindex
++)
3472 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3473 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3476 record_buf_mem
[mem_index
++] = esize
/ 8;
3477 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3479 addr_offset
= addr_offset
+ (esize
/ 8);
3480 reg_tt
= (reg_tt
+ 1) % 32;
3485 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3486 record_buf
[reg_index
++] = reg_rn
;
3488 aarch64_insn_r
->reg_rec_count
= reg_index
;
3489 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3490 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3492 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3494 return AARCH64_RECORD_SUCCESS
;
3497 /* Record handler for load and store instructions. */
3500 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3502 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3503 uint8_t insn_bit23
, insn_bit21
;
3504 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3505 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3506 uint64_t datasize
, offset
;
3507 uint32_t record_buf
[8];
3508 uint64_t record_buf_mem
[8];
3511 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3512 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3513 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3514 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3515 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3516 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3517 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3518 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3519 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3520 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3521 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3523 /* Load/store exclusive. */
3524 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3527 debug_printf ("Process record: load/store exclusive\n");
3531 record_buf
[0] = reg_rt
;
3532 aarch64_insn_r
->reg_rec_count
= 1;
3535 record_buf
[1] = reg_rt2
;
3536 aarch64_insn_r
->reg_rec_count
= 2;
3542 datasize
= (8 << size_bits
) * 2;
3544 datasize
= (8 << size_bits
);
3545 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3547 record_buf_mem
[0] = datasize
/ 8;
3548 record_buf_mem
[1] = address
;
3549 aarch64_insn_r
->mem_rec_count
= 1;
3552 /* Save register rs. */
3553 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3554 aarch64_insn_r
->reg_rec_count
= 1;
3558 /* Load register (literal) instructions decoding. */
3559 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3562 debug_printf ("Process record: load register (literal)\n");
3564 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3566 record_buf
[0] = reg_rt
;
3567 aarch64_insn_r
->reg_rec_count
= 1;
3569 /* All types of load/store pair instructions decoding. */
3570 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3573 debug_printf ("Process record: load/store pair\n");
3579 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3580 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3584 record_buf
[0] = reg_rt
;
3585 record_buf
[1] = reg_rt2
;
3587 aarch64_insn_r
->reg_rec_count
= 2;
3592 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3594 size_bits
= size_bits
>> 1;
3595 datasize
= 8 << (2 + size_bits
);
3596 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3597 offset
= offset
<< (2 + size_bits
);
3598 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3600 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3602 if (imm7_off
& 0x40)
3603 address
= address
- offset
;
3605 address
= address
+ offset
;
3608 record_buf_mem
[0] = datasize
/ 8;
3609 record_buf_mem
[1] = address
;
3610 record_buf_mem
[2] = datasize
/ 8;
3611 record_buf_mem
[3] = address
+ (datasize
/ 8);
3612 aarch64_insn_r
->mem_rec_count
= 2;
3614 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3615 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3617 /* Load/store register (unsigned immediate) instructions. */
3618 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3620 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3627 if (size_bits
!= 0x03)
3630 return AARCH64_RECORD_UNKNOWN
;
3634 debug_printf ("Process record: load/store (unsigned immediate):"
3635 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3641 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3642 datasize
= 8 << size_bits
;
3643 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3645 offset
= offset
<< size_bits
;
3646 address
= address
+ offset
;
3648 record_buf_mem
[0] = datasize
>> 3;
3649 record_buf_mem
[1] = address
;
3650 aarch64_insn_r
->mem_rec_count
= 1;
3655 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3657 record_buf
[0] = reg_rt
;
3658 aarch64_insn_r
->reg_rec_count
= 1;
3661 /* Load/store register (register offset) instructions. */
3662 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3663 && insn_bits10_11
== 0x02 && insn_bit21
)
3666 debug_printf ("Process record: load/store (register offset)\n");
3667 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3674 if (size_bits
!= 0x03)
3677 return AARCH64_RECORD_UNKNOWN
;
3681 uint64_t reg_rm_val
;
3682 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3683 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3684 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3685 offset
= reg_rm_val
<< size_bits
;
3687 offset
= reg_rm_val
;
3688 datasize
= 8 << size_bits
;
3689 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3691 address
= address
+ offset
;
3692 record_buf_mem
[0] = datasize
>> 3;
3693 record_buf_mem
[1] = address
;
3694 aarch64_insn_r
->mem_rec_count
= 1;
3699 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3701 record_buf
[0] = reg_rt
;
3702 aarch64_insn_r
->reg_rec_count
= 1;
3705 /* Load/store register (immediate and unprivileged) instructions. */
3706 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3711 debug_printf ("Process record: load/store "
3712 "(immediate and unprivileged)\n");
3714 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3721 if (size_bits
!= 0x03)
3724 return AARCH64_RECORD_UNKNOWN
;
3729 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3730 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3731 datasize
= 8 << size_bits
;
3732 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3734 if (insn_bits10_11
!= 0x01)
3736 if (imm9_off
& 0x0100)
3737 address
= address
- offset
;
3739 address
= address
+ offset
;
3741 record_buf_mem
[0] = datasize
>> 3;
3742 record_buf_mem
[1] = address
;
3743 aarch64_insn_r
->mem_rec_count
= 1;
3748 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3750 record_buf
[0] = reg_rt
;
3751 aarch64_insn_r
->reg_rec_count
= 1;
3753 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3754 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3756 /* Advanced SIMD load/store instructions. */
3758 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3760 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3762 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3764 return AARCH64_RECORD_SUCCESS
;
3767 /* Record handler for data processing SIMD and floating point instructions. */
3770 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3772 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3773 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3774 uint8_t insn_bits11_14
;
3775 uint32_t record_buf
[2];
3777 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3778 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3779 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3780 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3781 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3782 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3783 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3784 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3785 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3788 debug_printf ("Process record: data processing SIMD/FP: ");
3790 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3792 /* Floating point - fixed point conversion instructions. */
3796 debug_printf ("FP - fixed point conversion");
3798 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3799 record_buf
[0] = reg_rd
;
3801 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3803 /* Floating point - conditional compare instructions. */
3804 else if (insn_bits10_11
== 0x01)
3807 debug_printf ("FP - conditional compare");
3809 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3811 /* Floating point - data processing (2-source) and
3812 conditional select instructions. */
3813 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3816 debug_printf ("FP - DP (2-source)");
3818 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3820 else if (insn_bits10_11
== 0x00)
3822 /* Floating point - immediate instructions. */
3823 if ((insn_bits12_15
& 0x01) == 0x01
3824 || (insn_bits12_15
& 0x07) == 0x04)
3827 debug_printf ("FP - immediate");
3828 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3830 /* Floating point - compare instructions. */
3831 else if ((insn_bits12_15
& 0x03) == 0x02)
3834 debug_printf ("FP - immediate");
3835 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3837 /* Floating point - integer conversions instructions. */
3838 else if (insn_bits12_15
== 0x00)
3840 /* Convert float to integer instruction. */
3841 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3844 debug_printf ("float to int conversion");
3846 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3848 /* Convert integer to float instruction. */
3849 else if ((opcode
>> 1) == 0x01 && !rmode
)
3852 debug_printf ("int to float conversion");
3854 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3856 /* Move float to integer instruction. */
3857 else if ((opcode
>> 1) == 0x03)
3860 debug_printf ("move float to int");
3862 if (!(opcode
& 0x01))
3863 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3865 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3868 return AARCH64_RECORD_UNKNOWN
;
3871 return AARCH64_RECORD_UNKNOWN
;
3874 return AARCH64_RECORD_UNKNOWN
;
3876 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3879 debug_printf ("SIMD copy");
3881 /* Advanced SIMD copy instructions. */
3882 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3883 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3884 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3886 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3887 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3889 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3892 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3894 /* All remaining floating point or advanced SIMD instructions. */
3898 debug_printf ("all remain");
3900 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3904 debug_printf ("\n");
3906 aarch64_insn_r
->reg_rec_count
++;
3907 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3908 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3910 return AARCH64_RECORD_SUCCESS
;
3913 /* Decodes insns type and invokes its record handler. */
3916 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3918 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3920 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3921 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3922 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3923 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3925 /* Data processing - immediate instructions. */
3926 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3927 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3929 /* Branch, exception generation and system instructions. */
3930 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3931 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3933 /* Load and store instructions. */
3934 if (!ins_bit25
&& ins_bit27
)
3935 return aarch64_record_load_store (aarch64_insn_r
);
3937 /* Data processing - register instructions. */
3938 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3939 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3941 /* Data processing - SIMD and floating point instructions. */
3942 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3943 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3945 return AARCH64_RECORD_UNSUPPORTED
;
3948 /* Cleans up local record registers and memory allocations. */
3951 deallocate_reg_mem (insn_decode_record
*record
)
3953 xfree (record
->aarch64_regs
);
3954 xfree (record
->aarch64_mems
);
3957 /* Parse the current instruction and record the values of the registers and
3958 memory that will be changed in current instruction to record_arch_list
3959 return -1 if something is wrong. */
3962 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3963 CORE_ADDR insn_addr
)
3965 uint32_t rec_no
= 0;
3966 uint8_t insn_size
= 4;
3968 ULONGEST t_bit
= 0, insn_id
= 0;
3969 gdb_byte buf
[insn_size
];
3970 insn_decode_record aarch64_record
;
3972 memset (&buf
[0], 0, insn_size
);
3973 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3974 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3975 aarch64_record
.aarch64_insn
3976 = (uint32_t) extract_unsigned_integer (&buf
[0],
3978 gdbarch_byte_order (gdbarch
));
3979 aarch64_record
.regcache
= regcache
;
3980 aarch64_record
.this_addr
= insn_addr
;
3981 aarch64_record
.gdbarch
= gdbarch
;
3983 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3984 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3986 printf_unfiltered (_("Process record does not support instruction "
3987 "0x%0x at address %s.\n"),
3988 aarch64_record
.aarch64_insn
,
3989 paddress (gdbarch
, insn_addr
));
3995 /* Record registers. */
3996 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3998 /* Always record register CPSR. */
3999 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4000 AARCH64_CPSR_REGNUM
);
4001 if (aarch64_record
.aarch64_regs
)
4002 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4003 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4004 aarch64_record
.aarch64_regs
[rec_no
]))
4007 /* Record memories. */
4008 if (aarch64_record
.aarch64_mems
)
4009 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4010 if (record_full_arch_list_add_mem
4011 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4012 aarch64_record
.aarch64_mems
[rec_no
].len
))
4015 if (record_full_arch_list_add_end ())
4019 deallocate_reg_mem (&aarch64_record
);