/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
static target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1]
  [2 /* pauth */][2 /* mte */][2 /* tls */];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};
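
/* Note on the fields above: the frame base is always PREV_SP - FRAMESIZE
   (see aarch64_normal_frame_base below).  For instance, in a function
   whose prologue drops the stack pointer by 272 bytes, PREV_SP is the
   caller's SP at the call and FRAMESIZE is 272.  */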

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
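
/* The reader abstraction above lets the prologue analyzer run either
   against real target memory or, in the GDB_SELF_TEST code further below,
   against manually cooked instruction sequences.  */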

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
                         struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
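
/* For illustration: with a hypothetical code mask of 0x007f000000000000
   (the actual mask depends on the target's virtual address layout), a
   signed LR value of 0x0020000000400568 unmasks as

     0x0020000000400568 & ~0x007f000000000000 == 0x0000000000400568

   which recovers the real return address.  */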

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }

          /* Did we move SP to FP?  */
          if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          /* If this shows up before we set the stack, keep going.  Otherwise
             stop the analysis.  */
          if (seen_stack_set)
            break;

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              aarch64_debug_printf ("prologue analysis gave up "
                                    "addr=%s opcode=0x%x (orr x register)",
                                    core_addr_to_string_nz (start), insn);

              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store
            (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
             size, regs[rt]);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Ignore the instruction that allocates stack space and sets
             the SP.  */
          if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            rt += AARCH64_X_REGISTER_COUNT;

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          aarch64_gdbarch_tdep *tdep
            = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else if (IS_BTI (insn))
            /* We don't need to do anything special for a BTI instruction.  */
            continue;
          else
            {
              aarch64_debug_printf ("prologue analysis gave up addr=%s"
                                    " opcode=0x%x (iclass)",
                                    core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            {
              int regnum = tdep->pauth_ra_state_regnum;
              cache->saved_regs[regnum].set_value (ra_state_val);
            }
        }
      else
        {
          aarch64_debug_printf ("prologue analysis gave up addr=%s"
                                " opcode=0x%x",
                                core_addr_to_string_nz (start), insn);

          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}
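
/* For a quick sense of what the analysis records (this mirrors the first
   selftest below): a prologue such as

     stp x29, x30, [sp, #-272]!
     mov x29, sp

   yields x29 (FP) as the frame register with a frame size of 272 bytes,
   with x29 and x30 saved at offsets -272 and -264 from the previous stack
   pointer.  */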

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -264);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ()
                      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int num_regs = gdbarch_num_regs (gdbarch);
        int regnum = i + num_regs + AARCH64_D0_REGNUM;

        SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
                    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr () == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr () == -48);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ()
                      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int num_regs = gdbarch_num_regs (gdbarch);
        int regnum = i + num_regs + AARCH64_D0_REGNUM;

        if (i == 0)
          SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
        else
          SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
                      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
        0x910003fd, /* mov     x29, sp */
        0xf801c3f3, /* str     x19, [sp, #28] */
        0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr () == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr () == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr () == -40);
          else
            SELF_CHECK (cache.saved_regs[i].is_realreg ()
                        && cache.saved_regs[i].realreg () == i);
        }

      if (tdep->has_pauth ())
        {
          int regnum = tdep->pauth_ra_state_regnum;
          SELF_CHECK (cache.saved_regs[regnum].is_value ());
        }
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                              reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr () == -20);
        else if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -48);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -40);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ()
                      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
                                       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
        = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && cache->saved_regs[tdep->pauth_ra_state_regnum].is_value ())
        lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable,
         to avoid having the prologue unwinder try to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

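/* Single-byte DWARF expressions that evaluate to the constants 0 and 1.
   They are used below to record, in the DWARF frame state, whether the
   return address is currently mangled (the RA_STATE pseudo register).  */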
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
        return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
        (uint32_t) extract_unsigned_integer
                     (target_mem, insn_len,
                      gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
         choices of such instructions with different immediate values.
         Different OS' may use a different variation, but they have the
         same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}
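
/* A BRK instruction encodes as 0xd4200000 | (imm16 << 5), so the mask
   above accepts any immediate.  For example, both BRK #0 (0xd4200000)
   and BRK #1 (0xd4200020) satisfy
   (insn & BRK_INSN_MASK) == BRK_INSN_BASE.  */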

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same as for
         scalar types), but cap the maximum alignment at 128 bits.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
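
/* For example, a 16-byte (V register sized) vector keeps its natural
   16-byte alignment, while a hypothetical 32-byte vector type would be
   capped to 16-byte alignment by the code above.  */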

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || type->code () != (*fundamental_type)->code ())
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || target_type->code () != (*fundamental_type)->code ())
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (type->is_vector ())
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || type->code () != (*fundamental_type)->code ())
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < type->num_fields (); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&type->field (i)))
              continue;

            struct type *member = check_typedef (type->field (i).type ());

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
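
/* As a sketch of the classification: a type such as

     struct { float x; float y; float z; };

   yields *COUNT == 3 with float as the fundamental type (an HFA passed in
   three V registers), whereas mixing element types, e.g. a float field
   next to a double field, makes the candidate check fail and the argument
   falls back to the general passing rules.  */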

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg).data ();

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
                            gdbarch_register_name (gdbarch, regnum),
                            phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
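
/* For instance, with NGRN == 0 a 12-byte struct argument is copied into
   x0 (first 8 bytes) and x1 (remaining 4 bytes); on big-endian targets
   the sub-word tail is shifted into the most significant bytes of x1 by
   the adjustment above.  */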

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
                            gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg).data ();
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                        info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
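
/* Example of the padding logic: pushing a single 1-byte argument bumps
   NSAA from 0 to 1; its alignment rounds up to 8, so a 7-byte padding
   item follows and NSAA ends up at 8, keeping later arguments aligned.  */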

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state as the
   value will have been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg).data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg).data ();
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg).data ());
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&arg_type->field (i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}
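
/* For example, a complex double argument takes the TYPE_CODE_COMPLEX
   branch above: the real part goes into one V register and the imaginary
   part (at offset 8 in the value contents) into the next, consuming two
   of the eight available V registers.  */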
1826
1827 /* Implement the "push_dummy_call" gdbarch method. */
1828
1829 static CORE_ADDR
1830 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1831 struct regcache *regcache, CORE_ADDR bp_addr,
1832 int nargs,
1833 struct value **args, CORE_ADDR sp,
1834 function_call_return_method return_method,
1835 CORE_ADDR struct_addr)
1836 {
1837 int argnum;
1838 struct aarch64_call_info info;
1839
1840 /* We need to know what the type of the called function is in order
1841 to determine the number of named/anonymous arguments for the
1842 actual argument placement, and the return type in order to handle
1843 return value correctly.
1844
1845 The generic code above us views the decision of return in memory
1846 or return in registers as a two stage process.  The language
1847 handler is consulted first and may decide to return in memory (e.g.
1848 a class with a copy constructor returned by value); this will cause
1849 the generic code to allocate space AND insert an initial leading
1850 argument.
1851
1852 If the language code does not decide to pass in memory then the
1853 target code is consulted.
1854
1855 If the language code decides to pass in memory we want to move
1856 the pointer inserted as the initial argument from the argument
1857 list and into X8, the conventional AArch64 struct return pointer
1858 register. */
1859
1860 /* Set the return address. For the AArch64, the return breakpoint
1861 is always at BP_ADDR. */
1862 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1863
1864 /* If we were given an initial argument for the return slot, lose it. */
1865 if (return_method == return_method_hidden_param)
1866 {
1867 args++;
1868 nargs--;
1869 }
1870
1871 /* The struct_return pointer occupies X8. */
1872 if (return_method != return_method_normal)
1873 {
1874 aarch64_debug_printf ("struct return in %s = 0x%s",
1875 gdbarch_register_name
1876 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1877 paddress (gdbarch, struct_addr));
1878
1879 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1880 struct_addr);
1881 }
1882
1883 for (argnum = 0; argnum < nargs; argnum++)
1884 {
1885 struct value *arg = args[argnum];
1886 struct type *arg_type, *fundamental_type;
1887 int len, elements;
1888
1889 arg_type = check_typedef (value_type (arg));
1890 len = TYPE_LENGTH (arg_type);
1891
1892 /* If arg can be passed in V registers as per the AAPCS64, then do so
1893 if there are enough spare registers.  */
1894 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1895 &fundamental_type))
1896 {
1897 if (info.nsrn + elements <= 8)
1898 {
1899 /* We know that we have sufficient registers available, therefore
1900 this will never need to fall back to the stack.  */
1901 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1902 arg))
1903 gdb_assert_not_reached ("Failed to push args");
1904 }
1905 else
1906 {
1907 info.nsrn = 8;
1908 pass_on_stack (&info, arg_type, arg);
1909 }
1910 continue;
1911 }
1912
1913 switch (arg_type->code ())
1914 {
1915 case TYPE_CODE_INT:
1916 case TYPE_CODE_BOOL:
1917 case TYPE_CODE_CHAR:
1918 case TYPE_CODE_RANGE:
1919 case TYPE_CODE_ENUM:
1920 if (len < 4 && !is_fixed_point_type (arg_type))
1921 {
1922 /* Promote to 32 bit integer. */
1923 if (arg_type->is_unsigned ())
1924 arg_type = builtin_type (gdbarch)->builtin_uint32;
1925 else
1926 arg_type = builtin_type (gdbarch)->builtin_int32;
1927 arg = value_cast (arg_type, arg);
1928 }
1929 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1930 break;
1931
1932 case TYPE_CODE_STRUCT:
1933 case TYPE_CODE_ARRAY:
1934 case TYPE_CODE_UNION:
1935 if (len > 16)
1936 {
1937 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1938 invisible reference. */
1939
1940 /* Allocate aligned storage. */
1941 sp = align_down (sp - len, 16);
1942
1943 /* Write the real data into the stack. */
1944 write_memory (sp, value_contents (arg).data (), len);
1945
1946 /* Construct the indirection. */
1947 arg_type = lookup_pointer_type (arg_type);
1948 arg = value_from_pointer (arg_type, sp);
1949 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1950 }
1951 else
1952 /* PCS C.15 / C.18: pass as multiple values.  */
1953 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1954 break;
1955
1956 default:
1957 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1958 break;
1959 }
1960 }
1961
1962 /* Make sure stack retains 16 byte alignment. */
1963 if (info.nsaa & 15)
1964 sp -= 16 - (info.nsaa & 15);
1965
1966 while (!info.si.empty ())
1967 {
1968 const stack_item_t &si = info.si.back ();
1969
1970 sp -= si.len;
1971 if (si.data != NULL)
1972 write_memory (sp, si.data, si.len);
1973 info.si.pop_back ();
1974 }
1975
1976 /* Finally, update the SP register. */
1977 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1978
1979 return sp;
1980 }
1981
1982 /* Implement the "frame_align" gdbarch method. */
1983
1984 static CORE_ADDR
1985 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1986 {
1987 /* Align the stack to sixteen bytes. */
1988 return sp & ~(CORE_ADDR) 15;
1989 }
1990
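/* For instance, aarch64_frame_align (gdbarch, 0x7ffffff8) yields
   0x7ffffff0; clearing the low four bits always rounds the stack
   pointer down to the 16-byte boundary the AAPCS64 requires.  */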
1991 /* Return the type for an AdvSISD Q register. */
1992
1993 static struct type *
1994 aarch64_vnq_type (struct gdbarch *gdbarch)
1995 {
1996 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
1997
1998 if (tdep->vnq_type == NULL)
1999 {
2000 struct type *t;
2001 struct type *elem;
2002
2003 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2004 TYPE_CODE_UNION);
2005
2006 elem = builtin_type (gdbarch)->builtin_uint128;
2007 append_composite_type_field (t, "u", elem);
2008
2009 elem = builtin_type (gdbarch)->builtin_int128;
2010 append_composite_type_field (t, "s", elem);
2011
2012 tdep->vnq_type = t;
2013 }
2014
2015 return tdep->vnq_type;
2016 }
2017
2018 /* Return the type for an AdvSISD D register. */
2019
2020 static struct type *
2021 aarch64_vnd_type (struct gdbarch *gdbarch)
2022 {
2023 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2024
2025 if (tdep->vnd_type == NULL)
2026 {
2027 struct type *t;
2028 struct type *elem;
2029
2030 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2031 TYPE_CODE_UNION);
2032
2033 elem = builtin_type (gdbarch)->builtin_double;
2034 append_composite_type_field (t, "f", elem);
2035
2036 elem = builtin_type (gdbarch)->builtin_uint64;
2037 append_composite_type_field (t, "u", elem);
2038
2039 elem = builtin_type (gdbarch)->builtin_int64;
2040 append_composite_type_field (t, "s", elem);
2041
2042 tdep->vnd_type = t;
2043 }
2044
2045 return tdep->vnd_type;
2046 }
2047
2048 /* Return the type for an AdvSISD S register. */
2049
2050 static struct type *
2051 aarch64_vns_type (struct gdbarch *gdbarch)
2052 {
2053 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2054
2055 if (tdep->vns_type == NULL)
2056 {
2057 struct type *t;
2058 struct type *elem;
2059
2060 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2061 TYPE_CODE_UNION);
2062
2063 elem = builtin_type (gdbarch)->builtin_float;
2064 append_composite_type_field (t, "f", elem);
2065
2066 elem = builtin_type (gdbarch)->builtin_uint32;
2067 append_composite_type_field (t, "u", elem);
2068
2069 elem = builtin_type (gdbarch)->builtin_int32;
2070 append_composite_type_field (t, "s", elem);
2071
2072 tdep->vns_type = t;
2073 }
2074
2075 return tdep->vns_type;
2076 }
2077
2078 /* Return the type for an AdvSISD H register. */
2079
2080 static struct type *
2081 aarch64_vnh_type (struct gdbarch *gdbarch)
2082 {
2083 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2084
2085 if (tdep->vnh_type == NULL)
2086 {
2087 struct type *t;
2088 struct type *elem;
2089
2090 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2091 TYPE_CODE_UNION);
2092
2093 elem = builtin_type (gdbarch)->builtin_bfloat16;
2094 append_composite_type_field (t, "bf", elem);
2095
2096 elem = builtin_type (gdbarch)->builtin_half;
2097 append_composite_type_field (t, "f", elem);
2098
2099 elem = builtin_type (gdbarch)->builtin_uint16;
2100 append_composite_type_field (t, "u", elem);
2101
2102 elem = builtin_type (gdbarch)->builtin_int16;
2103 append_composite_type_field (t, "s", elem);
2104
2105 tdep->vnh_type = t;
2106 }
2107
2108 return tdep->vnh_type;
2109 }
2110
2111 /* Return the type for an AdvSISD B register. */
2112
2113 static struct type *
2114 aarch64_vnb_type (struct gdbarch *gdbarch)
2115 {
2116 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2117
2118 if (tdep->vnb_type == NULL)
2119 {
2120 struct type *t;
2121 struct type *elem;
2122
2123 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2124 TYPE_CODE_UNION);
2125
2126 elem = builtin_type (gdbarch)->builtin_uint8;
2127 append_composite_type_field (t, "u", elem);
2128
2129 elem = builtin_type (gdbarch)->builtin_int8;
2130 append_composite_type_field (t, "s", elem);
2131
2132 tdep->vnb_type = t;
2133 }
2134
2135 return tdep->vnb_type;
2136 }
2137
2138 /* Return the type for an AdvSISD V register. */
2139
2140 static struct type *
2141 aarch64_vnv_type (struct gdbarch *gdbarch)
2142 {
2143 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2144
2145 if (tdep->vnv_type == NULL)
2146 {
2147 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2148 slice from the non-pseudo vector registers.  However, NEON V registers
2149 are always vector registers, and need constructing as such.  */
2150 const struct builtin_type *bt = builtin_type (gdbarch);
2151
2152 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2153 TYPE_CODE_UNION);
2154
2155 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2156 TYPE_CODE_UNION);
2157 append_composite_type_field (sub, "f",
2158 init_vector_type (bt->builtin_double, 2));
2159 append_composite_type_field (sub, "u",
2160 init_vector_type (bt->builtin_uint64, 2));
2161 append_composite_type_field (sub, "s",
2162 init_vector_type (bt->builtin_int64, 2));
2163 append_composite_type_field (t, "d", sub);
2164
2165 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2166 TYPE_CODE_UNION);
2167 append_composite_type_field (sub, "f",
2168 init_vector_type (bt->builtin_float, 4));
2169 append_composite_type_field (sub, "u",
2170 init_vector_type (bt->builtin_uint32, 4));
2171 append_composite_type_field (sub, "s",
2172 init_vector_type (bt->builtin_int32, 4));
2173 append_composite_type_field (t, "s", sub);
2174
2175 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2176 TYPE_CODE_UNION);
2177 append_composite_type_field (sub, "bf",
2178 init_vector_type (bt->builtin_bfloat16, 8));
2179 append_composite_type_field (sub, "f",
2180 init_vector_type (bt->builtin_half, 8));
2181 append_composite_type_field (sub, "u",
2182 init_vector_type (bt->builtin_uint16, 8));
2183 append_composite_type_field (sub, "s",
2184 init_vector_type (bt->builtin_int16, 8));
2185 append_composite_type_field (t, "h", sub);
2186
2187 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2188 TYPE_CODE_UNION);
2189 append_composite_type_field (sub, "u",
2190 init_vector_type (bt->builtin_uint8, 16));
2191 append_composite_type_field (sub, "s",
2192 init_vector_type (bt->builtin_int8, 16));
2193 append_composite_type_field (t, "b", sub);
2194
2195 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2196 TYPE_CODE_UNION);
2197 append_composite_type_field (sub, "u",
2198 init_vector_type (bt->builtin_uint128, 1));
2199 append_composite_type_field (sub, "s",
2200 init_vector_type (bt->builtin_int128, 1));
2201 append_composite_type_field (t, "q", sub);
2202
2203 tdep->vnv_type = t;
2204 }
2205
2206 return tdep->vnv_type;
2207 }
2208
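/* With the union built above, individual lanes of a V register can be
   addressed from the CLI, e.g. (a usage sketch):

     (gdb) print $v0.d.f[1]    <- second double lane
     (gdb) print $v0.b.u[15]   <- last unsigned byte lane

   mirroring the per-width vector sub-unions constructed here.  */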
2209 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2210
2211 static int
2212 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2213 {
2214 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2215
2216 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2217 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2218
2219 if (reg == AARCH64_DWARF_SP)
2220 return AARCH64_SP_REGNUM;
2221
2222 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2223 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2224
2225 if (reg == AARCH64_DWARF_SVE_VG)
2226 return AARCH64_SVE_VG_REGNUM;
2227
2228 if (reg == AARCH64_DWARF_SVE_FFR)
2229 return AARCH64_SVE_FFR_REGNUM;
2230
2231 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2232 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2233
2234 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2235 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2236
2237 if (tdep->has_pauth ())
2238 {
2239 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2240 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2241
2242 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2243 return tdep->pauth_ra_state_regnum;
2244 }
2245
2246 return -1;
2247 }
2248
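/* Example of the mapping above: DWARF register 0 (AARCH64_DWARF_X0)
   maps to AARCH64_X0_REGNUM, DWARF 31 (AARCH64_DWARF_SP) to the SP,
   and DWARF 64 (AARCH64_DWARF_V0) to AARCH64_V0_REGNUM; a DWARF number
   outside every range handled here falls through to -1, meaning "no
   such register".  */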
2249 /* Implement the "print_insn" gdbarch method. */
2250
2251 static int
2252 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2253 {
2254 info->symbols = NULL;
2255 return default_print_insn (memaddr, info);
2256 }
2257
2258 /* AArch64 BRK software debug mode instruction.
2259 Note that AArch64 code is always little-endian.
2260 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2261 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2262
2263 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2264
2265 /* Extract from an array REGS containing the (raw) register state a
2266 function return value of type TYPE, and copy that, in virtual
2267 format, into VALBUF. */
2268
2269 static void
2270 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2271 gdb_byte *valbuf)
2272 {
2273 struct gdbarch *gdbarch = regs->arch ();
2274 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2275 int elements;
2276 struct type *fundamental_type;
2277
2278 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2279 &fundamental_type))
2280 {
2281 int len = TYPE_LENGTH (fundamental_type);
2282
2283 for (int i = 0; i < elements; i++)
2284 {
2285 int regno = AARCH64_V0_REGNUM + i;
2286 /* Enough space for a full vector register. */
2287 gdb_byte buf[register_size (gdbarch, regno)];
2288 gdb_assert (len <= sizeof (buf));
2289
2290 aarch64_debug_printf
2291 ("read HFA or HVA return value element %d from %s",
2292 i + 1, gdbarch_register_name (gdbarch, regno));
2293
2294 regs->cooked_read (regno, buf);
2295
2296 memcpy (valbuf, buf, len);
2297 valbuf += len;
2298 }
2299 }
2300 else if (type->code () == TYPE_CODE_INT
2301 || type->code () == TYPE_CODE_CHAR
2302 || type->code () == TYPE_CODE_BOOL
2303 || type->code () == TYPE_CODE_PTR
2304 || TYPE_IS_REFERENCE (type)
2305 || type->code () == TYPE_CODE_ENUM)
2306 {
2307 /* If the type is a plain integer, then the access is
2308 straightforward.  Otherwise we have to play around a bit
2309 more.  */
2310 int len = TYPE_LENGTH (type);
2311 int regno = AARCH64_X0_REGNUM;
2312 ULONGEST tmp;
2313
2314 while (len > 0)
2315 {
2316 /* By using store_unsigned_integer we avoid having to do
2317 anything special for small big-endian values. */
2318 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2319 store_unsigned_integer (valbuf,
2320 (len > X_REGISTER_SIZE
2321 ? X_REGISTER_SIZE : len), byte_order, tmp);
2322 len -= X_REGISTER_SIZE;
2323 valbuf += X_REGISTER_SIZE;
2324 }
2325 }
2326 else
2327 {
2328 /* For a structure or union the behaviour is as if the value had
2329 been stored to word-aligned memory and then loaded into
2330 registers with 64-bit load instruction(s). */
2331 int len = TYPE_LENGTH (type);
2332 int regno = AARCH64_X0_REGNUM;
2333 bfd_byte buf[X_REGISTER_SIZE];
2334
2335 while (len > 0)
2336 {
2337 regs->cooked_read (regno++, buf);
2338 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2339 len -= X_REGISTER_SIZE;
2340 valbuf += X_REGISTER_SIZE;
2341 }
2342 }
2343 }
2344
2345
2346 /* Will a function return an aggregate type in memory or in a
2347 register? Return 0 if an aggregate type can be returned in a
2348 register, 1 if it must be returned in memory. */
2349
2350 static int
2351 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2352 {
2353 type = check_typedef (type);
2354 int elements;
2355 struct type *fundamental_type;
2356
2357 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2358 &fundamental_type))
2359 {
2360 /* v0-v7 are used to return values and one register is allocated
2361 for one member. However, HFA or HVA has at most four members. */
2362 return 0;
2363 }
2364
2365 if (TYPE_LENGTH (type) > 16
2366 || !language_pass_by_reference (type).trivially_copyable)
2367 {
2368 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2369 invisible reference. */
2370
2371 return 1;
2372 }
2373
2374 return 0;
2375 }
2376
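/* Illustrative cases for the rules above (a sketch): struct { float a,
   b, c; } is an HFA and is returned in V registers despite being
   12 bytes; struct { char buf[32]; } exceeds 16 bytes and is returned
   in memory via X8; a 16-byte trivially copyable struct still comes
   back in X0/X1.  */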
2377 /* Write into appropriate registers a function return value of type
2378 TYPE, given in virtual format. */
2379
2380 static void
2381 aarch64_store_return_value (struct type *type, struct regcache *regs,
2382 const gdb_byte *valbuf)
2383 {
2384 struct gdbarch *gdbarch = regs->arch ();
2385 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2386 int elements;
2387 struct type *fundamental_type;
2388
2389 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2390 &fundamental_type))
2391 {
2392 int len = TYPE_LENGTH (fundamental_type);
2393
2394 for (int i = 0; i < elements; i++)
2395 {
2396 int regno = AARCH64_V0_REGNUM + i;
2397 /* Enough space for a full vector register. */
2398 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2399 gdb_assert (len <= sizeof (tmpbuf));
2400
2401 aarch64_debug_printf
2402 ("write HFA or HVA return value element %d to %s",
2403 i + 1, gdbarch_register_name (gdbarch, regno));
2404
2405 memcpy (tmpbuf, valbuf,
2406 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2407 regs->cooked_write (regno, tmpbuf);
2408 valbuf += len;
2409 }
2410 }
2411 else if (type->code () == TYPE_CODE_INT
2412 || type->code () == TYPE_CODE_CHAR
2413 || type->code () == TYPE_CODE_BOOL
2414 || type->code () == TYPE_CODE_PTR
2415 || TYPE_IS_REFERENCE (type)
2416 || type->code () == TYPE_CODE_ENUM)
2417 {
2418 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2419 {
2420 /* Values of one word or less are zero/sign-extended and
2421 returned in X0.  */
2422 bfd_byte tmpbuf[X_REGISTER_SIZE];
2423 LONGEST val = unpack_long (type, valbuf);
2424
2425 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2426 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2427 }
2428 else
2429 {
2430 /* Integral values greater than one word are stored in
2431 consecutive registers starting with X0.  This will always
2432 be a multiple of the register size.  */
2433 int len = TYPE_LENGTH (type);
2434 int regno = AARCH64_X0_REGNUM;
2435
2436 while (len > 0)
2437 {
2438 regs->cooked_write (regno++, valbuf);
2439 len -= X_REGISTER_SIZE;
2440 valbuf += X_REGISTER_SIZE;
2441 }
2442 }
2443 }
2444 else
2445 {
2446 /* For a structure or union the behaviour is as if the value had
2447 been stored to word-aligned memory and then loaded into
2448 registers with 64-bit load instruction(s). */
2449 int len = TYPE_LENGTH (type);
2450 int regno = AARCH64_X0_REGNUM;
2451 bfd_byte tmpbuf[X_REGISTER_SIZE];
2452
2453 while (len > 0)
2454 {
2455 memcpy (tmpbuf, valbuf,
2456 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2457 regs->cooked_write (regno++, tmpbuf);
2458 len -= X_REGISTER_SIZE;
2459 valbuf += X_REGISTER_SIZE;
2460 }
2461 }
2462 }
2463
2464 /* Implement the "return_value" gdbarch method. */
2465
2466 static enum return_value_convention
2467 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2468 struct type *valtype, struct regcache *regcache,
2469 gdb_byte *readbuf, const gdb_byte *writebuf)
2470 {
2471
2472 if (valtype->code () == TYPE_CODE_STRUCT
2473 || valtype->code () == TYPE_CODE_UNION
2474 || valtype->code () == TYPE_CODE_ARRAY)
2475 {
2476 if (aarch64_return_in_memory (gdbarch, valtype))
2477 {
2478 /* From the AAPCS64's Result Return section:
2479
2480 "Otherwise, the caller shall reserve a block of memory of
2481 sufficient size and alignment to hold the result. The address
2482 of the memory block shall be passed as an additional argument to
2483 the function in x8."  */
2484
2485 aarch64_debug_printf ("return value in memory");
2486
2487 if (readbuf)
2488 {
2489 CORE_ADDR addr;
2490
2491 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
2492 read_memory (addr, readbuf, TYPE_LENGTH (valtype));
2493 }
2494
2495 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
2496 }
2497 }
2498
2499 if (writebuf)
2500 aarch64_store_return_value (valtype, regcache, writebuf);
2501
2502 if (readbuf)
2503 aarch64_extract_return_value (valtype, regcache, readbuf);
2504
2505 aarch64_debug_printf ("return value in registers");
2506
2507 return RETURN_VALUE_REGISTER_CONVENTION;
2508 }
2509
2510 /* Implement the "get_longjmp_target" gdbarch method. */
2511
2512 static int
2513 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2514 {
2515 CORE_ADDR jb_addr;
2516 gdb_byte buf[X_REGISTER_SIZE];
2517 struct gdbarch *gdbarch = get_frame_arch (frame);
2518 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2519 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2520
2521 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2522
2523 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2524 X_REGISTER_SIZE))
2525 return 0;
2526
2527 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2528 return 1;
2529 }
2530
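/* A sketch with hypothetical values: if the OS ABI layer sets
   tdep->jb_pc = 11 and tdep->jb_elt_size = 8, the saved PC is read
   from jb_addr + 11 * 8 = jb_addr + 88.  jb_pc defaults to -1, which
   is treated as longjmp support being disabled until an OS ABI opts
   in.  */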
2531 /* Implement the "gen_return_address" gdbarch method. */
2532
2533 static void
2534 aarch64_gen_return_address (struct gdbarch *gdbarch,
2535 struct agent_expr *ax, struct axs_value *value,
2536 CORE_ADDR scope)
2537 {
2538 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2539 value->kind = axs_lvalue_register;
2540 value->u.reg = AARCH64_LR_REGNUM;
2541 }
2542 \f
2543
2544 /* Return the pseudo register name corresponding to register regnum. */
2545
2546 static const char *
2547 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2548 {
2549 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2550
2551 static const char *const q_name[] =
2552 {
2553 "q0", "q1", "q2", "q3",
2554 "q4", "q5", "q6", "q7",
2555 "q8", "q9", "q10", "q11",
2556 "q12", "q13", "q14", "q15",
2557 "q16", "q17", "q18", "q19",
2558 "q20", "q21", "q22", "q23",
2559 "q24", "q25", "q26", "q27",
2560 "q28", "q29", "q30", "q31",
2561 };
2562
2563 static const char *const d_name[] =
2564 {
2565 "d0", "d1", "d2", "d3",
2566 "d4", "d5", "d6", "d7",
2567 "d8", "d9", "d10", "d11",
2568 "d12", "d13", "d14", "d15",
2569 "d16", "d17", "d18", "d19",
2570 "d20", "d21", "d22", "d23",
2571 "d24", "d25", "d26", "d27",
2572 "d28", "d29", "d30", "d31",
2573 };
2574
2575 static const char *const s_name[] =
2576 {
2577 "s0", "s1", "s2", "s3",
2578 "s4", "s5", "s6", "s7",
2579 "s8", "s9", "s10", "s11",
2580 "s12", "s13", "s14", "s15",
2581 "s16", "s17", "s18", "s19",
2582 "s20", "s21", "s22", "s23",
2583 "s24", "s25", "s26", "s27",
2584 "s28", "s29", "s30", "s31",
2585 };
2586
2587 static const char *const h_name[] =
2588 {
2589 "h0", "h1", "h2", "h3",
2590 "h4", "h5", "h6", "h7",
2591 "h8", "h9", "h10", "h11",
2592 "h12", "h13", "h14", "h15",
2593 "h16", "h17", "h18", "h19",
2594 "h20", "h21", "h22", "h23",
2595 "h24", "h25", "h26", "h27",
2596 "h28", "h29", "h30", "h31",
2597 };
2598
2599 static const char *const b_name[] =
2600 {
2601 "b0", "b1", "b2", "b3",
2602 "b4", "b5", "b6", "b7",
2603 "b8", "b9", "b10", "b11",
2604 "b12", "b13", "b14", "b15",
2605 "b16", "b17", "b18", "b19",
2606 "b20", "b21", "b22", "b23",
2607 "b24", "b25", "b26", "b27",
2608 "b28", "b29", "b30", "b31",
2609 };
2610
2611 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2612
2613 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2614 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2615
2616 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2617 return d_name[p_regnum - AARCH64_D0_REGNUM];
2618
2619 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2620 return s_name[p_regnum - AARCH64_S0_REGNUM];
2621
2622 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2623 return h_name[p_regnum - AARCH64_H0_REGNUM];
2624
2625 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2626 return b_name[p_regnum - AARCH64_B0_REGNUM];
2627
2628 if (tdep->has_sve ())
2629 {
2630 static const char *const sve_v_name[] =
2631 {
2632 "v0", "v1", "v2", "v3",
2633 "v4", "v5", "v6", "v7",
2634 "v8", "v9", "v10", "v11",
2635 "v12", "v13", "v14", "v15",
2636 "v16", "v17", "v18", "v19",
2637 "v20", "v21", "v22", "v23",
2638 "v24", "v25", "v26", "v27",
2639 "v28", "v29", "v30", "v31",
2640 };
2641
2642 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2643 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2644 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2645 }
2646
2647 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2648 prevents it from being read by methods such as
2649 mi_cmd_trace_frame_collected. */
2650 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2651 return "";
2652
2653 internal_error (__FILE__, __LINE__,
2654 _("aarch64_pseudo_register_name: bad register number %d"),
2655 p_regnum);
2656 }
2657
2658 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2659
2660 static struct type *
2661 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2662 {
2663 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2664
2665 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2666
2667 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2668 return aarch64_vnq_type (gdbarch);
2669
2670 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2671 return aarch64_vnd_type (gdbarch);
2672
2673 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2674 return aarch64_vns_type (gdbarch);
2675
2676 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2677 return aarch64_vnh_type (gdbarch);
2678
2679 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2680 return aarch64_vnb_type (gdbarch);
2681
2682 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2683 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2684 return aarch64_vnv_type (gdbarch);
2685
2686 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2687 return builtin_type (gdbarch)->builtin_uint64;
2688
2689 internal_error (__FILE__, __LINE__,
2690 _("aarch64_pseudo_register_type: bad register number %d"),
2691 p_regnum);
2692 }
2693
2694 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2695
2696 static int
2697 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2698 const struct reggroup *group)
2699 {
2700 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2701
2702 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2703
2704 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2705 return group == all_reggroup || group == vector_reggroup;
2706 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2707 return (group == all_reggroup || group == vector_reggroup
2708 || group == float_reggroup);
2709 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2710 return (group == all_reggroup || group == vector_reggroup
2711 || group == float_reggroup);
2712 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2713 return group == all_reggroup || group == vector_reggroup;
2714 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2715 return group == all_reggroup || group == vector_reggroup;
2716 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2717 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2718 return group == all_reggroup || group == vector_reggroup;
2719 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2720 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2721 return 0;
2722
2723 return group == all_reggroup;
2724 }
2725
2726 /* Helper for aarch64_pseudo_read_value. */
2727
2728 static struct value *
2729 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2730 readable_regcache *regcache, int regnum_offset,
2731 int regsize, struct value *result_value)
2732 {
2733 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2734
2735 /* Enough space for a full vector register. */
2736 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2737 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2738
2739 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2740 mark_value_bytes_unavailable (result_value, 0,
2741 TYPE_LENGTH (value_type (result_value)));
2742 else
2743 memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);
2744
2745 return result_value;
2746 }
2747
2748 /* Implement the "pseudo_register_read_value" gdbarch method. */
2749
2750 static struct value *
2751 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2752 int regnum)
2753 {
2754 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2755 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2756
2757 VALUE_LVAL (result_value) = lval_register;
2758 VALUE_REGNUM (result_value) = regnum;
2759
2760 regnum -= gdbarch_num_regs (gdbarch);
2761
2762 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2763 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2764 regnum - AARCH64_Q0_REGNUM,
2765 Q_REGISTER_SIZE, result_value);
2766
2767 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2768 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2769 regnum - AARCH64_D0_REGNUM,
2770 D_REGISTER_SIZE, result_value);
2771
2772 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2773 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2774 regnum - AARCH64_S0_REGNUM,
2775 S_REGISTER_SIZE, result_value);
2776
2777 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2778 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2779 regnum - AARCH64_H0_REGNUM,
2780 H_REGISTER_SIZE, result_value);
2781
2782 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2783 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2784 regnum - AARCH64_B0_REGNUM,
2785 B_REGISTER_SIZE, result_value);
2786
2787 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2788 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2789 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2790 regnum - AARCH64_SVE_V0_REGNUM,
2791 V_REGISTER_SIZE, result_value);
2792
2793 gdb_assert_not_reached ("regnum out of bounds");
2794 }
2795
2796 /* Helper for aarch64_pseudo_write. */
2797
2798 static void
2799 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2800 int regnum_offset, int regsize, const gdb_byte *buf)
2801 {
2802 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2803
2804 /* Enough space for a full vector register. */
2805 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2806 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2807
2808 /* Ensure the register buffer is zero.  We want GDB writes of the
2809 various 'scalar' pseudo registers to behave like architectural
2810 writes: register width bytes are written, the remainder are set to
2811 zero.  */
2812 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2813
2814 memcpy (reg_buf, buf, regsize);
2815 regcache->raw_write (v_regnum, reg_buf);
2816 }
2817
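/* Concretely (a sketch): writing the 4-byte pattern 0x3f800000 (1.0f)
   to pseudo register S5 sends a full V-sized buffer to V5 whose first
   four bytes hold the value and whose remaining bytes are zero, just
   as an architectural write to S5 clears the upper lanes of V5.  */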
2818 /* Implement the "pseudo_register_write" gdbarch method. */
2819
2820 static void
2821 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2822 int regnum, const gdb_byte *buf)
2823 {
2824 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2825 regnum -= gdbarch_num_regs (gdbarch);
2826
2827 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2828 return aarch64_pseudo_write_1 (gdbarch, regcache,
2829 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2830 buf);
2831
2832 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2833 return aarch64_pseudo_write_1 (gdbarch, regcache,
2834 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2835 buf);
2836
2837 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2838 return aarch64_pseudo_write_1 (gdbarch, regcache,
2839 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2840 buf);
2841
2842 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2843 return aarch64_pseudo_write_1 (gdbarch, regcache,
2844 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2845 buf);
2846
2847 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2848 return aarch64_pseudo_write_1 (gdbarch, regcache,
2849 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2850 buf);
2851
2852 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2853 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2854 return aarch64_pseudo_write_1 (gdbarch, regcache,
2855 regnum - AARCH64_SVE_V0_REGNUM,
2856 V_REGISTER_SIZE, buf);
2857
2858 gdb_assert_not_reached ("regnum out of bounds");
2859 }
2860
2861 /* Callback function for user_reg_add. */
2862
2863 static struct value *
2864 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2865 {
2866 const int *reg_p = (const int *) baton;
2867
2868 return value_of_register (*reg_p, frame);
2869 }
2870 \f
2871
2872 /* Implement the "software_single_step" gdbarch method, needed to
2873 single step through atomic sequences on AArch64. */
2874
2875 static std::vector<CORE_ADDR>
2876 aarch64_software_single_step (struct regcache *regcache)
2877 {
2878 struct gdbarch *gdbarch = regcache->arch ();
2879 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2880 const int insn_size = 4;
2881 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2882 CORE_ADDR pc = regcache_read_pc (regcache);
2883 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2884 CORE_ADDR loc = pc;
2885 CORE_ADDR closing_insn = 0;
2886 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2887 byte_order_for_code);
2888 int index;
2889 int insn_count;
2890 int bc_insn_count = 0; /* Conditional branch instruction count. */
2891 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2892 aarch64_inst inst;
2893
2894 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2895 return {};
2896
2897 /* Look for a Load Exclusive instruction which begins the sequence. */
2898 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2899 return {};
2900
2901 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2902 {
2903 loc += insn_size;
2904 insn = read_memory_unsigned_integer (loc, insn_size,
2905 byte_order_for_code);
2906
2907 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2908 return {};
2909 /* Check if the instruction is a conditional branch. */
2910 if (inst.opcode->iclass == condbranch)
2911 {
2912 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2913
2914 if (bc_insn_count >= 1)
2915 return {};
2916
2917 /* It is, so we'll try to set a breakpoint at the destination. */
2918 breaks[1] = loc + inst.operands[0].imm.value;
2919
2920 bc_insn_count++;
2921 last_breakpoint++;
2922 }
2923
2924 /* Look for the Store Exclusive which closes the atomic sequence. */
2925 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2926 {
2927 closing_insn = loc;
2928 break;
2929 }
2930 }
2931
2932 /* We didn't find a closing Store Exclusive instruction, fall back. */
2933 if (!closing_insn)
2934 return {};
2935
2936 /* Insert breakpoint after the end of the atomic sequence. */
2937 breaks[0] = loc + insn_size;
2938
2939 /* Check for duplicated breakpoints, and also check that the second
2940 breakpoint is not within the atomic sequence. */
2941 if (last_breakpoint
2942 && (breaks[1] == breaks[0]
2943 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2944 last_breakpoint = 0;
2945
2946 std::vector<CORE_ADDR> next_pcs;
2947
2948 /* Insert the breakpoint at the end of the sequence, and one at the
2949 destination of the conditional branch, if it exists. */
2950 for (index = 0; index <= last_breakpoint; index++)
2951 next_pcs.push_back (breaks[index]);
2952
2953 return next_pcs;
2954 }
2955
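/* A typical sequence the code above handles (illustrative assembly,
   not taken from any particular program):

     retry:
       ldaxr  w1, [x0]        ; Load Exclusive opens the sequence
       cmp    w1, w2
       b.ne   out             ; conditional branch inside the sequence
       stlxr  w3, w4, [x0]    ; Store Exclusive closes it
       cbnz   w3, retry
     out:

   Breakpoints land immediately after the Store Exclusive (here, at
   the cbnz) and at the b.ne destination, never inside the sequence,
   where a trap would clear the exclusive monitor and make the loop
   retry forever.  */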
2956 struct aarch64_displaced_step_copy_insn_closure
2957 : public displaced_step_copy_insn_closure
2958 {
2959 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2960 is being displaced stepped.  */
2961 bool cond = false;
2962
2963 /* PC adjustment offset after displaced stepping. If 0, then we don't
2964 write the PC back, assuming the PC is already the right address. */
2965 int32_t pc_adjust = 0;
2966 };
2967
2968 /* Data when visiting instructions for displaced stepping. */
2969
2970 struct aarch64_displaced_step_data
2971 {
2972 struct aarch64_insn_data base;
2973
2974 /* The address at which the instruction will be executed.  */
2975 CORE_ADDR new_addr;
2976 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2977 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2978 /* Number of instructions in INSN_BUF. */
2979 unsigned insn_count;
2980 /* Registers when doing displaced stepping. */
2981 struct regcache *regs;
2982
2983 aarch64_displaced_step_copy_insn_closure *dsc;
2984 };
2985
2986 /* Implementation of aarch64_insn_visitor method "b". */
2987
2988 static void
2989 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2990 struct aarch64_insn_data *data)
2991 {
2992 struct aarch64_displaced_step_data *dsd
2993 = (struct aarch64_displaced_step_data *) data;
2994 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2995
2996 if (can_encode_int32 (new_offset, 28))
2997 {
2998 /* Emit B rather than BL, because executing BL on a new address
2999 will get the wrong address into LR. In order to avoid this,
3000 we emit B, and update LR if the instruction is BL. */
3001 emit_b (dsd->insn_buf, 0, new_offset);
3002 dsd->insn_count++;
3003 }
3004 else
3005 {
3006 /* Write NOP. */
3007 emit_nop (dsd->insn_buf);
3008 dsd->insn_count++;
3009 dsd->dsc->pc_adjust = offset;
3010 }
3011
3012 if (is_bl)
3013 {
3014 /* Update LR. */
3015 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3016 data->insn_addr + 4);
3017 }
3018 }
3019
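/* Offset arithmetic for the branch rewrite above (a sketch): a B with
   offset +0x1000 at 0x400000, displaced to a scratch pad at 0x500000,
   needs new_offset = 0x400000 - 0x500000 + 0x1000 = -0xff000 so that
   it still lands on 0x401000.  Only when the adjusted offset no longer
   fits in the 28-bit branch range do we fall back to a NOP plus a PC
   fixup.  */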
3020 /* Implementation of aarch64_insn_visitor method "b_cond". */
3021
3022 static void
3023 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3024 struct aarch64_insn_data *data)
3025 {
3026 struct aarch64_displaced_step_data *dsd
3027 = (struct aarch64_displaced_step_data *) data;
3028
3029 /* GDB has to fix up the PC after displaced stepping this instruction
3030 differently, according to whether the condition is true or false.
3031 Instead of checking COND against the condition flags, we can emit
3032 the following instructions, and then GDB can tell how to fix up the
3033 PC from the resulting PC value.
3034
3035 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3036 INSN1 ;
3037 TAKEN:
3038 INSN2
3039 */
3040
3041 emit_bcond (dsd->insn_buf, cond, 8);
3042 dsd->dsc->cond = true;
3043 dsd->dsc->pc_adjust = offset;
3044 dsd->insn_count = 1;
3045 }
3046
3047 /* Build an aarch64_register operand for register number NUM.  If we
3048 know the register statically, we should make it a global as above
3049 instead of using this helper function.  */
3050
3051 static struct aarch64_register
3052 aarch64_register (unsigned num, int is64)
3053 {
3054 return (struct aarch64_register) { num, is64 };
3055 }
3056
3057 /* Implementation of aarch64_insn_visitor method "cb". */
3058
3059 static void
3060 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3061 const unsigned rn, int is64,
3062 struct aarch64_insn_data *data)
3063 {
3064 struct aarch64_displaced_step_data *dsd
3065 = (struct aarch64_displaced_step_data *) data;
3066
3067 /* The offset is out of range for a compare and branch
3068 instruction. We can use the following instructions instead:
3069
3070 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3071 INSN1 ;
3072 TAKEN:
3073 INSN2
3074 */
3075 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3076 dsd->insn_count = 1;
3077 dsd->dsc->cond = true;
3078 dsd->dsc->pc_adjust = offset;
3079 }
3080
3081 /* Implementation of aarch64_insn_visitor method "tb". */
3082
3083 static void
3084 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3085 const unsigned rt, unsigned bit,
3086 struct aarch64_insn_data *data)
3087 {
3088 struct aarch64_displaced_step_data *dsd
3089 = (struct aarch64_displaced_step_data *) data;
3090
3091 /* The offset is out of range for a test bit and branch
3092 instruction.  We can use the following instructions instead:
3093
3094 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3095 INSN1 ;
3096 TAKEN:
3097 INSN2
3098
3099 */
3100 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3101 dsd->insn_count = 1;
3102 dsd->dsc->cond = true;
3103 dsd->dsc->pc_adjust = offset;
3104 }
3105
3106 /* Implementation of aarch64_insn_visitor method "adr". */
3107
3108 static void
3109 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3110 const int is_adrp, struct aarch64_insn_data *data)
3111 {
3112 struct aarch64_displaced_step_data *dsd
3113 = (struct aarch64_displaced_step_data *) data;
3114 /* We know exactly the address the ADR{P,} instruction will compute.
3115 We can just write it to the destination register. */
3116 CORE_ADDR address = data->insn_addr + offset;
3117
3118 if (is_adrp)
3119 {
3120 /* Clear the lower 12 bits of the address to get its 4K page base.  */
3121 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3122 address & ~0xfff);
3123 }
3124 else
3125 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3126 address);
3127
3128 dsd->dsc->pc_adjust = 4;
3129 emit_nop (dsd->insn_buf);
3130 dsd->insn_count = 1;
3131 }
3132
3133 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3134
3135 static void
3136 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3137 const unsigned rt, const int is64,
3138 struct aarch64_insn_data *data)
3139 {
3140 struct aarch64_displaced_step_data *dsd
3141 = (struct aarch64_displaced_step_data *) data;
3142 CORE_ADDR address = data->insn_addr + offset;
3143 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3144
3145 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3146 address);
3147
3148 if (is_sw)
3149 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3150 aarch64_register (rt, 1), zero);
3151 else
3152 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3153 aarch64_register (rt, 1), zero);
3154
3155 dsd->dsc->pc_adjust = 4;
3156 }
3157
3158 /* Implementation of aarch64_insn_visitor method "others". */
3159
3160 static void
3161 aarch64_displaced_step_others (const uint32_t insn,
3162 struct aarch64_insn_data *data)
3163 {
3164 struct aarch64_displaced_step_data *dsd
3165 = (struct aarch64_displaced_step_data *) data;
3166
3167 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3168 if (masked_insn == BLR)
3169 {
3170 /* Emit a BR to the same register and then update LR to the original
3171 address (similar to aarch64_displaced_step_b). */
3172 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3173 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3174 data->insn_addr + 4);
3175 }
3176 else
3177 aarch64_emit_insn (dsd->insn_buf, insn);
3178 dsd->insn_count = 1;
3179
3180 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3181 dsd->dsc->pc_adjust = 0;
3182 else
3183 dsd->dsc->pc_adjust = 4;
3184 }
3185
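/* The BLR rewrite above relies on the two encodings differing in a
   single bit (a sketch of the arithmetic):

     BLR x<n> = 0xd63f0000 | (n << 5)
     BR  x<n> = 0xd61f0000 | (n << 5)

   so insn & 0xffdfffff clears bit 21, turning BLR into BR while
   preserving the register field; LR is then set by hand to the address
   following the original instruction.  */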
3186 static const struct aarch64_insn_visitor visitor =
3187 {
3188 aarch64_displaced_step_b,
3189 aarch64_displaced_step_b_cond,
3190 aarch64_displaced_step_cb,
3191 aarch64_displaced_step_tb,
3192 aarch64_displaced_step_adr,
3193 aarch64_displaced_step_ldr_literal,
3194 aarch64_displaced_step_others,
3195 };
3196
3197 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3198
3199 displaced_step_copy_insn_closure_up
3200 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3201 CORE_ADDR from, CORE_ADDR to,
3202 struct regcache *regs)
3203 {
3204 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3205 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3206 struct aarch64_displaced_step_data dsd;
3207 aarch64_inst inst;
3208
3209 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3210 return NULL;
3211
3212 /* Look for a Load Exclusive instruction which begins the sequence. */
3213 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3214 {
3215 /* We can't displaced step atomic sequences. */
3216 return NULL;
3217 }
3218
3219 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3220 (new aarch64_displaced_step_copy_insn_closure);
3221 dsd.base.insn_addr = from;
3222 dsd.new_addr = to;
3223 dsd.regs = regs;
3224 dsd.dsc = dsc.get ();
3225 dsd.insn_count = 0;
3226 aarch64_relocate_instruction (insn, &visitor,
3227 (struct aarch64_insn_data *) &dsd);
3228 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3229
3230 if (dsd.insn_count != 0)
3231 {
3232 int i;
3233
3234 /* Instruction can be relocated to scratch pad. Copy
3235 relocated instruction(s) there. */
3236 for (i = 0; i < dsd.insn_count; i++)
3237 {
3238 displaced_debug_printf ("writing insn %.8x at %s",
3239 dsd.insn_buf[i],
3240 paddress (gdbarch, to + i * 4));
3241
3242 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3243 (ULONGEST) dsd.insn_buf[i]);
3244 }
3245 }
3246 else
3247 {
3248 dsc = NULL;
3249 }
3250
3251 /* This is a workaround for a problem with g++ 4.8.  */
3252 return displaced_step_copy_insn_closure_up (dsc.release ());
3253 }
3254
3255 /* Implement the "displaced_step_fixup" gdbarch method. */
3256
3257 void
3258 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3259 struct displaced_step_copy_insn_closure *dsc_,
3260 CORE_ADDR from, CORE_ADDR to,
3261 struct regcache *regs)
3262 {
3263 aarch64_displaced_step_copy_insn_closure *dsc
3264 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3265
3266 ULONGEST pc;
3267
3268 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3269
3270 displaced_debug_printf ("PC after stepping: %s (was %s).",
3271 paddress (gdbarch, pc), paddress (gdbarch, to));
3272
3273 if (dsc->cond)
3274 {
3275 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3276 dsc->pc_adjust);
3277
3278 if (pc - to == 8)
3279 {
3280 /* Condition is true. */
3281 }
3282 else if (pc - to == 4)
3283 {
3284 /* Condition is false. */
3285 dsc->pc_adjust = 4;
3286 }
3287 else
3288 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3289
3290 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3291 dsc->pc_adjust);
3292 }
3293
3294 displaced_debug_printf ("%s PC by %d",
3295 dsc->pc_adjust ? "adjusting" : "not adjusting",
3296 dsc->pc_adjust);
3297
3298 if (dsc->pc_adjust != 0)
3299 {
3300 /* Make sure the previous instruction was executed (that is, the PC
3301 has changed). If the PC didn't change, then discard the adjustment
3302 offset. Otherwise we may skip an instruction before its execution
3303 took place. */
3304 if ((pc - to) == 0)
3305 {
3306 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3307 dsc->pc_adjust = 0;
3308 }
3309
3310 displaced_debug_printf ("fixup: set PC to %s:%d",
3311 paddress (gdbarch, from), dsc->pc_adjust);
3312
3313 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3314 from + dsc->pc_adjust);
3315 }
3316 }
3317
3318 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3319
3320 bool
3321 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3322 {
3323 return true;
3324 }
3325
3326 /* Get the correct target description for the given VQ value.
3327 If VQ is zero then it is assumed SVE is not supported.
3328 (It is not possible to set VQ to zero on an SVE system).
3329
3330 MTE_P indicates the presence of the Memory Tagging Extension feature.
3331
3332 TLS_P indicates the presence of the Thread Local Storage feature. */
3333
3334 const target_desc *
3335 aarch64_read_description (uint64_t vq, bool pauth_p, bool mte_p, bool tls_p)
3336 {
3337 if (vq > AARCH64_MAX_SVE_VQ)
3338 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3339 AARCH64_MAX_SVE_VQ);
3340
3341 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p][mte_p][tls_p];
3342
3343 if (tdesc == NULL)
3344 {
3345 tdesc = aarch64_create_target_description (vq, pauth_p, mte_p, tls_p);
3346 tdesc_aarch64_list[vq][pauth_p][mte_p][tls_p] = tdesc;
3347 }
3348
3349 return tdesc;
3350 }
3351
3352 /* Return the VQ used when creating the target description TDESC. */
3353
3354 static uint64_t
3355 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3356 {
3357 const struct tdesc_feature *feature_sve;
3358
3359 if (!tdesc_has_registers (tdesc))
3360 return 0;
3361
3362 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3363
3364 if (feature_sve == nullptr)
3365 return 0;
3366
3367 uint64_t vl = tdesc_register_bitsize (feature_sve,
3368 aarch64_sve_register_names[0]) / 8;
3369 return sve_vq_from_vl (vl);
3370 }
3371
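/* The arithmetic here: VL is the Z register width in bytes and VQ
   counts 128-bit quadwords, so sve_vq_from_vl is effectively VL / 16.
   A target reporting z0 as 2048 bits wide gives VL = 256 and hence
   VQ = 16, the architectural maximum.  */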
3372 /* Implement the "cannot_store_register" gdbarch method. */
3373
3374 static int
3375 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3376 {
3377 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
3378
3379 if (!tdep->has_pauth ())
3380 return 0;
3381
3382 /* Pointer authentication registers are read-only. */
3383 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3384 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3385 }
3386
3387 /* Implement the stack_frame_destroyed_p gdbarch method. */
3388
3389 static int
3390 aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3391 {
3392 CORE_ADDR func_start, func_end;
3393 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3394 return 0;
3395
3396 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3397 uint32_t insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3398
3399 aarch64_inst inst;
3400 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
3401 return 0;
3402
3403 return streq (inst.opcode->name, "ret");
3404 }
3405
3406 /* Initialize the current architecture based on INFO. If possible,
3407 re-use an architecture from ARCHES, which is a list of
3408 architectures already created during this debugging session.
3409
3410 Called e.g. at program startup, when reading a core file, and when
3411 reading a binary file. */
3412
3413 static struct gdbarch *
3414 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3415 {
3416 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3417 const struct tdesc_feature *feature_pauth;
3418 bool valid_p = true;
3419 int i, num_regs = 0, num_pseudo_regs = 0;
3420 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3421 int first_mte_regnum = -1, tls_regnum = -1;
3422
3423 /* Use the vector length passed via the target info. Here -1 is used for no
3424 SVE, and 0 is unset. If unset then use the vector length from the existing
3425 tdesc. */
3426 uint64_t vq = 0;
3427 if (info.id == (int *) -1)
3428 vq = 0;
3429 else if (info.id != 0)
3430 vq = (uint64_t) info.id;
3431 else
3432 vq = aarch64_get_tdesc_vq (info.target_desc);
3433
3434 if (vq > AARCH64_MAX_SVE_VQ)
3435 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3436 pulongest (vq), AARCH64_MAX_SVE_VQ);
3437
3438 /* If there is already a candidate, use it. */
3439 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3440 best_arch != nullptr;
3441 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3442 {
3443 aarch64_gdbarch_tdep *tdep
3444 = (aarch64_gdbarch_tdep *) gdbarch_tdep (best_arch->gdbarch);
3445 if (tdep && tdep->vq == vq)
3446 return best_arch->gdbarch;
3447 }
3448
3449 /* Ensure we always have a target descriptor, and that it is for the given VQ
3450 value. */
3451 const struct target_desc *tdesc = info.target_desc;
3452 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3453 tdesc = aarch64_read_description (vq, false, false, false);
3454 gdb_assert (tdesc);
3455
3456 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3457 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3458 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3459 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3460 const struct tdesc_feature *feature_mte
3461 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
3462 const struct tdesc_feature *feature_tls
3463 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
3464
3465 if (feature_core == nullptr)
3466 return nullptr;
3467
3468 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3469
3470 /* Validate the description provides the mandatory core R registers
3471 and allocate their numbers. */
3472 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3473 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3474 AARCH64_X0_REGNUM + i,
3475 aarch64_r_register_names[i]);
3476
3477 num_regs = AARCH64_X0_REGNUM + i;
3478
3479 /* Add the V registers. */
3480 if (feature_fpu != nullptr)
3481 {
3482 if (feature_sve != nullptr)
3483 error (_("Program contains both fpu and SVE features."));
3484
3485 /* Validate the description provides the mandatory V registers
3486 and allocate their numbers. */
3487 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3488 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3489 AARCH64_V0_REGNUM + i,
3490 aarch64_v_register_names[i]);
3491
3492 num_regs = AARCH64_V0_REGNUM + i;
3493 }
3494
3495 /* Add the SVE registers. */
3496 if (feature_sve != nullptr)
3497 {
3498 /* Validate the description provides the mandatory SVE registers
3499 and allocate their numbers. */
3500 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3501 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3502 AARCH64_SVE_Z0_REGNUM + i,
3503 aarch64_sve_register_names[i]);
3504
3505 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3506 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3507 }
3508
3509 if (feature_fpu != nullptr || feature_sve != nullptr)
3510 {
3511 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3512 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3513 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3514 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3515 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3516 }
3517
3518 /* Add the TLS register. */
3519 if (feature_tls != nullptr)
3520 {
3521 tls_regnum = num_regs;
3522 /* Validate the descriptor provides the mandatory TLS register
3523 and allocate its number. */
3524 valid_p = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3525 tls_regnum, "tpidr");
3526
3527 num_regs++;
3528 }
3529
3530 /* Add the pauth registers. */
3531 if (feature_pauth != NULL)
3532 {
3533 first_pauth_regnum = num_regs;
3534 pauth_ra_state_offset = num_pseudo_regs;
3535 /* Validate the descriptor provides the mandatory PAUTH registers and
3536 allocate their numbers. */
3537 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3538 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3539 first_pauth_regnum + i,
3540 aarch64_pauth_register_names[i]);
3541
3542 num_regs += i;
3543 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3544 }
3545
3546 /* Add the MTE registers. */
3547 if (feature_mte != NULL)
3548 {
3549 first_mte_regnum = num_regs;
3550 /* Validate the descriptor provides the mandatory MTE registers and
3551 allocate their numbers. */
3552 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3553 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3554 first_mte_regnum + i,
3555 aarch64_mte_register_names[i]);
3556
3557 num_regs += i;
3558 }
3559
3560 if (!valid_p)
3561 return nullptr;
3562
3563 /* AArch64 code is always little-endian. */
3564 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3565
3566 aarch64_gdbarch_tdep *tdep = new aarch64_gdbarch_tdep;
3567 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3568
3569 /* This should be low enough for everything. */
3570 tdep->lowest_pc = 0x20;
3571 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3572 tdep->jb_elt_size = 8;
3573 tdep->vq = vq;
3574 tdep->pauth_reg_base = first_pauth_regnum;
3575 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3576 : pauth_ra_state_offset + num_regs;
3577 tdep->mte_reg_base = first_mte_regnum;
3578 tdep->tls_regnum = tls_regnum;
3579
3580 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3581 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3582
3583 /* Advance PC across function entry code. */
3584 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3585
3586 /* The stack grows downward. */
3587 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3588
3589 /* Breakpoint manipulation. */
3590 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3591 aarch64_breakpoint::kind_from_pc);
3592 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3593 aarch64_breakpoint::bp_from_kind);
3594 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3595 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3596
3597 /* Information about registers, etc. */
3598 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3599 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3600 set_gdbarch_num_regs (gdbarch, num_regs);
3601
3602 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3603 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3604 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3605 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3606 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3607 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3608 aarch64_pseudo_register_reggroup_p);
3609 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3610
3611 /* ABI */
3612 set_gdbarch_short_bit (gdbarch, 16);
3613 set_gdbarch_int_bit (gdbarch, 32);
3614 set_gdbarch_float_bit (gdbarch, 32);
3615 set_gdbarch_double_bit (gdbarch, 64);
3616 set_gdbarch_long_double_bit (gdbarch, 128);
3617 set_gdbarch_long_bit (gdbarch, 64);
3618 set_gdbarch_long_long_bit (gdbarch, 64);
3619 set_gdbarch_ptr_bit (gdbarch, 64);
3620 set_gdbarch_char_signed (gdbarch, 0);
3621 set_gdbarch_wchar_signed (gdbarch, 0);
3622 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3623 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3624 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
3625 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3626
3627 /* Detect whether PC is at a point where the stack has been destroyed. */
3628 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
3629
3630 /* Internal <-> external register number maps. */
3631 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3632
3633 /* Returning results. */
3634 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3635
3636 /* Disassembly. */
3637 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3638
3639 /* Virtual tables. */
3640 set_gdbarch_vbit_in_delta (gdbarch, 1);
3641
3642 /* Hook in the ABI-specific overrides, if they have been registered. */
3643 info.target_desc = tdesc;
3644 info.tdesc_data = tdesc_data.get ();
3645 gdbarch_init_osabi (info, gdbarch);
3646
3647 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3648 /* Register DWARF CFA vendor handler. */
3649 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3650 aarch64_execute_dwarf_cfa_vendor_op);
3651
3652 /* Permanent/Program breakpoint handling. */
3653 set_gdbarch_program_breakpoint_here_p (gdbarch,
3654 aarch64_program_breakpoint_here_p);
3655
3656 /* Add some default predicates. */
3657 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3658 dwarf2_append_unwinders (gdbarch);
3659 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3660
3661 frame_base_set_default (gdbarch, &aarch64_normal_base);
3662
3663 /* Now that we have tuned the configuration, set a few final things
3664 based on what the OS ABI has told us.  */
3665
3666 if (tdep->jb_pc >= 0)
3667 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3668
3669 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3670
3671 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3672
3673 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3674
3675 /* Add standard register aliases. */
3676 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3677 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3678 value_of_aarch64_user_reg,
3679 &aarch64_register_aliases[i].regnum);
3680
3681 register_aarch64_ravenscar_ops (gdbarch);
3682
3683 return gdbarch;
3684 }
3685
3686 static void
3687 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3688 {
3689 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
3690
3691 if (tdep == NULL)
3692 return;
3693
3694 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
3695 paddress (gdbarch, tdep->lowest_pc));
3696 }
3697
3698 #if GDB_SELF_TEST
3699 namespace selftests
3700 {
3701 static void aarch64_process_record_test (void);
3702 }
3703 #endif
3704
3705 void _initialize_aarch64_tdep ();
3706 void
3707 _initialize_aarch64_tdep ()
3708 {
3709 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3710 aarch64_dump_tdep);
3711
3712 /* Debug this file's internals. */
3713 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3714 Set AArch64 debugging."), _("\
3715 Show AArch64 debugging."), _("\
3716 When on, AArch64 specific debugging is enabled."),
3717 NULL,
3718 show_aarch64_debug,
3719 &setdebuglist, &showdebuglist);
3720
3721 #if GDB_SELF_TEST
3722 selftests::register_test ("aarch64-analyze-prologue",
3723 selftests::aarch64_analyze_prologue_test);
3724 selftests::register_test ("aarch64-process-record",
3725 selftests::aarch64_process_record_test);
3726 #endif
3727 }
3728
3729 /* AArch64 process record-replay related structures, defines etc. */
3730
3731 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3732 do \
3733 { \
3734 unsigned int reg_len = LENGTH; \
3735 if (reg_len) \
3736 { \
3737 REGS = XNEWVEC (uint32_t, reg_len); \
3738 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3739 } \
3740 } \
3741 while (0)
3742
3743 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3744 do \
3745 { \
3746 unsigned int mem_len = LENGTH; \
3747 if (mem_len) \
3748 { \
3749 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3750 memcpy (MEMS, &RECORD_BUF[0], \
3751 sizeof (struct aarch64_mem_r) * LENGTH); \
3752 } \
3753 } \
3754 while (0)
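/* Typical use in the record handlers below (a minimal sketch):

     uint32_t record_buf[2];
     record_buf[0] = reg_rd;
     aarch64_insn_r->reg_rec_count = 1;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   Handlers fill a local buffer and set the count; the macro then copies
   the entries into a heap allocation that deallocate_reg_mem frees.  */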
3755
3756 /* AArch64 record/replay structures and enumerations. */
3757
3758 struct aarch64_mem_r
3759 {
3760 uint64_t len; /* Record length. */
3761 uint64_t addr; /* Memory address. */
3762 };
3763
3764 enum aarch64_record_result
3765 {
3766 AARCH64_RECORD_SUCCESS,
3767 AARCH64_RECORD_UNSUPPORTED,
3768 AARCH64_RECORD_UNKNOWN
3769 };
3770
3771 typedef struct insn_decode_record_t
3772 {
3773 struct gdbarch *gdbarch;
3774 struct regcache *regcache;
3775 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3776 uint32_t aarch64_insn; /* Insn to be recorded. */
3777 uint32_t mem_rec_count; /* Count of memory records. */
3778 uint32_t reg_rec_count; /* Count of register records. */
3779 uint32_t *aarch64_regs; /* Registers to be recorded. */
3780 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3781 } insn_decode_record;
3782
3783 /* Record handler for data processing - register instructions. */
3784
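/* For example (illustrative, not exhaustive): ADDS Xd, Xn, Xm is
   add/subtract (shifted register), so bits 24..27 are 0x0b with bit 28
   clear and bit 29 (S) set; the handler records Rd and, because the
   flags are written, CPSR as well.  */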
3785 static unsigned int
3786 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3787 {
3788 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3789 uint32_t record_buf[4];
3790
3791 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3792 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3793 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3794
3795 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3796 {
3797 uint8_t setflags;
3798
3799 /* Logical (shifted register). */
3800 if (insn_bits24_27 == 0x0a)
3801 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3802 /* Add/subtract. */
3803 else if (insn_bits24_27 == 0x0b)
3804 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3805 else
3806 return AARCH64_RECORD_UNKNOWN;
3807
3808 record_buf[0] = reg_rd;
3809 aarch64_insn_r->reg_rec_count = 1;
3810 if (setflags)
3811 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3812 }
3813 else
3814 {
3815 if (insn_bits24_27 == 0x0b)
3816 {
3817 /* Data-processing (3 source). */
3818 record_buf[0] = reg_rd;
3819 aarch64_insn_r->reg_rec_count = 1;
3820 }
3821 else if (insn_bits24_27 == 0x0a)
3822 {
3823 if (insn_bits21_23 == 0x00)
3824 {
3825 /* Add/subtract (with carry). */
3826 record_buf[0] = reg_rd;
3827 aarch64_insn_r->reg_rec_count = 1;
3828 if (bit (aarch64_insn_r->aarch64_insn, 29))
3829 {
3830 record_buf[1] = AARCH64_CPSR_REGNUM;
3831 aarch64_insn_r->reg_rec_count = 2;
3832 }
3833 }
3834 else if (insn_bits21_23 == 0x02)
3835 {
3836 /* Conditional compare (register) and conditional compare
3837 (immediate) instructions. */
3838 record_buf[0] = AARCH64_CPSR_REGNUM;
3839 aarch64_insn_r->reg_rec_count = 1;
3840 }
3841 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3842 {
3843 /* Conditional select. */
3844 /* Data-processing (2 source). */
3845 /* Data-processing (1 source). */
3846 record_buf[0] = reg_rd;
3847 aarch64_insn_r->reg_rec_count = 1;
3848 }
3849 else
3850 return AARCH64_RECORD_UNKNOWN;
3851 }
3852 }
3853
3854 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3855 record_buf);
3856 return AARCH64_RECORD_SUCCESS;
3857 }
3858
3859 /* Record handler for data processing - immediate instructions. */
3860
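/* For example (illustrative): ADDS Xd, Xn, #imm is add/subtract
   (immediate), so bits 24..27 are 0x01; the handler records Rd, plus
   CPSR because bit 29 (S) is set.  */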
3861 static unsigned int
3862 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3863 {
3864 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3865 uint32_t record_buf[4];
3866
3867 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3868 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3869 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3870
3871 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3872 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3873 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3874 {
3875 record_buf[0] = reg_rd;
3876 aarch64_insn_r->reg_rec_count = 1;
3877 }
3878 else if (insn_bits24_27 == 0x01)
3879 {
3880 /* Add/Subtract (immediate). */
3881 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3882 record_buf[0] = reg_rd;
3883 aarch64_insn_r->reg_rec_count = 1;
3884 if (setflags)
3885 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3886 }
3887 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3888 {
3889 /* Logical (immediate). */
3890 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3891 record_buf[0] = reg_rd;
3892 aarch64_insn_r->reg_rec_count = 1;
3893 if (setflags)
3894 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3895 }
3896 else
3897 return AARCH64_RECORD_UNKNOWN;
3898
3899 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3900 record_buf);
3901 return AARCH64_RECORD_SUCCESS;
3902 }
3903
3904 /* Record handler for branch, exception generation and system instructions. */
3905
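/* For example (illustrative): B and BL are unconditional branches
   (immediate); both record the PC, and BL (bit 31 set) also records the
   LR it writes.  An SVC is delegated to the OS-ABI specific
   aarch64_syscall_record hook, since its effects depend on the
   syscall.  */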
3906 static unsigned int
3907 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3908 {
3909
3910 aarch64_gdbarch_tdep *tdep
3911 = (aarch64_gdbarch_tdep *) gdbarch_tdep (aarch64_insn_r->gdbarch);
3912 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3913 uint32_t record_buf[4];
3914
3915 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3916 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3917 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3918
3919 if (insn_bits28_31 == 0x0d)
3920 {
3921 /* Exception generation instructions. */
3922 if (insn_bits24_27 == 0x04)
3923 {
3924 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3925 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3926 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3927 {
3928 ULONGEST svc_number;
3929
3930 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3931 &svc_number);
3932 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3933 svc_number);
3934 }
3935 else
3936 return AARCH64_RECORD_UNSUPPORTED;
3937 }
3938 /* System instructions. */
3939 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3940 {
3941 uint32_t reg_rt, reg_crn;
3942
3943 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3944 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3945
3946 /* Record Rt in case of SYSL and MRS instructions.  */
3947 if (bit (aarch64_insn_r->aarch64_insn, 21))
3948 {
3949 record_buf[0] = reg_rt;
3950 aarch64_insn_r->reg_rec_count = 1;
3951 }
3952 /* Record CPSR for HINT and MSR (immediate) instructions.  */
3953 else if (reg_crn == 0x02 || reg_crn == 0x04)
3954 {
3955 record_buf[0] = AARCH64_CPSR_REGNUM;
3956 aarch64_insn_r->reg_rec_count = 1;
3957 }
3958 }
3959 /* Unconditional branch (register). */
3960 else if ((insn_bits24_27 & 0x0e) == 0x06)
3961 {
3962 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3963 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3964 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3965 }
3966 else
3967 return AARCH64_RECORD_UNKNOWN;
3968 }
3969 /* Unconditional branch (immediate). */
3970 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3971 {
3972 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3973 if (bit (aarch64_insn_r->aarch64_insn, 31))
3974 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3975 }
3976 else
3977 /* Compare & branch (immediate), Test & branch (immediate) and
3978 Conditional branch (immediate). */
3979 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3980
3981 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3982 record_buf);
3983 return AARCH64_RECORD_SUCCESS;
3984 }
3985
3986 /* Record handler for advanced SIMD load and store instructions. */
3987
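/* For example (illustrative): a load such as LD1 records the destination
   V registers element by element, while a store such as ST1 records the
   memory areas written; the post-index forms (bit 23 set) also record
   the updated base register.  */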
3988 static unsigned int
3989 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3990 {
3991 CORE_ADDR address;
3992 uint64_t addr_offset = 0;
3993 uint32_t record_buf[66]; /* Worst case: 4 regs x 16 elements (LD1), plus writeback.  */
3994 uint64_t record_buf_mem[128]; /* Worst case: 64 stored elements (ST4), 2 entries each.  */
3995 uint32_t reg_rn, reg_rt;
3996 uint32_t reg_index = 0, mem_index = 0;
3997 uint8_t opcode_bits, size_bits;
3998
3999 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4000 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4001 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4002 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4003 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
4004
4005 if (record_debug)
4006 debug_printf ("Process record: Advanced SIMD load/store\n");
4007
4008 /* Load/store single structure. */
4009 if (bit (aarch64_insn_r->aarch64_insn, 24))
4010 {
4011 uint8_t sindex, scale, selem, esize, replicate = 0;
4012 scale = opcode_bits >> 2;
4013 selem = ((opcode_bits & 0x02)
4014 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
4015 switch (scale)
4016 {
4017 case 1:
4018 if (size_bits & 0x01)
4019 return AARCH64_RECORD_UNKNOWN;
4020 break;
4021 case 2:
4022 if ((size_bits >> 1) & 0x01)
4023 return AARCH64_RECORD_UNKNOWN;
4024 if (size_bits & 0x01)
4025 {
4026 if (!((opcode_bits >> 1) & 0x01))
4027 scale = 3;
4028 else
4029 return AARCH64_RECORD_UNKNOWN;
4030 }
4031 break;
4032 case 3:
4033 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
4034 {
4035 scale = size_bits;
4036 replicate = 1;
4037 break;
4038 }
4039 else
4040 return AARCH64_RECORD_UNKNOWN;
4041 default:
4042 break;
4043 }
4044 esize = 8 << scale;
4045 if (replicate)
4046 for (sindex = 0; sindex < selem; sindex++)
4047 {
4048 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4049 reg_rt = (reg_rt + 1) % 32;
4050 }
4051 else
4052 {
4053 for (sindex = 0; sindex < selem; sindex++)
4054 {
4055 if (bit (aarch64_insn_r->aarch64_insn, 22))
4056 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4057 else
4058 {
4059 record_buf_mem[mem_index++] = esize / 8;
4060 record_buf_mem[mem_index++] = address + addr_offset;
4061 }
4062 addr_offset = addr_offset + (esize / 8);
4063 reg_rt = (reg_rt + 1) % 32;
4064 }
4065 }
4066 }
4067 /* Load/store multiple structure. */
4068 else
4069 {
4070 uint8_t selem, esize, rpt, elements;
4071 uint8_t eindex, rindex;
4072
4073 esize = 8 << size_bits;
4074 if (bit (aarch64_insn_r->aarch64_insn, 30))
4075 elements = 128 / esize;
4076 else
4077 elements = 64 / esize;
4078
4079 switch (opcode_bits)
4080 {
4081 /* LD/ST4 (4 registers).  */
4082 case 0:
4083 rpt = 1;
4084 selem = 4;
4085 break;
4086 /* LD/ST1 (4 registers).  */
4087 case 2:
4088 rpt = 4;
4089 selem = 1;
4090 break;
4091 /* LD/ST3 (3 registers).  */
4092 case 4:
4093 rpt = 1;
4094 selem = 3;
4095 break;
4096 /* LD/ST1 (3 registers).  */
4097 case 6:
4098 rpt = 3;
4099 selem = 1;
4100 break;
4101 /* LD/ST1 (1 register).  */
4102 case 7:
4103 rpt = 1;
4104 selem = 1;
4105 break;
4106 /* LD/ST2 (2 registers).  */
4107 case 8:
4108 rpt = 1;
4109 selem = 2;
4110 break;
4111 /* LD/ST1 (2 registers).  */
4112 case 10:
4113 rpt = 2;
4114 selem = 1;
4115 break;
4116 default:
4117 return AARCH64_RECORD_UNSUPPORTED;
4118 break;
4119 }
4120 for (rindex = 0; rindex < rpt; rindex++)
4121 for (eindex = 0; eindex < elements; eindex++)
4122 {
4123 uint8_t reg_tt, sindex;
4124 reg_tt = (reg_rt + rindex) % 32;
4125 for (sindex = 0; sindex < selem; sindex++)
4126 {
4127 if (bit (aarch64_insn_r->aarch64_insn, 22))
4128 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4129 else
4130 {
4131 record_buf_mem[mem_index++] = esize / 8;
4132 record_buf_mem[mem_index++] = address + addr_offset;
4133 }
4134 addr_offset = addr_offset + (esize / 8);
4135 reg_tt = (reg_tt + 1) % 32;
4136 }
4137 }
4138 }
4139
4140 if (bit (aarch64_insn_r->aarch64_insn, 23))
4141 record_buf[reg_index++] = reg_rn;
4142
4143 aarch64_insn_r->reg_rec_count = reg_index;
4144 aarch64_insn_r->mem_rec_count = mem_index / 2;
4145 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4146 record_buf_mem);
4147 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4148 record_buf);
4149 return AARCH64_RECORD_SUCCESS;
4150 }
4151
4152 /* Record handler for load and store instructions. */
4153
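/* For example (illustrative): STP X0, X1, [SP, #16] is a store pair with
   a signed offset, so two 8-byte memory areas at SP+16 and SP+24 are
   recorded; the pre- and post-index forms (bit 23 set) record the
   updated base register as well.  */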
4154 static unsigned int
4155 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4156 {
4157 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4158 uint8_t insn_bit23, insn_bit21;
4159 uint8_t opc, size_bits, ld_flag, vector_flag;
4160 uint32_t reg_rn, reg_rt, reg_rt2;
4161 uint64_t datasize, offset;
4162 uint32_t record_buf[8];
4163 uint64_t record_buf_mem[8];
4164 CORE_ADDR address;
4165
4166 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4167 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4168 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4169 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4170 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4171 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4172 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4173 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4174 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4175 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4176 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4177
4178 /* Load/store exclusive. */
4179 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4180 {
4181 if (record_debug)
4182 debug_printf ("Process record: load/store exclusive\n");
4183
4184 if (ld_flag)
4185 {
4186 record_buf[0] = reg_rt;
4187 aarch64_insn_r->reg_rec_count = 1;
4188 if (insn_bit21)
4189 {
4190 record_buf[1] = reg_rt2;
4191 aarch64_insn_r->reg_rec_count = 2;
4192 }
4193 }
4194 else
4195 {
4196 if (insn_bit21)
4197 datasize = (8 << size_bits) * 2;
4198 else
4199 datasize = (8 << size_bits);
4200 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4201 &address);
4202 record_buf_mem[0] = datasize / 8;
4203 record_buf_mem[1] = address;
4204 aarch64_insn_r->mem_rec_count = 1;
4205 if (!insn_bit23)
4206 {
4207 /* Save register rs. */
4208 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4209 aarch64_insn_r->reg_rec_count = 1;
4210 }
4211 }
4212 }
4213 /* Decode load register (literal) instructions.  */
4214 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4215 {
4216 if (record_debug)
4217 debug_printf ("Process record: load register (literal)\n");
4218 if (vector_flag)
4219 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4220 else
4221 record_buf[0] = reg_rt;
4222 aarch64_insn_r->reg_rec_count = 1;
4223 }
4224 /* Decode all types of load/store pair instructions.  */
4225 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4226 {
4227 if (record_debug)
4228 debug_printf ("Process record: load/store pair\n");
4229
4230 if (ld_flag)
4231 {
4232 if (vector_flag)
4233 {
4234 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4235 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4236 }
4237 else
4238 {
4239 record_buf[0] = reg_rt;
4240 record_buf[1] = reg_rt2;
4241 }
4242 aarch64_insn_r->reg_rec_count = 2;
4243 }
4244 else
4245 {
4246 uint16_t imm7_off;
4247 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4248 if (!vector_flag)
4249 size_bits = size_bits >> 1;
4250 datasize = 8 << (2 + size_bits);
4251 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4252 offset = offset << (2 + size_bits);
4253 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4254 &address);
4255 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4256 {
4257 if (imm7_off & 0x40)
4258 address = address - offset;
4259 else
4260 address = address + offset;
4261 }
4262
4263 record_buf_mem[0] = datasize / 8;
4264 record_buf_mem[1] = address;
4265 record_buf_mem[2] = datasize / 8;
4266 record_buf_mem[3] = address + (datasize / 8);
4267 aarch64_insn_r->mem_rec_count = 2;
4268 }
4269 if (bit (aarch64_insn_r->aarch64_insn, 23))
4270 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4271 }
4272 /* Load/store register (unsigned immediate) instructions. */
4273 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4274 {
4275 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4276 if (!(opc >> 1))
4277 {
4278 if (opc & 0x01)
4279 ld_flag = 0x01;
4280 else
4281 ld_flag = 0x0;
4282 }
4283 else
4284 {
4285 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4286 {
4287 /* PRFM (immediate) */
4288 return AARCH64_RECORD_SUCCESS;
4289 }
4290 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4291 {
4292 /* LDRSW (immediate) */
4293 ld_flag = 0x1;
4294 }
4295 else
4296 {
4297 if (opc & 0x01)
4298 ld_flag = 0x01;
4299 else
4300 ld_flag = 0x0;
4301 }
4302 }
4303
4304 if (record_debug)
4305 {
4306 debug_printf ("Process record: load/store (unsigned immediate):"
4307 " size %x V %d opc %x\n", size_bits, vector_flag,
4308 opc);
4309 }
4310
4311 if (!ld_flag)
4312 {
4313 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4314 datasize = 8 << size_bits;
4315 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4316 &address);
4317 offset = offset << size_bits;
4318 address = address + offset;
4319
4320 record_buf_mem[0] = datasize >> 3;
4321 record_buf_mem[1] = address;
4322 aarch64_insn_r->mem_rec_count = 1;
4323 }
4324 else
4325 {
4326 if (vector_flag)
4327 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4328 else
4329 record_buf[0] = reg_rt;
4330 aarch64_insn_r->reg_rec_count = 1;
4331 }
4332 }
4333 /* Load/store register (register offset) instructions. */
4334 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4335 && insn_bits10_11 == 0x02 && insn_bit21)
4336 {
4337 if (record_debug)
4338 debug_printf ("Process record: load/store (register offset)\n");
4339 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4340 if (!(opc >> 1))
4341 if (opc & 0x01)
4342 ld_flag = 0x01;
4343 else
4344 ld_flag = 0x0;
4345 else
4346 if (size_bits != 0x03)
4347 ld_flag = 0x01;
4348 else
4349 return AARCH64_RECORD_UNKNOWN;
4350
4351 if (!ld_flag)
4352 {
4353 ULONGEST reg_rm_val;
4354
4355 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4356 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4357 if (bit (aarch64_insn_r->aarch64_insn, 12))
4358 offset = reg_rm_val << size_bits;
4359 else
4360 offset = reg_rm_val;
4361 datasize = 8 << size_bits;
4362 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4363 &address);
4364 address = address + offset;
4365 record_buf_mem[0] = datasize >> 3;
4366 record_buf_mem[1] = address;
4367 aarch64_insn_r->mem_rec_count = 1;
4368 }
4369 else
4370 {
4371 if (vector_flag)
4372 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4373 else
4374 record_buf[0] = reg_rt;
4375 aarch64_insn_r->reg_rec_count = 1;
4376 }
4377 }
4378 /* Load/store register (immediate and unprivileged) instructions. */
4379 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4380 && !insn_bit21)
4381 {
4382 if (record_debug)
4383 {
4384 debug_printf ("Process record: load/store "
4385 "(immediate and unprivileged)\n");
4386 }
4387 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4388 if (!(opc >> 1))
4389 if (opc & 0x01)
4390 ld_flag = 0x01;
4391 else
4392 ld_flag = 0x0;
4393 else
4394 if (size_bits != 0x03)
4395 ld_flag = 0x01;
4396 else
4397 return AARCH64_RECORD_UNKNOWN;
4398
4399 if (!ld_flag)
4400 {
4401 uint16_t imm9_off;
4402 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4403 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4404 datasize = 8 << size_bits;
4405 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4406 &address);
4407 if (insn_bits10_11 != 0x01)
4408 {
4409 if (imm9_off & 0x0100)
4410 address = address - offset;
4411 else
4412 address = address + offset;
4413 }
4414 record_buf_mem[0] = datasize >> 3;
4415 record_buf_mem[1] = address;
4416 aarch64_insn_r->mem_rec_count = 1;
4417 }
4418 else
4419 {
4420 if (vector_flag)
4421 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4422 else
4423 record_buf[0] = reg_rt;
4424 aarch64_insn_r->reg_rec_count = 1;
4425 }
4426 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4427 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4428 }
4429 /* Advanced SIMD load/store instructions. */
4430 else
4431 return aarch64_record_asimd_load_store (aarch64_insn_r);
4432
4433 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4434 record_buf_mem);
4435 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4436 record_buf);
4437 return AARCH64_RECORD_SUCCESS;
4438 }
4439
4440 /* Record handler for data processing SIMD and floating point instructions. */
4441
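/* For example (illustrative): FCVTZS Xd, Dn is a float-to-integer
   conversion, so the handler records the destination X register; the
   FPSR is always recorded as well, since many of these instructions can
   set its cumulative exception bits.  */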
4442 static unsigned int
4443 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4444 {
4445 uint8_t insn_bit21, opcode, rmode, reg_rd;
4446 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4447 uint8_t insn_bits11_14;
4448 uint32_t record_buf[2];
4449
4450 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4451 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4452 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4453 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4454 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4455 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4456 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4457 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4458 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4459
4460 if (record_debug)
4461 debug_printf ("Process record: data processing SIMD/FP: ");
4462
4463 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4464 {
4465 /* Floating point - fixed point conversion instructions. */
4466 if (!insn_bit21)
4467 {
4468 if (record_debug)
4469 debug_printf ("FP - fixed point conversion");
4470
4471 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4472 record_buf[0] = reg_rd;
4473 else
4474 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4475 }
4476 /* Floating point - conditional compare instructions. */
4477 else if (insn_bits10_11 == 0x01)
4478 {
4479 if (record_debug)
4480 debug_printf ("FP - conditional compare");
4481
4482 record_buf[0] = AARCH64_CPSR_REGNUM;
4483 }
4484 /* Floating point - data processing (2-source) and
4485 conditional select instructions. */
4486 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4487 {
4488 if (record_debug)
4489 debug_printf ("FP - DP (2-source)");
4490
4491 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4492 }
4493 else if (insn_bits10_11 == 0x00)
4494 {
4495 /* Floating point - immediate instructions. */
4496 if ((insn_bits12_15 & 0x01) == 0x01
4497 || (insn_bits12_15 & 0x07) == 0x04)
4498 {
4499 if (record_debug)
4500 debug_printf ("FP - immediate");
4501 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4502 }
4503 /* Floating point - compare instructions. */
4504 else if ((insn_bits12_15 & 0x03) == 0x02)
4505 {
4506 if (record_debug)
4507 debug_printf ("FP - compare");
4508 record_buf[0] = AARCH64_CPSR_REGNUM;
4509 }
4510 /* Floating point - integer conversions instructions. */
4511 else if (insn_bits12_15 == 0x00)
4512 {
4513 /* Convert float to integer instruction. */
4514 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4515 {
4516 if (record_debug)
4517 debug_printf ("float to int conversion");
4518
4519 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4520 }
4521 /* Convert integer to float instruction. */
4522 else if ((opcode >> 1) == 0x01 && !rmode)
4523 {
4524 if (record_debug)
4525 debug_printf ("int to float conversion");
4526
4527 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4528 }
4529 /* Move float to integer instruction. */
4530 else if ((opcode >> 1) == 0x03)
4531 {
4532 if (record_debug)
4533 debug_printf ("move float to int");
4534
4535 if (!(opcode & 0x01))
4536 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4537 else
4538 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4539 }
4540 else
4541 return AARCH64_RECORD_UNKNOWN;
4542 }
4543 else
4544 return AARCH64_RECORD_UNKNOWN;
4545 }
4546 else
4547 return AARCH64_RECORD_UNKNOWN;
4548 }
4549 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4550 {
4551 if (record_debug)
4552 debug_printf ("SIMD copy");
4553
4554 /* Advanced SIMD copy instructions. */
4555 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4556 && !bit (aarch64_insn_r->aarch64_insn, 15)
4557 && bit (aarch64_insn_r->aarch64_insn, 10))
4558 {
4559 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4560 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4561 else
4562 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4563 }
4564 else
4565 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4566 }
4567 /* All remaining floating point or advanced SIMD instructions. */
4568 else
4569 {
4570 if (record_debug)
4571 debug_printf ("all remain");
4572
4573 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4574 }
4575
4576 if (record_debug)
4577 debug_printf ("\n");
4578
4579 /* Record the V/X register. */
4580 aarch64_insn_r->reg_rec_count++;
4581
4582 /* Some of these instructions may set bits in the FPSR, so record it
4583 too. */
4584 record_buf[1] = AARCH64_FPSR_REGNUM;
4585 aarch64_insn_r->reg_rec_count++;
4586
4587 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4588 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4589 record_buf);
4590 return AARCH64_RECORD_SUCCESS;
4591 }
4592
4593 /* Decode the type of the instruction and invoke its record handler.  */
4594
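/* The dispatch below mirrors the top-level encoding groups of the ISA:
   bit 28 set with bits 26 and 27 clear is data processing (immediate);
   bits 26 and 28 set with bit 27 clear is branch/exception/system;
   bit 27 set with bit 25 clear is load/store; bits 25 and 27 set select
   data processing (register) or (SIMD and FP) depending on bit 26.  */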
4595 static unsigned int
4596 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4597 {
4598 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4599
4600 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4601 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4602 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4603 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4604
4605 /* Data processing - immediate instructions. */
4606 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4607 return aarch64_record_data_proc_imm (aarch64_insn_r);
4608
4609 /* Branch, exception generation and system instructions. */
4610 if (ins_bit26 && !ins_bit27 && ins_bit28)
4611 return aarch64_record_branch_except_sys (aarch64_insn_r);
4612
4613 /* Load and store instructions. */
4614 if (!ins_bit25 && ins_bit27)
4615 return aarch64_record_load_store (aarch64_insn_r);
4616
4617 /* Data processing - register instructions. */
4618 if (ins_bit25 && !ins_bit26 && ins_bit27)
4619 return aarch64_record_data_proc_reg (aarch64_insn_r);
4620
4621 /* Data processing - SIMD and floating point instructions. */
4622 if (ins_bit25 && ins_bit26 && ins_bit27)
4623 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4624
4625 return AARCH64_RECORD_UNSUPPORTED;
4626 }
4627
4628 /* Free the register and memory record lists allocated for RECORD.  */
4629
4630 static void
4631 deallocate_reg_mem (insn_decode_record *record)
4632 {
4633 xfree (record->aarch64_regs);
4634 xfree (record->aarch64_mems);
4635 }
4636
4637 #if GDB_SELF_TEST
4638 namespace selftests {
4639
4640 static void
4641 aarch64_process_record_test (void)
4642 {
4643 struct gdbarch_info info;
4644 uint32_t ret;
4645
4646 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4647
4648 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4649 SELF_CHECK (gdbarch != NULL);
4650
4651 insn_decode_record aarch64_record;
4652
4653 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4654 aarch64_record.regcache = NULL;
4655 aarch64_record.this_addr = 0;
4656 aarch64_record.gdbarch = gdbarch;
4657
4658 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4659 aarch64_record.aarch64_insn = 0xf9800020;
4660 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4661 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4662 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4663 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4664
4665 deallocate_reg_mem (&aarch64_record);
4666 }
4667
4668 } // namespace selftests
4669 #endif /* GDB_SELF_TEST */
4670
4671 /* Parse the current instruction and record the values of the registers and
4672 memory that will be changed by it to record_arch_list.  Return -1 if
4673 something goes wrong.  */
4674
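/* This is the gdbarch process_record hook; the OS-ABI layer (for
   example aarch64_linux_init_abi in aarch64-linux-tdep.c) is expected
   to install it via set_gdbarch_process_record.  */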
4675 int
4676 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4677 CORE_ADDR insn_addr)
4678 {
4679 uint32_t rec_no = 0;
4680 uint8_t insn_size = 4;
4681 uint32_t ret = 0;
4682 gdb_byte buf[insn_size];
4683 insn_decode_record aarch64_record;
4684
4685 memset (&buf[0], 0, insn_size);
4686 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4687 target_read_memory (insn_addr, &buf[0], insn_size);
4688 aarch64_record.aarch64_insn
4689 = (uint32_t) extract_unsigned_integer (&buf[0],
4690 insn_size,
4691 gdbarch_byte_order (gdbarch));
4692 aarch64_record.regcache = regcache;
4693 aarch64_record.this_addr = insn_addr;
4694 aarch64_record.gdbarch = gdbarch;
4695
4696 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4697 if (ret == AARCH64_RECORD_UNSUPPORTED)
4698 {
4699 gdb_printf (gdb_stderr,
4700 _("Process record does not support instruction "
4701 "0x%0x at address %s.\n"),
4702 aarch64_record.aarch64_insn,
4703 paddress (gdbarch, insn_addr));
4704 ret = -1;
4705 }
4706
4707 if (0 == ret)
4708 {
4709 /* Record registers. */
4710 record_full_arch_list_add_reg (aarch64_record.regcache,
4711 AARCH64_PC_REGNUM);
4712 /* Always record register CPSR. */
4713 record_full_arch_list_add_reg (aarch64_record.regcache,
4714 AARCH64_CPSR_REGNUM);
4715 if (aarch64_record.aarch64_regs)
4716 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4717 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4718 aarch64_record.aarch64_regs[rec_no]))
4719 ret = -1;
4720
4721 /* Record memories. */
4722 if (aarch64_record.aarch64_mems)
4723 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4724 if (record_full_arch_list_add_mem
4725 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4726 aarch64_record.aarch64_mems[rec_no].len))
4727 ret = -1;
4728
4729 if (record_full_arch_list_add_end ())
4730 ret = -1;
4731 }
4732
4733 deallocate_reg_mem (&aarch64_record);
4734 return ret;
4735 }