/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
static target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/][2 /* mte */];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
                         struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
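
/* Illustrative example (the mask value is reported by the target, not
   fixed by the architecture): with a 48-bit virtual address space the
   code mask might be 0x007f000000000000.  A signed LR value of
   0x007f0000004005b4 would then unmask to

     addr & ~cmask == 0x007f0000004005b4 & ~0x007f000000000000
                   == 0x00000000004005b4.  */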

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }

          /* Did we move SP to FP?  */
          if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          /* If this shows up before we set the stack, keep going.  Otherwise
             stop the analysis.  */
          if (seen_stack_set)
            break;

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              aarch64_debug_printf ("prologue analysis gave up "
                                    "addr=%s opcode=0x%x (orr x register)",
                                    core_addr_to_string_nz (start), insn);

              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store
            (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
             size, regs[rt]);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Ignore the instruction that allocates stack space and sets
             the SP.  */
          if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate).  */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            rt += AARCH64_X_REGISTER_COUNT;

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          aarch64_gdbarch_tdep *tdep
            = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else if (IS_BTI (insn))
            /* We don't need to do anything special for a BTI instruction.  */
            continue;
          else
            {
              aarch64_debug_printf ("prologue analysis gave up addr=%s"
                                    " opcode=0x%x (iclass)",
                                    core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            {
              int regnum = tdep->pauth_ra_state_regnum;
              cache->saved_regs[regnum].set_value (ra_state_val);
            }
        }
      else
        {
          aarch64_debug_printf ("prologue analysis gave up addr=%s"
                                " opcode=0x%x",
                                core_addr_to_string_nz (start), insn);

          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -264);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ()
                      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int num_regs = gdbarch_num_regs (gdbarch);
        int regnum = i + num_regs + AARCH64_D0_REGNUM;

        SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
                    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr () == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr () == -48);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ()
                      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int num_regs = gdbarch_num_regs (gdbarch);
        int regnum = i + num_regs + AARCH64_D0_REGNUM;

        if (i == 0)
          SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
        else
          SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
                      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
      0x52800020, /* mov w0, #0x1 */
      0x910003fd, /* mov x29, sp */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0x290207e0, /* stp w0, w1, [sp, #16] */
      0xa9018fe2, /* stp x2, x3, [sp, #24] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0xb9002be4, /* str w4, [sp, #40] */
      0xf9001be5, /* str x5, [sp, #48] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0xb80343e6, /* stur w6, [sp, #52] */
      0xf80383e7, /* stur x7, [sp, #56] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
      0x52800020, /* mov w0, #0x1 */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
        0x910003fd, /* mov x29, sp */
        0xf801c3f3, /* str x19, [sp, #28] */
        0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr () == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr () == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr () == -40);
          else
            SELF_CHECK (cache.saved_regs[i].is_realreg ()
                        && cache.saved_regs[i].realreg () == i);
        }

      if (tdep->has_pauth ())
        {
          int regnum = tdep->pauth_ra_state_regnum;
          SELF_CHECK (cache.saved_regs[regnum].is_value ());
        }
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
      0x910003fd, /* mov x29, sp */
      0xf801c3f3, /* str x19, [sp, #28] */
      0xb9401fa0, /* ldr x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                              reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr () == -20);
        else if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -48);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr () == -40);
        else
          SELF_CHECK (cache.saved_regs[i].is_realreg ()
                      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
                                       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
        = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && cache->saved_regs[tdep->pauth_ra_state_regnum].is_value ())
        lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
        return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
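
/* For example, a function that signs its return address typically emits
   DW_CFA_AARCH64_negate_ra_state (.cfi_negate_ra_state) right after the
   paciasp instruction, flipping RA_STATE from its DW_OP_lit0 default to
   DW_OP_lit1, and emits it again after autiasp to flip it back; this
   tells the unwinder exactly where the saved LR needs unmasking.  */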

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
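
/* A BRK instruction encodes its 16-bit immediate in bits [20:5], so the
   mask above keeps the opcode bits and ignores the immediate.  For
   example, "brk #0x0" encodes as 0xd4200000 and "brk #0x1" as
   0xd4200020 (1 << 5); both satisfy
   (insn & BRK_INSN_MASK) == BRK_INSN_BASE.  */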

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
        (uint32_t) extract_unsigned_integer (target_mem, insn_len,
                                             gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
         choices of such instructions with different immediate values.
         Different OSes may use a different variation, but they have the
         same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same as for
         scalar types), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
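
/* For example, a vector of four floats (16 bytes) gets its natural
   16-byte alignment, while a 32-byte vector is capped at 16 bytes;
   any non-vector type returns 0 and falls through to the generic
   alignment code.  */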

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || type->code () != (*fundamental_type)->code ())
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || target_type->code () != (*fundamental_type)->code ())
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (type->is_vector ())
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || type->code () != (*fundamental_type)->code ())
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < type->num_fields (); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&type->field (i)))
              continue;

            struct type *member = check_typedef (type->field (i).type ());

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
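
/* For example, "struct { float x, y, z; }" is an HFA of three floats:
   the worker above returns count == 3 with *FUNDAMENTAL_TYPE set to
   float, so the aggregate can travel in three consecutive V registers.
   A struct of five floats, or one mixing a float with a double, fails
   the candidate check and falls back to the general-purpose rules.  */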

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg).data ();

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
                            gdbarch_register_name (gdbarch, regnum),
                            phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
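
/* For example, on a big-endian target a 3-byte struct extracted into
   REGVAL sits in its least significant bytes, so the loop above shifts
   it left by (8 - 3) * 8 = 40 bits to place the data in the most
   significant bytes of the X register, matching the in-memory layout
   the callee expects for a sub-word aggregate.  */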

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
                            gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  info->nsrn = 8;
  return 0;
}
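
/* For example, a 4-byte float argument is copied into the low 4 bytes
   of the zeroed V-register buffer; when that buffer lands in V0 the
   value is visible through the S0 view, and an 8-byte double likewise
   occupies the D0 view, as PCS C.1 describes.  */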

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg).data ();
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                        info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
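
/* For example, a 4-byte int spilled to the stack pushes NSAA from 0 to
   4; since the effective alignment here is at least 8, a 4-byte NULL
   padding item follows and NSAA becomes 8, so the next stacked
   argument starts on an 8-byte boundary.  */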

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg).data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg).data ();
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg).data ());
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&arg_type->field (i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}
1826
1827 /* Implement the "push_dummy_call" gdbarch method. */
1828
1829 static CORE_ADDR
1830 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1831 struct regcache *regcache, CORE_ADDR bp_addr,
1832 int nargs,
1833 struct value **args, CORE_ADDR sp,
1834 function_call_return_method return_method,
1835 CORE_ADDR struct_addr)
1836 {
1837 int argnum;
1838 struct aarch64_call_info info;
1839
1840 /* We need to know what the type of the called function is in order
1841 to determine the number of named/anonymous arguments for the
1842 actual argument placement, and the return type in order to handle
1843 return value correctly.
1844
1845 The generic code above us views the decision of return in memory
1846 or return in registers as a two stage processes. The language
1847 handler is consulted first and may decide to return in memory (eg
1848 class with copy constructor returned by value), this will cause
1849 the generic code to allocate space AND insert an initial leading
1850 argument.
1851
1852 If the language code does not decide to pass in memory then the
1853 target code is consulted.
1854
1855 If the language code decides to pass in memory we want to move
1856 the pointer inserted as the initial argument from the argument
1857 list and into X8, the conventional AArch64 struct return pointer
1858 register. */
1859
1860 /* Set the return address. For the AArch64, the return breakpoint
1861 is always at BP_ADDR. */
1862 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1863
1864 /* If we were given an initial argument for the return slot, lose it. */
1865 if (return_method == return_method_hidden_param)
1866 {
1867 args++;
1868 nargs--;
1869 }
1870
1871 /* The struct_return pointer occupies X8. */
1872 if (return_method != return_method_normal)
1873 {
1874 aarch64_debug_printf ("struct return in %s = 0x%s",
1875 gdbarch_register_name
1876 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1877 paddress (gdbarch, struct_addr));
1878
1879 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1880 struct_addr);
1881 }
1882
1883 for (argnum = 0; argnum < nargs; argnum++)
1884 {
1885 struct value *arg = args[argnum];
1886 struct type *arg_type, *fundamental_type;
1887 int len, elements;
1888
1889 arg_type = check_typedef (value_type (arg));
1890 len = TYPE_LENGTH (arg_type);
1891
1892 /* If arg can be passed in V registers as per the AAPCS64, then do so
1893 if there are enough spare registers. */
1894 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1895 &fundamental_type))
1896 {
1897 if (info.nsrn + elements <= 8)
1898 {
1899 /* We know that we have sufficient registers available, therefore
1900 this will never need to fall back to the stack. */
1901 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1902 arg))
1903 gdb_assert_not_reached ("Failed to push args");
1904 }
1905 else
1906 {
1907 info.nsrn = 8;
1908 pass_on_stack (&info, arg_type, arg);
1909 }
1910 continue;
1911 }
1912
1913 switch (arg_type->code ())
1914 {
1915 case TYPE_CODE_INT:
1916 case TYPE_CODE_BOOL:
1917 case TYPE_CODE_CHAR:
1918 case TYPE_CODE_RANGE:
1919 case TYPE_CODE_ENUM:
1920 if (len < 4 && !is_fixed_point_type (arg_type))
1921 {
1922 /* Promote to 32 bit integer. */
1923 if (arg_type->is_unsigned ())
1924 arg_type = builtin_type (gdbarch)->builtin_uint32;
1925 else
1926 arg_type = builtin_type (gdbarch)->builtin_int32;
1927 arg = value_cast (arg_type, arg);
1928 }
1929 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1930 break;
1931
1932 case TYPE_CODE_STRUCT:
1933 case TYPE_CODE_ARRAY:
1934 case TYPE_CODE_UNION:
1935 if (len > 16)
1936 {
1937 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1938 invisible reference. */
1939
1940 /* Allocate aligned storage. */
1941 sp = align_down (sp - len, 16);
1942
1943 /* Write the real data into the stack. */
1944 write_memory (sp, value_contents (arg).data (), len);
1945
1946 /* Construct the indirection. */
1947 arg_type = lookup_pointer_type (arg_type);
1948 arg = value_from_pointer (arg_type, sp);
1949 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1950 }
1951 else
1952 /* PCS C.15 / C.18 - pass small aggregates in registers or on the stack. */
1953 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1954 break;
1955
1956 default:
1957 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1958 break;
1959 }
1960 }
1961
1962 /* Make sure the stack retains 16-byte alignment. */
1963 if (info.nsaa & 15)
1964 sp -= 16 - (info.nsaa & 15);
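  /* For example, with info.nsaa == 40 the low bits are 40 & 15 == 8, so
     SP drops by a further 16 - 8 = 8 bytes, restoring the 16-byte
     alignment AAPCS64 requires of the stack pointer (illustrative
     note).  */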
1965
1966 while (!info.si.empty ())
1967 {
1968 const stack_item_t &si = info.si.back ();
1969
1970 sp -= si.len;
1971 if (si.data != NULL)
1972 write_memory (sp, si.data, si.len);
1973 info.si.pop_back ();
1974 }
1975
1976 /* Finally, update the SP register. */
1977 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1978
1979 return sp;
1980 }
1981
1982 /* Implement the "frame_align" gdbarch method. */
1983
1984 static CORE_ADDR
1985 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1986 {
1987 /* Align the stack to sixteen bytes. */
1988 return sp & ~(CORE_ADDR) 15;
1989 }
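/* Illustrative check: masking with ~15 clears the low four address
   bits, so 0x7ffffff8 aligns down to 0x7ffffff0, while an already
   aligned 0x7ffffff0 is unchanged.  */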
1990
1991 /* Return the type for an AdvSIMD Q register. */
1992
1993 static struct type *
1994 aarch64_vnq_type (struct gdbarch *gdbarch)
1995 {
1996 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
1997
1998 if (tdep->vnq_type == NULL)
1999 {
2000 struct type *t;
2001 struct type *elem;
2002
2003 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2004 TYPE_CODE_UNION);
2005
2006 elem = builtin_type (gdbarch)->builtin_uint128;
2007 append_composite_type_field (t, "u", elem);
2008
2009 elem = builtin_type (gdbarch)->builtin_int128;
2010 append_composite_type_field (t, "s", elem);
2011
2012 tdep->vnq_type = t;
2013 }
2014
2015 return tdep->vnq_type;
2016 }
2017
2018 /* Return the type for an AdvSIMD D register. */
2019
2020 static struct type *
2021 aarch64_vnd_type (struct gdbarch *gdbarch)
2022 {
2023 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2024
2025 if (tdep->vnd_type == NULL)
2026 {
2027 struct type *t;
2028 struct type *elem;
2029
2030 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2031 TYPE_CODE_UNION);
2032
2033 elem = builtin_type (gdbarch)->builtin_double;
2034 append_composite_type_field (t, "f", elem);
2035
2036 elem = builtin_type (gdbarch)->builtin_uint64;
2037 append_composite_type_field (t, "u", elem);
2038
2039 elem = builtin_type (gdbarch)->builtin_int64;
2040 append_composite_type_field (t, "s", elem);
2041
2042 tdep->vnd_type = t;
2043 }
2044
2045 return tdep->vnd_type;
2046 }
2047
2048 /* Return the type for an AdvSIMD S register. */
2049
2050 static struct type *
2051 aarch64_vns_type (struct gdbarch *gdbarch)
2052 {
2053 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2054
2055 if (tdep->vns_type == NULL)
2056 {
2057 struct type *t;
2058 struct type *elem;
2059
2060 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2061 TYPE_CODE_UNION);
2062
2063 elem = builtin_type (gdbarch)->builtin_float;
2064 append_composite_type_field (t, "f", elem);
2065
2066 elem = builtin_type (gdbarch)->builtin_uint32;
2067 append_composite_type_field (t, "u", elem);
2068
2069 elem = builtin_type (gdbarch)->builtin_int32;
2070 append_composite_type_field (t, "s", elem);
2071
2072 tdep->vns_type = t;
2073 }
2074
2075 return tdep->vns_type;
2076 }
2077
2078 /* Return the type for an AdvSIMD H register. */
2079
2080 static struct type *
2081 aarch64_vnh_type (struct gdbarch *gdbarch)
2082 {
2083 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2084
2085 if (tdep->vnh_type == NULL)
2086 {
2087 struct type *t;
2088 struct type *elem;
2089
2090 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2091 TYPE_CODE_UNION);
2092
2093 elem = builtin_type (gdbarch)->builtin_bfloat16;
2094 append_composite_type_field (t, "bf", elem);
2095
2096 elem = builtin_type (gdbarch)->builtin_half;
2097 append_composite_type_field (t, "f", elem);
2098
2099 elem = builtin_type (gdbarch)->builtin_uint16;
2100 append_composite_type_field (t, "u", elem);
2101
2102 elem = builtin_type (gdbarch)->builtin_int16;
2103 append_composite_type_field (t, "s", elem);
2104
2105 tdep->vnh_type = t;
2106 }
2107
2108 return tdep->vnh_type;
2109 }
2110
2111 /* Return the type for an AdvSIMD B register. */
2112
2113 static struct type *
2114 aarch64_vnb_type (struct gdbarch *gdbarch)
2115 {
2116 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2117
2118 if (tdep->vnb_type == NULL)
2119 {
2120 struct type *t;
2121 struct type *elem;
2122
2123 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2124 TYPE_CODE_UNION);
2125
2126 elem = builtin_type (gdbarch)->builtin_uint8;
2127 append_composite_type_field (t, "u", elem);
2128
2129 elem = builtin_type (gdbarch)->builtin_int8;
2130 append_composite_type_field (t, "s", elem);
2131
2132 tdep->vnb_type = t;
2133 }
2134
2135 return tdep->vnb_type;
2136 }
2137
2138 /* Return the type for an AdvSIMD V register. */
2139
2140 static struct type *
2141 aarch64_vnv_type (struct gdbarch *gdbarch)
2142 {
2143 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2144
2145 if (tdep->vnv_type == NULL)
2146 {
2147 /* The other AArch64 pseudo registers (Q, D, H, S, B) refer to a single value
2148 slice from the non-pseudo vector registers. However, NEON V registers
2149 are always vector registers, and need constructing as such. */
2150 const struct builtin_type *bt = builtin_type (gdbarch);
2151
2152 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2153 TYPE_CODE_UNION);
2154
2155 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2156 TYPE_CODE_UNION);
2157 append_composite_type_field (sub, "f",
2158 init_vector_type (bt->builtin_double, 2));
2159 append_composite_type_field (sub, "u",
2160 init_vector_type (bt->builtin_uint64, 2));
2161 append_composite_type_field (sub, "s",
2162 init_vector_type (bt->builtin_int64, 2));
2163 append_composite_type_field (t, "d", sub);
2164
2165 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2166 TYPE_CODE_UNION);
2167 append_composite_type_field (sub, "f",
2168 init_vector_type (bt->builtin_float, 4));
2169 append_composite_type_field (sub, "u",
2170 init_vector_type (bt->builtin_uint32, 4));
2171 append_composite_type_field (sub, "s",
2172 init_vector_type (bt->builtin_int32, 4));
2173 append_composite_type_field (t, "s", sub);
2174
2175 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2176 TYPE_CODE_UNION);
2177 append_composite_type_field (sub, "bf",
2178 init_vector_type (bt->builtin_bfloat16, 8));
2179 append_composite_type_field (sub, "f",
2180 init_vector_type (bt->builtin_half, 8));
2181 append_composite_type_field (sub, "u",
2182 init_vector_type (bt->builtin_uint16, 8));
2183 append_composite_type_field (sub, "s",
2184 init_vector_type (bt->builtin_int16, 8));
2185 append_composite_type_field (t, "h", sub);
2186
2187 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2188 TYPE_CODE_UNION);
2189 append_composite_type_field (sub, "u",
2190 init_vector_type (bt->builtin_uint8, 16));
2191 append_composite_type_field (sub, "s",
2192 init_vector_type (bt->builtin_int8, 16));
2193 append_composite_type_field (t, "b", sub);
2194
2195 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2196 TYPE_CODE_UNION);
2197 append_composite_type_field (sub, "u",
2198 init_vector_type (bt->builtin_uint128, 1));
2199 append_composite_type_field (sub, "s",
2200 init_vector_type (bt->builtin_int128, 1));
2201 append_composite_type_field (t, "q", sub);
2202
2203 tdep->vnv_type = t;
2204 }
2205
2206 return tdep->vnv_type;
2207 }
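/* Roughly, the V pseudo register type built above corresponds to this C
   sketch (field names as above; the 'h' member also carries a bfloat16
   'bf' field, and the 128-bit fields use a compiler extension purely
   for illustration):

     union vnv
     {
       union { double f[2];   uint64_t u[2];  int64_t s[2];  } d;
       union { float f[4];    uint32_t u[4];  int32_t s[4];  } s;
       union { _Float16 f[8]; uint16_t u[8];  int16_t s[8];  } h;
       union { uint8_t u[16]; int8_t s[16]; } b;
       union { unsigned __int128 u[1]; __int128 s[1]; } q;
     };  */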
2208
2209 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2210
2211 static int
2212 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2213 {
2214 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2215
2216 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2217 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2218
2219 if (reg == AARCH64_DWARF_SP)
2220 return AARCH64_SP_REGNUM;
2221
2222 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2223 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2224
2225 if (reg == AARCH64_DWARF_SVE_VG)
2226 return AARCH64_SVE_VG_REGNUM;
2227
2228 if (reg == AARCH64_DWARF_SVE_FFR)
2229 return AARCH64_SVE_FFR_REGNUM;
2230
2231 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2232 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2233
2234 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2235 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2236
2237 if (tdep->has_pauth ())
2238 {
2239 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2240 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2241
2242 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2243 return tdep->pauth_ra_state_regnum;
2244 }
2245
2246 return -1;
2247 }
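/* Illustration, assuming the standard AArch64 DWARF numbering (X0-X30
   at 0-30, SP at 31, V0-V31 at 64-95): DWARF register 3 maps to X3,
   DWARF 69 maps to V5, and anything unrecognised returns -1 so the
   caller can flag the register as unmapped.  */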
2248
2249 /* Implement the "print_insn" gdbarch method. */
2250
2251 static int
2252 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2253 {
2254 info->symbols = NULL;
2255 return default_print_insn (memaddr, info);
2256 }
2257
2258 /* AArch64 BRK software debug mode instruction.
2259 Note that AArch64 code is always little-endian.
2260 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2261 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2262
2263 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
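/* Decoding note (illustrative): 0xd4200000 is BRK #0; bits [31:21] hold
   the BRK opcode 1101 0100 001 and bits [20:5] a zero immediate.
   Written little-endian, that is the byte sequence 00 00 20 d4 used in
   the array above.  */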
2264
2265 /* Extract from an array REGS containing the (raw) register state a
2266 function return value of type TYPE, and copy that, in virtual
2267 format, into VALBUF. */
2268
2269 static void
2270 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2271 gdb_byte *valbuf)
2272 {
2273 struct gdbarch *gdbarch = regs->arch ();
2274 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2275 int elements;
2276 struct type *fundamental_type;
2277
2278 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2279 &fundamental_type))
2280 {
2281 int len = TYPE_LENGTH (fundamental_type);
2282
2283 for (int i = 0; i < elements; i++)
2284 {
2285 int regno = AARCH64_V0_REGNUM + i;
2286 /* Enough space for a full vector register. */
2287 gdb_byte buf[register_size (gdbarch, regno)];
2288 gdb_assert (len <= sizeof (buf));
2289
2290 aarch64_debug_printf
2291 ("read HFA or HVA return value element %d from %s",
2292 i + 1, gdbarch_register_name (gdbarch, regno));
2293
2294 regs->cooked_read (regno, buf);
2295
2296 memcpy (valbuf, buf, len);
2297 valbuf += len;
2298 }
2299 }
2300 else if (type->code () == TYPE_CODE_INT
2301 || type->code () == TYPE_CODE_CHAR
2302 || type->code () == TYPE_CODE_BOOL
2303 || type->code () == TYPE_CODE_PTR
2304 || TYPE_IS_REFERENCE (type)
2305 || type->code () == TYPE_CODE_ENUM)
2306 {
2307 /* If the type is a plain integer, then the access is
2308 straightforward. Otherwise we have to play around a bit
2309 more. */
2310 int len = TYPE_LENGTH (type);
2311 int regno = AARCH64_X0_REGNUM;
2312 ULONGEST tmp;
2313
2314 while (len > 0)
2315 {
2316 /* By using store_unsigned_integer we avoid having to do
2317 anything special for small big-endian values. */
2318 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2319 store_unsigned_integer (valbuf,
2320 (len > X_REGISTER_SIZE
2321 ? X_REGISTER_SIZE : len), byte_order, tmp);
2322 len -= X_REGISTER_SIZE;
2323 valbuf += X_REGISTER_SIZE;
2324 }
2325 }
2326 else
2327 {
2328 /* For a structure or union the behaviour is as if the value had
2329 been stored to word-aligned memory and then loaded into
2330 registers with 64-bit load instruction(s). */
2331 int len = TYPE_LENGTH (type);
2332 int regno = AARCH64_X0_REGNUM;
2333 bfd_byte buf[X_REGISTER_SIZE];
2334
2335 while (len > 0)
2336 {
2337 regs->cooked_read (regno++, buf);
2338 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2339 len -= X_REGISTER_SIZE;
2340 valbuf += X_REGISTER_SIZE;
2341 }
2342 }
2343 }
2344
2345
2346 /* Will a function return an aggregate type in memory or in a
2347 register? Return 0 if an aggregate type can be returned in a
2348 register, 1 if it must be returned in memory. */
2349
2350 static int
2351 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2352 {
2353 type = check_typedef (type);
2354 int elements;
2355 struct type *fundamental_type;
2356
2357 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2358 &fundamental_type))
2359 {
2360 /* v0-v7 are used to return values and one register is allocated
2361 per member. However, an HFA or HVA has at most four members. */
2362 return 0;
2363 }
2364
2365 if (TYPE_LENGTH (type) > 16)
2366 {
2367 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2368 invisible reference. */
2369
2370 return 1;
2371 }
2372
2373 return 0;
2374 }
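/* Worked examples (illustrative): struct { long a, b; } is 16 bytes and
   comes back in X0/X1, while struct { long a, b; char c; } pads to 24
   bytes and is returned via memory with the buffer address in X8.  An
   HFA such as struct { double a, b, c, d; } is 32 bytes yet still
   returns in V0-V3, because the VFP-candidate check above wins.  */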
2375
2376 /* Write into appropriate registers a function return value of type
2377 TYPE, given in virtual format. */
2378
2379 static void
2380 aarch64_store_return_value (struct type *type, struct regcache *regs,
2381 const gdb_byte *valbuf)
2382 {
2383 struct gdbarch *gdbarch = regs->arch ();
2384 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2385 int elements;
2386 struct type *fundamental_type;
2387
2388 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2389 &fundamental_type))
2390 {
2391 int len = TYPE_LENGTH (fundamental_type);
2392
2393 for (int i = 0; i < elements; i++)
2394 {
2395 int regno = AARCH64_V0_REGNUM + i;
2396 /* Enough space for a full vector register. */
2397 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2398 gdb_assert (len <= sizeof (tmpbuf));
2399
2400 aarch64_debug_printf
2401 ("write HFA or HVA return value element %d to %s",
2402 i + 1, gdbarch_register_name (gdbarch, regno));
2403
2404 memcpy (tmpbuf, valbuf,
2405 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2406 regs->cooked_write (regno, tmpbuf);
2407 valbuf += len;
2408 }
2409 }
2410 else if (type->code () == TYPE_CODE_INT
2411 || type->code () == TYPE_CODE_CHAR
2412 || type->code () == TYPE_CODE_BOOL
2413 || type->code () == TYPE_CODE_PTR
2414 || TYPE_IS_REFERENCE (type)
2415 || type->code () == TYPE_CODE_ENUM)
2416 {
2417 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2418 {
2419 /* Values of one word or less are zero/sign-extended and
2420 returned in X0. */
2421 bfd_byte tmpbuf[X_REGISTER_SIZE];
2422 LONGEST val = unpack_long (type, valbuf);
2423
2424 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2425 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2426 }
2427 else
2428 {
2429 /* Integral values greater than one word are stored in
2430 consecutive registers starting with X0. This will always
2431 be a multiple of the register size. */
2432 int len = TYPE_LENGTH (type);
2433 int regno = AARCH64_X0_REGNUM;
2434
2435 while (len > 0)
2436 {
2437 regs->cooked_write (regno++, valbuf);
2438 len -= X_REGISTER_SIZE;
2439 valbuf += X_REGISTER_SIZE;
2440 }
2441 }
2442 }
2443 else
2444 {
2445 /* For a structure or union the behaviour is as if the value had
2446 been stored to word-aligned memory and then loaded into
2447 registers with 64-bit load instruction(s). */
2448 int len = TYPE_LENGTH (type);
2449 int regno = AARCH64_X0_REGNUM;
2450 bfd_byte tmpbuf[X_REGISTER_SIZE];
2451
2452 while (len > 0)
2453 {
2454 memcpy (tmpbuf, valbuf,
2455 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2456 regs->cooked_write (regno++, tmpbuf);
2457 len -= X_REGISTER_SIZE;
2458 valbuf += X_REGISTER_SIZE;
2459 }
2460 }
2461 }
2462
2463 /* Implement the "return_value" gdbarch method. */
2464
2465 static enum return_value_convention
2466 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2467 struct type *valtype, struct regcache *regcache,
2468 gdb_byte *readbuf, const gdb_byte *writebuf)
2469 {
2470
2471 if (valtype->code () == TYPE_CODE_STRUCT
2472 || valtype->code () == TYPE_CODE_UNION
2473 || valtype->code () == TYPE_CODE_ARRAY)
2474 {
2475 if (aarch64_return_in_memory (gdbarch, valtype))
2476 {
2477 aarch64_debug_printf ("return value in memory");
2478 return RETURN_VALUE_STRUCT_CONVENTION;
2479 }
2480 }
2481
2482 if (writebuf)
2483 aarch64_store_return_value (valtype, regcache, writebuf);
2484
2485 if (readbuf)
2486 aarch64_extract_return_value (valtype, regcache, readbuf);
2487
2488 aarch64_debug_printf ("return value in registers");
2489
2490 return RETURN_VALUE_REGISTER_CONVENTION;
2491 }
2492
2493 /* Implement the "get_longjmp_target" gdbarch method. */
2494
2495 static int
2496 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2497 {
2498 CORE_ADDR jb_addr;
2499 gdb_byte buf[X_REGISTER_SIZE];
2500 struct gdbarch *gdbarch = get_frame_arch (frame);
2501 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2502 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2503
2504 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2505
2506 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2507 X_REGISTER_SIZE))
2508 return 0;
2509
2510 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2511 return 1;
2512 }
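/* Illustration: X0 at the longjmp entry point holds the jmp_buf
   pointer, so with jb_elt_size == 8 the saved PC is the 8-byte word at
   jb_addr + tdep->jb_pc * 8.  Where that slot sits is OS-ABI specific,
   which is why jb_pc defaults to -1 until an OS ABI sets it.  */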
2513
2514 /* Implement the "gen_return_address" gdbarch method. */
2515
2516 static void
2517 aarch64_gen_return_address (struct gdbarch *gdbarch,
2518 struct agent_expr *ax, struct axs_value *value,
2519 CORE_ADDR scope)
2520 {
2521 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2522 value->kind = axs_lvalue_register;
2523 value->u.reg = AARCH64_LR_REGNUM;
2524 }
2525 \f
2526
2527 /* Return the pseudo register name corresponding to register regnum. */
2528
2529 static const char *
2530 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2531 {
2532 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2533
2534 static const char *const q_name[] =
2535 {
2536 "q0", "q1", "q2", "q3",
2537 "q4", "q5", "q6", "q7",
2538 "q8", "q9", "q10", "q11",
2539 "q12", "q13", "q14", "q15",
2540 "q16", "q17", "q18", "q19",
2541 "q20", "q21", "q22", "q23",
2542 "q24", "q25", "q26", "q27",
2543 "q28", "q29", "q30", "q31",
2544 };
2545
2546 static const char *const d_name[] =
2547 {
2548 "d0", "d1", "d2", "d3",
2549 "d4", "d5", "d6", "d7",
2550 "d8", "d9", "d10", "d11",
2551 "d12", "d13", "d14", "d15",
2552 "d16", "d17", "d18", "d19",
2553 "d20", "d21", "d22", "d23",
2554 "d24", "d25", "d26", "d27",
2555 "d28", "d29", "d30", "d31",
2556 };
2557
2558 static const char *const s_name[] =
2559 {
2560 "s0", "s1", "s2", "s3",
2561 "s4", "s5", "s6", "s7",
2562 "s8", "s9", "s10", "s11",
2563 "s12", "s13", "s14", "s15",
2564 "s16", "s17", "s18", "s19",
2565 "s20", "s21", "s22", "s23",
2566 "s24", "s25", "s26", "s27",
2567 "s28", "s29", "s30", "s31",
2568 };
2569
2570 static const char *const h_name[] =
2571 {
2572 "h0", "h1", "h2", "h3",
2573 "h4", "h5", "h6", "h7",
2574 "h8", "h9", "h10", "h11",
2575 "h12", "h13", "h14", "h15",
2576 "h16", "h17", "h18", "h19",
2577 "h20", "h21", "h22", "h23",
2578 "h24", "h25", "h26", "h27",
2579 "h28", "h29", "h30", "h31",
2580 };
2581
2582 static const char *const b_name[] =
2583 {
2584 "b0", "b1", "b2", "b3",
2585 "b4", "b5", "b6", "b7",
2586 "b8", "b9", "b10", "b11",
2587 "b12", "b13", "b14", "b15",
2588 "b16", "b17", "b18", "b19",
2589 "b20", "b21", "b22", "b23",
2590 "b24", "b25", "b26", "b27",
2591 "b28", "b29", "b30", "b31",
2592 };
2593
2594 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2595
2596 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2597 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2598
2599 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2600 return d_name[p_regnum - AARCH64_D0_REGNUM];
2601
2602 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2603 return s_name[p_regnum - AARCH64_S0_REGNUM];
2604
2605 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2606 return h_name[p_regnum - AARCH64_H0_REGNUM];
2607
2608 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2609 return b_name[p_regnum - AARCH64_B0_REGNUM];
2610
2611 if (tdep->has_sve ())
2612 {
2613 static const char *const sve_v_name[] =
2614 {
2615 "v0", "v1", "v2", "v3",
2616 "v4", "v5", "v6", "v7",
2617 "v8", "v9", "v10", "v11",
2618 "v12", "v13", "v14", "v15",
2619 "v16", "v17", "v18", "v19",
2620 "v20", "v21", "v22", "v23",
2621 "v24", "v25", "v26", "v27",
2622 "v28", "v29", "v30", "v31",
2623 };
2624
2625 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2626 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2627 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2628 }
2629
2630 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2631 prevents it from being read by methods such as
2632 mi_cmd_trace_frame_collected. */
2633 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2634 return "";
2635
2636 internal_error (__FILE__, __LINE__,
2637 _("aarch64_pseudo_register_name: bad register number %d"),
2638 p_regnum);
2639 }
2640
2641 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2642
2643 static struct type *
2644 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2645 {
2646 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2647
2648 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2649
2650 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2651 return aarch64_vnq_type (gdbarch);
2652
2653 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2654 return aarch64_vnd_type (gdbarch);
2655
2656 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2657 return aarch64_vns_type (gdbarch);
2658
2659 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2660 return aarch64_vnh_type (gdbarch);
2661
2662 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2663 return aarch64_vnb_type (gdbarch);
2664
2665 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2666 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2667 return aarch64_vnv_type (gdbarch);
2668
2669 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2670 return builtin_type (gdbarch)->builtin_uint64;
2671
2672 internal_error (__FILE__, __LINE__,
2673 _("aarch64_pseudo_register_type: bad register number %d"),
2674 p_regnum);
2675 }
2676
2677 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2678
2679 static int
2680 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2681 struct reggroup *group)
2682 {
2683 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2684
2685 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2686
2687 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2688 return group == all_reggroup || group == vector_reggroup;
2689 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2690 return (group == all_reggroup || group == vector_reggroup
2691 || group == float_reggroup);
2692 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2693 return (group == all_reggroup || group == vector_reggroup
2694 || group == float_reggroup);
2695 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2696 return group == all_reggroup || group == vector_reggroup;
2697 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2698 return group == all_reggroup || group == vector_reggroup;
2699 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2700 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2701 return group == all_reggroup || group == vector_reggroup;
2702 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2703 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2704 return 0;
2705
2706 return group == all_reggroup;
2707 }
2708
2709 /* Helper for aarch64_pseudo_read_value. */
2710
2711 static struct value *
2712 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2713 readable_regcache *regcache, int regnum_offset,
2714 int regsize, struct value *result_value)
2715 {
2716 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2717
2718 /* Enough space for a full vector register. */
2719 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2720 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2721
2722 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2723 mark_value_bytes_unavailable (result_value, 0,
2724 TYPE_LENGTH (value_type (result_value)));
2725 else
2726 memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);
2727
2728 return result_value;
2729 }
2730
2731 /* Implement the "pseudo_register_read_value" gdbarch method. */
2732
2733 static struct value *
2734 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2735 int regnum)
2736 {
2737 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2738 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2739
2740 VALUE_LVAL (result_value) = lval_register;
2741 VALUE_REGNUM (result_value) = regnum;
2742
2743 regnum -= gdbarch_num_regs (gdbarch);
2744
2745 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2746 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2747 regnum - AARCH64_Q0_REGNUM,
2748 Q_REGISTER_SIZE, result_value);
2749
2750 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2751 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2752 regnum - AARCH64_D0_REGNUM,
2753 D_REGISTER_SIZE, result_value);
2754
2755 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2756 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2757 regnum - AARCH64_S0_REGNUM,
2758 S_REGISTER_SIZE, result_value);
2759
2760 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2761 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2762 regnum - AARCH64_H0_REGNUM,
2763 H_REGISTER_SIZE, result_value);
2764
2765 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2766 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2767 regnum - AARCH64_B0_REGNUM,
2768 B_REGISTER_SIZE, result_value);
2769
2770 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2771 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2772 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2773 regnum - AARCH64_SVE_V0_REGNUM,
2774 V_REGISTER_SIZE, result_value);
2775
2776 gdb_assert_not_reached ("regnum out of bounds");
2777 }
2778
2779 /* Helper for aarch64_pseudo_write. */
2780
2781 static void
2782 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2783 int regnum_offset, int regsize, const gdb_byte *buf)
2784 {
2785 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2786
2787 /* Enough space for a full vector register. */
2788 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2789 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2790
2791 /* Ensure the register buffer is zero; we want GDB writes of the
2792 various 'scalar' pseudo registers to behave like architectural
2793 writes: register-width bytes are written and the remainder are set
2794 to zero. */
2795 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2796
2797 memcpy (reg_buf, buf, regsize);
2798 regcache->raw_write (v_regnum, reg_buf);
2799 }
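/* E.g. writing 0x3f800000 (1.0f) to the S5 pseudo register zeroes the
   whole of V5 first and then copies in the 4 bytes, so V5 reads back as
   if the architectural 'fmov s5, #1.0' had executed, which likewise
   clears the upper bits (illustrative note).  */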
2800
2801 /* Implement the "pseudo_register_write" gdbarch method. */
2802
2803 static void
2804 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2805 int regnum, const gdb_byte *buf)
2806 {
2807 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2808 regnum -= gdbarch_num_regs (gdbarch);
2809
2810 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2811 return aarch64_pseudo_write_1 (gdbarch, regcache,
2812 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2813 buf);
2814
2815 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2816 return aarch64_pseudo_write_1 (gdbarch, regcache,
2817 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2818 buf);
2819
2820 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2821 return aarch64_pseudo_write_1 (gdbarch, regcache,
2822 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2823 buf);
2824
2825 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2826 return aarch64_pseudo_write_1 (gdbarch, regcache,
2827 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2828 buf);
2829
2830 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2831 return aarch64_pseudo_write_1 (gdbarch, regcache,
2832 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2833 buf);
2834
2835 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2836 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2837 return aarch64_pseudo_write_1 (gdbarch, regcache,
2838 regnum - AARCH64_SVE_V0_REGNUM,
2839 V_REGISTER_SIZE, buf);
2840
2841 gdb_assert_not_reached ("regnum out of bounds");
2842 }
2843
2844 /* Callback function for user_reg_add. */
2845
2846 static struct value *
2847 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2848 {
2849 const int *reg_p = (const int *) baton;
2850
2851 return value_of_register (*reg_p, frame);
2852 }
2853 \f
2854
2855 /* Implement the "software_single_step" gdbarch method, needed to
2856 single step through atomic sequences on AArch64. */
2857
2858 static std::vector<CORE_ADDR>
2859 aarch64_software_single_step (struct regcache *regcache)
2860 {
2861 struct gdbarch *gdbarch = regcache->arch ();
2862 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2863 const int insn_size = 4;
2864 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2865 CORE_ADDR pc = regcache_read_pc (regcache);
2866 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2867 CORE_ADDR loc = pc;
2868 CORE_ADDR closing_insn = 0;
2869 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2870 byte_order_for_code);
2871 int index;
2872 int insn_count;
2873 int bc_insn_count = 0; /* Conditional branch instruction count. */
2874 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2875 aarch64_inst inst;
2876
2877 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2878 return {};
2879
2880 /* Look for a Load Exclusive instruction which begins the sequence. */
2881 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2882 return {};
2883
2884 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2885 {
2886 loc += insn_size;
2887 insn = read_memory_unsigned_integer (loc, insn_size,
2888 byte_order_for_code);
2889
2890 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2891 return {};
2892 /* Check if the instruction is a conditional branch. */
2893 if (inst.opcode->iclass == condbranch)
2894 {
2895 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2896
2897 if (bc_insn_count >= 1)
2898 return {};
2899
2900 /* It is, so we'll try to set a breakpoint at the destination. */
2901 breaks[1] = loc + inst.operands[0].imm.value;
2902
2903 bc_insn_count++;
2904 last_breakpoint++;
2905 }
2906
2907 /* Look for the Store Exclusive which closes the atomic sequence. */
2908 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2909 {
2910 closing_insn = loc;
2911 break;
2912 }
2913 }
2914
2915 /* We didn't find a closing Store Exclusive instruction, fall back. */
2916 if (!closing_insn)
2917 return {};
2918
2919 /* Insert breakpoint after the end of the atomic sequence. */
2920 breaks[0] = loc + insn_size;
2921
2922 /* Check for duplicated breakpoints, and also check that the second
2923 breakpoint is not within the atomic sequence. */
2924 if (last_breakpoint
2925 && (breaks[1] == breaks[0]
2926 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2927 last_breakpoint = 0;
2928
2929 std::vector<CORE_ADDR> next_pcs;
2930
2931 /* Insert the breakpoint at the end of the sequence, and one at the
2932 destination of the conditional branch, if it exists. */
2933 for (index = 0; index <= last_breakpoint; index++)
2934 next_pcs.push_back (breaks[index]);
2935
2936 return next_pcs;
2937 }
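/* A typical sequence this code steps over (illustrative compiler
   output for an atomic increment):

   retry:
     ldaxr  w1, [x0]        ; load-exclusive opens the sequence
     add    w1, w1, #1
     stlxr  w2, w1, [x0]    ; store-exclusive closes it
     cbnz   w2, retry

   Trapping inside the sequence would clear the exclusive monitor and
   make the store fail every time, so the breakpoint is placed just
   after the store-exclusive instead.  */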
2938
2939 struct aarch64_displaced_step_copy_insn_closure
2940 : public displaced_step_copy_insn_closure
2941 {
2942 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2943 is being displaced stepped. */
2944 bool cond = false;
2945
2946 /* PC adjustment offset after displaced stepping. If 0, then we don't
2947 write the PC back, assuming the PC is already the right address. */
2948 int32_t pc_adjust = 0;
2949 };
2950
2951 /* Data when visiting instructions for displaced stepping. */
2952
2953 struct aarch64_displaced_step_data
2954 {
2955 struct aarch64_insn_data base;
2956
2957 /* The address at which the instruction will be executed. */
2958 CORE_ADDR new_addr;
2959 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2960 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2961 /* Number of instructions in INSN_BUF. */
2962 unsigned insn_count;
2963 /* Registers when doing displaced stepping. */
2964 struct regcache *regs;
2965
2966 aarch64_displaced_step_copy_insn_closure *dsc;
2967 };
2968
2969 /* Implementation of aarch64_insn_visitor method "b". */
2970
2971 static void
2972 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2973 struct aarch64_insn_data *data)
2974 {
2975 struct aarch64_displaced_step_data *dsd
2976 = (struct aarch64_displaced_step_data *) data;
2977 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2978
2979 if (can_encode_int32 (new_offset, 28))
2980 {
2981 /* Emit B rather than BL, because executing BL on a new address
2982 will get the wrong address into LR. In order to avoid this,
2983 we emit B, and update LR if the instruction is BL. */
2984 emit_b (dsd->insn_buf, 0, new_offset);
2985 dsd->insn_count++;
2986 }
2987 else
2988 {
2989 /* Write NOP. */
2990 emit_nop (dsd->insn_buf);
2991 dsd->insn_count++;
2992 dsd->dsc->pc_adjust = offset;
2993 }
2994
2995 if (is_bl)
2996 {
2997 /* Update LR. */
2998 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2999 data->insn_addr + 4);
3000 }
3001 }
3002
3003 /* Implementation of aarch64_insn_visitor method "b_cond". */
3004
3005 static void
3006 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3007 struct aarch64_insn_data *data)
3008 {
3009 struct aarch64_displaced_step_data *dsd
3010 = (struct aarch64_displaced_step_data *) data;
3011
3012 /* GDB has to fix up the PC after displaced stepping this instruction
3013 differently according to whether the condition is true or false.
3014 Instead of checking COND against the condition flags, we can emit
3015 the following instructions, and GDB can tell how to fix up the PC
3016 from the resulting PC value.
3017
3018 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3019 INSN1 ;
3020 TAKEN:
3021 INSN2
3022 */
3023
3024 emit_bcond (dsd->insn_buf, cond, 8);
3025 dsd->dsc->cond = true;
3026 dsd->dsc->pc_adjust = offset;
3027 dsd->insn_count = 1;
3028 }
3029
3030 /* Construct an aarch64_register dynamically. If we know the register
3031 statically, we should make it a global as above instead of using this
3032 helper function. */
3033
3034 static struct aarch64_register
3035 aarch64_register (unsigned num, int is64)
3036 {
3037 return (struct aarch64_register) { num, is64 };
3038 }
3039
3040 /* Implementation of aarch64_insn_visitor method "cb". */
3041
3042 static void
3043 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3044 const unsigned rn, int is64,
3045 struct aarch64_insn_data *data)
3046 {
3047 struct aarch64_displaced_step_data *dsd
3048 = (struct aarch64_displaced_step_data *) data;
3049
3050 /* The offset is out of range for a compare and branch
3051 instruction. We can use the following instructions instead:
3052
3053 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3054 INSN1 ;
3055 TAKEN:
3056 INSN2
3057 */
3058 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3059 dsd->insn_count = 1;
3060 dsd->dsc->cond = true;
3061 dsd->dsc->pc_adjust = offset;
3062 }
3063
3064 /* Implementation of aarch64_insn_visitor method "tb". */
3065
3066 static void
3067 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3068 const unsigned rt, unsigned bit,
3069 struct aarch64_insn_data *data)
3070 {
3071 struct aarch64_displaced_step_data *dsd
3072 = (struct aarch64_displaced_step_data *) data;
3073
3074 /* The offset is out of range for a test bit and branch
3075 instruction. We can use the following instructions instead:
3076
3077 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3078 INSN1 ;
3079 TAKEN:
3080 INSN2
3081
3082 */
3083 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3084 dsd->insn_count = 1;
3085 dsd->dsc->cond = true;
3086 dsd->dsc->pc_adjust = offset;
3087 }
3088
3089 /* Implementation of aarch64_insn_visitor method "adr". */
3090
3091 static void
3092 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3093 const int is_adrp, struct aarch64_insn_data *data)
3094 {
3095 struct aarch64_displaced_step_data *dsd
3096 = (struct aarch64_displaced_step_data *) data;
3097 /* We know exactly the address the ADR{P,} instruction will compute.
3098 We can just write it to the destination register. */
3099 CORE_ADDR address = data->insn_addr + offset;
3100
3101 if (is_adrp)
3102 {
3103 /* Clear the lower 12 bits of the offset to get the 4K page. */
3104 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3105 address & ~0xfff);
3106 }
3107 else
3108 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3109 address);
3110
3111 dsd->dsc->pc_adjust = 4;
3112 emit_nop (dsd->insn_buf);
3113 dsd->insn_count = 1;
3114 }
3115
3116 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3117
3118 static void
3119 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3120 const unsigned rt, const int is64,
3121 struct aarch64_insn_data *data)
3122 {
3123 struct aarch64_displaced_step_data *dsd
3124 = (struct aarch64_displaced_step_data *) data;
3125 CORE_ADDR address = data->insn_addr + offset;
3126 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3127
3128 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3129 address);
3130
3131 if (is_sw)
3132 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3133 aarch64_register (rt, 1), zero);
3134 else
3135 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3136 aarch64_register (rt, 1), zero);
3137
3138 dsd->dsc->pc_adjust = 4;
3139 }
3140
3141 /* Implementation of aarch64_insn_visitor method "others". */
3142
3143 static void
3144 aarch64_displaced_step_others (const uint32_t insn,
3145 struct aarch64_insn_data *data)
3146 {
3147 struct aarch64_displaced_step_data *dsd
3148 = (struct aarch64_displaced_step_data *) data;
3149
3150 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3151 if (masked_insn == BLR)
3152 {
3153 /* Emit a BR to the same register and then update LR to the original
3154 address (similar to aarch64_displaced_step_b). */
3155 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3156 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3157 data->insn_addr + 4);
3158 }
3159 else
3160 aarch64_emit_insn (dsd->insn_buf, insn);
3161 dsd->insn_count = 1;
3162
3163 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3164 dsd->dsc->pc_adjust = 0;
3165 else
3166 dsd->dsc->pc_adjust = 4;
3167 }
3168
3169 static const struct aarch64_insn_visitor visitor =
3170 {
3171 aarch64_displaced_step_b,
3172 aarch64_displaced_step_b_cond,
3173 aarch64_displaced_step_cb,
3174 aarch64_displaced_step_tb,
3175 aarch64_displaced_step_adr,
3176 aarch64_displaced_step_ldr_literal,
3177 aarch64_displaced_step_others,
3178 };
3179
3180 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3181
3182 displaced_step_copy_insn_closure_up
3183 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3184 CORE_ADDR from, CORE_ADDR to,
3185 struct regcache *regs)
3186 {
3187 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3188 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3189 struct aarch64_displaced_step_data dsd;
3190 aarch64_inst inst;
3191
3192 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3193 return NULL;
3194
3195 /* Look for a Load Exclusive instruction which begins the sequence. */
3196 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3197 {
3198 /* We can't displaced step atomic sequences. */
3199 return NULL;
3200 }
3201
3202 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3203 (new aarch64_displaced_step_copy_insn_closure);
3204 dsd.base.insn_addr = from;
3205 dsd.new_addr = to;
3206 dsd.regs = regs;
3207 dsd.dsc = dsc.get ();
3208 dsd.insn_count = 0;
3209 aarch64_relocate_instruction (insn, &visitor,
3210 (struct aarch64_insn_data *) &dsd);
3211 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3212
3213 if (dsd.insn_count != 0)
3214 {
3215 int i;
3216
3217 /* Instruction can be relocated to scratch pad. Copy
3218 relocated instruction(s) there. */
3219 for (i = 0; i < dsd.insn_count; i++)
3220 {
3221 displaced_debug_printf ("writing insn %.8x at %s",
3222 dsd.insn_buf[i],
3223 paddress (gdbarch, to + i * 4));
3224
3225 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3226 (ULONGEST) dsd.insn_buf[i]);
3227 }
3228 }
3229 else
3230 {
3231 dsc = NULL;
3232 }
3233
3234 /* This is a workaround for a problem with g++ 4.8. */
3235 return displaced_step_copy_insn_closure_up (dsc.release ());
3236 }
3237
3238 /* Implement the "displaced_step_fixup" gdbarch method. */
3239
3240 void
3241 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3242 struct displaced_step_copy_insn_closure *dsc_,
3243 CORE_ADDR from, CORE_ADDR to,
3244 struct regcache *regs)
3245 {
3246 aarch64_displaced_step_copy_insn_closure *dsc
3247 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3248
3249 ULONGEST pc;
3250
3251 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3252
3253 displaced_debug_printf ("PC after stepping: %s (was %s).",
3254 paddress (gdbarch, pc), paddress (gdbarch, to));
3255
3256 if (dsc->cond)
3257 {
3258 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3259 dsc->pc_adjust);
3260
3261 if (pc - to == 8)
3262 {
3263 /* Condition is true. */
3264 }
3265 else if (pc - to == 4)
3266 {
3267 /* Condition is false. */
3268 dsc->pc_adjust = 4;
3269 }
3270 else
3271 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3272
3273 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3274 dsc->pc_adjust);
3275 }
3276
3277 displaced_debug_printf ("%s PC by %d",
3278 dsc->pc_adjust ? "adjusting" : "not adjusting",
3279 dsc->pc_adjust);
3280
3281 if (dsc->pc_adjust != 0)
3282 {
3283 /* Make sure the previous instruction was executed (that is, the PC
3284 has changed). If the PC didn't change, then discard the adjustment
3285 offset. Otherwise we may skip an instruction before its execution
3286 took place. */
3287 if ((pc - to) == 0)
3288 {
3289 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3290 dsc->pc_adjust = 0;
3291 }
3292
3293 displaced_debug_printf ("fixup: set PC to %s:%d",
3294 paddress (gdbarch, from), dsc->pc_adjust);
3295
3296 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3297 from + dsc->pc_adjust);
3298 }
3299 }
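/* Worked example for a displaced B.COND (illustrative note): the
   scratch pad holds a single 'B.COND +8'.  If the branch is taken the
   PC stops at TO + 8 and pc_adjust keeps the original branch offset; if
   not taken it stops at TO + 4 and pc_adjust becomes 4.  Either way the
   final PC is FROM + pc_adjust.  */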
3300
3301 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3302
3303 bool
3304 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3305 {
3306 return true;
3307 }
3308
3309 /* Get the correct target description for the given VQ value.
3310 If VQ is zero then it is assumed SVE is not supported.
3311 (It is not possible to set VQ to zero on an SVE system).
3312
3313 MTE_P indicates the presence of the Memory Tagging Extension feature. */
3314
3315 const target_desc *
3316 aarch64_read_description (uint64_t vq, bool pauth_p, bool mte_p)
3317 {
3318 if (vq > AARCH64_MAX_SVE_VQ)
3319 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3320 AARCH64_MAX_SVE_VQ);
3321
3322 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p][mte_p];
3323
3324 if (tdesc == NULL)
3325 {
3326 tdesc = aarch64_create_target_description (vq, pauth_p, mte_p);
3327 tdesc_aarch64_list[vq][pauth_p][mte_p] = tdesc;
3328 }
3329
3330 return tdesc;
3331 }
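/* Typical call (illustrative): a target with 256-bit SVE vectors and
   neither pointer authentication nor MTE would request
   aarch64_read_description (sve_vq_from_vl (32), false, false), i.e.
   vq == 2; repeated identical calls return the cached descriptor.  */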
3332
3333 /* Return the VQ used when creating the target description TDESC. */
3334
3335 static uint64_t
3336 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3337 {
3338 const struct tdesc_feature *feature_sve;
3339
3340 if (!tdesc_has_registers (tdesc))
3341 return 0;
3342
3343 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3344
3345 if (feature_sve == nullptr)
3346 return 0;
3347
3348 uint64_t vl = tdesc_register_bitsize (feature_sve,
3349 aarch64_sve_register_names[0]) / 8;
3350 return sve_vq_from_vl (vl);
3351 }
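/* E.g. a description whose z0 register is 512 bits wide yields
   vl = 512 / 8 = 64 bytes and hence vq = 64 / 16 = 4, one VQ being a
   128-bit quadword (illustrative arithmetic).  */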
3352
3353 /* Add all the expected register sets into GDBARCH. */
3354
3355 static void
3356 aarch64_add_reggroups (struct gdbarch *gdbarch)
3357 {
3358 reggroup_add (gdbarch, general_reggroup);
3359 reggroup_add (gdbarch, float_reggroup);
3360 reggroup_add (gdbarch, system_reggroup);
3361 reggroup_add (gdbarch, vector_reggroup);
3362 reggroup_add (gdbarch, all_reggroup);
3363 reggroup_add (gdbarch, save_reggroup);
3364 reggroup_add (gdbarch, restore_reggroup);
3365 }
3366
3367 /* Implement the "cannot_store_register" gdbarch method. */
3368
3369 static int
3370 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3371 {
3372 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
3373
3374 if (!tdep->has_pauth ())
3375 return 0;
3376
3377 /* Pointer authentication registers are read-only. */
3378 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3379 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3380 }
3381
3382 /* Initialize the current architecture based on INFO. If possible,
3383 re-use an architecture from ARCHES, which is a list of
3384 architectures already created during this debugging session.
3385
3386 Called e.g. at program startup, when reading a core file, and when
3387 reading a binary file. */
3388
3389 static struct gdbarch *
3390 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3391 {
3392 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3393 const struct tdesc_feature *feature_pauth;
3394 bool valid_p = true;
3395 int i, num_regs = 0, num_pseudo_regs = 0;
3396 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3397 int first_mte_regnum = -1;
3398
3399 /* Use the vector length passed via the target info. Here -1 is used for no
3400 SVE, and 0 is unset. If unset then use the vector length from the existing
3401 tdesc. */
3402 uint64_t vq = 0;
3403 if (info.id == (int *) -1)
3404 vq = 0;
3405 else if (info.id != 0)
3406 vq = (uint64_t) info.id;
3407 else
3408 vq = aarch64_get_tdesc_vq (info.target_desc);
3409
3410 if (vq > AARCH64_MAX_SVE_VQ)
3411 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3412 pulongest (vq), AARCH64_MAX_SVE_VQ);
3413
3414 /* If there is already a candidate, use it. */
3415 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3416 best_arch != nullptr;
3417 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3418 {
3419 aarch64_gdbarch_tdep *tdep
3420 = (aarch64_gdbarch_tdep *) gdbarch_tdep (best_arch->gdbarch);
3421 if (tdep && tdep->vq == vq)
3422 return best_arch->gdbarch;
3423 }
3424
3425 /* Ensure we always have a target descriptor, and that it is for the given VQ
3426 value. */
3427 const struct target_desc *tdesc = info.target_desc;
3428 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3429 tdesc = aarch64_read_description (vq, false, false);
3430 gdb_assert (tdesc);
3431
3432 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3433 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3434 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3435 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3436 const struct tdesc_feature *feature_mte
3437 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
3438
3439 if (feature_core == nullptr)
3440 return nullptr;
3441
3442 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3443
3444 /* Validate the description provides the mandatory core R registers
3445 and allocate their numbers. */
3446 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3447 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3448 AARCH64_X0_REGNUM + i,
3449 aarch64_r_register_names[i]);
3450
3451 num_regs = AARCH64_X0_REGNUM + i;
3452
3453 /* Add the V registers. */
3454 if (feature_fpu != nullptr)
3455 {
3456 if (feature_sve != nullptr)
3457 error (_("Program contains both fpu and SVE features."));
3458
3459 /* Validate the description provides the mandatory V registers
3460 and allocate their numbers. */
3461 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3462 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3463 AARCH64_V0_REGNUM + i,
3464 aarch64_v_register_names[i]);
3465
3466 num_regs = AARCH64_V0_REGNUM + i;
3467 }
3468
3469 /* Add the SVE registers. */
3470 if (feature_sve != nullptr)
3471 {
3472 /* Validate the description provides the mandatory SVE registers
3473 and allocate their numbers. */
3474 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3475 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3476 AARCH64_SVE_Z0_REGNUM + i,
3477 aarch64_sve_register_names[i]);
3478
3479 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3480 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3481 }
3482
3483 if (feature_fpu != nullptr || feature_sve != nullptr)
3484 {
3485 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3486 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3487 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3488 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3489 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3490 }
3491
3492 /* Add the pauth registers. */
3493 if (feature_pauth != NULL)
3494 {
3495 first_pauth_regnum = num_regs;
3496 pauth_ra_state_offset = num_pseudo_regs;
3497 /* Validate the descriptor provides the mandatory PAUTH registers and
3498 allocate their numbers. */
3499 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3500 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3501 first_pauth_regnum + i,
3502 aarch64_pauth_register_names[i]);
3503
3504 num_regs += i;
3505 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3506 }
3507
3508 /* Add the MTE registers. */
3509 if (feature_mte != NULL)
3510 {
3511 first_mte_regnum = num_regs;
3512 /* Validate the descriptor provides the mandatory MTE registers and
3513 allocate their numbers. */
3514 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3515 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3516 first_mte_regnum + i,
3517 aarch64_mte_register_names[i]);
3518
3519 num_regs += i;
3520 }
3521
3522 if (!valid_p)
3523 return nullptr;
3524
3525 /* AArch64 code is always little-endian. */
3526 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3527
3528 aarch64_gdbarch_tdep *tdep = new aarch64_gdbarch_tdep;
3529 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3530
3531 /* This should be low enough for everything. */
3532 tdep->lowest_pc = 0x20;
3533 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3534 tdep->jb_elt_size = 8;
3535 tdep->vq = vq;
3536 tdep->pauth_reg_base = first_pauth_regnum;
3537 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3538 : pauth_ra_state_offset + num_regs;
3539 tdep->mte_reg_base = first_mte_regnum;
3540
3541 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3542 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3543
3544 /* Advance PC across function entry code. */
3545 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3546
3547 /* The stack grows downward. */
3548 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3549
3550 /* Breakpoint manipulation. */
3551 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3552 aarch64_breakpoint::kind_from_pc);
3553 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3554 aarch64_breakpoint::bp_from_kind);
3555 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3556 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3557
3558 /* Information about registers, etc. */
3559 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3560 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3561 set_gdbarch_num_regs (gdbarch, num_regs);
3562
3563 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3564 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3565 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3566 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3567 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3568 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3569 aarch64_pseudo_register_reggroup_p);
3570 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3571
3572 /* ABI */
3573 set_gdbarch_short_bit (gdbarch, 16);
3574 set_gdbarch_int_bit (gdbarch, 32);
3575 set_gdbarch_float_bit (gdbarch, 32);
3576 set_gdbarch_double_bit (gdbarch, 64);
3577 set_gdbarch_long_double_bit (gdbarch, 128);
3578 set_gdbarch_long_bit (gdbarch, 64);
3579 set_gdbarch_long_long_bit (gdbarch, 64);
3580 set_gdbarch_ptr_bit (gdbarch, 64);
3581 set_gdbarch_char_signed (gdbarch, 0);
3582 set_gdbarch_wchar_signed (gdbarch, 0);
3583 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3584 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3585 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3586 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3587
3588 /* Internal <-> external register number maps. */
3589 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3590
3591 /* Returning results. */
3592 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3593
3594 /* Disassembly. */
3595 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3596
3597 /* Virtual tables. */
3598 set_gdbarch_vbit_in_delta (gdbarch, 1);
3599
3600 /* Register architecture. */
3601 aarch64_add_reggroups (gdbarch);
3602
3603 /* Hook in the ABI-specific overrides, if they have been registered. */
3604 info.target_desc = tdesc;
3605 info.tdesc_data = tdesc_data.get ();
3606 gdbarch_init_osabi (info, gdbarch);
3607
3608 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3609 /* Register DWARF CFA vendor handler. */
3610 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3611 aarch64_execute_dwarf_cfa_vendor_op);
3612
3613 /* Permanent/Program breakpoint handling. */
3614 set_gdbarch_program_breakpoint_here_p (gdbarch,
3615 aarch64_program_breakpoint_here_p);
3616
3617 /* Add some default predicates. */
3618 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3619 dwarf2_append_unwinders (gdbarch);
3620 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
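
/* The unwinders are tried in the order in which they were appended:
   the stub unwinder first (for addresses without symbol or debug
   information), then the DWARF CFI unwinders, and finally the
   heuristic prologue analyzer as a fallback.  */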
3621
3622 frame_base_set_default (gdbarch, &aarch64_normal_base);
3623
3624 /* Now that we have tuned the configuration, set a few final things
3625 based on what the OS ABI has told us.  */
3626
3627 if (tdep->jb_pc >= 0)
3628 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3629
3630 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3631
3632 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3633
3634 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3635
3636 /* Add standard register aliases. */
3637 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3638 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3639 value_of_aarch64_user_reg,
3640 &aarch64_register_aliases[i].regnum);
3641
3642 register_aarch64_ravenscar_ops (gdbarch);
3643
3644 return gdbarch;
3645 }
3646
3647 static void
3648 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3649 {
3650 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
3651
3652 if (tdep == NULL)
3653 return;
3654
3655 fprintf_filtered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
3656 paddress (gdbarch, tdep->lowest_pc));
3657 }
3658
3659 #if GDB_SELF_TEST
3660 namespace selftests
3661 {
3662 static void aarch64_process_record_test (void);
3663 }
3664 #endif
3665
3666 void _initialize_aarch64_tdep ();
3667 void
3668 _initialize_aarch64_tdep ()
3669 {
3670 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3671 aarch64_dump_tdep);
3672
3673 /* Debug this file's internals. */
3674 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3675 Set AArch64 debugging."), _("\
3676 Show AArch64 debugging."), _("\
3677 When on, AArch64 specific debugging is enabled."),
3678 NULL,
3679 show_aarch64_debug,
3680 &setdebuglist, &showdebuglist);
3681
3682 #if GDB_SELF_TEST
3683 selftests::register_test ("aarch64-analyze-prologue",
3684 selftests::aarch64_analyze_prologue_test);
3685 selftests::register_test ("aarch64-process-record",
3686 selftests::aarch64_process_record_test);
3687 #endif
3688 }
3689
3690 /* AArch64 process record-replay related structures, defines etc. */
3691
3692 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3693 do \
3694 { \
3695 unsigned int reg_len = LENGTH; \
3696 if (reg_len) \
3697 { \
3698 REGS = XNEWVEC (uint32_t, reg_len); \
3699 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3700 } \
3701 } \
3702 while (0)
3703
3704 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3705 do \
3706 { \
3707 unsigned int mem_len = LENGTH; \
3708 if (mem_len) \
3709 { \
3710 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3711 memcpy (MEMS, &RECORD_BUF[0], \
3712 sizeof (struct aarch64_mem_r) * LENGTH); \
3713 } \
3714 } \
3715 while (0)
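
/* A minimal usage sketch for the two macros above (illustration only,
   never compiled): record one register and one 8-byte memory range.
   The buffer layouts match what the record handlers below build.  */
#if 0
{
  uint32_t reg_buf[1] = { AARCH64_CPSR_REGNUM };
  uint64_t mem_buf[2] = { 8 /* len */, 0x1000 /* addr */ };
  uint32_t *regs = NULL;
  struct aarch64_mem_r *mems = NULL;

  REG_ALLOC (regs, 1, reg_buf);	/* regs now owns a heap copy.  */
  MEM_ALLOC (mems, 1, mem_buf);	/* One {len, addr} record.  */

  /* ... consume regs/mems ...  */
  xfree (regs);
  xfree (mems);
}
#endif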
3716
3717 /* AArch64 record/replay structures and enumerations. */
3718
3719 struct aarch64_mem_r
3720 {
3721 uint64_t len; /* Record length. */
3722 uint64_t addr; /* Memory address. */
3723 };
3724
3725 enum aarch64_record_result
3726 {
3727 AARCH64_RECORD_SUCCESS,
3728 AARCH64_RECORD_UNSUPPORTED,
3729 AARCH64_RECORD_UNKNOWN
3730 };
3731
3732 typedef struct insn_decode_record_t
3733 {
3734 struct gdbarch *gdbarch;
3735 struct regcache *regcache;
3736 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3737 uint32_t aarch64_insn; /* Insn to be recorded. */
3738 uint32_t mem_rec_count; /* Count of memory records. */
3739 uint32_t reg_rec_count; /* Count of register records. */
3740 uint32_t *aarch64_regs; /* Registers to be recorded. */
3741 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3742 } insn_decode_record;
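
/* Typical lifecycle of an insn_decode_record (see aarch64_process_record
   below): zero-initialize it, fill in gdbarch, regcache, this_addr and
   aarch64_insn, pass it to aarch64_record_decode_insn_handler, consume
   aarch64_regs and aarch64_mems, then release both with
   deallocate_reg_mem.  */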
3743
3744 /* Record handler for data processing - register instructions. */
3745
3746 static unsigned int
3747 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3748 {
3749 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3750 uint32_t record_buf[4];
3751
3752 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3753 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3754 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3755
3756 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3757 {
3758 uint8_t setflags;
3759
3760 /* Logical (shifted register). */
3761 if (insn_bits24_27 == 0x0a)
3762 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3763 /* Add/subtract. */
3764 else if (insn_bits24_27 == 0x0b)
3765 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3766 else
3767 return AARCH64_RECORD_UNKNOWN;
3768
3769 record_buf[0] = reg_rd;
3770 aarch64_insn_r->reg_rec_count = 1;
3771 if (setflags)
3772 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3773 }
3774 else
3775 {
3776 if (insn_bits24_27 == 0x0b)
3777 {
3778 /* Data-processing (3 source). */
3779 record_buf[0] = reg_rd;
3780 aarch64_insn_r->reg_rec_count = 1;
3781 }
3782 else if (insn_bits24_27 == 0x0a)
3783 {
3784 if (insn_bits21_23 == 0x00)
3785 {
3786 /* Add/subtract (with carry). */
3787 record_buf[0] = reg_rd;
3788 aarch64_insn_r->reg_rec_count = 1;
3789 if (bit (aarch64_insn_r->aarch64_insn, 29))
3790 {
3791 record_buf[1] = AARCH64_CPSR_REGNUM;
3792 aarch64_insn_r->reg_rec_count = 2;
3793 }
3794 }
3795 else if (insn_bits21_23 == 0x02)
3796 {
3797 /* Conditional compare (register) and conditional compare
3798 (immediate) instructions. */
3799 record_buf[0] = AARCH64_CPSR_REGNUM;
3800 aarch64_insn_r->reg_rec_count = 1;
3801 }
3802 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3803 {
3804 /* Conditional select. */
3805 /* Data-processing (2 source). */
3806 /* Data-processing (1 source). */
3807 record_buf[0] = reg_rd;
3808 aarch64_insn_r->reg_rec_count = 1;
3809 }
3810 else
3811 return AARCH64_RECORD_UNKNOWN;
3812 }
3813 }
3814
3815 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3816 record_buf);
3817 return AARCH64_RECORD_SUCCESS;
3818 }
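
/* Worked example (a sketch): ADDS x0, x1, x2 encodes as 0xab020020.
   Bit 28 is clear and bits 24-27 are 0x0b (add/subtract, shifted
   register) with bit 29 (S) set, so the handler above records rd (x0)
   plus AARCH64_CPSR_REGNUM for the flags.  */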
3819
3820 /* Record handler for data processing - immediate instructions. */
3821
3822 static unsigned int
3823 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3824 {
3825 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3826 uint32_t record_buf[4];
3827
3828 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3829 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3830 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3831
3832 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3833 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3834 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3835 {
3836 record_buf[0] = reg_rd;
3837 aarch64_insn_r->reg_rec_count = 1;
3838 }
3839 else if (insn_bits24_27 == 0x01)
3840 {
3841 /* Add/Subtract (immediate). */
3842 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3843 record_buf[0] = reg_rd;
3844 aarch64_insn_r->reg_rec_count = 1;
3845 if (setflags)
3846 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3847 }
3848 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3849 {
3850 /* Logical (immediate). */
3851 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3852 record_buf[0] = reg_rd;
3853 aarch64_insn_r->reg_rec_count = 1;
3854 if (setflags)
3855 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3856 }
3857 else
3858 return AARCH64_RECORD_UNKNOWN;
3859
3860 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3861 record_buf);
3862 return AARCH64_RECORD_SUCCESS;
3863 }
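
/* Worked example (a sketch): ADDS x0, x1, #1 encodes as 0xb1000420.
   Bits 24-27 are 0x01 (add/subtract immediate) and bit 29 (S) is set,
   so both rd (x0) and AARCH64_CPSR_REGNUM are recorded.  */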
3864
3865 /* Record handler for branch, exception generation and system instructions. */
3866
3867 static unsigned int
3868 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3869 {
3871 aarch64_gdbarch_tdep *tdep
3872 = (aarch64_gdbarch_tdep *) gdbarch_tdep (aarch64_insn_r->gdbarch);
3873 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3874 uint32_t record_buf[4];
3875
3876 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3877 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3878 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3879
3880 if (insn_bits28_31 == 0x0d)
3881 {
3882 /* Exception generation instructions. */
3883 if (insn_bits24_27 == 0x04)
3884 {
3885 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3886 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3887 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3888 {
3889 ULONGEST svc_number;
3890
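/* On AArch64 the syscall number is conventionally passed in x8
   (regnum 8); the OS-specific hook recorded in tdep does the rest.  */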
3891 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3892 &svc_number);
3893 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3894 svc_number);
3895 }
3896 else
3897 return AARCH64_RECORD_UNSUPPORTED;
3898 }
3899 /* System instructions. */
3900 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3901 {
3902 uint32_t reg_rt, reg_crn;
3903
3904 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3905 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3906
3907 /* Record Rt in case of SYSL and MRS instructions.  */
3908 if (bit (aarch64_insn_r->aarch64_insn, 21))
3909 {
3910 record_buf[0] = reg_rt;
3911 aarch64_insn_r->reg_rec_count = 1;
3912 }
3913 /* Record CPSR for HINT and MSR (immediate) instructions.  */
3914 else if (reg_crn == 0x02 || reg_crn == 0x04)
3915 {
3916 record_buf[0] = AARCH64_CPSR_REGNUM;
3917 aarch64_insn_r->reg_rec_count = 1;
3918 }
3919 }
3920 /* Unconditional branch (register). */
3921 else if ((insn_bits24_27 & 0x0e) == 0x06)
3922 {
3923 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3924 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3925 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3926 }
3927 else
3928 return AARCH64_RECORD_UNKNOWN;
3929 }
3930 /* Unconditional branch (immediate). */
3931 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3932 {
3933 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3934 if (bit (aarch64_insn_r->aarch64_insn, 31))
3935 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3936 }
3937 else
3938 /* Compare & branch (immediate), Test & branch (immediate) and
3939 Conditional branch (immediate). */
3940 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3941
3942 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3943 record_buf);
3944 return AARCH64_RECORD_SUCCESS;
3945 }
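
/* Worked example (a sketch): "bl ." encodes as 0x94000000.  Bits 28-31
   are 0x09 and bits 24-27 are 0x04, so this is an unconditional branch
   (immediate); bit 31 (the link bit) is set, so both AARCH64_PC_REGNUM
   and AARCH64_LR_REGNUM are recorded.  */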
3946
3947 /* Record handler for advanced SIMD load and store instructions. */
3948
3949 static unsigned int
3950 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3951 {
3952 CORE_ADDR address;
3953 uint64_t addr_offset = 0;
3954 uint32_t record_buf[24];
3955 uint64_t record_buf_mem[24];
3956 uint32_t reg_rn, reg_rt;
3957 uint32_t reg_index = 0, mem_index = 0;
3958 uint8_t opcode_bits, size_bits;
3959
3960 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3961 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3962 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3963 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3964 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3965
3966 if (record_debug)
3967 debug_printf ("Process record: Advanced SIMD load/store\n");
3968
3969 /* Load/store single structure. */
3970 if (bit (aarch64_insn_r->aarch64_insn, 24))
3971 {
3972 uint8_t sindex, scale, selem, esize, replicate = 0;
3973 scale = opcode_bits >> 2;
3974 selem = ((opcode_bits & 0x02)
3975 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3976 switch (scale)
3977 {
3978 case 1:
3979 if (size_bits & 0x01)
3980 return AARCH64_RECORD_UNKNOWN;
3981 break;
3982 case 2:
3983 if ((size_bits >> 1) & 0x01)
3984 return AARCH64_RECORD_UNKNOWN;
3985 if (size_bits & 0x01)
3986 {
3987 if (!((opcode_bits >> 1) & 0x01))
3988 scale = 3;
3989 else
3990 return AARCH64_RECORD_UNKNOWN;
3991 }
3992 break;
3993 case 3:
3994 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3995 {
3996 scale = size_bits;
3997 replicate = 1;
3998 break;
3999 }
4000 else
4001 return AARCH64_RECORD_UNKNOWN;
4002 default:
4003 break;
4004 }
4005 esize = 8 << scale;
4006 if (replicate)
4007 for (sindex = 0; sindex < selem; sindex++)
4008 {
4009 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4010 reg_rt = (reg_rt + 1) % 32;
4011 }
4012 else
4013 {
4014 for (sindex = 0; sindex < selem; sindex++)
4015 {
4016 if (bit (aarch64_insn_r->aarch64_insn, 22))
4017 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4018 else
4019 {
4020 record_buf_mem[mem_index++] = esize / 8;
4021 record_buf_mem[mem_index++] = address + addr_offset;
4022 }
4023 addr_offset = addr_offset + (esize / 8);
4024 reg_rt = (reg_rt + 1) % 32;
4025 }
4026 }
4027 }
4028 /* Load/store multiple structure. */
4029 else
4030 {
4031 uint8_t selem, esize, rpt, elements;
4032 uint8_t eindex, rindex;
4033
4034 esize = 8 << size_bits;
4035 if (bit (aarch64_insn_r->aarch64_insn, 30))
4036 elements = 128 / esize;
4037 else
4038 elements = 64 / esize;
4039
4040 switch (opcode_bits)
4041 {
4042 /* LD/ST4 (4 registers).  */
4043 case 0:
4044 rpt = 1;
4045 selem = 4;
4046 break;
4047 /* LD/ST1 (4 registers).  */
4048 case 2:
4049 rpt = 4;
4050 selem = 1;
4051 break;
4052 /* LD/ST3 (3 registers).  */
4053 case 4:
4054 rpt = 1;
4055 selem = 3;
4056 break;
4057 /* LD/ST1 (3 registers).  */
4058 case 6:
4059 rpt = 3;
4060 selem = 1;
4061 break;
4062 /* LD/ST1 (1 register).  */
4063 case 7:
4064 rpt = 1;
4065 selem = 1;
4066 break;
4067 /* LD/ST2 (2 registers).  */
4068 case 8:
4069 rpt = 1;
4070 selem = 2;
4071 break;
4072 /* LD/ST1 (2 registers).  */
4073 case 10:
4074 rpt = 2;
4075 selem = 1;
4076 break;
4077 default:
4078 return AARCH64_RECORD_UNSUPPORTED;
4080 }
4081 for (rindex = 0; rindex < rpt; rindex++)
4082 for (eindex = 0; eindex < elements; eindex++)
4083 {
4084 uint8_t reg_tt, sindex;
4085 reg_tt = (reg_rt + rindex) % 32;
4086 for (sindex = 0; sindex < selem; sindex++)
4087 {
4088 if (bit (aarch64_insn_r->aarch64_insn, 22))
4089 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4090 else
4091 {
4092 record_buf_mem[mem_index++] = esize / 8;
4093 record_buf_mem[mem_index++] = address + addr_offset;
4094 }
4095 addr_offset = addr_offset + (esize / 8);
4096 reg_tt = (reg_tt + 1) % 32;
4097 }
4098 }
4099 }
4100
4101 if (bit (aarch64_insn_r->aarch64_insn, 23))
4102 record_buf[reg_index++] = reg_rn;
4103
4104 aarch64_insn_r->reg_rec_count = reg_index;
4105 aarch64_insn_r->mem_rec_count = mem_index / 2;
4106 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4107 record_buf_mem);
4108 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4109 record_buf);
4110 return AARCH64_RECORD_SUCCESS;
4111 }
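
/* Worked example (a sketch): "ld1 {v0.16b}, [x0]" encodes as
   0x4c407000.  Bit 24 is clear (load/store multiple structures),
   opcode_bits is 0x7 (LD/ST1, one register) and bit 22 (L) is set, so
   the destination register v0 is recorded; bit 23 is clear, so no
   writeback of the base register is recorded.  */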
4112
4113 /* Record handler for load and store instructions. */
4114
4115 static unsigned int
4116 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4117 {
4118 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4119 uint8_t insn_bit23, insn_bit21;
4120 uint8_t opc, size_bits, ld_flag, vector_flag;
4121 uint32_t reg_rn, reg_rt, reg_rt2;
4122 uint64_t datasize, offset;
4123 uint32_t record_buf[8];
4124 uint64_t record_buf_mem[8];
4125 CORE_ADDR address;
4126
4127 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4128 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4129 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4130 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4131 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4132 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4133 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4134 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4135 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4136 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4137 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4138
4139 /* Load/store exclusive. */
4140 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4141 {
4142 if (record_debug)
4143 debug_printf ("Process record: load/store exclusive\n");
4144
4145 if (ld_flag)
4146 {
4147 record_buf[0] = reg_rt;
4148 aarch64_insn_r->reg_rec_count = 1;
4149 if (insn_bit21)
4150 {
4151 record_buf[1] = reg_rt2;
4152 aarch64_insn_r->reg_rec_count = 2;
4153 }
4154 }
4155 else
4156 {
4157 if (insn_bit21)
4158 datasize = (8 << size_bits) * 2;
4159 else
4160 datasize = (8 << size_bits);
4161 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4162 &address);
4163 record_buf_mem[0] = datasize / 8;
4164 record_buf_mem[1] = address;
4165 aarch64_insn_r->mem_rec_count = 1;
4166 if (!insn_bit23)
4167 {
4168 /* Save register rs. */
4169 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4170 aarch64_insn_r->reg_rec_count = 1;
4171 }
4172 }
4173 }
4174 /* Load register (literal) instructions.  */
4175 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4176 {
4177 if (record_debug)
4178 debug_printf ("Process record: load register (literal)\n");
4179 if (vector_flag)
4180 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4181 else
4182 record_buf[0] = reg_rt;
4183 aarch64_insn_r->reg_rec_count = 1;
4184 }
4185 /* All types of load/store pair instructions.  */
4186 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4187 {
4188 if (record_debug)
4189 debug_printf ("Process record: load/store pair\n");
4190
4191 if (ld_flag)
4192 {
4193 if (vector_flag)
4194 {
4195 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4196 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4197 }
4198 else
4199 {
4200 record_buf[0] = reg_rt;
4201 record_buf[1] = reg_rt2;
4202 }
4203 aarch64_insn_r->reg_rec_count = 2;
4204 }
4205 else
4206 {
4207 uint16_t imm7_off;
4208 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4209 if (!vector_flag)
4210 size_bits = size_bits >> 1;
4211 datasize = 8 << (2 + size_bits);
4212 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4213 offset = offset << (2 + size_bits);
4214 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4215 &address);
4216 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4217 {
4218 if (imm7_off & 0x40)
4219 address = address - offset;
4220 else
4221 address = address + offset;
4222 }
4223
4224 record_buf_mem[0] = datasize / 8;
4225 record_buf_mem[1] = address;
4226 record_buf_mem[2] = datasize / 8;
4227 record_buf_mem[3] = address + (datasize / 8);
4228 aarch64_insn_r->mem_rec_count = 2;
4229 }
4230 if (bit (aarch64_insn_r->aarch64_insn, 23))
4231 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4232 }
4233 /* Load/store register (unsigned immediate) instructions. */
4234 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4235 {
4236 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4237 if (!(opc >> 1))
4238 {
4239 if (opc & 0x01)
4240 ld_flag = 0x01;
4241 else
4242 ld_flag = 0x0;
4243 }
4244 else
4245 {
4246 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4247 {
4248 /* PRFM (immediate) */
4249 return AARCH64_RECORD_SUCCESS;
4250 }
4251 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4252 {
4253 /* LDRSW (immediate) */
4254 ld_flag = 0x1;
4255 }
4256 else
4257 {
4258 if (opc & 0x01)
4259 ld_flag = 0x01;
4260 else
4261 ld_flag = 0x0;
4262 }
4263 }
4264
4265 if (record_debug)
4266 {
4267 debug_printf ("Process record: load/store (unsigned immediate):"
4268 " size %x V %d opc %x\n", size_bits, vector_flag,
4269 opc);
4270 }
4271
4272 if (!ld_flag)
4273 {
4274 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4275 datasize = 8 << size_bits;
4276 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4277 &address);
4278 offset = offset << size_bits;
4279 address = address + offset;
4280
4281 record_buf_mem[0] = datasize >> 3;
4282 record_buf_mem[1] = address;
4283 aarch64_insn_r->mem_rec_count = 1;
4284 }
4285 else
4286 {
4287 if (vector_flag)
4288 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4289 else
4290 record_buf[0] = reg_rt;
4291 aarch64_insn_r->reg_rec_count = 1;
4292 }
4293 }
4294 /* Load/store register (register offset) instructions. */
4295 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4296 && insn_bits10_11 == 0x02 && insn_bit21)
4297 {
4298 if (record_debug)
4299 debug_printf ("Process record: load/store (register offset)\n");
4300 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4301 if (!(opc >> 1))
4302 if (opc & 0x01)
4303 ld_flag = 0x01;
4304 else
4305 ld_flag = 0x0;
4306 else
4307 if (size_bits != 0x03)
4308 ld_flag = 0x01;
4309 else
4310 return AARCH64_RECORD_UNKNOWN;
4311
4312 if (!ld_flag)
4313 {
4314 ULONGEST reg_rm_val;
4315
4316 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4317 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4318 if (bit (aarch64_insn_r->aarch64_insn, 12))
4319 offset = reg_rm_val << size_bits;
4320 else
4321 offset = reg_rm_val;
4322 datasize = 8 << size_bits;
4323 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4324 &address);
4325 address = address + offset;
4326 record_buf_mem[0] = datasize >> 3;
4327 record_buf_mem[1] = address;
4328 aarch64_insn_r->mem_rec_count = 1;
4329 }
4330 else
4331 {
4332 if (vector_flag)
4333 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4334 else
4335 record_buf[0] = reg_rt;
4336 aarch64_insn_r->reg_rec_count = 1;
4337 }
4338 }
4339 /* Load/store register (immediate and unprivileged) instructions. */
4340 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4341 && !insn_bit21)
4342 {
4343 if (record_debug)
4344 {
4345 debug_printf ("Process record: load/store "
4346 "(immediate and unprivileged)\n");
4347 }
4348 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4349 if (!(opc >> 1))
4350 if (opc & 0x01)
4351 ld_flag = 0x01;
4352 else
4353 ld_flag = 0x0;
4354 else
4355 if (size_bits != 0x03)
4356 ld_flag = 0x01;
4357 else
4358 return AARCH64_RECORD_UNKNOWN;
4359
4360 if (!ld_flag)
4361 {
4362 uint16_t imm9_off;
4363 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4364 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4365 datasize = 8 << size_bits;
4366 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4367 &address);
4368 if (insn_bits10_11 != 0x01)
4369 {
4370 if (imm9_off & 0x0100)
4371 address = address - offset;
4372 else
4373 address = address + offset;
4374 }
4375 record_buf_mem[0] = datasize >> 3;
4376 record_buf_mem[1] = address;
4377 aarch64_insn_r->mem_rec_count = 1;
4378 }
4379 else
4380 {
4381 if (vector_flag)
4382 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4383 else
4384 record_buf[0] = reg_rt;
4385 aarch64_insn_r->reg_rec_count = 1;
4386 }
4387 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4388 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4389 }
4390 /* Advanced SIMD load/store instructions. */
4391 else
4392 return aarch64_record_asimd_load_store (aarch64_insn_r);
4393
4394 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4395 record_buf_mem);
4396 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4397 record_buf);
4398 return AARCH64_RECORD_SUCCESS;
4399 }
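
/* Worked example (a sketch): "str x1, [x2]" encodes as 0xf9000041.
   That hits the unsigned-immediate case above with opc == 0 (a store),
   so the handler reads x2 for the base address and records an 8-byte
   memory range at that address rather than any register.  */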
4400
4401 /* Record handler for data processing SIMD and floating point instructions. */
4402
4403 static unsigned int
4404 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4405 {
4406 uint8_t insn_bit21, opcode, rmode, reg_rd;
4407 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4408 uint8_t insn_bits11_14;
4409 uint32_t record_buf[2];
4410
4411 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4412 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4413 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4414 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4415 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4416 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4417 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4418 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4419 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4420
4421 if (record_debug)
4422 debug_printf ("Process record: data processing SIMD/FP: ");
4423
4424 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4425 {
4426 /* Floating point - fixed point conversion instructions. */
4427 if (!insn_bit21)
4428 {
4429 if (record_debug)
4430 debug_printf ("FP - fixed point conversion");
4431
4432 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4433 record_buf[0] = reg_rd;
4434 else
4435 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4436 }
4437 /* Floating point - conditional compare instructions. */
4438 else if (insn_bits10_11 == 0x01)
4439 {
4440 if (record_debug)
4441 debug_printf ("FP - conditional compare");
4442
4443 record_buf[0] = AARCH64_CPSR_REGNUM;
4444 }
4445 /* Floating point - data processing (2-source) and
4446 conditional select instructions. */
4447 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4448 {
4449 if (record_debug)
4450 debug_printf ("FP - DP (2-source)");
4451
4452 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4453 }
4454 else if (insn_bits10_11 == 0x00)
4455 {
4456 /* Floating point - immediate instructions. */
4457 if ((insn_bits12_15 & 0x01) == 0x01
4458 || (insn_bits12_15 & 0x07) == 0x04)
4459 {
4460 if (record_debug)
4461 debug_printf ("FP - immediate");
4462 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4463 }
4464 /* Floating point - compare instructions. */
4465 else if ((insn_bits12_15 & 0x03) == 0x02)
4466 {
4467 if (record_debug)
4468 debug_printf ("FP - compare");
4469 record_buf[0] = AARCH64_CPSR_REGNUM;
4470 }
4471 /* Floating point - integer conversions instructions. */
4472 else if (insn_bits12_15 == 0x00)
4473 {
4474 /* Convert float to integer instruction. */
4475 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4476 {
4477 if (record_debug)
4478 debug_printf ("float to int conversion");
4479
4480 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4481 }
4482 /* Convert integer to float instruction. */
4483 else if ((opcode >> 1) == 0x01 && !rmode)
4484 {
4485 if (record_debug)
4486 debug_printf ("int to float conversion");
4487
4488 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4489 }
4490 /* Move float to integer instruction. */
4491 else if ((opcode >> 1) == 0x03)
4492 {
4493 if (record_debug)
4494 debug_printf ("move float to int");
4495
4496 if (!(opcode & 0x01))
4497 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4498 else
4499 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4500 }
4501 else
4502 return AARCH64_RECORD_UNKNOWN;
4503 }
4504 else
4505 return AARCH64_RECORD_UNKNOWN;
4506 }
4507 else
4508 return AARCH64_RECORD_UNKNOWN;
4509 }
4510 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4511 {
4512 if (record_debug)
4513 debug_printf ("SIMD copy");
4514
4515 /* Advanced SIMD copy instructions. */
4516 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4517 && !bit (aarch64_insn_r->aarch64_insn, 15)
4518 && bit (aarch64_insn_r->aarch64_insn, 10))
4519 {
4520 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4521 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4522 else
4523 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4524 }
4525 else
4526 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4527 }
4528 /* All remaining floating point or advanced SIMD instructions. */
4529 else
4530 {
4531 if (record_debug)
4532 debug_printf ("all remain");
4533
4534 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4535 }
4536
4537 if (record_debug)
4538 debug_printf ("\n");
4539
4540 /* Record the V/X register. */
4541 aarch64_insn_r->reg_rec_count++;
4542
4543 /* Some of these instructions may set bits in the FPSR, so record it
4544 too. */
4545 record_buf[1] = AARCH64_FPSR_REGNUM;
4546 aarch64_insn_r->reg_rec_count++;
4547
4548 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4549 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4550 record_buf);
4551 return AARCH64_RECORD_SUCCESS;
4552 }
4553
4554 /* Decode the type of the instruction and invoke the matching record handler.  */
4555
4556 static unsigned int
4557 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4558 {
4559 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4560
4561 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4562 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4563 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4564 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4565
4566 /* Data processing - immediate instructions. */
4567 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4568 return aarch64_record_data_proc_imm (aarch64_insn_r);
4569
4570 /* Branch, exception generation and system instructions. */
4571 if (ins_bit26 && !ins_bit27 && ins_bit28)
4572 return aarch64_record_branch_except_sys (aarch64_insn_r);
4573
4574 /* Load and store instructions. */
4575 if (!ins_bit25 && ins_bit27)
4576 return aarch64_record_load_store (aarch64_insn_r);
4577
4578 /* Data processing - register instructions. */
4579 if (ins_bit25 && !ins_bit26 && ins_bit27)
4580 return aarch64_record_data_proc_reg (aarch64_insn_r);
4581
4582 /* Data processing - SIMD and floating point instructions. */
4583 if (ins_bit25 && ins_bit26 && ins_bit27)
4584 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4585
4586 return AARCH64_RECORD_UNSUPPORTED;
4587 }
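
/* A compact view of the dispatch above, keyed on bits 25-28 of the
   instruction (x = don't care):

     bit28 bit27 bit26 bit25  group
       1     0     0     x    data processing - immediate
       1     0     1     x    branch, exception and system
       x     1     x     0    load and store
       x     1     0     1    data processing - register
       x     1     1     1    data processing - SIMD and FP  */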
4588
4589 /* Clean up the register and memory allocations in RECORD.  */
4590
4591 static void
4592 deallocate_reg_mem (insn_decode_record *record)
4593 {
4594 xfree (record->aarch64_regs);
4595 xfree (record->aarch64_mems);
4596 }
4597
4598 #if GDB_SELF_TEST
4599 namespace selftests {
4600
4601 static void
4602 aarch64_process_record_test (void)
4603 {
4604 struct gdbarch_info info;
4605 uint32_t ret;
4606
4607 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4608
4609 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4610 SELF_CHECK (gdbarch != NULL);
4611
4612 insn_decode_record aarch64_record;
4613
4614 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4615 aarch64_record.regcache = NULL;
4616 aarch64_record.this_addr = 0;
4617 aarch64_record.gdbarch = gdbarch;
4618
4619 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4620 aarch64_record.aarch64_insn = 0xf9800020;
4621 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4622 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4623 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4624 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4625
4626 deallocate_reg_mem (&aarch64_record);
4627 }
4628
4629 } // namespace selftests
4630 #endif /* GDB_SELF_TEST */
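
/* The test above can be run from a GDB configured with
   --enable-unit-tests via "maintenance selftest
   aarch64-process-record".  */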
4631
4632 /* Parse the current instruction and record the values of the registers
4633 and memory that the instruction will change to record_arch_list.
4634 Return -1 if something is wrong.  */
4635
4636 int
4637 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4638 CORE_ADDR insn_addr)
4639 {
4640 uint32_t rec_no = 0;
4641 uint8_t insn_size = 4;
4642 uint32_t ret = 0;
4643 gdb_byte buf[insn_size];
4644 insn_decode_record aarch64_record;
4645
4646 memset (&buf[0], 0, insn_size);
4647 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4648 target_read_memory (insn_addr, &buf[0], insn_size);
4649 aarch64_record.aarch64_insn
4650 = (uint32_t) extract_unsigned_integer (&buf[0],
4651 insn_size,
4652 gdbarch_byte_order (gdbarch));
4653 aarch64_record.regcache = regcache;
4654 aarch64_record.this_addr = insn_addr;
4655 aarch64_record.gdbarch = gdbarch;
4656
4657 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4658 if (ret == AARCH64_RECORD_UNSUPPORTED)
4659 {
4660 fprintf_unfiltered (gdb_stderr,
4661 _("Process record does not support instruction "
4662 "0x%0x at address %s.\n"),
4663 aarch64_record.aarch64_insn,
4664 paddress (gdbarch, insn_addr));
4665 ret = -1;
4666 }
4667
4668 if (0 == ret)
4669 {
4670 /* Record registers. */
4671 record_full_arch_list_add_reg (aarch64_record.regcache,
4672 AARCH64_PC_REGNUM);
4673 /* Always record register CPSR. */
4674 record_full_arch_list_add_reg (aarch64_record.regcache,
4675 AARCH64_CPSR_REGNUM);
4676 if (aarch64_record.aarch64_regs)
4677 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4678 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4679 aarch64_record.aarch64_regs[rec_no]))
4680 ret = -1;
4681
4682 /* Record memories. */
4683 if (aarch64_record.aarch64_mems)
4684 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4685 if (record_full_arch_list_add_mem
4686 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4687 aarch64_record.aarch64_mems[rec_no].len))
4688 ret = -1;
4689
4690 if (record_full_arch_list_add_end ())
4691 ret = -1;
4692 }
4693
4694 deallocate_reg_mem (&aarch64_record);
4695 return ret;
4696 }