gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
                         struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
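      /* For example, with 48-bit virtual addresses a Linux target will
         typically report a code mask of 0xffff000000000000, i.e. the PAC
         sits in bits 48-63 of the link register.  (Illustrative value;
         the actual mask is whatever the target supplies.)  */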
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

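/* For instance, a typical frame-pointer prologue of the form

     stp x29, x30, [sp, #-32]!
     mov x29, sp

   is recorded below as x29/x30 saved at offsets 0 and 8 from the new SP,
   with x29 (the FP) becoming the frame register.  */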
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }

          /* Did we move SP to FP?  */
          if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          /* If this shows up before we set the stack, keep going.  Otherwise
             stop the analysis.  */
          if (seen_stack_set)
            break;

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store
            (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
             size, regs[rt]);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Ignore the instruction that allocates stack space and sets
             the SP.  */
          if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
            seen_stack_set = true;
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            rt += AARCH64_X_REGISTER_COUNT;

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

          /* Are we storing with SP as a base?  */
          if (rn == AARCH64_SP_REGNUM)
            seen_stack_set = true;
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32]*/
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
      0x52800020, /* mov w0, #0x1 */
      0x910003fd, /* mov x29, sp */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0x290207e0, /* stp w0, w1, [sp, #16] */
      0xa9018fe2, /* stp x2, x3, [sp, #24] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0xb9002be4, /* str w4, [sp, #40] */
      0xf9001be5, /* str x5, [sp, #48] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
      0x52800020, /* mov w0, #0x1 */
      0xb80343e6, /* stur w6, [sp, #52] */
      0xf80383e7, /* stur x7, [sp, #56] */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
      0x52800020, /* mov w0, #0x1 */
      0x528000a2, /* mov w2, #0x5 */
      0x97fffff8, /* bl 6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
        0x910003fd, /* mov x29, sp */
        0xf801c3f3, /* str x19, [sp, #28] */
        0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -40);
          else
            SELF_CHECK (cache.saved_regs[i].addr == -1);
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
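      /* Compilers typically emit this opcode right after the
         paciasp/pacibsp and autiasp/autibsp instructions, so the RA
         state flips between "mangled" and "not mangled" at exactly
         those points.  (An observation about common code generation,
         not a DWARF requirement.)  */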
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
        return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
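/* For example, "brk #0" (0xd4200000), which GDB itself uses as the
   software breakpoint on this architecture, matches the pattern above,
   as does any other BRK immediate an OS may prefer.  */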

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
        (uint32_t) extract_unsigned_integer (target_mem, insn_len,
                                             gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
         choices of such instructions with different immediate values.
         Different OSes may use different variations, but they have the
         same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
         scalar type), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || type->code () != (*fundamental_type)->code ())
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || target_type->code () != (*fundamental_type)->code ())
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (type->is_vector ())
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || type->code () != (*fundamental_type)->code ())
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < type->num_fields (); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&type->field (i)))
              continue;

            struct type *member = check_typedef (type->field (i).type ());

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                             ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */
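/* For example, "struct { float x, y, z; }" is an HFA of three floats and
   is passed in three consecutive SIMD/FP registers, whereas
   "struct { float x; double y; }" is not a candidate because its members
   do not share one fundamental type.  */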

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};
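/* For example, after passing two integer arguments and one double, NGRN
   is 2 (x0 and x1 consumed), NSRN is 1 (d0 consumed) and NSAA is still 0,
   since nothing has been pushed onto the stack yet.  */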

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
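      /* E.g. a 3-byte struct on a big-endian target must occupy the most
         significant bytes of the X register, hence the left shift.  */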
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type ARG_TYPE, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state, as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&arg_type->field (i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in V registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available,
                 therefore this will never need to fall back to the
                 stack.  */
1862 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1863 arg))
1864 gdb_assert_not_reached ("Failed to push args");
1865 }
1866 else
1867 {
1868 info.nsrn = 8;
1869 pass_on_stack (&info, arg_type, arg);
1870 }
1871 continue;
1872 }
1873
1874 switch (arg_type->code ())
1875 {
1876 case TYPE_CODE_INT:
1877 case TYPE_CODE_BOOL:
1878 case TYPE_CODE_CHAR:
1879 case TYPE_CODE_RANGE:
1880 case TYPE_CODE_ENUM:
1881 if (len < 4)
1882 {
1883 /* Promote to 32 bit integer. */
1884 if (arg_type->is_unsigned ())
1885 arg_type = builtin_type (gdbarch)->builtin_uint32;
1886 else
1887 arg_type = builtin_type (gdbarch)->builtin_int32;
1888 arg = value_cast (arg_type, arg);
1889 }
1890 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1891 break;
1892
1893 case TYPE_CODE_STRUCT:
1894 case TYPE_CODE_ARRAY:
1895 case TYPE_CODE_UNION:
1896 if (len > 16)
1897 {
1898 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1899 invisible reference. */
1900
1901 /* Allocate aligned storage. */
1902 sp = align_down (sp - len, 16);
1903
1904 /* Write the real data into the stack. */
1905 write_memory (sp, value_contents (arg), len);
1906
1907 /* Construct the indirection. */
1908 arg_type = lookup_pointer_type (arg_type);
1909 arg = value_from_pointer (arg_type, sp);
1910 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1911 }
1912 else
1913	    /* PCS C.15 / C.18: aggregates of 16 bytes or less are passed
	       in registers or on the stack as multiple values.  */
1914 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1915 break;
1916
1917 default:
1918 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1919 break;
1920 }
1921 }
1922
1923 /* Make sure stack retains 16 byte alignment. */
1924 if (info.nsaa & 15)
1925 sp -= 16 - (info.nsaa & 15);
1926
1927 while (!info.si.empty ())
1928 {
1929 const stack_item_t &si = info.si.back ();
1930
1931 sp -= si.len;
1932 if (si.data != NULL)
1933 write_memory (sp, si.data, si.len);
1934 info.si.pop_back ();
1935 }
1936
1937 /* Finally, update the SP register. */
1938 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1939
1940 return sp;
1941 }
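/* As a worked example of the rules above (a sketch, with hypothetical
   argument types): for a call f (int a, double b, struct { long x, y; } c)
   the int is promoted and passed in W0, the double is a VFP candidate
   and goes in V0, and the 16-byte struct travels in X1/X2; a 24-byte
   struct would instead be copied below SP and passed by invisible
   reference in a single X register.  */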
1942
1943 /* Implement the "frame_align" gdbarch method. */
1944
1945 static CORE_ADDR
1946 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1947 {
1948 /* Align the stack to sixteen bytes. */
1949 return sp & ~(CORE_ADDR) 15;
1950 }
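/* Purely for illustration: frame_align only ever rounds down, so an
   SP of 0x7ffffff9 becomes

     0x7ffffff9 & ~(CORE_ADDR) 15 == 0x7ffffff0

   which keeps SP 16-byte aligned as the AAPCS64 requires at public
   interfaces.  */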
1951
1952 /* Return the type for an AdvSISD Q register. */
1953
1954 static struct type *
1955 aarch64_vnq_type (struct gdbarch *gdbarch)
1956 {
1957 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1958
1959 if (tdep->vnq_type == NULL)
1960 {
1961 struct type *t;
1962 struct type *elem;
1963
1964 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1965 TYPE_CODE_UNION);
1966
1967 elem = builtin_type (gdbarch)->builtin_uint128;
1968 append_composite_type_field (t, "u", elem);
1969
1970 elem = builtin_type (gdbarch)->builtin_int128;
1971 append_composite_type_field (t, "s", elem);
1972
1973 tdep->vnq_type = t;
1974 }
1975
1976 return tdep->vnq_type;
1977 }
1978
1979 /* Return the type for an AdvSISD D register. */
1980
1981 static struct type *
1982 aarch64_vnd_type (struct gdbarch *gdbarch)
1983 {
1984 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1985
1986 if (tdep->vnd_type == NULL)
1987 {
1988 struct type *t;
1989 struct type *elem;
1990
1991 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1992 TYPE_CODE_UNION);
1993
1994 elem = builtin_type (gdbarch)->builtin_double;
1995 append_composite_type_field (t, "f", elem);
1996
1997 elem = builtin_type (gdbarch)->builtin_uint64;
1998 append_composite_type_field (t, "u", elem);
1999
2000 elem = builtin_type (gdbarch)->builtin_int64;
2001 append_composite_type_field (t, "s", elem);
2002
2003 tdep->vnd_type = t;
2004 }
2005
2006 return tdep->vnd_type;
2007 }
2008
2009 /* Return the type for an AdvSISD S register. */
2010
2011 static struct type *
2012 aarch64_vns_type (struct gdbarch *gdbarch)
2013 {
2014 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2015
2016 if (tdep->vns_type == NULL)
2017 {
2018 struct type *t;
2019 struct type *elem;
2020
2021 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2022 TYPE_CODE_UNION);
2023
2024 elem = builtin_type (gdbarch)->builtin_float;
2025 append_composite_type_field (t, "f", elem);
2026
2027 elem = builtin_type (gdbarch)->builtin_uint32;
2028 append_composite_type_field (t, "u", elem);
2029
2030 elem = builtin_type (gdbarch)->builtin_int32;
2031 append_composite_type_field (t, "s", elem);
2032
2033 tdep->vns_type = t;
2034 }
2035
2036 return tdep->vns_type;
2037 }
2038
2039 /* Return the type for an AdvSISD H register. */
2040
2041 static struct type *
2042 aarch64_vnh_type (struct gdbarch *gdbarch)
2043 {
2044 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2045
2046 if (tdep->vnh_type == NULL)
2047 {
2048 struct type *t;
2049 struct type *elem;
2050
2051 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2052 TYPE_CODE_UNION);
2053
2054 elem = builtin_type (gdbarch)->builtin_half;
2055 append_composite_type_field (t, "f", elem);
2056
2057 elem = builtin_type (gdbarch)->builtin_uint16;
2058 append_composite_type_field (t, "u", elem);
2059
2060 elem = builtin_type (gdbarch)->builtin_int16;
2061 append_composite_type_field (t, "s", elem);
2062
2063 tdep->vnh_type = t;
2064 }
2065
2066 return tdep->vnh_type;
2067 }
2068
2069 /* Return the type for an AdvSISD B register. */
2070
2071 static struct type *
2072 aarch64_vnb_type (struct gdbarch *gdbarch)
2073 {
2074 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2075
2076 if (tdep->vnb_type == NULL)
2077 {
2078 struct type *t;
2079 struct type *elem;
2080
2081 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2082 TYPE_CODE_UNION);
2083
2084 elem = builtin_type (gdbarch)->builtin_uint8;
2085 append_composite_type_field (t, "u", elem);
2086
2087 elem = builtin_type (gdbarch)->builtin_int8;
2088 append_composite_type_field (t, "s", elem);
2089
2090 tdep->vnb_type = t;
2091 }
2092
2093 return tdep->vnb_type;
2094 }
2095
2096 /* Return the type for an AdvSISD V register. */
2097
2098 static struct type *
2099 aarch64_vnv_type (struct gdbarch *gdbarch)
2100 {
2101 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2102
2103 if (tdep->vnv_type == NULL)
2104 {
2105	      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2106	         slice from the non-pseudo vector registers.  However, NEON V registers
2107	         are always vector registers, and need constructing as such.  */
2108 const struct builtin_type *bt = builtin_type (gdbarch);
2109
2110 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2111 TYPE_CODE_UNION);
2112
2113 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2114 TYPE_CODE_UNION);
2115 append_composite_type_field (sub, "f",
2116 init_vector_type (bt->builtin_double, 2));
2117 append_composite_type_field (sub, "u",
2118 init_vector_type (bt->builtin_uint64, 2));
2119 append_composite_type_field (sub, "s",
2120 init_vector_type (bt->builtin_int64, 2));
2121 append_composite_type_field (t, "d", sub);
2122
2123 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2124 TYPE_CODE_UNION);
2125 append_composite_type_field (sub, "f",
2126 init_vector_type (bt->builtin_float, 4));
2127 append_composite_type_field (sub, "u",
2128 init_vector_type (bt->builtin_uint32, 4));
2129 append_composite_type_field (sub, "s",
2130 init_vector_type (bt->builtin_int32, 4));
2131 append_composite_type_field (t, "s", sub);
2132
2133 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2134 TYPE_CODE_UNION);
2135 append_composite_type_field (sub, "f",
2136 init_vector_type (bt->builtin_half, 8));
2137 append_composite_type_field (sub, "u",
2138 init_vector_type (bt->builtin_uint16, 8));
2139 append_composite_type_field (sub, "s",
2140 init_vector_type (bt->builtin_int16, 8));
2141 append_composite_type_field (t, "h", sub);
2142
2143 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2144 TYPE_CODE_UNION);
2145 append_composite_type_field (sub, "u",
2146 init_vector_type (bt->builtin_uint8, 16));
2147 append_composite_type_field (sub, "s",
2148 init_vector_type (bt->builtin_int8, 16));
2149 append_composite_type_field (t, "b", sub);
2150
2151 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2152 TYPE_CODE_UNION);
2153 append_composite_type_field (sub, "u",
2154 init_vector_type (bt->builtin_uint128, 1));
2155 append_composite_type_field (sub, "s",
2156 init_vector_type (bt->builtin_int128, 1));
2157 append_composite_type_field (t, "q", sub);
2158
2159 tdep->vnv_type = t;
2160 }
2161
2162 return tdep->vnv_type;
2163 }
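/* Illustrative use from a GDB session (a sketch; output depends on
   the inferior):

     (gdb) print $v0.d.f    # V0 viewed as two doubles
     (gdb) print $v0.s.u    # V0 viewed as four uint32 lanes
     (gdb) print $q0.u      # Q0 viewed as one uint128

   Each union field above selects a different view of the same vector
   bytes.  */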
2164
2165 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2166
2167 static int
2168 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2169 {
2170 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2171
2172 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2173 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2174
2175 if (reg == AARCH64_DWARF_SP)
2176 return AARCH64_SP_REGNUM;
2177
2178 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2179 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2180
2181 if (reg == AARCH64_DWARF_SVE_VG)
2182 return AARCH64_SVE_VG_REGNUM;
2183
2184 if (reg == AARCH64_DWARF_SVE_FFR)
2185 return AARCH64_SVE_FFR_REGNUM;
2186
2187 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2188 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2189
2190 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2191 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2192
2193 if (tdep->has_pauth ())
2194 {
2195 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2196 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2197
2198 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2199 return tdep->pauth_ra_state_regnum;
2200 }
2201
2202 return -1;
2203 }
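/* For example, with the numbering from the AArch64 DWARF ABI used
   above, DWARF register 0 maps to X0, 31 maps to SP and 64 maps to
   V0; any number this function does not recognise yields -1 so the
   caller can reject it.  */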
2204
2205 /* Implement the "print_insn" gdbarch method. */
2206
2207 static int
2208 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2209 {
2210 info->symbols = NULL;
2211 return default_print_insn (memaddr, info);
2212 }
2213
2214 /* AArch64 BRK software debug mode instruction.
2215 Note that AArch64 code is always little-endian.
2216 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2217 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
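/* A sketch of how those bytes relate to the encoding: BRK #imm16 is
   0xd4200000 | (imm16 << 5), so with imm16 == 0 the instruction word
   is 0xd4200000, which stored little-endian gives exactly the
   00 00 20 d4 sequence above.  */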
2218
2219 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2220
2221 /* Extract from an array REGS containing the (raw) register state a
2222 function return value of type TYPE, and copy that, in virtual
2223 format, into VALBUF. */
2224
2225 static void
2226 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2227 gdb_byte *valbuf)
2228 {
2229 struct gdbarch *gdbarch = regs->arch ();
2230 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2231 int elements;
2232 struct type *fundamental_type;
2233
2234 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2235 &fundamental_type))
2236 {
2237 int len = TYPE_LENGTH (fundamental_type);
2238
2239 for (int i = 0; i < elements; i++)
2240 {
2241 int regno = AARCH64_V0_REGNUM + i;
2242 /* Enough space for a full vector register. */
2243 gdb_byte buf[register_size (gdbarch, regno)];
2244 gdb_assert (len <= sizeof (buf));
2245
2246 if (aarch64_debug)
2247 {
2248 debug_printf ("read HFA or HVA return value element %d from %s\n",
2249 i + 1,
2250 gdbarch_register_name (gdbarch, regno));
2251 }
2252 regs->cooked_read (regno, buf);
2253
2254 memcpy (valbuf, buf, len);
2255 valbuf += len;
2256 }
2257 }
2258 else if (type->code () == TYPE_CODE_INT
2259 || type->code () == TYPE_CODE_CHAR
2260 || type->code () == TYPE_CODE_BOOL
2261 || type->code () == TYPE_CODE_PTR
2262 || TYPE_IS_REFERENCE (type)
2263 || type->code () == TYPE_CODE_ENUM)
2264 {
2265	      /* If the type is a plain integer, then the access is
2266	         straightforward.  Otherwise we have to play around a bit
2267	         more.  */
2268 int len = TYPE_LENGTH (type);
2269 int regno = AARCH64_X0_REGNUM;
2270 ULONGEST tmp;
2271
2272 while (len > 0)
2273 {
2274 /* By using store_unsigned_integer we avoid having to do
2275 anything special for small big-endian values. */
2276 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2277 store_unsigned_integer (valbuf,
2278 (len > X_REGISTER_SIZE
2279 ? X_REGISTER_SIZE : len), byte_order, tmp);
2280 len -= X_REGISTER_SIZE;
2281 valbuf += X_REGISTER_SIZE;
2282 }
2283 }
2284 else
2285 {
2286 /* For a structure or union the behaviour is as if the value had
2287 been stored to word-aligned memory and then loaded into
2288 registers with 64-bit load instruction(s). */
2289 int len = TYPE_LENGTH (type);
2290 int regno = AARCH64_X0_REGNUM;
2291 bfd_byte buf[X_REGISTER_SIZE];
2292
2293 while (len > 0)
2294 {
2295 regs->cooked_read (regno++, buf);
2296 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2297 len -= X_REGISTER_SIZE;
2298 valbuf += X_REGISTER_SIZE;
2299 }
2300 }
2301 }
2302
2303
2304 /* Will a function return an aggregate type in memory or in a
2305 register? Return 0 if an aggregate type can be returned in a
2306 register, 1 if it must be returned in memory. */
2307
2308 static int
2309 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2310 {
2311 type = check_typedef (type);
2312 int elements;
2313 struct type *fundamental_type;
2314
2315 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2316 &fundamental_type))
2317 {
2318	      /* v0-v7 are used to return values and one register is allocated
2319	         per member.  However, an HFA or HVA has at most four members.  */
2320 return 0;
2321 }
2322
2323 if (TYPE_LENGTH (type) > 16)
2324 {
2325 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2326 invisible reference. */
2327
2328 return 1;
2329 }
2330
2331 return 0;
2332 }
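/* For instance, under these rules a struct of four doubles is an HFA
   and is returned in V0-V3, a 16-byte struct of two longs comes back
   in X0/X1, while a 24-byte struct exceeds the 16-byte limit and is
   returned through memory.  */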
2333
2334 /* Write into appropriate registers a function return value of type
2335 TYPE, given in virtual format. */
2336
2337 static void
2338 aarch64_store_return_value (struct type *type, struct regcache *regs,
2339 const gdb_byte *valbuf)
2340 {
2341 struct gdbarch *gdbarch = regs->arch ();
2342 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2343 int elements;
2344 struct type *fundamental_type;
2345
2346 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2347 &fundamental_type))
2348 {
2349 int len = TYPE_LENGTH (fundamental_type);
2350
2351 for (int i = 0; i < elements; i++)
2352 {
2353 int regno = AARCH64_V0_REGNUM + i;
2354 /* Enough space for a full vector register. */
2355 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2356 gdb_assert (len <= sizeof (tmpbuf));
2357
2358 if (aarch64_debug)
2359 {
2360 debug_printf ("write HFA or HVA return value element %d to %s\n",
2361 i + 1,
2362 gdbarch_register_name (gdbarch, regno));
2363 }
2364
2365 memcpy (tmpbuf, valbuf,
2366 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2367 regs->cooked_write (regno, tmpbuf);
2368 valbuf += len;
2369 }
2370 }
2371 else if (type->code () == TYPE_CODE_INT
2372 || type->code () == TYPE_CODE_CHAR
2373 || type->code () == TYPE_CODE_BOOL
2374 || type->code () == TYPE_CODE_PTR
2375 || TYPE_IS_REFERENCE (type)
2376 || type->code () == TYPE_CODE_ENUM)
2377 {
2378 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2379 {
2380	          /* Values of one word or less are zero/sign-extended and
2381	             returned in X0.  */
2382 bfd_byte tmpbuf[X_REGISTER_SIZE];
2383 LONGEST val = unpack_long (type, valbuf);
2384
2385 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2386 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2387 }
2388 else
2389 {
2390	          /* Integral values greater than one word are stored in
2391	             consecutive registers starting with X0.  This will always
2392	             be a multiple of the register size.  */
2393 int len = TYPE_LENGTH (type);
2394 int regno = AARCH64_X0_REGNUM;
2395
2396 while (len > 0)
2397 {
2398 regs->cooked_write (regno++, valbuf);
2399 len -= X_REGISTER_SIZE;
2400 valbuf += X_REGISTER_SIZE;
2401 }
2402 }
2403 }
2404 else
2405 {
2406 /* For a structure or union the behaviour is as if the value had
2407 been stored to word-aligned memory and then loaded into
2408 registers with 64-bit load instruction(s). */
2409 int len = TYPE_LENGTH (type);
2410 int regno = AARCH64_X0_REGNUM;
2411 bfd_byte tmpbuf[X_REGISTER_SIZE];
2412
2413 while (len > 0)
2414 {
2415 memcpy (tmpbuf, valbuf,
2416 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2417 regs->cooked_write (regno++, tmpbuf);
2418 len -= X_REGISTER_SIZE;
2419 valbuf += X_REGISTER_SIZE;
2420 }
2421 }
2422 }
2423
2424 /* Implement the "return_value" gdbarch method. */
2425
2426 static enum return_value_convention
2427 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2428 struct type *valtype, struct regcache *regcache,
2429 gdb_byte *readbuf, const gdb_byte *writebuf)
2430 {
2431
2432 if (valtype->code () == TYPE_CODE_STRUCT
2433 || valtype->code () == TYPE_CODE_UNION
2434 || valtype->code () == TYPE_CODE_ARRAY)
2435 {
2436 if (aarch64_return_in_memory (gdbarch, valtype))
2437 {
2438 if (aarch64_debug)
2439 debug_printf ("return value in memory\n");
2440 return RETURN_VALUE_STRUCT_CONVENTION;
2441 }
2442 }
2443
2444 if (writebuf)
2445 aarch64_store_return_value (valtype, regcache, writebuf);
2446
2447 if (readbuf)
2448 aarch64_extract_return_value (valtype, regcache, readbuf);
2449
2450 if (aarch64_debug)
2451 debug_printf ("return value in registers\n");
2452
2453 return RETURN_VALUE_REGISTER_CONVENTION;
2454 }
2455
2456 /* Implement the "get_longjmp_target" gdbarch method. */
2457
2458 static int
2459 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2460 {
2461 CORE_ADDR jb_addr;
2462 gdb_byte buf[X_REGISTER_SIZE];
2463 struct gdbarch *gdbarch = get_frame_arch (frame);
2464 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2465 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2466
2467 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2468
2469 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2470 X_REGISTER_SIZE))
2471 return 0;
2472
2473 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2474 return 1;
2475 }
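/* As an example, an OS ABI that keeps the saved PC in the second
   8-byte slot of jmp_buf (a hypothetical layout) would set
   tdep->jb_pc to 1 and tdep->jb_elt_size to 8, so the read above
   fetches the X_REGISTER_SIZE bytes at jb_addr + 8.  */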
2476
2477 /* Implement the "gen_return_address" gdbarch method. */
2478
2479 static void
2480 aarch64_gen_return_address (struct gdbarch *gdbarch,
2481 struct agent_expr *ax, struct axs_value *value,
2482 CORE_ADDR scope)
2483 {
2484 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2485 value->kind = axs_lvalue_register;
2486 value->u.reg = AARCH64_LR_REGNUM;
2487 }
2488 \f
2489
2490 /* Return the pseudo register name corresponding to register regnum. */
2491
2492 static const char *
2493 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2494 {
2495 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2496
2497 static const char *const q_name[] =
2498 {
2499 "q0", "q1", "q2", "q3",
2500 "q4", "q5", "q6", "q7",
2501 "q8", "q9", "q10", "q11",
2502 "q12", "q13", "q14", "q15",
2503 "q16", "q17", "q18", "q19",
2504 "q20", "q21", "q22", "q23",
2505 "q24", "q25", "q26", "q27",
2506 "q28", "q29", "q30", "q31",
2507 };
2508
2509 static const char *const d_name[] =
2510 {
2511 "d0", "d1", "d2", "d3",
2512 "d4", "d5", "d6", "d7",
2513 "d8", "d9", "d10", "d11",
2514 "d12", "d13", "d14", "d15",
2515 "d16", "d17", "d18", "d19",
2516 "d20", "d21", "d22", "d23",
2517 "d24", "d25", "d26", "d27",
2518 "d28", "d29", "d30", "d31",
2519 };
2520
2521 static const char *const s_name[] =
2522 {
2523 "s0", "s1", "s2", "s3",
2524 "s4", "s5", "s6", "s7",
2525 "s8", "s9", "s10", "s11",
2526 "s12", "s13", "s14", "s15",
2527 "s16", "s17", "s18", "s19",
2528 "s20", "s21", "s22", "s23",
2529 "s24", "s25", "s26", "s27",
2530 "s28", "s29", "s30", "s31",
2531 };
2532
2533 static const char *const h_name[] =
2534 {
2535 "h0", "h1", "h2", "h3",
2536 "h4", "h5", "h6", "h7",
2537 "h8", "h9", "h10", "h11",
2538 "h12", "h13", "h14", "h15",
2539 "h16", "h17", "h18", "h19",
2540 "h20", "h21", "h22", "h23",
2541 "h24", "h25", "h26", "h27",
2542 "h28", "h29", "h30", "h31",
2543 };
2544
2545 static const char *const b_name[] =
2546 {
2547 "b0", "b1", "b2", "b3",
2548 "b4", "b5", "b6", "b7",
2549 "b8", "b9", "b10", "b11",
2550 "b12", "b13", "b14", "b15",
2551 "b16", "b17", "b18", "b19",
2552 "b20", "b21", "b22", "b23",
2553 "b24", "b25", "b26", "b27",
2554 "b28", "b29", "b30", "b31",
2555 };
2556
2557 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2558
2559 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2560 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2561
2562 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2563 return d_name[p_regnum - AARCH64_D0_REGNUM];
2564
2565 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2566 return s_name[p_regnum - AARCH64_S0_REGNUM];
2567
2568 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2569 return h_name[p_regnum - AARCH64_H0_REGNUM];
2570
2571 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2572 return b_name[p_regnum - AARCH64_B0_REGNUM];
2573
2574 if (tdep->has_sve ())
2575 {
2576 static const char *const sve_v_name[] =
2577 {
2578 "v0", "v1", "v2", "v3",
2579 "v4", "v5", "v6", "v7",
2580 "v8", "v9", "v10", "v11",
2581 "v12", "v13", "v14", "v15",
2582 "v16", "v17", "v18", "v19",
2583 "v20", "v21", "v22", "v23",
2584 "v24", "v25", "v26", "v27",
2585 "v28", "v29", "v30", "v31",
2586 };
2587
2588 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2589 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2590 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2591 }
2592
2593 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2594 prevents it from being read by methods such as
2595 mi_cmd_trace_frame_collected. */
2596 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2597 return "";
2598
2599 internal_error (__FILE__, __LINE__,
2600 _("aarch64_pseudo_register_name: bad register number %d"),
2601 p_regnum);
2602 }
2603
2604 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2605
2606 static struct type *
2607 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2608 {
2609 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2610
2611 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2612
2613 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2614 return aarch64_vnq_type (gdbarch);
2615
2616 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2617 return aarch64_vnd_type (gdbarch);
2618
2619 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2620 return aarch64_vns_type (gdbarch);
2621
2622 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2623 return aarch64_vnh_type (gdbarch);
2624
2625 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2626 return aarch64_vnb_type (gdbarch);
2627
2628 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2629 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2630 return aarch64_vnv_type (gdbarch);
2631
2632 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2633 return builtin_type (gdbarch)->builtin_uint64;
2634
2635 internal_error (__FILE__, __LINE__,
2636 _("aarch64_pseudo_register_type: bad register number %d"),
2637 p_regnum);
2638 }
2639
2640 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2641
2642 static int
2643 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2644 struct reggroup *group)
2645 {
2646 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2647
2648 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2649
2650 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2651 return group == all_reggroup || group == vector_reggroup;
2652 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2653 return (group == all_reggroup || group == vector_reggroup
2654 || group == float_reggroup);
2655 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2656 return (group == all_reggroup || group == vector_reggroup
2657 || group == float_reggroup);
2658 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2659 return group == all_reggroup || group == vector_reggroup;
2660 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2661 return group == all_reggroup || group == vector_reggroup;
2662 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2663 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2664 return group == all_reggroup || group == vector_reggroup;
2665 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2666 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2667 return 0;
2668
2669 return group == all_reggroup;
2670 }
2671
2672 /* Helper for aarch64_pseudo_read_value. */
2673
2674 static struct value *
2675 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2676 readable_regcache *regcache, int regnum_offset,
2677 int regsize, struct value *result_value)
2678 {
2679 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2680
2681 /* Enough space for a full vector register. */
2682 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2683 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2684
2685 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2686 mark_value_bytes_unavailable (result_value, 0,
2687 TYPE_LENGTH (value_type (result_value)));
2688 else
2689 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2690
2691 return result_value;
2692 }
2693
2694 /* Implement the "pseudo_register_read_value" gdbarch method. */
2695
2696 static struct value *
2697 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2698 int regnum)
2699 {
2700 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2701 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2702
2703 VALUE_LVAL (result_value) = lval_register;
2704 VALUE_REGNUM (result_value) = regnum;
2705
2706 regnum -= gdbarch_num_regs (gdbarch);
2707
2708 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2709 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2710 regnum - AARCH64_Q0_REGNUM,
2711 Q_REGISTER_SIZE, result_value);
2712
2713 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2714 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2715 regnum - AARCH64_D0_REGNUM,
2716 D_REGISTER_SIZE, result_value);
2717
2718 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2719 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2720 regnum - AARCH64_S0_REGNUM,
2721 S_REGISTER_SIZE, result_value);
2722
2723 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2724 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2725 regnum - AARCH64_H0_REGNUM,
2726 H_REGISTER_SIZE, result_value);
2727
2728 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2729 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2730 regnum - AARCH64_B0_REGNUM,
2731 B_REGISTER_SIZE, result_value);
2732
2733 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2734 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2735 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2736 regnum - AARCH64_SVE_V0_REGNUM,
2737 V_REGISTER_SIZE, result_value);
2738
2739	  gdb_assert_not_reached ("regnum out of bounds");
2740 }
2741
2742 /* Helper for aarch64_pseudo_write. */
2743
2744 static void
2745 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2746 int regnum_offset, int regsize, const gdb_byte *buf)
2747 {
2748 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2749
2750 /* Enough space for a full vector register. */
2751 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2752 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2753
2754	  /* Ensure the register buffer is zero.  We want gdb writes of the
2755	     various 'scalar' pseudo registers to behave like architectural
2756	     writes: register width bytes are written, the remainder are set
2757	     to zero.  */
2758 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2759
2760 memcpy (reg_buf, buf, regsize);
2761 regcache->raw_write (v_regnum, reg_buf);
2762 }
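/* So, for example, writing the 4-byte pseudo register S0 stores those
   bytes at offset 0 of V0 and clears the rest, mirroring how an
   architectural write to S0 zeroes the upper part of the vector
   register.  */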
2763
2764 /* Implement the "pseudo_register_write" gdbarch method. */
2765
2766 static void
2767 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2768 int regnum, const gdb_byte *buf)
2769 {
2770 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2771 regnum -= gdbarch_num_regs (gdbarch);
2772
2773 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2774 return aarch64_pseudo_write_1 (gdbarch, regcache,
2775 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2776 buf);
2777
2778 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2779 return aarch64_pseudo_write_1 (gdbarch, regcache,
2780 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2781 buf);
2782
2783 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2784 return aarch64_pseudo_write_1 (gdbarch, regcache,
2785 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2786 buf);
2787
2788 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2789 return aarch64_pseudo_write_1 (gdbarch, regcache,
2790 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2791 buf);
2792
2793 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2794 return aarch64_pseudo_write_1 (gdbarch, regcache,
2795 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2796 buf);
2797
2798 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2799 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2800 return aarch64_pseudo_write_1 (gdbarch, regcache,
2801 regnum - AARCH64_SVE_V0_REGNUM,
2802 V_REGISTER_SIZE, buf);
2803
2804	  gdb_assert_not_reached ("regnum out of bounds");
2805 }
2806
2807 /* Callback function for user_reg_add. */
2808
2809 static struct value *
2810 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2811 {
2812 const int *reg_p = (const int *) baton;
2813
2814 return value_of_register (*reg_p, frame);
2815 }
2816 \f
2817
2818 /* Implement the "software_single_step" gdbarch method, needed to
2819 single step through atomic sequences on AArch64. */
2820
2821 static std::vector<CORE_ADDR>
2822 aarch64_software_single_step (struct regcache *regcache)
2823 {
2824 struct gdbarch *gdbarch = regcache->arch ();
2825 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2826 const int insn_size = 4;
2827 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2828 CORE_ADDR pc = regcache_read_pc (regcache);
2829 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2830 CORE_ADDR loc = pc;
2831 CORE_ADDR closing_insn = 0;
2832 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2833 byte_order_for_code);
2834 int index;
2835 int insn_count;
2836 int bc_insn_count = 0; /* Conditional branch instruction count. */
2837 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2838 aarch64_inst inst;
2839
2840 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2841 return {};
2842
2843 /* Look for a Load Exclusive instruction which begins the sequence. */
2844 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2845 return {};
2846
2847 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2848 {
2849 loc += insn_size;
2850 insn = read_memory_unsigned_integer (loc, insn_size,
2851 byte_order_for_code);
2852
2853 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2854 return {};
2855 /* Check if the instruction is a conditional branch. */
2856 if (inst.opcode->iclass == condbranch)
2857 {
2858 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2859
2860 if (bc_insn_count >= 1)
2861 return {};
2862
2863 /* It is, so we'll try to set a breakpoint at the destination. */
2864 breaks[1] = loc + inst.operands[0].imm.value;
2865
2866 bc_insn_count++;
2867 last_breakpoint++;
2868 }
2869
2870 /* Look for the Store Exclusive which closes the atomic sequence. */
2871 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2872 {
2873 closing_insn = loc;
2874 break;
2875 }
2876 }
2877
2878	  /* We didn't find a closing Store Exclusive instruction; fall back.  */
2879 if (!closing_insn)
2880 return {};
2881
2882 /* Insert breakpoint after the end of the atomic sequence. */
2883 breaks[0] = loc + insn_size;
2884
2885 /* Check for duplicated breakpoints, and also check that the second
2886 breakpoint is not within the atomic sequence. */
2887 if (last_breakpoint
2888 && (breaks[1] == breaks[0]
2889 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2890 last_breakpoint = 0;
2891
2892 std::vector<CORE_ADDR> next_pcs;
2893
2894 /* Insert the breakpoint at the end of the sequence, and one at the
2895 destination of the conditional branch, if it exists. */
2896 for (index = 0; index <= last_breakpoint; index++)
2897 next_pcs.push_back (breaks[index]);
2898
2899 return next_pcs;
2900 }
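/* A sketch of the kind of sequence this recognises (illustrative
   assembly, not taken from a real inferior):

     retry:
       ldaxr  w0, [x1]      // Load Exclusive opens the sequence
       add    w0, w0, #1
       stlxr  w2, w0, [x1]  // Store Exclusive closes it

   One breakpoint goes after the Store Exclusive; if a B.cond was seen
   inside the sequence, a second goes at its destination.  Stepping
   each instruction individually would break the exclusive monitor and
   the sequence could never complete.  */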
2901
2902 struct aarch64_displaced_step_copy_insn_closure
2903 : public displaced_step_copy_insn_closure
2904 {
2905	  /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2906	     is being displaced stepped.  */
2907 bool cond = false;
2908
2909 /* PC adjustment offset after displaced stepping. If 0, then we don't
2910 write the PC back, assuming the PC is already the right address. */
2911 int32_t pc_adjust = 0;
2912 };
2913
2914 /* Data when visiting instructions for displaced stepping. */
2915
2916 struct aarch64_displaced_step_data
2917 {
2918 struct aarch64_insn_data base;
2919
2920	  /* The address at which the instruction will be executed.  */
2921 CORE_ADDR new_addr;
2922 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2923 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2924 /* Number of instructions in INSN_BUF. */
2925 unsigned insn_count;
2926 /* Registers when doing displaced stepping. */
2927 struct regcache *regs;
2928
2929 aarch64_displaced_step_copy_insn_closure *dsc;
2930 };
2931
2932 /* Implementation of aarch64_insn_visitor method "b". */
2933
2934 static void
2935 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2936 struct aarch64_insn_data *data)
2937 {
2938 struct aarch64_displaced_step_data *dsd
2939 = (struct aarch64_displaced_step_data *) data;
2940 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2941
2942 if (can_encode_int32 (new_offset, 28))
2943 {
2944 /* Emit B rather than BL, because executing BL on a new address
2945 will get the wrong address into LR. In order to avoid this,
2946 we emit B, and update LR if the instruction is BL. */
2947 emit_b (dsd->insn_buf, 0, new_offset);
2948 dsd->insn_count++;
2949 }
2950 else
2951 {
2952 /* Write NOP. */
2953 emit_nop (dsd->insn_buf);
2954 dsd->insn_count++;
2955 dsd->dsc->pc_adjust = offset;
2956 }
2957
2958 if (is_bl)
2959 {
2960 /* Update LR. */
2961 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2962 data->insn_addr + 4);
2963 }
2964 }
2965
2966 /* Implementation of aarch64_insn_visitor method "b_cond". */
2967
2968 static void
2969 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2970 struct aarch64_insn_data *data)
2971 {
2972 struct aarch64_displaced_step_data *dsd
2973 = (struct aarch64_displaced_step_data *) data;
2974
2975	  /* GDB has to fix up the PC after displaced stepping this instruction
2976	     differently according to whether the condition is true or false.
2977	     Instead of checking COND against the conditional flags, we can use
2978	     the following instructions, and GDB can tell how to fix up the PC
2979	     according to the PC value.
2980
2981 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2982 INSN1 ;
2983 TAKEN:
2984 INSN2
2985 */
2986
2987 emit_bcond (dsd->insn_buf, cond, 8);
2988 dsd->dsc->cond = true;
2989 dsd->dsc->pc_adjust = offset;
2990 dsd->insn_count = 1;
2991 }
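/* With the layout above the scratch pad starts at TO, so after the
   step the PC is either TO + 8 (condition true, branch taken) or
   TO + 4 (condition false, fell through); aarch64_displaced_step_fixup
   below keys off exactly that difference.  */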
2992
2993	/* Construct an aarch64_register operand on the fly.  If the register
2994	   is known statically, it should be made a global constant instead of
2995	   going through this helper function.  */
2996
2997 static struct aarch64_register
2998 aarch64_register (unsigned num, int is64)
2999 {
3000 return (struct aarch64_register) { num, is64 };
3001 }
3002
3003 /* Implementation of aarch64_insn_visitor method "cb". */
3004
3005 static void
3006 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3007 const unsigned rn, int is64,
3008 struct aarch64_insn_data *data)
3009 {
3010 struct aarch64_displaced_step_data *dsd
3011 = (struct aarch64_displaced_step_data *) data;
3012
3013 /* The offset is out of range for a compare and branch
3014 instruction. We can use the following instructions instead:
3015
3016 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3017 INSN1 ;
3018 TAKEN:
3019 INSN2
3020 */
3021 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3022 dsd->insn_count = 1;
3023 dsd->dsc->cond = true;
3024 dsd->dsc->pc_adjust = offset;
3025 }
3026
3027 /* Implementation of aarch64_insn_visitor method "tb". */
3028
3029 static void
3030 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3031 const unsigned rt, unsigned bit,
3032 struct aarch64_insn_data *data)
3033 {
3034 struct aarch64_displaced_step_data *dsd
3035 = (struct aarch64_displaced_step_data *) data;
3036
3037	  /* The offset is out of range for a test bit and branch
3038	     instruction.  We can use the following instructions instead:
3039
3040 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3041 INSN1 ;
3042 TAKEN:
3043 INSN2
3044
3045 */
3046 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3047 dsd->insn_count = 1;
3048 dsd->dsc->cond = true;
3049 dsd->dsc->pc_adjust = offset;
3050 }
3051
3052 /* Implementation of aarch64_insn_visitor method "adr". */
3053
3054 static void
3055 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3056 const int is_adrp, struct aarch64_insn_data *data)
3057 {
3058 struct aarch64_displaced_step_data *dsd
3059 = (struct aarch64_displaced_step_data *) data;
3060 /* We know exactly the address the ADR{P,} instruction will compute.
3061 We can just write it to the destination register. */
3062 CORE_ADDR address = data->insn_addr + offset;
3063
3064 if (is_adrp)
3065 {
3066	      /* Clear the lower 12 bits of the address to get the 4K page base.  */
3067 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3068 address & ~0xfff);
3069 }
3070 else
3071 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3072 address);
3073
3074 dsd->dsc->pc_adjust = 4;
3075 emit_nop (dsd->insn_buf);
3076 dsd->insn_count = 1;
3077 }
3078
3079 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3080
3081 static void
3082 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3083 const unsigned rt, const int is64,
3084 struct aarch64_insn_data *data)
3085 {
3086 struct aarch64_displaced_step_data *dsd
3087 = (struct aarch64_displaced_step_data *) data;
3088 CORE_ADDR address = data->insn_addr + offset;
3089 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3090
3091 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3092 address);
3093
3094 if (is_sw)
3095 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3096 aarch64_register (rt, 1), zero);
3097 else
3098 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3099 aarch64_register (rt, 1), zero);
3100
3101 dsd->dsc->pc_adjust = 4;
3102 }
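/* In effect a PC-relative "ldr x0, <literal>" is rewritten as this
   sketch shows:

     mov  x0, #<address of literal>   // done via the regcache write
     ldr  x0, [x0, #0]                // the relocated instruction

   so the load still targets the original address even though it
   executes at NEW_ADDR.  */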
3103
3104 /* Implementation of aarch64_insn_visitor method "others". */
3105
3106 static void
3107 aarch64_displaced_step_others (const uint32_t insn,
3108 struct aarch64_insn_data *data)
3109 {
3110 struct aarch64_displaced_step_data *dsd
3111 = (struct aarch64_displaced_step_data *) data;
3112
3113 aarch64_emit_insn (dsd->insn_buf, insn);
3114 dsd->insn_count = 1;
3115
3116 if ((insn & 0xfffffc1f) == 0xd65f0000)
3117 {
3118 /* RET */
3119 dsd->dsc->pc_adjust = 0;
3120 }
3121 else
3122 dsd->dsc->pc_adjust = 4;
3123 }
3124
3125 static const struct aarch64_insn_visitor visitor =
3126 {
3127 aarch64_displaced_step_b,
3128 aarch64_displaced_step_b_cond,
3129 aarch64_displaced_step_cb,
3130 aarch64_displaced_step_tb,
3131 aarch64_displaced_step_adr,
3132 aarch64_displaced_step_ldr_literal,
3133 aarch64_displaced_step_others,
3134 };
3135
3136 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3137
3138 displaced_step_copy_insn_closure_up
3139 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3140 CORE_ADDR from, CORE_ADDR to,
3141 struct regcache *regs)
3142 {
3143 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3144 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3145 struct aarch64_displaced_step_data dsd;
3146 aarch64_inst inst;
3147
3148 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3149 return NULL;
3150
3151 /* Look for a Load Exclusive instruction which begins the sequence. */
3152 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3153 {
3154 /* We can't displaced step atomic sequences. */
3155 return NULL;
3156 }
3157
3158 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3159 (new aarch64_displaced_step_copy_insn_closure);
3160 dsd.base.insn_addr = from;
3161 dsd.new_addr = to;
3162 dsd.regs = regs;
3163 dsd.dsc = dsc.get ();
3164 dsd.insn_count = 0;
3165 aarch64_relocate_instruction (insn, &visitor,
3166 (struct aarch64_insn_data *) &dsd);
3167 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3168
3169 if (dsd.insn_count != 0)
3170 {
3171 int i;
3172
3173 /* Instruction can be relocated to scratch pad. Copy
3174 relocated instruction(s) there. */
3175 for (i = 0; i < dsd.insn_count; i++)
3176 {
3177 displaced_debug_printf ("writing insn %.8x at %s",
3178 dsd.insn_buf[i],
3179 paddress (gdbarch, to + i * 4));
3180
3181 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3182 (ULONGEST) dsd.insn_buf[i]);
3183 }
3184 }
3185 else
3186 {
3187 dsc = NULL;
3188 }
3189
3190	  /* This is a workaround for a problem with g++ 4.8.  */
3191 return displaced_step_copy_insn_closure_up (dsc.release ());
3192 }
3193
3194 /* Implement the "displaced_step_fixup" gdbarch method. */
3195
3196 void
3197 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3198 struct displaced_step_copy_insn_closure *dsc_,
3199 CORE_ADDR from, CORE_ADDR to,
3200 struct regcache *regs)
3201 {
3202 aarch64_displaced_step_copy_insn_closure *dsc
3203 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3204
3205 ULONGEST pc;
3206
3207 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3208
3209 displaced_debug_printf ("PC after stepping: %s (was %s).",
3210 paddress (gdbarch, pc), paddress (gdbarch, to));
3211
3212 if (dsc->cond)
3213 {
3214 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3215 dsc->pc_adjust);
3216
3217 if (pc - to == 8)
3218 {
3219 /* Condition is true. */
3220 }
3221 else if (pc - to == 4)
3222 {
3223 /* Condition is false. */
3224 dsc->pc_adjust = 4;
3225 }
3226 else
3227 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3228
3229 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3230 dsc->pc_adjust);
3231 }
3232
3233 displaced_debug_printf ("%s PC by %d",
3234 dsc->pc_adjust ? "adjusting" : "not adjusting",
3235 dsc->pc_adjust);
3236
3237 if (dsc->pc_adjust != 0)
3238 {
3239 /* Make sure the previous instruction was executed (that is, the PC
3240 has changed). If the PC didn't change, then discard the adjustment
3241 offset. Otherwise we may skip an instruction before its execution
3242 took place. */
3243 if ((pc - to) == 0)
3244 {
3245 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3246 dsc->pc_adjust = 0;
3247 }
3248
3249 displaced_debug_printf ("fixup: set PC to %s:%d",
3250 paddress (gdbarch, from), dsc->pc_adjust);
3251
3252 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3253 from + dsc->pc_adjust);
3254 }
3255 }
3256
3257 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3258
3259 bool
3260 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3261 {
3262 return true;
3263 }
3264
3265 /* Get the correct target description for the given VQ value.
3266 If VQ is zero then it is assumed SVE is not supported.
3267 (It is not possible to set VQ to zero on an SVE system). */
3268
3269 const target_desc *
3270 aarch64_read_description (uint64_t vq, bool pauth_p)
3271 {
3272 if (vq > AARCH64_MAX_SVE_VQ)
3273 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3274 AARCH64_MAX_SVE_VQ);
3275
3276 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3277
3278 if (tdesc == NULL)
3279 {
3280 tdesc = aarch64_create_target_description (vq, pauth_p);
3281 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3282 }
3283
3284 return tdesc;
3285 }
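/* Illustrative use (a sketch): a target with 512-bit SVE vectors and
   no pointer authentication would be described by
   aarch64_read_description (sve_vq_from_vl (64), false); repeated
   calls with the same arguments return the same cached descriptor.  */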
3286
3287 /* Return the VQ used when creating the target description TDESC. */
3288
3289 static uint64_t
3290 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3291 {
3292 const struct tdesc_feature *feature_sve;
3293
3294 if (!tdesc_has_registers (tdesc))
3295 return 0;
3296
3297 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3298
3299 if (feature_sve == nullptr)
3300 return 0;
3301
3302 uint64_t vl = tdesc_register_bitsize (feature_sve,
3303 aarch64_sve_register_names[0]) / 8;
3304 return sve_vq_from_vl (vl);
3305 }
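/* For example, if the description reports Z0 as 256 bits wide then VL
   is 32 bytes and the VQ returned is 2, i.e. two 128-bit quadword
   chunks per vector register.  */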
3306
3307 /* Add all the expected register sets into GDBARCH. */
3308
3309 static void
3310 aarch64_add_reggroups (struct gdbarch *gdbarch)
3311 {
3312 reggroup_add (gdbarch, general_reggroup);
3313 reggroup_add (gdbarch, float_reggroup);
3314 reggroup_add (gdbarch, system_reggroup);
3315 reggroup_add (gdbarch, vector_reggroup);
3316 reggroup_add (gdbarch, all_reggroup);
3317 reggroup_add (gdbarch, save_reggroup);
3318 reggroup_add (gdbarch, restore_reggroup);
3319 }
3320
3321 /* Implement the "cannot_store_register" gdbarch method. */
3322
3323 static int
3324 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3325 {
3326 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3327
3328 if (!tdep->has_pauth ())
3329 return 0;
3330
3331 /* Pointer authentication registers are read-only. */
3332 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3333 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3334 }
3335
3336 /* Initialize the current architecture based on INFO. If possible,
3337 re-use an architecture from ARCHES, which is a list of
3338 architectures already created during this debugging session.
3339
3340 Called e.g. at program startup, when reading a core file, and when
3341 reading a binary file. */
3342
3343 static struct gdbarch *
3344 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3345 {
3346 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3347 const struct tdesc_feature *feature_pauth;
3348 bool valid_p = true;
3349 int i, num_regs = 0, num_pseudo_regs = 0;
3350 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3351
3352 /* Use the vector length passed via the target info. Here -1 is used for no
3353 SVE, and 0 is unset. If unset then use the vector length from the existing
3354 tdesc. */
3355 uint64_t vq = 0;
3356 if (info.id == (int *) -1)
3357 vq = 0;
3358 else if (info.id != 0)
3359 vq = (uint64_t) info.id;
3360 else
3361 vq = aarch64_get_tdesc_vq (info.target_desc);
3362
3363 if (vq > AARCH64_MAX_SVE_VQ)
3364 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3365 pulongest (vq), AARCH64_MAX_SVE_VQ);
3366
3367 /* If there is already a candidate, use it. */
3368 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3369 best_arch != nullptr;
3370 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3371 {
3372 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3373 if (tdep && tdep->vq == vq)
3374 return best_arch->gdbarch;
3375 }
3376
3377 /* Ensure we always have a target descriptor, and that it is for the given VQ
3378 value. */
3379 const struct target_desc *tdesc = info.target_desc;
3380 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3381 tdesc = aarch64_read_description (vq, false);
3382 gdb_assert (tdesc);
3383
3384	  feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3385 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3386 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3387 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3388
3389 if (feature_core == nullptr)
3390 return nullptr;
3391
3392 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3393
3394 /* Validate the description provides the mandatory core R registers
3395 and allocate their numbers. */
3396 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3397 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3398 AARCH64_X0_REGNUM + i,
3399 aarch64_r_register_names[i]);
3400
3401 num_regs = AARCH64_X0_REGNUM + i;
3402
3403 /* Add the V registers. */
3404 if (feature_fpu != nullptr)
3405 {
3406 if (feature_sve != nullptr)
3407 error (_("Program contains both fpu and SVE features."));
3408
3409 /* Validate the description provides the mandatory V registers
3410 and allocate their numbers. */
3411 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3412 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3413 AARCH64_V0_REGNUM + i,
3414 aarch64_v_register_names[i]);
3415
3416 num_regs = AARCH64_V0_REGNUM + i;
3417 }
3418
3419 /* Add the SVE registers. */
3420 if (feature_sve != nullptr)
3421 {
3422 /* Validate the description provides the mandatory SVE registers
3423 and allocate their numbers. */
3424 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3425 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3426 AARCH64_SVE_Z0_REGNUM + i,
3427 aarch64_sve_register_names[i]);
3428
3429 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3430 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3431 }
3432
3433 if (feature_fpu != nullptr || feature_sve != nullptr)
3434 {
3435 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3436 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3437 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3438 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3439 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3440 }
3441
3442 /* Add the pauth registers. */
3443 if (feature_pauth != NULL)
3444 {
3445 first_pauth_regnum = num_regs;
3446 pauth_ra_state_offset = num_pseudo_regs;
3447 /* Validate the descriptor provides the mandatory PAUTH registers and
3448 allocate their numbers. */
3449 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3450 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3451 first_pauth_regnum + i,
3452 aarch64_pauth_register_names[i]);
3453
3454 num_regs += i;
3455 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3456 }
3457
3458 if (!valid_p)
3459 return nullptr;
3460
3461 /* AArch64 code is always little-endian. */
3462 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3463
3464 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3465 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3466
3467 /* This should be low enough for everything. */
3468 tdep->lowest_pc = 0x20;
3469 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3470 tdep->jb_elt_size = 8;
3471 tdep->vq = vq;
3472 tdep->pauth_reg_base = first_pauth_regnum;
3473 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3474 : pauth_ra_state_offset + num_regs;
3475
3476 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3477 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3478
3479 /* Advance PC across function entry code. */
3480 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3481
3482 /* The stack grows downward. */
3483 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3484
3485 /* Breakpoint manipulation. */
3486 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3487 aarch64_breakpoint::kind_from_pc);
3488 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3489 aarch64_breakpoint::bp_from_kind);
3490 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3491 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3492
3493 /* Information about registers, etc. */
3494 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3495 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3496 set_gdbarch_num_regs (gdbarch, num_regs);
3497
3498 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3499 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3500 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3501 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3502 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3503 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3504 aarch64_pseudo_register_reggroup_p);
3505 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3506
3507 /* ABI */
3508 set_gdbarch_short_bit (gdbarch, 16);
3509 set_gdbarch_int_bit (gdbarch, 32);
3510 set_gdbarch_float_bit (gdbarch, 32);
3511 set_gdbarch_double_bit (gdbarch, 64);
3512 set_gdbarch_long_double_bit (gdbarch, 128);
3513 set_gdbarch_long_bit (gdbarch, 64);
3514 set_gdbarch_long_long_bit (gdbarch, 64);
3515 set_gdbarch_ptr_bit (gdbarch, 64);
3516 set_gdbarch_char_signed (gdbarch, 0);
3517 set_gdbarch_wchar_signed (gdbarch, 0);
3518 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3519 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3520 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3521 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3522
3523 /* Internal <-> external register number maps. */
3524 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3525
3526 /* Returning results. */
3527 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3528
3529 /* Disassembly. */
3530 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3531
3532 /* Virtual tables. */
3533 set_gdbarch_vbit_in_delta (gdbarch, 1);
3534
3535 /* Register architecture. */
3536 aarch64_add_reggroups (gdbarch);
3537
3538 /* Hook in the ABI-specific overrides, if they have been registered. */
3539 info.target_desc = tdesc;
3540 info.tdesc_data = tdesc_data.get ();
3541 gdbarch_init_osabi (info, gdbarch);
3542
3543 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3544 /* Register DWARF CFA vendor handler. */
3545 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3546 aarch64_execute_dwarf_cfa_vendor_op);
3547
3548 /* Permanent/Program breakpoint handling. */
3549 set_gdbarch_program_breakpoint_here_p (gdbarch,
3550 aarch64_program_breakpoint_here_p);
3551
3552 /* Add some default predicates. */
3553 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3554 dwarf2_append_unwinders (gdbarch);
3555 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3556
3557 frame_base_set_default (gdbarch, &aarch64_normal_base);
3558
3559 /* Now we have tuned the configuration, set a few final things,
3560 based on what the OS ABI has told us. */
3561
3562 if (tdep->jb_pc >= 0)
3563 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3564
3565 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3566
3567 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3568
3569 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3570
3571 /* Add standard register aliases. */
3572 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3573 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3574 value_of_aarch64_user_reg,
3575 &aarch64_register_aliases[i].regnum);
3576
3577 register_aarch64_ravenscar_ops (gdbarch);
3578
3579 return gdbarch;
3580 }
3581
3582 static void
3583 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3584 {
3585 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3586
3587 if (tdep == NULL)
3588 return;
3589
3590 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3591 paddress (gdbarch, tdep->lowest_pc));
3592 }
3593
3594 #if GDB_SELF_TEST
3595 namespace selftests
3596 {
3597 static void aarch64_process_record_test (void);
3598 }
3599 #endif
3600
3601 void _initialize_aarch64_tdep ();
3602 void
3603 _initialize_aarch64_tdep ()
3604 {
3605 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3606 aarch64_dump_tdep);
3607
3608 /* Debug this file's internals. */
3609 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3610 Set AArch64 debugging."), _("\
3611 Show AArch64 debugging."), _("\
3612 When on, AArch64 specific debugging is enabled."),
3613 NULL,
3614 show_aarch64_debug,
3615 &setdebuglist, &showdebuglist);
3616
3617 #if GDB_SELF_TEST
3618 selftests::register_test ("aarch64-analyze-prologue",
3619 selftests::aarch64_analyze_prologue_test);
3620 selftests::register_test ("aarch64-process-record",
3621 selftests::aarch64_process_record_test);
3622 #endif
3623 }
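/* Usage note for the command registered above:

     (gdb) set debug aarch64 on

   toggles aarch64_debug, which gates the AArch64-specific traces in
   this file.  The process-record handlers further down instead check
   record_debug, which is controlled by "set debug record".  */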
3624
3625 /* AArch64 process record/replay related structures and defines. */
3626
3627 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3628 do \
3629 { \
3630 unsigned int reg_len = LENGTH; \
3631 if (reg_len) \
3632 { \
3633 REGS = XNEWVEC (uint32_t, reg_len); \
3634 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3635 } \
3636 } \
3637 while (0)
3638
3639 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3640 do \
3641 { \
3642 unsigned int mem_len = LENGTH; \
3643 if (mem_len) \
3644 { \
3645 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3646 memcpy (&MEMS->len, &RECORD_BUF[0], \
3647 sizeof (struct aarch64_mem_r) * LENGTH); \
3648 } \
3649 } \
3650 while (0)
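/* Note: both macros are wrapped in do { ... } while (0) so that an
   invocation parses as a single statement.  For example,

     if (cond)
       REG_ALLOC (regs, len, buf);
     else
       ...

   would mis-parse (or fail to compile) if the macro body were a bare
   brace block followed by the caller's semicolon.  */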
3651
3652 /* AArch64 record/replay structures and enumerations. */
3653
3654 struct aarch64_mem_r
3655 {
3656 uint64_t len; /* Record length. */
3657 uint64_t addr; /* Memory address. */
3658 };
3659
3660 enum aarch64_record_result
3661 {
3662 AARCH64_RECORD_SUCCESS,
3663 AARCH64_RECORD_UNSUPPORTED,
3664 AARCH64_RECORD_UNKNOWN
3665 };
3666
3667 typedef struct insn_decode_record_t
3668 {
3669 struct gdbarch *gdbarch;
3670 struct regcache *regcache;
3671 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3672 uint32_t aarch64_insn; /* Insn to be recorded. */
3673 uint32_t mem_rec_count; /* Count of memory records. */
3674 uint32_t reg_rec_count; /* Count of register records. */
3675 uint32_t *aarch64_regs; /* Registers to be recorded. */
3676 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3677 } insn_decode_record;
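/* A minimal sketch (compiled out below, not part of the build) of how
   a record handler fills this structure; the "instruction" here is
   hypothetical, and the real handlers follow.  */
#if 0
static unsigned int
aarch64_record_example (insn_decode_record *aarch64_insn_r)
{
  uint32_t record_buf[1];
  uint64_t record_buf_mem[2];

  /* Pretend the insn writes X0 and stores 8 bytes at this_addr.  */
  record_buf[0] = AARCH64_X0_REGNUM;
  aarch64_insn_r->reg_rec_count = 1;

  record_buf_mem[0] = 8;                          /* Length in bytes.  */
  record_buf_mem[1] = aarch64_insn_r->this_addr;  /* Address.  */
  aarch64_insn_r->mem_rec_count = 1;

  /* The macros copy the scratch buffers into heap blocks that
     aarch64_process_record later walks and frees.  */
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  return AARCH64_RECORD_SUCCESS;
}
#endif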
3678
3679 /* Record handler for data processing - register instructions. */
3680
3681 static unsigned int
3682 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3683 {
3684 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3685 uint32_t record_buf[4];
3686
3687 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3688 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3689 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3690
3691 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3692 {
3693 uint8_t setflags;
3694
3695 /* Logical (shifted register). */
3696 if (insn_bits24_27 == 0x0a)
3697 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3698 /* Add/subtract. */
3699 else if (insn_bits24_27 == 0x0b)
3700 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3701 else
3702 return AARCH64_RECORD_UNKNOWN;
3703
3704 record_buf[0] = reg_rd;
3705 aarch64_insn_r->reg_rec_count = 1;
3706 if (setflags)
3707 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3708 }
3709 else
3710 {
3711 if (insn_bits24_27 == 0x0b)
3712 {
3713 /* Data-processing (3 source). */
3714 record_buf[0] = reg_rd;
3715 aarch64_insn_r->reg_rec_count = 1;
3716 }
3717 else if (insn_bits24_27 == 0x0a)
3718 {
3719 if (insn_bits21_23 == 0x00)
3720 {
3721 /* Add/subtract (with carry). */
3722 record_buf[0] = reg_rd;
3723 aarch64_insn_r->reg_rec_count = 1;
3724 if (bit (aarch64_insn_r->aarch64_insn, 29))
3725 {
3726 record_buf[1] = AARCH64_CPSR_REGNUM;
3727 aarch64_insn_r->reg_rec_count = 2;
3728 }
3729 }
3730 else if (insn_bits21_23 == 0x02)
3731 {
3732 /* Conditional compare (register) and conditional compare
3733 (immediate) instructions. */
3734 record_buf[0] = AARCH64_CPSR_REGNUM;
3735 aarch64_insn_r->reg_rec_count = 1;
3736 }
3737 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3738 {
3739 /* Conditional select. */
3740 /* Data-processing (2 source). */
3741 /* Data-processing (1 source). */
3742 record_buf[0] = reg_rd;
3743 aarch64_insn_r->reg_rec_count = 1;
3744 }
3745 else
3746 return AARCH64_RECORD_UNKNOWN;
3747 }
3748 }
3749
3750 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3751 record_buf);
3752 return AARCH64_RECORD_SUCCESS;
3753 }
3754
3755 /* Record handler for data processing - immediate instructions. */
3756
3757 static unsigned int
3758 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3759 {
3760 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3761 uint32_t record_buf[4];
3762
3763 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3764 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3765 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3766
3767 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3768 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3769 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3770 {
3771 record_buf[0] = reg_rd;
3772 aarch64_insn_r->reg_rec_count = 1;
3773 }
3774 else if (insn_bits24_27 == 0x01)
3775 {
3776 /* Add/Subtract (immediate). */
3777 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3778 record_buf[0] = reg_rd;
3779 aarch64_insn_r->reg_rec_count = 1;
3780 if (setflags)
3781 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3782 }
3783 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3784 {
3785 /* Logical (immediate). */
3786 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3787 record_buf[0] = reg_rd;
3788 aarch64_insn_r->reg_rec_count = 1;
3789 if (setflags)
3790 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3791 }
3792 else
3793 return AARCH64_RECORD_UNKNOWN;
3794
3795 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3796 record_buf);
3797 return AARCH64_RECORD_SUCCESS;
3798 }
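/* Worked example for the handler above (the encoding is hand-assembled,
   so treat it as an assumption): ADDS X0, X1, #4 should be 0xb1001020.
   Bits 24-27 are 0x1, selecting the add/subtract (immediate) arm, and
   bit 29 (S) is set, so both X0 and the CPSR are recorded before the
   write happens.  */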
3799
3800 /* Record handler for branch, exception generation and system instructions. */
3801
3802 static unsigned int
3803 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3804 {
3805 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3806 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3807 uint32_t record_buf[4];
3808
3809 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3810 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3811 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3812
3813 if (insn_bits28_31 == 0x0d)
3814 {
3815 /* Exception generation instructions. */
3816 if (insn_bits24_27 == 0x04)
3817 {
3818 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3819 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3820 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3821 {
3822 ULONGEST svc_number;
3823
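/* The bits checked above select SVC; the AArch64 Linux ABI passes the
syscall number in X8 (raw regnum 8), which the OS-ABI supplied
aarch64_syscall_record hook then interprets. */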
3824 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3825 &svc_number);
3826 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3827 svc_number);
3828 }
3829 else
3830 return AARCH64_RECORD_UNSUPPORTED;
3831 }
3832 /* System instructions. */
3833 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3834 {
3835 uint32_t reg_rt, reg_crn;
3836
3837 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3838 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3839
3840 /* Record Rt for SYSL and MRS instructions. */
3841 if (bit (aarch64_insn_r->aarch64_insn, 21))
3842 {
3843 record_buf[0] = reg_rt;
3844 aarch64_insn_r->reg_rec_count = 1;
3845 }
3846 /* Record CPSR for HINT and MSR (immediate) instructions. */
3847 else if (reg_crn == 0x02 || reg_crn == 0x04)
3848 {
3849 record_buf[0] = AARCH64_CPSR_REGNUM;
3850 aarch64_insn_r->reg_rec_count = 1;
3851 }
3852 }
3853 /* Unconditional branch (register). */
3854 else if ((insn_bits24_27 & 0x0e) == 0x06)
3855 {
3856 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3857 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3858 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3859 }
3860 else
3861 return AARCH64_RECORD_UNKNOWN;
3862 }
3863 /* Unconditional branch (immediate). */
3864 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3865 {
3866 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3867 if (bit (aarch64_insn_r->aarch64_insn, 31))
3868 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3869 }
3870 else
3871 /* Compare & branch (immediate), Test & branch (immediate) and
3872 Conditional branch (immediate). */
3873 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3874
3875 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3876 record_buf);
3877 return AARCH64_RECORD_SUCCESS;
3878 }
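/* Recap of the branch cases above: a plain B records only the PC; BL
   (bit 31 set in the immediate form) and BLR (bits 21-22 == 0x01 in
   the register form) additionally record the LR they are about to
   clobber.  */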
3879
3880 /* Record handler for advanced SIMD load and store instructions. */
3881
3882 static unsigned int
3883 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3884 {
3885 CORE_ADDR address;
3886 uint64_t addr_offset = 0;
3887 uint32_t record_buf[24];
3888 uint64_t record_buf_mem[24];
3889 uint32_t reg_rn, reg_rt;
3890 uint32_t reg_index = 0, mem_index = 0;
3891 uint8_t opcode_bits, size_bits;
3892
3893 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3894 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3895 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3896 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3897 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3898
3899 if (record_debug)
3900 debug_printf ("Process record: Advanced SIMD load/store\n");
3901
3902 /* Load/store single structure. */
3903 if (bit (aarch64_insn_r->aarch64_insn, 24))
3904 {
3905 uint8_t sindex, scale, selem, esize, replicate = 0;
3906 scale = opcode_bits >> 2;
3907 selem = ((opcode_bits & 0x02)
3908 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3909 switch (scale)
3910 {
3911 case 1:
3912 if (size_bits & 0x01)
3913 return AARCH64_RECORD_UNKNOWN;
3914 break;
3915 case 2:
3916 if ((size_bits >> 1) & 0x01)
3917 return AARCH64_RECORD_UNKNOWN;
3918 if (size_bits & 0x01)
3919 {
3920 if (!((opcode_bits >> 1) & 0x01))
3921 scale = 3;
3922 else
3923 return AARCH64_RECORD_UNKNOWN;
3924 }
3925 break;
3926 case 3:
3927 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3928 {
3929 scale = size_bits;
3930 replicate = 1;
3931 break;
3932 }
3933 else
3934 return AARCH64_RECORD_UNKNOWN;
3935 default:
3936 break;
3937 }
3938 esize = 8 << scale;
3939 if (replicate)
3940 for (sindex = 0; sindex < selem; sindex++)
3941 {
3942 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3943 reg_rt = (reg_rt + 1) % 32;
3944 }
3945 else
3946 {
3947 for (sindex = 0; sindex < selem; sindex++)
3948 {
3949 if (bit (aarch64_insn_r->aarch64_insn, 22))
3950 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3951 else
3952 {
3953 record_buf_mem[mem_index++] = esize / 8;
3954 record_buf_mem[mem_index++] = address + addr_offset;
3955 }
3956 addr_offset = addr_offset + (esize / 8);
3957 reg_rt = (reg_rt + 1) % 32;
3958 }
3959 }
3960 }
3961 /* Load/store multiple structure. */
3962 else
3963 {
3964 uint8_t selem, esize, rpt, elements;
3965 uint8_t eindex, rindex;
3966
3967 esize = 8 << size_bits;
3968 if (bit (aarch64_insn_r->aarch64_insn, 30))
3969 elements = 128 / esize;
3970 else
3971 elements = 64 / esize;
3972
3973 switch (opcode_bits)
3974 {
3975 /* LD/ST4 (4 registers). */
3976 case 0:
3977 rpt = 1;
3978 selem = 4;
3979 break;
3980 /* LD/ST1 (4 registers). */
3981 case 2:
3982 rpt = 4;
3983 selem = 1;
3984 break;
3985 /* LD/ST3 (3 registers). */
3986 case 4:
3987 rpt = 1;
3988 selem = 3;
3989 break;
3990 /* LD/ST1 (3 registers). */
3991 case 6:
3992 rpt = 3;
3993 selem = 1;
3994 break;
3995 /* LD/ST1 (1 register). */
3996 case 7:
3997 rpt = 1;
3998 selem = 1;
3999 break;
4000 /* LD/ST2 (2 registers). */
4001 case 8:
4002 rpt = 1;
4003 selem = 2;
4004 break;
4005 /* LD/ST1 (2 registers). */
4006 case 10:
4007 rpt = 2;
4008 selem = 1;
4009 break;
4010 default:
4011 return AARCH64_RECORD_UNSUPPORTED;
4012 break;
4013 }
4014 for (rindex = 0; rindex < rpt; rindex++)
4015 for (eindex = 0; eindex < elements; eindex++)
4016 {
4017 uint8_t reg_tt, sindex;
4018 reg_tt = (reg_rt + rindex) % 32;
4019 for (sindex = 0; sindex < selem; sindex++)
4020 {
4021 if (bit (aarch64_insn_r->aarch64_insn, 22))
4022 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4023 else
4024 {
4025 record_buf_mem[mem_index++] = esize / 8;
4026 record_buf_mem[mem_index++] = address + addr_offset;
4027 }
4028 addr_offset = addr_offset + (esize / 8);
4029 reg_tt = (reg_tt + 1) % 32;
4030 }
4031 }
4032 }
4033
4034 if (bit (aarch64_insn_r->aarch64_insn, 23))
4035 record_buf[reg_index++] = reg_rn;
4036
4037 aarch64_insn_r->reg_rec_count = reg_index;
4038 aarch64_insn_r->mem_rec_count = mem_index / 2;
4039 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4040 record_buf_mem);
4041 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4042 record_buf);
4043 return AARCH64_RECORD_SUCCESS;
4044 }
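/* Worked example for the multiple-structure path above, assuming the
   encoding fields are as the ARM ARM gives them: ST1 {v0.8b}, [x0]
   has opcode_bits 7 (rpt 1, selem 1), size_bits 0 (esize 8) and
   bit 30 clear (elements 8), so the loop emits eight one-byte memory
   records covering [x0, x0+8).  The load form (bit 22 set) would
   record V0 instead.  */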
4045
4046 /* Record handler for load and store instructions. */
4047
4048 static unsigned int
4049 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4050 {
4051 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4052 uint8_t insn_bit23, insn_bit21;
4053 uint8_t opc, size_bits, ld_flag, vector_flag;
4054 uint32_t reg_rn, reg_rt, reg_rt2;
4055 uint64_t datasize, offset;
4056 uint32_t record_buf[8];
4057 uint64_t record_buf_mem[8];
4058 CORE_ADDR address;
4059
4060 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4061 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4062 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4063 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4064 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4065 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4066 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4067 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4068 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4069 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4070 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4071
4072 /* Load/store exclusive. */
4073 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4074 {
4075 if (record_debug)
4076 debug_printf ("Process record: load/store exclusive\n");
4077
4078 if (ld_flag)
4079 {
4080 record_buf[0] = reg_rt;
4081 aarch64_insn_r->reg_rec_count = 1;
4082 if (insn_bit21)
4083 {
4084 record_buf[1] = reg_rt2;
4085 aarch64_insn_r->reg_rec_count = 2;
4086 }
4087 }
4088 else
4089 {
4090 if (insn_bit21)
4091 datasize = (8 << size_bits) * 2;
4092 else
4093 datasize = (8 << size_bits);
4094 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4095 &address);
4096 record_buf_mem[0] = datasize / 8;
4097 record_buf_mem[1] = address;
4098 aarch64_insn_r->mem_rec_count = 1;
4099 if (!insn_bit23)
4100 {
4101 /* Save register rs. */
4102 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4103 aarch64_insn_r->reg_rec_count = 1;
4104 }
4105 }
4106 }
4107 /* Decode load register (literal) instructions. */
4108 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4109 {
4110 if (record_debug)
4111 debug_printf ("Process record: load register (literal)\n");
4112 if (vector_flag)
4113 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4114 else
4115 record_buf[0] = reg_rt;
4116 aarch64_insn_r->reg_rec_count = 1;
4117 }
4118 /* Decode all types of load/store pair instructions. */
4119 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4120 {
4121 if (record_debug)
4122 debug_printf ("Process record: load/store pair\n");
4123
4124 if (ld_flag)
4125 {
4126 if (vector_flag)
4127 {
4128 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4129 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4130 }
4131 else
4132 {
4133 record_buf[0] = reg_rt;
4134 record_buf[1] = reg_rt2;
4135 }
4136 aarch64_insn_r->reg_rec_count = 2;
4137 }
4138 else
4139 {
4140 uint16_t imm7_off;
4141 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4142 if (!vector_flag)
4143 size_bits = size_bits >> 1;
4144 datasize = 8 << (2 + size_bits);
4145 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4146 offset = offset << (2 + size_bits);
4147 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4148 &address);
4149 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4150 {
4151 if (imm7_off & 0x40)
4152 address = address - offset;
4153 else
4154 address = address + offset;
4155 }
4156
4157 record_buf_mem[0] = datasize / 8;
4158 record_buf_mem[1] = address;
4159 record_buf_mem[2] = datasize / 8;
4160 record_buf_mem[3] = address + (datasize / 8);
4161 aarch64_insn_r->mem_rec_count = 2;
4162 }
4163 if (bit (aarch64_insn_r->aarch64_insn, 23))
4164 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4165 }
4166 /* Load/store register (unsigned immediate) instructions. */
4167 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4168 {
4169 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4170 if (!(opc >> 1))
4171 {
4172 if (opc & 0x01)
4173 ld_flag = 0x01;
4174 else
4175 ld_flag = 0x0;
4176 }
4177 else
4178 {
4179 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4180 {
4181 /* PRFM (immediate) */
4182 return AARCH64_RECORD_SUCCESS;
4183 }
4184 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4185 {
4186 /* LDRSW (immediate) */
4187 ld_flag = 0x1;
4188 }
4189 else
4190 {
4191 if (opc & 0x01)
4192 ld_flag = 0x01;
4193 else
4194 ld_flag = 0x0;
4195 }
4196 }
4197
4198 if (record_debug)
4199 {
4200 debug_printf ("Process record: load/store (unsigned immediate):"
4201 " size %x V %d opc %x\n", size_bits, vector_flag,
4202 opc);
4203 }
4204
4205 if (!ld_flag)
4206 {
4207 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4208 datasize = 8 << size_bits;
4209 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4210 &address);
4211 offset = offset << size_bits;
4212 address = address + offset;
4213
4214 record_buf_mem[0] = datasize >> 3;
4215 record_buf_mem[1] = address;
4216 aarch64_insn_r->mem_rec_count = 1;
4217 }
4218 else
4219 {
4220 if (vector_flag)
4221 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4222 else
4223 record_buf[0] = reg_rt;
4224 aarch64_insn_r->reg_rec_count = 1;
4225 }
4226 }
4227 /* Load/store register (register offset) instructions. */
4228 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4229 && insn_bits10_11 == 0x02 && insn_bit21)
4230 {
4231 if (record_debug)
4232 debug_printf ("Process record: load/store (register offset)\n");
4233 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4234 if (!(opc >> 1))
4235 if (opc & 0x01)
4236 ld_flag = 0x01;
4237 else
4238 ld_flag = 0x0;
4239 else
4240 if (size_bits != 0x03)
4241 ld_flag = 0x01;
4242 else
4243 return AARCH64_RECORD_UNKNOWN;
4244
4245 if (!ld_flag)
4246 {
4247 ULONGEST reg_rm_val;
4248
4249 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4250 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4251 if (bit (aarch64_insn_r->aarch64_insn, 12))
4252 offset = reg_rm_val << size_bits;
4253 else
4254 offset = reg_rm_val;
4255 datasize = 8 << size_bits;
4256 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4257 &address);
4258 address = address + offset;
4259 record_buf_mem[0] = datasize >> 3;
4260 record_buf_mem[1] = address;
4261 aarch64_insn_r->mem_rec_count = 1;
4262 }
4263 else
4264 {
4265 if (vector_flag)
4266 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4267 else
4268 record_buf[0] = reg_rt;
4269 aarch64_insn_r->reg_rec_count = 1;
4270 }
4271 }
4272 /* Load/store register (immediate and unprivileged) instructions. */
4273 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4274 && !insn_bit21)
4275 {
4276 if (record_debug)
4277 {
4278 debug_printf ("Process record: load/store "
4279 "(immediate and unprivileged)\n");
4280 }
4281 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4282 if (!(opc >> 1))
4283 if (opc & 0x01)
4284 ld_flag = 0x01;
4285 else
4286 ld_flag = 0x0;
4287 else
4288 if (size_bits != 0x03)
4289 ld_flag = 0x01;
4290 else
4291 return AARCH64_RECORD_UNKNOWN;
4292
4293 if (!ld_flag)
4294 {
4295 uint16_t imm9_off;
4296 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4297 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4298 datasize = 8 << size_bits;
4299 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4300 &address);
4301 if (insn_bits10_11 != 0x01)
4302 {
4303 if (imm9_off & 0x0100)
4304 address = address - offset;
4305 else
4306 address = address + offset;
4307 }
4308 record_buf_mem[0] = datasize >> 3;
4309 record_buf_mem[1] = address;
4310 aarch64_insn_r->mem_rec_count = 1;
4311 }
4312 else
4313 {
4314 if (vector_flag)
4315 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4316 else
4317 record_buf[0] = reg_rt;
4318 aarch64_insn_r->reg_rec_count = 1;
4319 }
4320 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4321 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4322 }
4323 /* Advanced SIMD load/store instructions. */
4324 else
4325 return aarch64_record_asimd_load_store (aarch64_insn_r);
4326
4327 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4328 record_buf_mem);
4329 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4330 record_buf);
4331 return AARCH64_RECORD_SUCCESS;
4332 }
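/* Worked example for the unsigned-immediate path above: STR X1,
   [X2, #16] has size_bits 3 (datasize 64) and a scaled imm12 of 2, so
   offset becomes 2 << 3 = 16 and a single 8-byte memory record at
   X2 + 16 is produced; the corresponding LDR would record X1 instead.
   (The encoding fields are quoted from the ARM ARM, so treat the exact
   numbers as an assumption.)  */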
4333
4334 /* Record handler for data processing SIMD and floating point instructions. */
4335
4336 static unsigned int
4337 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4338 {
4339 uint8_t insn_bit21, opcode, rmode, reg_rd;
4340 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4341 uint8_t insn_bits11_14;
4342 uint32_t record_buf[2];
4343
4344 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4345 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4346 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4347 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4348 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4349 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4350 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4351 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4352 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4353
4354 if (record_debug)
4355 debug_printf ("Process record: data processing SIMD/FP: ");
4356
4357 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4358 {
4359 /* Floating point - fixed point conversion instructions. */
4360 if (!insn_bit21)
4361 {
4362 if (record_debug)
4363 debug_printf ("FP - fixed point conversion");
4364
4365 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4366 record_buf[0] = reg_rd;
4367 else
4368 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4369 }
4370 /* Floating point - conditional compare instructions. */
4371 else if (insn_bits10_11 == 0x01)
4372 {
4373 if (record_debug)
4374 debug_printf ("FP - conditional compare");
4375
4376 record_buf[0] = AARCH64_CPSR_REGNUM;
4377 }
4378 /* Floating point - data processing (2-source) and
4379 conditional select instructions. */
4380 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4381 {
4382 if (record_debug)
4383 debug_printf ("FP - DP (2-source)");
4384
4385 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4386 }
4387 else if (insn_bits10_11 == 0x00)
4388 {
4389 /* Floating point - immediate instructions. */
4390 if ((insn_bits12_15 & 0x01) == 0x01
4391 || (insn_bits12_15 & 0x07) == 0x04)
4392 {
4393 if (record_debug)
4394 debug_printf ("FP - immediate");
4395 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4396 }
4397 /* Floating point - compare instructions. */
4398 else if ((insn_bits12_15 & 0x03) == 0x02)
4399 {
4400 if (record_debug)
4401 debug_printf ("FP - compare");
4402 record_buf[0] = AARCH64_CPSR_REGNUM;
4403 }
4404 /* Floating point - integer conversion instructions. */
4405 else if (insn_bits12_15 == 0x00)
4406 {
4407 /* Convert float to integer instruction. */
4408 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4409 {
4410 if (record_debug)
4411 debug_printf ("float to int conversion");
4412
4413 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4414 }
4415 /* Convert integer to float instruction. */
4416 else if ((opcode >> 1) == 0x01 && !rmode)
4417 {
4418 if (record_debug)
4419 debug_printf ("int to float conversion");
4420
4421 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4422 }
4423 /* Move float to integer instruction. */
4424 else if ((opcode >> 1) == 0x03)
4425 {
4426 if (record_debug)
4427 debug_printf ("move float to int");
4428
4429 if (!(opcode & 0x01))
4430 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4431 else
4432 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4433 }
4434 else
4435 return AARCH64_RECORD_UNKNOWN;
4436 }
4437 else
4438 return AARCH64_RECORD_UNKNOWN;
4439 }
4440 else
4441 return AARCH64_RECORD_UNKNOWN;
4442 }
4443 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4444 {
4445 if (record_debug)
4446 debug_printf ("SIMD copy");
4447
4448 /* Advanced SIMD copy instructions. */
4449 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4450 && !bit (aarch64_insn_r->aarch64_insn, 15)
4451 && bit (aarch64_insn_r->aarch64_insn, 10))
4452 {
4453 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4454 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4455 else
4456 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4457 }
4458 else
4459 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4460 }
4461 /* All remaining floating point or advanced SIMD instructions. */
4462 else
4463 {
4464 if (record_debug)
4465 debug_printf ("all remaining");
4466
4467 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4468 }
4469
4470 if (record_debug)
4471 debug_printf ("\n");
4472
4473 /* Record the V/X register. */
4474 aarch64_insn_r->reg_rec_count++;
4475
4476 /* Some of these instructions may set bits in the FPSR, so record it
4477 too. */
4478 record_buf[1] = AARCH64_FPSR_REGNUM;
4479 aarch64_insn_r->reg_rec_count++;
4480
4481 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4482 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4483 record_buf);
4484 return AARCH64_RECORD_SUCCESS;
4485 }
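/* Note: every successful path above records exactly two registers
   (see the gdb_assert): the destination V or X register and the FPSR.
   For example FADD D0, D1, D2 takes the DP (2-source) arm and records
   V0 plus the FPSR.  */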
4486
4487 /* Decode the instruction type and invoke the matching record handler. */
4488
4489 static unsigned int
4490 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4491 {
4492 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4493
4494 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4495 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4496 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4497 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4498
4499 /* Data processing - immediate instructions. */
4500 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4501 return aarch64_record_data_proc_imm (aarch64_insn_r);
4502
4503 /* Branch, exception generation and system instructions. */
4504 if (ins_bit26 && !ins_bit27 && ins_bit28)
4505 return aarch64_record_branch_except_sys (aarch64_insn_r);
4506
4507 /* Load and store instructions. */
4508 if (!ins_bit25 && ins_bit27)
4509 return aarch64_record_load_store (aarch64_insn_r);
4510
4511 /* Data processing - register instructions. */
4512 if (ins_bit25 && !ins_bit26 && ins_bit27)
4513 return aarch64_record_data_proc_reg (aarch64_insn_r);
4514
4515 /* Data processing - SIMD and floating point instructions. */
4516 if (ins_bit25 && ins_bit26 && ins_bit27)
4517 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4518
4519 return AARCH64_RECORD_UNSUPPORTED;
4520 }
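/* Summary of the dispatch above, keyed on insn bits 28-25
   (x = don't care):

     100x  data processing - immediate
     101x  branch, exception generation and system
     x1x0  load and store
     x101  data processing - register
     x111  data processing - SIMD and floating point

   This matches the top-level encoding table in the ARM ARM.  */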
4521
4522 /* Free the buffers allocated for the register and memory records. */
4523
4524 static void
4525 deallocate_reg_mem (insn_decode_record *record)
4526 {
4527 xfree (record->aarch64_regs);
4528 xfree (record->aarch64_mems);
4529 }
4530
4531 #if GDB_SELF_TEST
4532 namespace selftests {
4533
4534 static void
4535 aarch64_process_record_test (void)
4536 {
4537 struct gdbarch_info info;
4538 uint32_t ret;
4539
4540 gdbarch_info_init (&info);
4541 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4542
4543 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4544 SELF_CHECK (gdbarch != NULL);
4545
4546 insn_decode_record aarch64_record;
4547
4548 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4549 aarch64_record.regcache = NULL;
4550 aarch64_record.this_addr = 0;
4551 aarch64_record.gdbarch = gdbarch;
4552
4553 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4554 aarch64_record.aarch64_insn = 0xf9800020;
4555 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4556 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4557 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4558 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4559
4560 deallocate_reg_mem (&aarch64_record);
4561 }
4562
4563 } // namespace selftests
4564 #endif /* GDB_SELF_TEST */
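/* Usage note: the test above runs under

     (gdb) maintenance selftest aarch64-process-record

   and checks that a PRFM, which changes neither registers nor memory,
   yields empty record lists.  */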
4565
4566 /* Parse the current instruction, and add the registers and memory
4567 locations it will change to the record_arch_list.
4568 Return -1 if something goes wrong. */
4569
4570 int
4571 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4572 CORE_ADDR insn_addr)
4573 {
4574 uint32_t rec_no = 0;
4575 uint8_t insn_size = 4;
4576 uint32_t ret = 0;
4577 gdb_byte buf[insn_size];
4578 insn_decode_record aarch64_record;
4579
4580 memset (&buf[0], 0, insn_size);
4581 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4582 target_read_memory (insn_addr, &buf[0], insn_size);
4583 aarch64_record.aarch64_insn
4584 = (uint32_t) extract_unsigned_integer (&buf[0],
4585 insn_size,
4586 gdbarch_byte_order (gdbarch));
4587 aarch64_record.regcache = regcache;
4588 aarch64_record.this_addr = insn_addr;
4589 aarch64_record.gdbarch = gdbarch;
4590
4591 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4592 if (ret == AARCH64_RECORD_UNSUPPORTED)
4593 {
4594 printf_unfiltered (_("Process record does not support instruction "
4595 "0x%0x at address %s.\n"),
4596 aarch64_record.aarch64_insn,
4597 paddress (gdbarch, insn_addr));
4598 ret = -1;
4599 }
4600
4601 if (0 == ret)
4602 {
4603 /* Record registers. */
4604 record_full_arch_list_add_reg (aarch64_record.regcache,
4605 AARCH64_PC_REGNUM);
4606 /* Always record register CPSR. */
4607 record_full_arch_list_add_reg (aarch64_record.regcache,
4608 AARCH64_CPSR_REGNUM);
4609 if (aarch64_record.aarch64_regs)
4610 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4611 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4612 aarch64_record.aarch64_regs[rec_no]))
4613 ret = -1;
4614
4615 /* Record memories. */
4616 if (aarch64_record.aarch64_mems)
4617 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4618 if (record_full_arch_list_add_mem
4619 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4620 aarch64_record.aarch64_mems[rec_no].len))
4621 ret = -1;
4622
4623 if (record_full_arch_list_add_end ())
4624 ret = -1;
4625 }
4626
4627 deallocate_reg_mem (&aarch64_record);
4628 return ret;
4629 }
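/* Usage note: this entry point is driven by the record-full target,
   e.g.

     (gdb) record full
     (gdb) stepi
     (gdb) reverse-stepi

   Each executed instruction passes through aarch64_process_record so
   that its register and memory effects can be undone later.  */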