[AArch64] Teach prologue unwinder to terminate gracefully
[binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "doublest.h"
31 #include "value.h"
32 #include "arch-utils.h"
33 #include "osabi.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
37 #include "objfiles.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45
46 #include "aarch64-tdep.h"
47
48 #include "elf-bfd.h"
49 #include "elf/aarch64.h"
50
51 #include "vec.h"
52
53 #include "record.h"
54 #include "record-full.h"
55
56 #include "features/aarch64.c"
57
/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
/* Each of the D, S, H and B pseudo register banks overlays the bank
   before it; every bank holds 32 registers, so each base number is 32
   past the previous one.  */
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
64
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string onto the raw register number it
   refers to.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  Each wN alias maps onto the raw 64-bit
     xN register of the same index.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials: the intra-procedure-call scratch registers alias x16
     and x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
114
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
130
/* The FP/SIMD 'V' registers, followed by the two floating-point
   status/control registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
147
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consumed by the prologue/stub unwinders below.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  Zero until the cache has
     been successfully populated.  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame, or -1
     when no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
179
/* Toggle this file's internal debugging dump.  */
static int aarch64_debug;

/* Print the current state of the AArch64 debugging flag (VALUE) to
   FILE.  Callback used by the debug setting's "show" command.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
189
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  /* Perform the left shift in the unsigned domain: left-shifting a
     negative signed value is undefined behavior in C.  The conversion
     to int32_t followed by an arithmetic right shift then
     sign-extends the extracted field.  */
  return ((int32_t) (insn << shift_l)) >> shift_r;
}
208
/* Test whether the bits of INSN selected by MASK are equal to
   PATTERN.

   INSN is the instruction opcode.

   MASK selects the bits of the opcode that are compared against
   PATTERN.

   Return non-zero on a match, zero otherwise.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t selected_bits = insn & mask;

  return selected_bits == pattern;
}
222
/* Decode an opcode if it represents an immediate ADD or SUB instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the immediate operand, pre-shifted when the encoding
   uses an LSL #12 shift, and negated for a SUB so that callers can
   always treat the operation as RD = RN + IMM.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */
static int
decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
		    int32_t *imm)
{
  /* Match the 64-bit add/sub (immediate) class: bit 31 (sf) = 1 and
     bits 28..24 = 10001.  Bit 30 (op: add vs sub) and bit 29 (S) are
     deliberately left out of the mask.  */
  if ((insn & 0x9f000000) == 0x91000000)
    {
      unsigned shift;
      unsigned op_is_sub;

      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = (insn >> 10) & 0xfff;
      shift = (insn >> 22) & 0x3;
      op_is_sub = (insn >> 30) & 0x1;

      switch (shift)
	{
	case 0:
	  break;
	case 1:
	  /* Shifted variant: immediate is scaled by LSL #12.  */
	  *imm <<= 12;
	  break;
	default:
	  /* UNDEFINED */
	  return 0;
	}

      if (op_is_sub)
	/* Canonicalize SUB into an ADD of a negated immediate.  */
	*imm = -*imm;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
			    core_addr_to_string_nz (addr), insn, *rd, *rn,
			    *imm);
      return 1;
    }
  return 0;
}
270
/* Decode an opcode if it represents an ADRP instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.

   The immediate operand is not decoded; callers only need to know
   which register is clobbered (hence the "#?" in the debug dump).

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
{
  if (decode_masked_match (insn, 0x9f000000, 0x90000000))
    {
      *rd = (insn >> 0) & 0x1f;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x adrp x%u, #?\n",
			    core_addr_to_string_nz (addr), insn, *rd);
      return 1;
    }
  return 0;
}
294
/* Decode an opcode if it represents a branch immediate or branch
   and link immediate instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   LINK receives the 'link' bit from the decoded instruction.
   OFFSET receives the immediate offset from the decoded instruction
   (sign-extended and scaled to a byte offset).

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
{
  /* b  0001 01ii iiii iiii iiii iiii iiii iiii */
  /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
  if (decode_masked_match (insn, 0x7c000000, 0x14000000))
    {
      /* Bit 31 distinguishes BL (1) from B (0).  */
      *link = insn >> 31;
      /* imm26 is in units of 4-byte instructions.  */
      *offset = extract_signed_bitfield (insn, 26, 0) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s 0x%s\n",
			    core_addr_to_string_nz (addr), insn,
			    *link ? "bl" : "b",
			    core_addr_to_string_nz (addr + *offset));

      return 1;
    }
  return 0;
}
326
/* Decode an opcode if it represents a conditional branch instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   COND receives the branch condition field from the decoded
   instruction.
   OFFSET receives the immediate offset from the decoded instruction
   (sign-extended and scaled to a byte offset).

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
{
  /* b.cond 0101 0100 iiii iiii iiii iiii iii0 cccc (top byte tested
     by the mask; cond in bits 3..0, imm19 in bits 23..5).  */
  if (decode_masked_match (insn, 0xfe000000, 0x54000000))
    {
      *cond = (insn >> 0) & 0xf;
      *offset = extract_signed_bitfield (insn, 19, 5) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x b<%u> 0x%s\n",
			    core_addr_to_string_nz (addr), insn, *cond,
			    core_addr_to_string_nz (addr + *offset));
      return 1;
    }
  return 0;
}
354
/* Decode an opcode if it represents a branch via register instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   LINK receives the 'link' bit from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
{
  /*         8   4   0   6   2   8   4   0 */
  /* blr  110101100011111100000000000rrrrr */
  /* br   110101100001111100000000000rrrrr */
  if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
    {
      /* Bit 21 (left free by the mask) distinguishes BLR from BR.  */
      *link = (insn >> 21) & 1;
      *rn = (insn >> 5) & 0x1f;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s 0x%x\n",
			    core_addr_to_string_nz (addr), insn,
			    *link ? "blr" : "br", *rn);

      return 1;
    }
  return 0;
}
385
/* Decode an opcode if it represents a CBZ or CBNZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives the 'sf' field from the decoded instruction (1 when
   the tested register is 64-bit).
   OP receives the 'op' field from the decoded instruction (1 for
   CBNZ, 0 for CBZ).
   RN receives the 'rn' field from the decoded instruction.
   OFFSET receives the 'imm19' field from the decoded instruction,
   sign-extended and scaled to a byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_cb (CORE_ADDR addr,
	   uint32_t insn, int *is64, unsigned *op, unsigned *rn,
	   int32_t *offset)
{
  if (decode_masked_match (insn, 0x7e000000, 0x34000000))
    {
      /* cbz  T011 010o iiii iiii iiii iiii iiir rrrr */
      /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */

      *rn = (insn >> 0) & 0x1f;
      *is64 = (insn >> 31) & 0x1;
      *op = (insn >> 24) & 0x1;
      *offset = extract_signed_bitfield (insn, 19, 5) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s 0x%s\n",
			    core_addr_to_string_nz (addr), insn,
			    *op ? "cbnz" : "cbz",
			    core_addr_to_string_nz (addr + *offset));
      return 1;
    }
  return 0;
}
422
423 /* Decode an opcode if it represents a ERET instruction.
424
425 ADDR specifies the address of the opcode.
426 INSN specifies the opcode to test.
427
428 Return 1 if the opcodes matches and is decoded, otherwise 0. */
429
430 static int
431 decode_eret (CORE_ADDR addr, uint32_t insn)
432 {
433 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
434 if (insn == 0xd69f03e0)
435 {
436 if (aarch64_debug)
437 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
438 core_addr_to_string_nz (addr), insn);
439 return 1;
440 }
441 return 0;
442 }
443
/* Decode an opcode if it represents a MOVZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.

   The immediate operand is not decoded; callers only need to know
   which register is clobbered (hence the "#?" in the debug dump).

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
{
  if (decode_masked_match (insn, 0xff800000, 0x52800000))
    {
      *rd = (insn >> 0) & 0x1f;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x movz x%u, #?\n",
			    core_addr_to_string_nz (addr), insn, *rd);
      return 1;
    }
  return 0;
}
467
468 /* Decode an opcode if it represents a ORR (shifted register)
469 instruction.
470
471 ADDR specifies the address of the opcode.
472 INSN specifies the opcode to test.
473 RD receives the 'rd' field from the decoded instruction.
474 RN receives the 'rn' field from the decoded instruction.
475 RM receives the 'rm' field from the decoded instruction.
476 IMM receives the 'imm6' field from the decoded instruction.
477
478 Return 1 if the opcodes matches and is decoded, otherwise 0. */
479
480 static int
481 decode_orr_shifted_register_x (CORE_ADDR addr,
482 uint32_t insn, unsigned *rd, unsigned *rn,
483 unsigned *rm, int32_t *imm)
484 {
485 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
486 {
487 *rd = (insn >> 0) & 0x1f;
488 *rn = (insn >> 5) & 0x1f;
489 *rm = (insn >> 16) & 0x1f;
490 *imm = (insn >> 10) & 0x3f;
491
492 if (aarch64_debug)
493 fprintf_unfiltered (gdb_stdlog,
494 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
495 core_addr_to_string_nz (addr), insn, *rd,
496 *rn, *rm, *imm);
497 return 1;
498 }
499 return 0;
500 }
501
/* Decode an opcode if it represents a RET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RN receives the 'rn' field from the decoded instruction (the
   register holding the return address).

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
{
  if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
    {
      *rn = (insn >> 5) & 0x1f;
      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x ret x%u\n",
			    core_addr_to_string_nz (addr), insn, *rn);
      return 1;
    }
  return 0;
}
524
/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction, scaled
   from the encoded doubleword count to a byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_stp_offset (CORE_ADDR addr,
		   uint32_t insn,
		   unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      /* imm7 counts 8-byte units.  */
      *imm <<= 3;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
			    core_addr_to_string_nz (addr), insn,
			    *rt1, *rt2, *rn, *imm);
      return 1;
    }
  return 0;
}
559
/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]!

   This is the pre-index (writeback) form; the decoder only reports
   the fields, the caller is responsible for modelling the update of
   RN.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction, scaled
   from the encoded doubleword count to a byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_stp_offset_wb (CORE_ADDR addr,
		      uint32_t insn,
		      unsigned *rt1, unsigned *rt2, unsigned *rn,
		      int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      /* imm7 counts 8-byte units.  */
      *imm <<= 3;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
			    core_addr_to_string_nz (addr), insn,
			    *rt1, *rt2, *rn, *imm);
      return 1;
    }
  return 0;
}
595
/* Decode an opcode if it represents the following instruction:
   STUR rt, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives size field from the decoded instruction (1 for a
   64-bit store, 0 for 32-bit).
   RT receives the 'rt' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction
   (sign-extended, unscaled).

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
	     unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
    {
      *is64 = (insn >> 30) & 1;
      *rt = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = extract_signed_bitfield (insn, 9, 12);

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
			    core_addr_to_string_nz (addr), insn,
			    *is64 ? 'x' : 'w', *rt, *rn, *imm);
      return 1;
    }
  return 0;
}
628
/* Decode an opcode if it represents a TBZ or TBNZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   OP receives the 'op' field from the decoded instruction; note it is
   left as the raw bit-24 mask (non-zero for TBNZ, zero for TBZ), not
   normalized to 0/1.
   BIT receives the bit position field from the decoded instruction
   (b5:b40 combined into a 6-bit position).
   RT receives 'rt' field from the decoded instruction.
   IMM receives 'imm' field from the decoded instruction,
   sign-extended and scaled to a byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_tb (CORE_ADDR addr,
	   uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
	   int32_t *imm)
{
  if (decode_masked_match (insn, 0x7e000000, 0x36000000))
    {
      /* tbz  b011 0110 bbbb biii iiii iiii iiir rrrr */
      /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */

      *rt = (insn >> 0) & 0x1f;
      *op = insn & (1 << 24);
      /* The tested bit number is split: bit 31 supplies the high bit
	 (bit 5 of the position), bits 23..19 the low five bits.  */
      *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
      *imm = extract_signed_bitfield (insn, 14, 5) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
			    core_addr_to_string_nz (addr), insn,
			    *op ? "tbnz" : "tbz", *rt, *bit,
			    core_addr_to_string_nz (addr + *imm));
      return 1;
    }
  return 0;
}
665
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   GDBARCH is the architecture being analyzed.
   START and LIMIT bound the instruction range to scan.
   CACHE, when non-NULL, receives the discovered frame register,
   frame size, and saved-register offsets.

   Returns the address of the first instruction past the recognized
   prologue sequence.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  /* Model each X register symbolically as "its own value on entry";
     the scan below updates this model instruction by instruction.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  /* Walk forward one 4-byte opcode at a time.  */
  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;
      int32_t imm;
      unsigned cond;
      int is64;
      unsigned is_link;
      unsigned op;
      unsigned bit;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
	regs[rd] = pv_add_constant (regs[rn], imm);
      else if (decode_adrp (start, insn, &rd))
	/* ADRP loads a PC-relative page address; treat RD as unknown.  */
	regs[rd] = pv_unknown ();
      else if (decode_b (start, insn, &is_link, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_bcond (start, insn, &cond, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_br (start, insn, &is_link, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_eret (start, insn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_movz (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else
	if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
	{
	  /* ORR RD, XZR, RM with no shift is a plain register move;
	     anything else ends the analysis.  */
	  if (imm == 0 && rn == 31)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		fprintf_unfiltered
		  (gdb_stdlog,
		   "aarch64: prologue analysis gave up addr=0x%s "
		   "opcode=0x%x (orr x register)\n",
		   core_addr_to_string_nz (start),
		   insn);
	      break;
	    }
	}
      else if (decode_ret (start, insn, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
	{
	  pv_area_store (stack, pv_add_constant (regs[rn], offset),
			 is64 ? 8 : 4, regs[rt]);
	}
      else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	}
      else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	  /* Model the pre-index writeback of the base register.  */
	  regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"aarch64: prologue analysis gave up addr=0x%s"
				" opcode=0x%x\n",
				core_addr_to_string_nz (start), insn);
	  break;
	}
    }

  /* With no CACHE the caller only wanted the end-of-prologue address
     (e.g. aarch64_skip_prologue).  */
  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record where each register was saved, as an offset from the
     entry stack pointer; the caller rebases these later.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
853
854 /* Implement the "skip_prologue" gdbarch method. */
855
856 static CORE_ADDR
857 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
858 {
859 unsigned long inst;
860 CORE_ADDR skip_pc;
861 CORE_ADDR func_addr, limit_pc;
862 struct symtab_and_line sal;
863
864 /* See if we can determine the end of the prologue via the symbol
865 table. If so, then return either PC, or the PC after the
866 prologue, whichever is greater. */
867 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
868 {
869 CORE_ADDR post_prologue_pc
870 = skip_prologue_using_sal (gdbarch, func_addr);
871
872 if (post_prologue_pc != 0)
873 return max (pc, post_prologue_pc);
874 }
875
876 /* Can't determine prologue from the symbol table, need to examine
877 instructions. */
878
879 /* Find an upper limit on the function prologue using the debug
880 information. If the debug information could not be used to
881 provide that bound, then use an arbitrary large number as the
882 upper bound. */
883 limit_pc = skip_prologue_using_sal (gdbarch, pc);
884 if (limit_pc == 0)
885 limit_pc = pc + 128; /* Magic. */
886
887 /* Try disassembling prologue. */
888 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
889 }
890
891 /* Scan the function prologue for THIS_FRAME and populate the prologue
892 cache CACHE. */
893
894 static void
895 aarch64_scan_prologue (struct frame_info *this_frame,
896 struct aarch64_prologue_cache *cache)
897 {
898 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
899 CORE_ADDR prologue_start;
900 CORE_ADDR prologue_end;
901 CORE_ADDR prev_pc = get_frame_pc (this_frame);
902 struct gdbarch *gdbarch = get_frame_arch (this_frame);
903
904 cache->prev_pc = prev_pc;
905
906 /* Assume we do not find a frame. */
907 cache->framereg = -1;
908 cache->framesize = 0;
909
910 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
911 &prologue_end))
912 {
913 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
914
915 if (sal.line == 0)
916 {
917 /* No line info so use the current PC. */
918 prologue_end = prev_pc;
919 }
920 else if (sal.end < prologue_end)
921 {
922 /* The next line begins after the function end. */
923 prologue_end = sal.end;
924 }
925
926 prologue_end = min (prologue_end, prev_pc);
927 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
928 }
929 else
930 {
931 CORE_ADDR frame_loc;
932 LONGEST saved_fp;
933 LONGEST saved_lr;
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
935
936 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
937 if (frame_loc == 0)
938 return;
939
940 cache->framereg = AARCH64_FP_REGNUM;
941 cache->framesize = 16;
942 cache->saved_regs[29].addr = 0;
943 cache->saved_regs[30].addr = 8;
944 }
945 }
946
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.

   CACHE->available_p is only set once every read has succeeded, so an
   early return (or a thrown NOT_AVAILABLE_ERROR) leaves the cache
   marked unavailable.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* No identifiable frame; leave the cache unavailable.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
979
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow "not available" errors: the cache stays marked
	 unavailable and the unwinder reports UNWIND_UNAVAILABLE.
	 Any other error is re-thrown.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1010
/* Implement the "stop_reason" frame_unwind method.

   Terminate the backtrace gracefully instead of erroring out when the
   target is unavailable, when we reach the architecture's lowest PC,
   or when no previous stack pointer could be reconstructed.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
1033
1034 /* Our frame ID for a normal frame is the current function's starting
1035 PC and the caller's SP when we were called. */
1036
1037 static void
1038 aarch64_prologue_this_id (struct frame_info *this_frame,
1039 void **this_cache, struct frame_id *this_id)
1040 {
1041 struct aarch64_prologue_cache *cache
1042 = aarch64_make_prologue_cache (this_frame, this_cache);
1043
1044 if (!cache->available_p)
1045 *this_id = frame_id_build_unavailable_stack (cache->func);
1046 else
1047 *this_id = frame_id_build (cache->prev_sp, cache->func);
1048 }
1049
/* Implement the "prev_register" frame_unwind method.

   Return the value that register PREV_REGNUM had in the frame that
   called THIS_FRAME.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else was recorded by the prologue analyzer; look it up
     in the saved-register table.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
1093
/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,					/* type */
  aarch64_prologue_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_prologue_this_id,			/* this_id */
  aarch64_prologue_prev_register,		/* prev_register */
  NULL,						/* unwind_data */
  default_frame_sniffer				/* sniffer */
};
1104
1105 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1106 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1107 Return a pointer to the current aarch64_prologue_cache in
1108 *THIS_CACHE. */
1109
1110 static struct aarch64_prologue_cache *
1111 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1112 {
1113 struct aarch64_prologue_cache *cache;
1114
1115 if (*this_cache != NULL)
1116 return *this_cache;
1117
1118 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1119 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1120 *this_cache = cache;
1121
1122 cache->prev_sp
1123 = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1124 cache->prev_pc = get_frame_pc (this_frame);
1125
1126 return cache;
1127 }
1128
1129 /* Our frame ID for a stub frame is the current SP and LR. */
1130
1131 static void
1132 aarch64_stub_this_id (struct frame_info *this_frame,
1133 void **this_cache, struct frame_id *this_id)
1134 {
1135 struct aarch64_prologue_cache *cache
1136 = aarch64_make_stub_cache (this_frame, this_cache);
1137
1138 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1139 }
1140
1141 /* Implement the "sniffer" frame_unwind method. */
1142
1143 static int
1144 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1145 struct frame_info *this_frame,
1146 void **this_prologue_cache)
1147 {
1148 CORE_ADDR addr_in_block;
1149 gdb_byte dummy[4];
1150
1151 addr_in_block = get_frame_address_in_block (this_frame);
1152 if (in_plt_section (addr_in_block)
1153 /* We also use the stub winder if the target memory is unreadable
1154 to avoid having the prologue unwinder trying to read it. */
1155 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1156 return 1;
1157
1158 return 0;
1159 }
1160
/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,					/* type */
  default_frame_unwind_stop_reason,		/* stop_reason */
  aarch64_stub_this_id,				/* this_id */
  aarch64_prologue_prev_register,		/* prev_register */
  NULL,						/* unwind_data */
  aarch64_stub_unwind_sniffer			/* sniffer */
};
1171
1172 /* Return the frame base address of *THIS_FRAME. */
1173
1174 static CORE_ADDR
1175 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1176 {
1177 struct aarch64_prologue_cache *cache
1178 = aarch64_make_prologue_cache (this_frame, this_cache);
1179
1180 return cache->prev_sp - cache->framesize;
1181 }
1182
/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,	/* unwind */
  aarch64_normal_frame_base,	/* this_base */
  aarch64_normal_frame_base,	/* this_locals */
  aarch64_normal_frame_base	/* this_args */
};
1191
1192 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1193 dummy frame. The frame ID's base needs to match the TOS value
1194 saved by save_dummy_frame_tos () and returned from
1195 aarch64_push_dummy_call, and the PC needs to match the dummy
1196 frame's breakpoint. */
1197
1198 static struct frame_id
1199 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1200 {
1201 return frame_id_build (get_frame_register_unsigned (this_frame,
1202 AARCH64_SP_REGNUM),
1203 get_frame_pc (this_frame));
1204 }
1205
1206 /* Implement the "unwind_pc" gdbarch method. */
1207
1208 static CORE_ADDR
1209 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1210 {
1211 CORE_ADDR pc
1212 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1213
1214 return pc;
1215 }
1216
1217 /* Implement the "unwind_sp" gdbarch method. */
1218
1219 static CORE_ADDR
1220 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1221 {
1222 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1223 }
1224
1225 /* Return the value of the REGNUM register in the previous frame of
1226 *THIS_FRAME. */
1227
1228 static struct value *
1229 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1230 void **this_cache, int regnum)
1231 {
1232 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1233 CORE_ADDR lr;
1234
1235 switch (regnum)
1236 {
1237 case AARCH64_PC_REGNUM:
1238 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1239 return frame_unwind_got_constant (this_frame, regnum, lr);
1240
1241 default:
1242 internal_error (__FILE__, __LINE__,
1243 _("Unexpected register %d"), regnum);
1244 }
1245 }
1246
1247 /* Implement the "init_reg" dwarf2_frame_ops method. */
1248
1249 static void
1250 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1251 struct dwarf2_frame_state_reg *reg,
1252 struct frame_info *this_frame)
1253 {
1254 switch (regnum)
1255 {
1256 case AARCH64_PC_REGNUM:
1257 reg->how = DWARF2_FRAME_REG_FN;
1258 reg->loc.fn = aarch64_dwarf2_prev_register;
1259 break;
1260 case AARCH64_SP_REGNUM:
1261 reg->how = DWARF2_FRAME_REG_CFA;
1262 break;
1263 }
1264 }
1265
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  Not owned by the item; the pointed-to
     bytes must stay live until the vector is drained (see
     aarch64_push_dummy_call).  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1279
/* Return the alignment (in bytes) of the given type T.  Scalars align
   to their own size, arrays and complex types to their element's
   alignment, and structs/unions to the largest alignment among their
   fields.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      /* Scalars are aligned to their size.  */
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      /* Aggregate of identical elements: the element's alignment.  */
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* The maximum alignment over all fields.  */
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
1325
/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);

	/* NOTE(review): TYPE_LENGTH (ty) is the array's total size in
	   bytes, so this accepts only float arrays of at most 4
	   bytes.  The AAPCS64 HFA rule is about at most 4 *members*;
	   confirm whether an element-count check was intended.  */
	if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	/* A struct/union qualifies when it has between 1 and 4 fields
	   and every field is a float of the same code and length as
	   the first.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
	      {
		int i;

		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
1376
/* AArch64 function call information structure.  Tracks the AAPCS64
   register/stack allocation state while marshalling arguments for a
   dummy call.  */
struct aarch64_call_info
{
  /* The current argument number (used only for debug output).  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  Items are written below SP, last item lowest,
     by aarch64_push_dummy_call.  */
  VEC(stack_item_t) *si;
};
1398
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available, and
   for updating INFO->ngrn afterwards (see pass_in_x_or_stack).  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  /* Copy BUF into successive X registers, X_REGISTER_SIZE bytes at a
     time.  */
  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
			    info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
1438
1439 /* Attempt to marshall a value in a V register. Return 1 if
1440 successful, or 0 if insufficient registers are available. This
1441 function, unlike the equivalent pass_in_x() function does not
1442 handle arguments spread across multiple registers. */
1443
1444 static int
1445 pass_in_v (struct gdbarch *gdbarch,
1446 struct regcache *regcache,
1447 struct aarch64_call_info *info,
1448 const bfd_byte *buf)
1449 {
1450 if (info->nsrn < 8)
1451 {
1452 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1453 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1454
1455 info->argnum++;
1456 info->nsrn++;
1457
1458 regcache_cooked_write (regcache, regnum, buf);
1459 if (aarch64_debug)
1460 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1461 info->argnum,
1462 gdbarch_register_name (gdbarch, regnum));
1463 return 1;
1464 }
1465 info->nsrn = 8;
1466 return 0;
1467 }
1468
/* Marshall an argument onto the stack: queue it (and any alignment
   padding) on INFO->si and advance INFO->nsaa.  The actual memory
   writes happen later in aarch64_push_dummy_call.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
			info->argnum, len, info->nsaa);

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  The padding item reuses BUF as
	 don't-care filler bytes.  NOTE(review): if PAD exceeds LEN
	 this reads past the value's bytes when the item is later
	 written out — confirm BUF is always large enough.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
1512
1513 /* Marshall an argument into a sequence of one or more consecutive X
1514 registers or, if insufficient X registers are available then onto
1515 the stack. */
1516
1517 static void
1518 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1519 struct aarch64_call_info *info, struct type *type,
1520 const bfd_byte *buf)
1521 {
1522 int len = TYPE_LENGTH (type);
1523 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1524
1525 /* PCS C.13 - Pass in registers if we have enough spare */
1526 if (info->ngrn + nregs <= 8)
1527 {
1528 pass_in_x (gdbarch, regcache, info, type, buf);
1529 info->ngrn += nregs;
1530 }
1531 else
1532 {
1533 info->ngrn = 8;
1534 pass_on_stack (info, type, buf);
1535 }
1536 }
1537
1538 /* Pass a value in a V register, or on the stack if insufficient are
1539 available. */
1540
1541 static void
1542 pass_in_v_or_stack (struct gdbarch *gdbarch,
1543 struct regcache *regcache,
1544 struct aarch64_call_info *info,
1545 struct type *type,
1546 const bfd_byte *buf)
1547 {
1548 if (!pass_in_v (gdbarch, regcache, info, buf))
1549 pass_on_stack (info, type, buf);
1550 }
1551
1552 /* Implement the "push_dummy_call" gdbarch method. */
1553
1554 static CORE_ADDR
1555 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1556 struct regcache *regcache, CORE_ADDR bp_addr,
1557 int nargs,
1558 struct value **args, CORE_ADDR sp, int struct_return,
1559 CORE_ADDR struct_addr)
1560 {
1561 int nstack = 0;
1562 int argnum;
1563 int x_argreg;
1564 int v_argreg;
1565 struct aarch64_call_info info;
1566 struct type *func_type;
1567 struct type *return_type;
1568 int lang_struct_return;
1569
1570 memset (&info, 0, sizeof (info));
1571
1572 /* We need to know what the type of the called function is in order
1573 to determine the number of named/anonymous arguments for the
1574 actual argument placement, and the return type in order to handle
1575 return value correctly.
1576
1577 The generic code above us views the decision of return in memory
1578 or return in registers as a two stage processes. The language
1579 handler is consulted first and may decide to return in memory (eg
1580 class with copy constructor returned by value), this will cause
1581 the generic code to allocate space AND insert an initial leading
1582 argument.
1583
1584 If the language code does not decide to pass in memory then the
1585 target code is consulted.
1586
1587 If the language code decides to pass in memory we want to move
1588 the pointer inserted as the initial argument from the argument
1589 list and into X8, the conventional AArch64 struct return pointer
1590 register.
1591
1592 This is slightly awkward, ideally the flag "lang_struct_return"
1593 would be passed to the targets implementation of push_dummy_call.
1594 Rather that change the target interface we call the language code
1595 directly ourselves. */
1596
1597 func_type = check_typedef (value_type (function));
1598
1599 /* Dereference function pointer types. */
1600 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1601 func_type = TYPE_TARGET_TYPE (func_type);
1602
1603 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1604 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1605
1606 /* If language_pass_by_reference () returned true we will have been
1607 given an additional initial argument, a hidden pointer to the
1608 return slot in memory. */
1609 return_type = TYPE_TARGET_TYPE (func_type);
1610 lang_struct_return = language_pass_by_reference (return_type);
1611
1612 /* Set the return address. For the AArch64, the return breakpoint
1613 is always at BP_ADDR. */
1614 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1615
1616 /* If we were given an initial argument for the return slot because
1617 lang_struct_return was true, lose it. */
1618 if (lang_struct_return)
1619 {
1620 args++;
1621 nargs--;
1622 }
1623
1624 /* The struct_return pointer occupies X8. */
1625 if (struct_return || lang_struct_return)
1626 {
1627 if (aarch64_debug)
1628 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1629 gdbarch_register_name
1630 (gdbarch,
1631 AARCH64_STRUCT_RETURN_REGNUM),
1632 paddress (gdbarch, struct_addr));
1633 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1634 struct_addr);
1635 }
1636
1637 for (argnum = 0; argnum < nargs; argnum++)
1638 {
1639 struct value *arg = args[argnum];
1640 struct type *arg_type;
1641 int len;
1642
1643 arg_type = check_typedef (value_type (arg));
1644 len = TYPE_LENGTH (arg_type);
1645
1646 switch (TYPE_CODE (arg_type))
1647 {
1648 case TYPE_CODE_INT:
1649 case TYPE_CODE_BOOL:
1650 case TYPE_CODE_CHAR:
1651 case TYPE_CODE_RANGE:
1652 case TYPE_CODE_ENUM:
1653 if (len < 4)
1654 {
1655 /* Promote to 32 bit integer. */
1656 if (TYPE_UNSIGNED (arg_type))
1657 arg_type = builtin_type (gdbarch)->builtin_uint32;
1658 else
1659 arg_type = builtin_type (gdbarch)->builtin_int32;
1660 arg = value_cast (arg_type, arg);
1661 }
1662 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1663 value_contents (arg));
1664 break;
1665
1666 case TYPE_CODE_COMPLEX:
1667 if (info.nsrn <= 6)
1668 {
1669 const bfd_byte *buf = value_contents (arg);
1670 struct type *target_type =
1671 check_typedef (TYPE_TARGET_TYPE (arg_type));
1672
1673 pass_in_v (gdbarch, regcache, &info, buf);
1674 pass_in_v (gdbarch, regcache, &info,
1675 buf + TYPE_LENGTH (target_type));
1676 }
1677 else
1678 {
1679 info.nsrn = 8;
1680 pass_on_stack (&info, arg_type, value_contents (arg));
1681 }
1682 break;
1683 case TYPE_CODE_FLT:
1684 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1685 value_contents (arg));
1686 break;
1687
1688 case TYPE_CODE_STRUCT:
1689 case TYPE_CODE_ARRAY:
1690 case TYPE_CODE_UNION:
1691 if (is_hfa (arg_type))
1692 {
1693 int elements = TYPE_NFIELDS (arg_type);
1694
1695 /* Homogeneous Aggregates */
1696 if (info.nsrn + elements < 8)
1697 {
1698 int i;
1699
1700 for (i = 0; i < elements; i++)
1701 {
1702 /* We know that we have sufficient registers
1703 available therefore this will never fallback
1704 to the stack. */
1705 struct value *field =
1706 value_primitive_field (arg, 0, i, arg_type);
1707 struct type *field_type =
1708 check_typedef (value_type (field));
1709
1710 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1711 value_contents_writeable (field));
1712 }
1713 }
1714 else
1715 {
1716 info.nsrn = 8;
1717 pass_on_stack (&info, arg_type, value_contents (arg));
1718 }
1719 }
1720 else if (len > 16)
1721 {
1722 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1723 invisible reference. */
1724
1725 /* Allocate aligned storage. */
1726 sp = align_down (sp - len, 16);
1727
1728 /* Write the real data into the stack. */
1729 write_memory (sp, value_contents (arg), len);
1730
1731 /* Construct the indirection. */
1732 arg_type = lookup_pointer_type (arg_type);
1733 arg = value_from_pointer (arg_type, sp);
1734 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1735 value_contents (arg));
1736 }
1737 else
1738 /* PCS C.15 / C.18 multiple values pass. */
1739 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1740 value_contents (arg));
1741 break;
1742
1743 default:
1744 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1745 value_contents (arg));
1746 break;
1747 }
1748 }
1749
1750 /* Make sure stack retains 16 byte alignment. */
1751 if (info.nsaa & 15)
1752 sp -= 16 - (info.nsaa & 15);
1753
1754 while (!VEC_empty (stack_item_t, info.si))
1755 {
1756 stack_item_t *si = VEC_last (stack_item_t, info.si);
1757
1758 sp -= si->len;
1759 write_memory (sp, si->data, si->len);
1760 VEC_pop (stack_item_t, info.si);
1761 }
1762
1763 VEC_free (stack_item_t, info.si);
1764
1765 /* Finally, update the SP register. */
1766 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1767
1768 return sp;
1769 }
1770
1771 /* Implement the "frame_align" gdbarch method. */
1772
1773 static CORE_ADDR
1774 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1775 {
1776 /* Align the stack to sixteen bytes. */
1777 return sp & ~(CORE_ADDR) 15;
1778 }
1779
1780 /* Return the type for an AdvSISD Q register. */
1781
1782 static struct type *
1783 aarch64_vnq_type (struct gdbarch *gdbarch)
1784 {
1785 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1786
1787 if (tdep->vnq_type == NULL)
1788 {
1789 struct type *t;
1790 struct type *elem;
1791
1792 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1793 TYPE_CODE_UNION);
1794
1795 elem = builtin_type (gdbarch)->builtin_uint128;
1796 append_composite_type_field (t, "u", elem);
1797
1798 elem = builtin_type (gdbarch)->builtin_int128;
1799 append_composite_type_field (t, "s", elem);
1800
1801 tdep->vnq_type = t;
1802 }
1803
1804 return tdep->vnq_type;
1805 }
1806
1807 /* Return the type for an AdvSISD D register. */
1808
1809 static struct type *
1810 aarch64_vnd_type (struct gdbarch *gdbarch)
1811 {
1812 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1813
1814 if (tdep->vnd_type == NULL)
1815 {
1816 struct type *t;
1817 struct type *elem;
1818
1819 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1820 TYPE_CODE_UNION);
1821
1822 elem = builtin_type (gdbarch)->builtin_double;
1823 append_composite_type_field (t, "f", elem);
1824
1825 elem = builtin_type (gdbarch)->builtin_uint64;
1826 append_composite_type_field (t, "u", elem);
1827
1828 elem = builtin_type (gdbarch)->builtin_int64;
1829 append_composite_type_field (t, "s", elem);
1830
1831 tdep->vnd_type = t;
1832 }
1833
1834 return tdep->vnd_type;
1835 }
1836
1837 /* Return the type for an AdvSISD S register. */
1838
1839 static struct type *
1840 aarch64_vns_type (struct gdbarch *gdbarch)
1841 {
1842 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1843
1844 if (tdep->vns_type == NULL)
1845 {
1846 struct type *t;
1847 struct type *elem;
1848
1849 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1850 TYPE_CODE_UNION);
1851
1852 elem = builtin_type (gdbarch)->builtin_float;
1853 append_composite_type_field (t, "f", elem);
1854
1855 elem = builtin_type (gdbarch)->builtin_uint32;
1856 append_composite_type_field (t, "u", elem);
1857
1858 elem = builtin_type (gdbarch)->builtin_int32;
1859 append_composite_type_field (t, "s", elem);
1860
1861 tdep->vns_type = t;
1862 }
1863
1864 return tdep->vns_type;
1865 }
1866
1867 /* Return the type for an AdvSISD H register. */
1868
1869 static struct type *
1870 aarch64_vnh_type (struct gdbarch *gdbarch)
1871 {
1872 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1873
1874 if (tdep->vnh_type == NULL)
1875 {
1876 struct type *t;
1877 struct type *elem;
1878
1879 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1880 TYPE_CODE_UNION);
1881
1882 elem = builtin_type (gdbarch)->builtin_uint16;
1883 append_composite_type_field (t, "u", elem);
1884
1885 elem = builtin_type (gdbarch)->builtin_int16;
1886 append_composite_type_field (t, "s", elem);
1887
1888 tdep->vnh_type = t;
1889 }
1890
1891 return tdep->vnh_type;
1892 }
1893
1894 /* Return the type for an AdvSISD B register. */
1895
1896 static struct type *
1897 aarch64_vnb_type (struct gdbarch *gdbarch)
1898 {
1899 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1900
1901 if (tdep->vnb_type == NULL)
1902 {
1903 struct type *t;
1904 struct type *elem;
1905
1906 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1907 TYPE_CODE_UNION);
1908
1909 elem = builtin_type (gdbarch)->builtin_uint8;
1910 append_composite_type_field (t, "u", elem);
1911
1912 elem = builtin_type (gdbarch)->builtin_int8;
1913 append_composite_type_field (t, "s", elem);
1914
1915 tdep->vnb_type = t;
1916 }
1917
1918 return tdep->vnb_type;
1919 }
1920
1921 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1922
1923 static int
1924 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1925 {
1926 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1927 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1928
1929 if (reg == AARCH64_DWARF_SP)
1930 return AARCH64_SP_REGNUM;
1931
1932 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1933 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1934
1935 return -1;
1936 }
1937 \f
1938
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Clear any symbol hint the caller left in INFO before handing off
     to the opcodes disassembler.  NOTE(review): presumably so output
     does not depend on symbol table contents — confirm against
     print_insn_aarch64.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1947
/* AArch64 BRK software debug mode instruction (BRK #0).
   Note that AArch64 code is always little-endian, so the bytes below
   are the little-endian encoding of:
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1952
1953 /* Implement the "breakpoint_from_pc" gdbarch method. */
1954
1955 static const gdb_byte *
1956 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1957 int *lenptr)
1958 {
1959 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1960
1961 *lenptr = sizeof (aarch64_default_breakpoint);
1962 return aarch64_default_breakpoint;
1963 }
1964
1965 /* Extract from an array REGS containing the (raw) register state a
1966 function return value of type TYPE, and copy that, in virtual
1967 format, into VALBUF. */
1968
1969 static void
1970 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1971 gdb_byte *valbuf)
1972 {
1973 struct gdbarch *gdbarch = get_regcache_arch (regs);
1974 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1975
1976 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1977 {
1978 bfd_byte buf[V_REGISTER_SIZE];
1979 int len = TYPE_LENGTH (type);
1980
1981 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1982 memcpy (valbuf, buf, len);
1983 }
1984 else if (TYPE_CODE (type) == TYPE_CODE_INT
1985 || TYPE_CODE (type) == TYPE_CODE_CHAR
1986 || TYPE_CODE (type) == TYPE_CODE_BOOL
1987 || TYPE_CODE (type) == TYPE_CODE_PTR
1988 || TYPE_CODE (type) == TYPE_CODE_REF
1989 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1990 {
1991 /* If the the type is a plain integer, then the access is
1992 straight-forward. Otherwise we have to play around a bit
1993 more. */
1994 int len = TYPE_LENGTH (type);
1995 int regno = AARCH64_X0_REGNUM;
1996 ULONGEST tmp;
1997
1998 while (len > 0)
1999 {
2000 /* By using store_unsigned_integer we avoid having to do
2001 anything special for small big-endian values. */
2002 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2003 store_unsigned_integer (valbuf,
2004 (len > X_REGISTER_SIZE
2005 ? X_REGISTER_SIZE : len), byte_order, tmp);
2006 len -= X_REGISTER_SIZE;
2007 valbuf += X_REGISTER_SIZE;
2008 }
2009 }
2010 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
2011 {
2012 int regno = AARCH64_V0_REGNUM;
2013 bfd_byte buf[V_REGISTER_SIZE];
2014 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
2015 int len = TYPE_LENGTH (target_type);
2016
2017 regcache_cooked_read (regs, regno, buf);
2018 memcpy (valbuf, buf, len);
2019 valbuf += len;
2020 regcache_cooked_read (regs, regno + 1, buf);
2021 memcpy (valbuf, buf, len);
2022 valbuf += len;
2023 }
2024 else if (is_hfa (type))
2025 {
2026 int elements = TYPE_NFIELDS (type);
2027 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2028 int len = TYPE_LENGTH (member_type);
2029 int i;
2030
2031 for (i = 0; i < elements; i++)
2032 {
2033 int regno = AARCH64_V0_REGNUM + i;
2034 bfd_byte buf[X_REGISTER_SIZE];
2035
2036 if (aarch64_debug)
2037 fprintf_unfiltered (gdb_stdlog,
2038 "read HFA return value element %d from %s\n",
2039 i + 1,
2040 gdbarch_register_name (gdbarch, regno));
2041 regcache_cooked_read (regs, regno, buf);
2042
2043 memcpy (valbuf, buf, len);
2044 valbuf += len;
2045 }
2046 }
2047 else
2048 {
2049 /* For a structure or union the behaviour is as if the value had
2050 been stored to word-aligned memory and then loaded into
2051 registers with 64-bit load instruction(s). */
2052 int len = TYPE_LENGTH (type);
2053 int regno = AARCH64_X0_REGNUM;
2054 bfd_byte buf[X_REGISTER_SIZE];
2055
2056 while (len > 0)
2057 {
2058 regcache_cooked_read (regs, regno++, buf);
2059 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2060 len -= X_REGISTER_SIZE;
2061 valbuf += X_REGISTER_SIZE;
2062 }
2063 }
2064 }
2065
2066
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.
   (Fix: removed the unused locals NRC and CODE.)  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
         used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
2100
/* Write into appropriate registers of REGS a function return value of
   type TYPE, given in virtual format in VALBUF.  Mirrors
   aarch64_extract_return_value.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar float goes into the low bytes of V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the regiser size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* Homogeneous floating-point aggregate: one member per V
	 register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"write HFA return value element %d to %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2194
2195 /* Implement the "return_value" gdbarch method. */
2196
2197 static enum return_value_convention
2198 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2199 struct type *valtype, struct regcache *regcache,
2200 gdb_byte *readbuf, const gdb_byte *writebuf)
2201 {
2202 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2203
2204 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2205 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2206 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2207 {
2208 if (aarch64_return_in_memory (gdbarch, valtype))
2209 {
2210 if (aarch64_debug)
2211 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2212 return RETURN_VALUE_STRUCT_CONVENTION;
2213 }
2214 }
2215
2216 if (writebuf)
2217 aarch64_store_return_value (valtype, regcache, writebuf);
2218
2219 if (readbuf)
2220 aarch64_extract_return_value (valtype, regcache, readbuf);
2221
2222 if (aarch64_debug)
2223 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2224
2225 return RETURN_VALUE_REGISTER_CONVENTION;
2226 }
2227
2228 /* Implement the "get_longjmp_target" gdbarch method. */
2229
2230 static int
2231 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2232 {
2233 CORE_ADDR jb_addr;
2234 gdb_byte buf[X_REGISTER_SIZE];
2235 struct gdbarch *gdbarch = get_frame_arch (frame);
2236 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2237 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2238
2239 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2240
2241 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2242 X_REGISTER_SIZE))
2243 return 0;
2244
2245 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2246 return 1;
2247 }
2248 \f
2249
2250 /* Return the pseudo register name corresponding to register regnum. */
2251
2252 static const char *
2253 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2254 {
2255 static const char *const q_name[] =
2256 {
2257 "q0", "q1", "q2", "q3",
2258 "q4", "q5", "q6", "q7",
2259 "q8", "q9", "q10", "q11",
2260 "q12", "q13", "q14", "q15",
2261 "q16", "q17", "q18", "q19",
2262 "q20", "q21", "q22", "q23",
2263 "q24", "q25", "q26", "q27",
2264 "q28", "q29", "q30", "q31",
2265 };
2266
2267 static const char *const d_name[] =
2268 {
2269 "d0", "d1", "d2", "d3",
2270 "d4", "d5", "d6", "d7",
2271 "d8", "d9", "d10", "d11",
2272 "d12", "d13", "d14", "d15",
2273 "d16", "d17", "d18", "d19",
2274 "d20", "d21", "d22", "d23",
2275 "d24", "d25", "d26", "d27",
2276 "d28", "d29", "d30", "d31",
2277 };
2278
2279 static const char *const s_name[] =
2280 {
2281 "s0", "s1", "s2", "s3",
2282 "s4", "s5", "s6", "s7",
2283 "s8", "s9", "s10", "s11",
2284 "s12", "s13", "s14", "s15",
2285 "s16", "s17", "s18", "s19",
2286 "s20", "s21", "s22", "s23",
2287 "s24", "s25", "s26", "s27",
2288 "s28", "s29", "s30", "s31",
2289 };
2290
2291 static const char *const h_name[] =
2292 {
2293 "h0", "h1", "h2", "h3",
2294 "h4", "h5", "h6", "h7",
2295 "h8", "h9", "h10", "h11",
2296 "h12", "h13", "h14", "h15",
2297 "h16", "h17", "h18", "h19",
2298 "h20", "h21", "h22", "h23",
2299 "h24", "h25", "h26", "h27",
2300 "h28", "h29", "h30", "h31",
2301 };
2302
2303 static const char *const b_name[] =
2304 {
2305 "b0", "b1", "b2", "b3",
2306 "b4", "b5", "b6", "b7",
2307 "b8", "b9", "b10", "b11",
2308 "b12", "b13", "b14", "b15",
2309 "b16", "b17", "b18", "b19",
2310 "b20", "b21", "b22", "b23",
2311 "b24", "b25", "b26", "b27",
2312 "b28", "b29", "b30", "b31",
2313 };
2314
2315 regnum -= gdbarch_num_regs (gdbarch);
2316
2317 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2318 return q_name[regnum - AARCH64_Q0_REGNUM];
2319
2320 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2321 return d_name[regnum - AARCH64_D0_REGNUM];
2322
2323 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2324 return s_name[regnum - AARCH64_S0_REGNUM];
2325
2326 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2327 return h_name[regnum - AARCH64_H0_REGNUM];
2328
2329 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2330 return b_name[regnum - AARCH64_B0_REGNUM];
2331
2332 internal_error (__FILE__, __LINE__,
2333 _("aarch64_pseudo_register_name: bad register number %d"),
2334 regnum);
2335 }
2336
2337 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2338
2339 static struct type *
2340 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2341 {
2342 regnum -= gdbarch_num_regs (gdbarch);
2343
2344 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2345 return aarch64_vnq_type (gdbarch);
2346
2347 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2348 return aarch64_vnd_type (gdbarch);
2349
2350 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2351 return aarch64_vns_type (gdbarch);
2352
2353 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2354 return aarch64_vnh_type (gdbarch);
2355
2356 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2357 return aarch64_vnb_type (gdbarch);
2358
2359 internal_error (__FILE__, __LINE__,
2360 _("aarch64_pseudo_register_type: bad register number %d"),
2361 regnum);
2362 }
2363
2364 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2365
2366 static int
2367 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2368 struct reggroup *group)
2369 {
2370 regnum -= gdbarch_num_regs (gdbarch);
2371
2372 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2373 return group == all_reggroup || group == vector_reggroup;
2374 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2375 return (group == all_reggroup || group == vector_reggroup
2376 || group == float_reggroup);
2377 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2378 return (group == all_reggroup || group == vector_reggroup
2379 || group == float_reggroup);
2380 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2381 return group == all_reggroup || group == vector_reggroup;
2382 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2383 return group == all_reggroup || group == vector_reggroup;
2384
2385 return group == all_reggroup;
2386 }
2387
2388 /* Implement the "pseudo_register_read_value" gdbarch method. */
2389
2390 static struct value *
2391 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2392 struct regcache *regcache,
2393 int regnum)
2394 {
2395 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2396 struct value *result_value;
2397 gdb_byte *buf;
2398
2399 result_value = allocate_value (register_type (gdbarch, regnum));
2400 VALUE_LVAL (result_value) = lval_register;
2401 VALUE_REGNUM (result_value) = regnum;
2402 buf = value_contents_raw (result_value);
2403
2404 regnum -= gdbarch_num_regs (gdbarch);
2405
2406 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2407 {
2408 enum register_status status;
2409 unsigned v_regnum;
2410
2411 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2412 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2413 if (status != REG_VALID)
2414 mark_value_bytes_unavailable (result_value, 0,
2415 TYPE_LENGTH (value_type (result_value)));
2416 else
2417 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2418 return result_value;
2419 }
2420
2421 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2422 {
2423 enum register_status status;
2424 unsigned v_regnum;
2425
2426 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2427 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2428 if (status != REG_VALID)
2429 mark_value_bytes_unavailable (result_value, 0,
2430 TYPE_LENGTH (value_type (result_value)));
2431 else
2432 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2433 return result_value;
2434 }
2435
2436 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2437 {
2438 enum register_status status;
2439 unsigned v_regnum;
2440
2441 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2442 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2443 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2444 return result_value;
2445 }
2446
2447 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2448 {
2449 enum register_status status;
2450 unsigned v_regnum;
2451
2452 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2453 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2454 if (status != REG_VALID)
2455 mark_value_bytes_unavailable (result_value, 0,
2456 TYPE_LENGTH (value_type (result_value)));
2457 else
2458 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2459 return result_value;
2460 }
2461
2462 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2463 {
2464 enum register_status status;
2465 unsigned v_regnum;
2466
2467 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2468 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2469 if (status != REG_VALID)
2470 mark_value_bytes_unavailable (result_value, 0,
2471 TYPE_LENGTH (value_type (result_value)));
2472 else
2473 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2474 return result_value;
2475 }
2476
2477 gdb_assert_not_reached ("regnum out of bound");
2478 }
2479
2480 /* Implement the "pseudo_register_write" gdbarch method. */
2481
2482 static void
2483 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2484 int regnum, const gdb_byte *buf)
2485 {
2486 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2487
2488 /* Ensure the register buffer is zero, we want gdb writes of the
2489 various 'scalar' pseudo registers to behavior like architectural
2490 writes, register width bytes are written the remainder are set to
2491 zero. */
2492 memset (reg_buf, 0, sizeof (reg_buf));
2493
2494 regnum -= gdbarch_num_regs (gdbarch);
2495
2496 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2497 {
2498 /* pseudo Q registers */
2499 unsigned v_regnum;
2500
2501 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2502 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2503 regcache_raw_write (regcache, v_regnum, reg_buf);
2504 return;
2505 }
2506
2507 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2508 {
2509 /* pseudo D registers */
2510 unsigned v_regnum;
2511
2512 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2513 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2514 regcache_raw_write (regcache, v_regnum, reg_buf);
2515 return;
2516 }
2517
2518 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2519 {
2520 unsigned v_regnum;
2521
2522 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2523 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2524 regcache_raw_write (regcache, v_regnum, reg_buf);
2525 return;
2526 }
2527
2528 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2529 {
2530 /* pseudo H registers */
2531 unsigned v_regnum;
2532
2533 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2534 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2535 regcache_raw_write (regcache, v_regnum, reg_buf);
2536 return;
2537 }
2538
2539 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2540 {
2541 /* pseudo B registers */
2542 unsigned v_regnum;
2543
2544 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2545 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2546 regcache_raw_write (regcache, v_regnum, reg_buf);
2547 return;
2548 }
2549
2550 gdb_assert_not_reached ("regnum out of bound");
2551 }
2552
/* Callback function for user_reg_add.  BATON points at the register
   number the alias maps to.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_ptr = (const int *) baton;

  return value_of_register (*regnum_ptr, frame);
}
2562 \f
2563
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Stepping instruction-by-instruction through an ldxr/stxr sequence
   would break the exclusive reservation on every stop, so instead we
   scan forward from PC for the closing store-exclusive and place
   breakpoints beyond the sequence (and at the target of a conditional
   branch inside it, if any).  Returns 1 if breakpoints were placed,
   0 to fall back to normal single-stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  /* Scan at most ATOMIC_SEQUENCE_LENGTH following instructions for
     the matching Store Exclusive.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch makes the sequence too
	     complex to handle; give up and fall back.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2641
2642 /* Initialize the current architecture based on INFO. If possible,
2643 re-use an architecture from ARCHES, which is a list of
2644 architectures already created during this debugging session.
2645
2646 Called e.g. at program startup, when reading a core file, and when
2647 reading a binary file. */
2648
2649 static struct gdbarch *
2650 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2651 {
2652 struct gdbarch_tdep *tdep;
2653 struct gdbarch *gdbarch;
2654 struct gdbarch_list *best_arch;
2655 struct tdesc_arch_data *tdesc_data = NULL;
2656 const struct target_desc *tdesc = info.target_desc;
2657 int i;
2658 int have_fpa_registers = 1;
2659 int valid_p = 1;
2660 const struct tdesc_feature *feature;
2661 int num_regs = 0;
2662 int num_pseudo_regs = 0;
2663
2664 /* Ensure we always have a target descriptor. */
2665 if (!tdesc_has_registers (tdesc))
2666 tdesc = tdesc_aarch64;
2667
2668 gdb_assert (tdesc);
2669
2670 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2671
2672 if (feature == NULL)
2673 return NULL;
2674
2675 tdesc_data = tdesc_data_alloc ();
2676
2677 /* Validate the descriptor provides the mandatory core R registers
2678 and allocate their numbers. */
2679 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2680 valid_p &=
2681 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2682 aarch64_r_register_names[i]);
2683
2684 num_regs = AARCH64_X0_REGNUM + i;
2685
2686 /* Look for the V registers. */
2687 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2688 if (feature)
2689 {
2690 /* Validate the descriptor provides the mandatory V registers
2691 and allocate their numbers. */
2692 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2693 valid_p &=
2694 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2695 aarch64_v_register_names[i]);
2696
2697 num_regs = AARCH64_V0_REGNUM + i;
2698
2699 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2700 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2701 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2702 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2703 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2704 }
2705
2706 if (!valid_p)
2707 {
2708 tdesc_data_cleanup (tdesc_data);
2709 return NULL;
2710 }
2711
2712 /* AArch64 code is always little-endian. */
2713 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2714
2715 /* If there is already a candidate, use it. */
2716 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2717 best_arch != NULL;
2718 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2719 {
2720 /* Found a match. */
2721 break;
2722 }
2723
2724 if (best_arch != NULL)
2725 {
2726 if (tdesc_data != NULL)
2727 tdesc_data_cleanup (tdesc_data);
2728 return best_arch->gdbarch;
2729 }
2730
2731 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2732 gdbarch = gdbarch_alloc (&info, tdep);
2733
2734 /* This should be low enough for everything. */
2735 tdep->lowest_pc = 0x20;
2736 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2737 tdep->jb_elt_size = 8;
2738
2739 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2740 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2741
2742 /* Frame handling. */
2743 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2744 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2745 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2746
2747 /* Advance PC across function entry code. */
2748 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2749
2750 /* The stack grows downward. */
2751 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2752
2753 /* Breakpoint manipulation. */
2754 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2755 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2756 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2757
2758 /* Information about registers, etc. */
2759 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2760 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2761 set_gdbarch_num_regs (gdbarch, num_regs);
2762
2763 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2764 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2765 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2766 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2767 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2768 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2769 aarch64_pseudo_register_reggroup_p);
2770
2771 /* ABI */
2772 set_gdbarch_short_bit (gdbarch, 16);
2773 set_gdbarch_int_bit (gdbarch, 32);
2774 set_gdbarch_float_bit (gdbarch, 32);
2775 set_gdbarch_double_bit (gdbarch, 64);
2776 set_gdbarch_long_double_bit (gdbarch, 128);
2777 set_gdbarch_long_bit (gdbarch, 64);
2778 set_gdbarch_long_long_bit (gdbarch, 64);
2779 set_gdbarch_ptr_bit (gdbarch, 64);
2780 set_gdbarch_char_signed (gdbarch, 0);
2781 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2782 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2783 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2784
2785 /* Internal <-> external register number maps. */
2786 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2787
2788 /* Returning results. */
2789 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2790
2791 /* Disassembly. */
2792 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2793
2794 /* Virtual tables. */
2795 set_gdbarch_vbit_in_delta (gdbarch, 1);
2796
2797 /* Hook in the ABI-specific overrides, if they have been registered. */
2798 info.target_desc = tdesc;
2799 info.tdep_info = (void *) tdesc_data;
2800 gdbarch_init_osabi (info, gdbarch);
2801
2802 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2803
2804 /* Add some default predicates. */
2805 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2806 dwarf2_append_unwinders (gdbarch);
2807 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2808
2809 frame_base_set_default (gdbarch, &aarch64_normal_base);
2810
2811 /* Now we have tuned the configuration, set a few final things,
2812 based on what the OS ABI has told us. */
2813
2814 if (tdep->jb_pc >= 0)
2815 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2816
2817 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2818
2819 /* Add standard register aliases. */
2820 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2821 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2822 value_of_aarch64_user_reg,
2823 &aarch64_register_aliases[i].regnum);
2824
2825 return gdbarch;
2826 }
2827
2828 static void
2829 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2830 {
2831 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2832
2833 if (tdep == NULL)
2834 return;
2835
2836 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2837 paddress (gdbarch, tdep->lowest_pc));
2838 }
2839
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 architecture with the
   gdbarch framework, build the builtin target description, and add
   the "set/show debug aarch64" maintenance commands.  */
void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Build the builtin target description used when the target does
     not supply one of its own.  */
  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);
}
2860
2861 /* AArch64 process record-replay related structures, defines etc. */
2862
/* Mask of the low (X + 1) bits.  The shift is performed in type long,
   so X must be smaller than the bit-width of long minus one to avoid
   undefined behaviour.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit number ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bit field ST..FN (inclusive, FN >= ST) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate an array of LENGTH register numbers, assign it to REGS and
   fill it from RECORD_BUF.  Does nothing when LENGTH is zero.  The
   caller owns (and must eventually xfree) the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate an array of LENGTH aarch64_mem_r records, assign it to
   MEMS and fill it from RECORD_BUF.  Does nothing when LENGTH is
   zero.  (&MEMS->len is the address of the first member, i.e. the
   start of the first record.)  Caller owns the allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
2891
/* AArch64 record/replay structures and enumerations.  */

/* One contiguous memory region that the recorded instruction is about
   to overwrite, so its old contents must be saved first.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Status codes returned by the aarch64_record_* decode helpers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,      /* Registers/memory queued for recording.  */
  AARCH64_RECORD_FAILURE,      /* Recording failed.  */
  AARCH64_RECORD_UNSUPPORTED,  /* Insn recognized but not supported.  */
  AARCH64_RECORD_UNKNOWN       /* Insn could not be decoded.  */
};

/* Working state while decoding and recording a single instruction.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2919
/* Record handler for data processing - register instructions.

   Work out which registers the instruction writes -- the destination
   register and, for flag-setting variants, CPSR -- and queue them in
   AARCH64_INSN_R for the record machinery.  Returns an
   aarch64_record_result code.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
2995
2996 /* Record handler for data processing - immediate instructions. */
2997
2998 static unsigned int
2999 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3000 {
3001 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3002 uint32_t record_buf[4];
3003
3004 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3005 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3006 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3007 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3008
3009 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3010 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3011 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3012 {
3013 record_buf[0] = reg_rd;
3014 aarch64_insn_r->reg_rec_count = 1;
3015 }
3016 else if (insn_bits24_27 == 0x01)
3017 {
3018 /* Add/Subtract (immediate). */
3019 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3020 record_buf[0] = reg_rd;
3021 aarch64_insn_r->reg_rec_count = 1;
3022 if (setflags)
3023 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3024 }
3025 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3026 {
3027 /* Logical (immediate). */
3028 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3029 record_buf[0] = reg_rd;
3030 aarch64_insn_r->reg_rec_count = 1;
3031 if (setflags)
3032 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3033 }
3034 else
3035 return AARCH64_RECORD_UNKNOWN;
3036
3037 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3038 record_buf);
3039 return AARCH64_RECORD_SUCCESS;
3040 }
3041
/* Record handler for branch, exception generation and system instructions.

   Queue the registers the instruction writes (PC; LR for branch-with-
   link forms; CPSR or Rt for system instructions) and, for SVC,
   delegate to the OS ABI's syscall record hook.  Returns an
   aarch64_record_result code.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Raw register 8 is x8, which holds the syscall number
		 for SVC; hand off to the OS-specific recorder.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* NOTE(review): other system encodings fall through without
	     queuing anything -- presumably they write no GDB-visible
	     state; confirm against the decode tables.  */
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3121
/* Record handler for advanced SIMD load and store instructions.

   Collects into AARCH64_INSN_R the registers the instruction will write
   (loads) and the <length, address> memory regions it will overwrite
   (stores), then publishes them via MEM_ALLOC / REG_ALLOC.  Returns
   AARCH64_RECORD_SUCCESS on success, AARCH64_RECORD_UNKNOWN for reserved
   encodings and AARCH64_RECORD_UNSUPPORTED for opcodes not decoded
   here.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;	/* Running offset from the base address.  */
  uint32_t record_buf[24];	/* Register numbers to be recorded.  */
  uint64_t record_buf_mem[24];	/* Alternating <length, address> entries.  */
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Every memory region touched below is relative to the base register
     Rn's current value.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    {
      fprintf_unfiltered (gdb_stdlog,
			  "Process record: Advanced SIMD load/store\n");
    }

  /* Load/store single structure (bit 24 set).  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* Number of structure elements transferred (1..4).  */
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the size/opcode combination; reject reserved forms.  */
      switch (scale)
        {
        case 1:
          if (size_bits & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          break;
        case 2:
          if ((size_bits >> 1) & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if (size_bits & 0x01)
            {
              if (!((opcode_bits >> 1) & 0x01))
                scale = 3;
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          break;
        case 3:
          /* Load-and-replicate forms (LD1R..LD4R): element size comes
             from the size field instead.  */
          if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
            {
              scale = size_bits;
              replicate = 1;
              break;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        default:
          break;
        }
      esize = 8 << scale;	/* Element size in bits.  */
      if (replicate)
        for (sindex = 0; sindex < selem; sindex++)
          {
            record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            /* Vector register numbers wrap around at V31.  */
            reg_rt = (reg_rt + 1) % 32;
          }
      else
        {
          for (sindex = 0; sindex < selem; sindex++)
            /* Bit 22 (L) distinguishes loads from stores.  */
            if (bit (aarch64_insn_r->aarch64_insn, 22))
              record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            else
              {
                record_buf_mem[mem_index++] = esize / 8;
                record_buf_mem[mem_index++] = address + addr_offset;
              }
          /* NOTE(review): the offset and register advance sit outside the
             FOR loop above, so multi-element stores record SELEM regions
             at the same address -- verify against the ARM ARM.  */
          addr_offset = addr_offset + (esize / 8);
          reg_rt = (reg_rt + 1) % 32;
        }
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;	/* Element size in bits.  */
      /* Bit 30 (Q) selects a 128-bit or 64-bit vector length.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
      else
        elements = 64 / esize;

      /* RPT = register-list repetitions, SELEM = structure elements
         per access.  */
      switch (opcode_bits)
        {
        /*LD/ST4 (4 Registers).  */
        case 0:
          rpt = 1;
          selem = 4;
          break;
        /*LD/ST1 (4 Registers).  */
        case 2:
          rpt = 4;
          selem = 1;
          break;
        /*LD/ST3 (3 Registers).  */
        case 4:
          rpt = 1;
          selem = 3;
          break;
        /*LD/ST1 (3 Registers).  */
        case 6:
          rpt = 3;
          selem = 1;
          break;
        /*LD/ST1 (1 Register).  */
        case 7:
          rpt = 1;
          selem = 1;
          break;
        /*LD/ST2 (2 Registers).  */
        case 8:
          rpt = 1;
          selem = 2;
          break;
        /*LD/ST1 (2 Registers).  */
        case 10:
          rpt = 2;
          selem = 1;
          break;
        default:
          return AARCH64_RECORD_UNSUPPORTED;
          break;
        }
      /* Walk every element of every register in the transfer list.  */
      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
          {
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
              {
                /* Bit 22 (L) distinguishes loads from stores.  */
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                else
                  {
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                  }
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;
              }
          }
    }

  /* Bit 23 flags the post-index (writeback) forms, which also update
     the base register Rn.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3288
/* Record handler for load and store instructions.

   Decodes one instruction from the load/store encoding groups, recording
   either the destination registers (loads) or the <length, address>
   memory regions about to be overwritten (stores) into AARCH64_INSN_R.
   Advanced SIMD forms are delegated to aarch64_record_asimd_load_store.
   Returns AARCH64_RECORD_SUCCESS, AARCH64_RECORD_UNKNOWN, or whatever
   the SIMD handler returns.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  /* Fields common to (most of) the load/store encodings.  */
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);	/* L bit: load.  */
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);	/* V bit: SIMD&FP.  */
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store exclusive\n");
        }

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          /* Pair forms (bit 21) also load Rt2.  */
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
          /* Store: pair forms write twice the access size.  */
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs.  */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load register (literal)\n");
        }
      /* PC-relative load only ever writes Rt.  */
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store pair\n");
        }

      if (ld_flag)
        {
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          uint16_t imm7_off;
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          /* imm7 is signed: bit 6 set means a negative offset; compute
             its magnitude by two's complement.  */
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);	/* Scaled by access size.  */
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* NOTE(review): the excluded encoding appears to be the
             post-index form, which accesses memory at the unmodified
             base -- confirm against the ARM ARM.  */
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          /* Two adjacent regions, one per register of the pair.  */
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      /* Writeback forms also update the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;	/* Plain load.  */
        else
          ld_flag = 0x0;	/* Plain store.  */
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;	/* Sign-extending load.  */
        else
          /* NOTE(review): opc >= 2 with size == 3 looks like the PRFM
             (prefetch) encoding, which changes no register -- confirm.  */
          return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store (unsigned immediate):"
                              " size %x V %d opc %x\n", size_bits, vector_flag,
                              opc);
        }

      if (!ld_flag)
        {
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;	/* imm12 is scaled by access size.  */
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store (register offset)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          uint64_t reg_rm_val;
          /* The offset comes from Rm, optionally shifted by the access
             size when bit 12 (S) is set.  */
          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store (immediate and unprivileged)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          /* imm9 is signed: bit 8 set means a negative offset; compute
             its magnitude by two's complement.  */
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* Bits 10:11 == 0x01 is the post-index form: the access uses
             the unmodified base address.  */
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      /* Pre- and post-index forms write back to the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3571
3572 /* Record handler for data processing SIMD and floating point instructions. */
3573
3574 static unsigned int
3575 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3576 {
3577 uint8_t insn_bit21, opcode, rmode, reg_rd;
3578 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3579 uint8_t insn_bits11_14;
3580 uint32_t record_buf[2];
3581
3582 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3583 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3584 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3585 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3586 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3587 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3588 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3589 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3590 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3591
3592 if (record_debug)
3593 {
3594 fprintf_unfiltered (gdb_stdlog,
3595 "Process record: data processing SIMD/FP: ");
3596 }
3597
3598 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3599 {
3600 /* Floating point - fixed point conversion instructions. */
3601 if (!insn_bit21)
3602 {
3603 if (record_debug)
3604 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3605
3606 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3607 record_buf[0] = reg_rd;
3608 else
3609 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3610 }
3611 /* Floating point - conditional compare instructions. */
3612 else if (insn_bits10_11 == 0x01)
3613 {
3614 if (record_debug)
3615 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3616
3617 record_buf[0] = AARCH64_CPSR_REGNUM;
3618 }
3619 /* Floating point - data processing (2-source) and
3620 conditional select instructions. */
3621 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3622 {
3623 if (record_debug)
3624 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3625
3626 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3627 }
3628 else if (insn_bits10_11 == 0x00)
3629 {
3630 /* Floating point - immediate instructions. */
3631 if ((insn_bits12_15 & 0x01) == 0x01
3632 || (insn_bits12_15 & 0x07) == 0x04)
3633 {
3634 if (record_debug)
3635 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3636 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3637 }
3638 /* Floating point - compare instructions. */
3639 else if ((insn_bits12_15 & 0x03) == 0x02)
3640 {
3641 if (record_debug)
3642 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3643 record_buf[0] = AARCH64_CPSR_REGNUM;
3644 }
3645 /* Floating point - integer conversions instructions. */
3646 else if (insn_bits12_15 == 0x00)
3647 {
3648 /* Convert float to integer instruction. */
3649 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3650 {
3651 if (record_debug)
3652 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3653
3654 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3655 }
3656 /* Convert integer to float instruction. */
3657 else if ((opcode >> 1) == 0x01 && !rmode)
3658 {
3659 if (record_debug)
3660 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3661
3662 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3663 }
3664 /* Move float to integer instruction. */
3665 else if ((opcode >> 1) == 0x03)
3666 {
3667 if (record_debug)
3668 fprintf_unfiltered (gdb_stdlog, "move float to int");
3669
3670 if (!(opcode & 0x01))
3671 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3672 else
3673 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3674 }
3675 else
3676 return AARCH64_RECORD_UNKNOWN;
3677 }
3678 else
3679 return AARCH64_RECORD_UNKNOWN;
3680 }
3681 else
3682 return AARCH64_RECORD_UNKNOWN;
3683 }
3684 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3685 {
3686 if (record_debug)
3687 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3688
3689 /* Advanced SIMD copy instructions. */
3690 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3691 && !bit (aarch64_insn_r->aarch64_insn, 15)
3692 && bit (aarch64_insn_r->aarch64_insn, 10))
3693 {
3694 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3695 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3696 else
3697 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3698 }
3699 else
3700 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3701 }
3702 /* All remaining floating point or advanced SIMD instructions. */
3703 else
3704 {
3705 if (record_debug)
3706 fprintf_unfiltered (gdb_stdlog, "all remain");
3707
3708 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3709 }
3710
3711 if (record_debug)
3712 fprintf_unfiltered (gdb_stdlog, "\n");
3713
3714 aarch64_insn_r->reg_rec_count++;
3715 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3716 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3717 record_buf);
3718 return AARCH64_RECORD_SUCCESS;
3719 }
3720
3721 /* Decodes insns type and invokes its record handler. */
3722
3723 static unsigned int
3724 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3725 {
3726 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3727
3728 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3729 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3730 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3731 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3732
3733 /* Data processing - immediate instructions. */
3734 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3735 return aarch64_record_data_proc_imm (aarch64_insn_r);
3736
3737 /* Branch, exception generation and system instructions. */
3738 if (ins_bit26 && !ins_bit27 && ins_bit28)
3739 return aarch64_record_branch_except_sys (aarch64_insn_r);
3740
3741 /* Load and store instructions. */
3742 if (!ins_bit25 && ins_bit27)
3743 return aarch64_record_load_store (aarch64_insn_r);
3744
3745 /* Data processing - register instructions. */
3746 if (ins_bit25 && !ins_bit26 && ins_bit27)
3747 return aarch64_record_data_proc_reg (aarch64_insn_r);
3748
3749 /* Data processing - SIMD and floating point instructions. */
3750 if (ins_bit25 && ins_bit26 && ins_bit27)
3751 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3752
3753 return AARCH64_RECORD_UNSUPPORTED;
3754 }
3755
3756 /* Cleans up local record registers and memory allocations. */
3757
3758 static void
3759 deallocate_reg_mem (insn_decode_record *record)
3760 {
3761 xfree (record->aarch64_regs);
3762 xfree (record->aarch64_mems);
3763 }
3764
3765 /* Parse the current instruction and record the values of the registers and
3766 memory that will be changed in current instruction to record_arch_list
3767 return -1 if something is wrong. */
3768
3769 int
3770 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3771 CORE_ADDR insn_addr)
3772 {
3773 uint32_t rec_no = 0;
3774 uint8_t insn_size = 4;
3775 uint32_t ret = 0;
3776 ULONGEST t_bit = 0, insn_id = 0;
3777 gdb_byte buf[insn_size];
3778 insn_decode_record aarch64_record;
3779
3780 memset (&buf[0], 0, insn_size);
3781 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3782 target_read_memory (insn_addr, &buf[0], insn_size);
3783 aarch64_record.aarch64_insn
3784 = (uint32_t) extract_unsigned_integer (&buf[0],
3785 insn_size,
3786 gdbarch_byte_order (gdbarch));
3787 aarch64_record.regcache = regcache;
3788 aarch64_record.this_addr = insn_addr;
3789 aarch64_record.gdbarch = gdbarch;
3790
3791 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3792 if (ret == AARCH64_RECORD_UNSUPPORTED)
3793 {
3794 printf_unfiltered (_("Process record does not support instruction "
3795 "0x%0x at address %s.\n"),
3796 aarch64_record.aarch64_insn,
3797 paddress (gdbarch, insn_addr));
3798 ret = -1;
3799 }
3800
3801 if (0 == ret)
3802 {
3803 /* Record registers. */
3804 record_full_arch_list_add_reg (aarch64_record.regcache,
3805 AARCH64_PC_REGNUM);
3806 /* Always record register CPSR. */
3807 record_full_arch_list_add_reg (aarch64_record.regcache,
3808 AARCH64_CPSR_REGNUM);
3809 if (aarch64_record.aarch64_regs)
3810 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3811 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3812 aarch64_record.aarch64_regs[rec_no]))
3813 ret = -1;
3814
3815 /* Record memories. */
3816 if (aarch64_record.aarch64_mems)
3817 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3818 if (record_full_arch_list_add_mem
3819 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3820 aarch64_record.aarch64_mems[rec_no].len))
3821 ret = -1;
3822
3823 if (record_full_arch_list_add_end ())
3824 ret = -1;
3825 }
3826
3827 deallocate_reg_mem (&aarch64_record);
3828 return ret;
3829 }