Combine aarch64_decode_stp_offset_wb and aarch64_decode_stp_offset
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "doublest.h"
31 #include "value.h"
32 #include "arch-utils.h"
33 #include "osabi.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
37 #include "objfiles.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45 #include "ax.h"
46 #include "ax-gdb.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57
58 #include "features/aarch64.c"
59
60 #include "arch/aarch64-insn.h"
61
62 #include "opcode/aarch64.h"
63
64 #define submask(x) ((1L << ((x) + 1)) - 1)
65 #define bit(obj,st) (((obj) >> (st)) & 1)
66 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
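
/* For example, bit (insn, 21) reads the BLR/BR selector bit of a
   branch-register instruction, and bits (insn, 5, 9) extracts the
   5-bit rn field occupying bits 5..9 inclusive: submask (9 - 5)
   builds the 0x1f mask.  */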

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

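  /* Shift the field up so its most significant bit lands in bit 31,
     then shift back down arithmetically so the result is
     sign-extended.  E.g. for WIDTH 7, OFFSET 15 (the layout of the
     STP imm7 field), an all-ones field decodes to -1.  */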
  return ((int32_t) insn << shift_l) >> shift_r;
}

/* Determine if specified bits within an instruction opcode match a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against PATTERN for a match.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}

/* Decode an opcode if it represents an immediate ADD or SUB instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */
static int
aarch64_decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd,
			    unsigned *rn, int32_t *imm)
{
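  /* The mask 0x9f000000 fixes sf = 1 (64-bit) and the add/sub
     immediate class bits, leaving bit 30 (ADD vs. SUB) and bit 29
     (the flag-setting S bit) unconstrained; bit 30 is read out below
     as OP_IS_SUB.  */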
  if ((insn & 0x9f000000) == 0x91000000)
    {
      unsigned shift;
      unsigned op_is_sub;

      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = (insn >> 10) & 0xfff;
      shift = (insn >> 22) & 0x3;
      op_is_sub = (insn >> 30) & 0x1;

      switch (shift)
	{
	case 0:
	  break;
	case 1:
	  *imm <<= 12;
	  break;
	default:
	  /* UNDEFINED */
	  return 0;
	}

      if (op_is_sub)
	*imm = -*imm;

      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
			core_addr_to_string_nz (addr), insn, *rd, *rn,
			*imm);
	}
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a branch via register instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS_BLR receives the 'op' bit from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr,
		   unsigned *rn)
{
  /*        28  24  20  16  12   8   4   0  */
  /* blr  110101100011111100000000000rrrrr  */
  /* br   110101100001111100000000000rrrrr  */
  if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
    {
      *is_blr = (insn >> 21) & 1;
      *rn = (insn >> 5) & 0x1f;

      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
			core_addr_to_string_nz (addr), insn,
			*is_blr ? "blr" : "br", *rn);
	}

      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ERET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_eret (CORE_ADDR addr, uint32_t insn)
{
  /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
  if (insn == 0xd69f03e0)
    {
      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x eret\n",
			core_addr_to_string_nz (addr), insn);
	}
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a MOVZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
{
  if (decode_masked_match (insn, 0xff800000, 0x52800000))
    {
      *rd = (insn >> 0) & 0x1f;

      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
			core_addr_to_string_nz (addr), insn, *rd);
	}
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ORR (shifted register)
   instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   RM receives the 'rm' field from the decoded instruction.
   IMM receives the 'imm6' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_orr_shifted_register_x (CORE_ADDR addr, uint32_t insn,
				       unsigned *rd, unsigned *rn,
				       unsigned *rm, int32_t *imm)
{
  if (decode_masked_match (insn, 0xff200000, 0xaa000000))
    {
      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rm = (insn >> 16) & 0x1f;
      *imm = (insn >> 10) & 0x3f;

      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
			core_addr_to_string_nz (addr), insn, *rd, *rn,
			*rm, *imm);
	}
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a RET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
{
  if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
    {
      *rn = (insn >> 5) & 0x1f;
      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x ret x%u\n",
			core_addr_to_string_nz (addr), insn, *rn);
	}
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instructions:

   STP rt, rt2, [rn, #imm]
   STP rt, rt2, [rn, #imm]!

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.
   WBACK receives bit 23 (the writeback bit) from the decoded
   instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stp_offset (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
			   unsigned *rt2, unsigned *rn, int32_t *imm,
			   int *wback)
{
  if (decode_masked_match (insn, 0xff400000, 0xa9000000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
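      /* The imm7 field is scaled by the transfer size, which is 8
	 bytes for X-register STP, hence the shift by 3 below.  */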
      *imm <<= 3;
      *wback = bit (insn, 23);

      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]%s\n",
			core_addr_to_string_nz (addr), insn, *rt1, *rt2,
			*rn, *imm, *wback ? "!" : "");
	}
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instruction:
   STUR rt, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives the size field from the decoded instruction.
   RT receives the 'rt' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stur (CORE_ADDR addr, uint32_t insn, int *is64,
		     unsigned *rt, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
    {
      *is64 = (insn >> 30) & 1;
      *rt = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = extract_signed_bitfield (insn, 9, 12);

      if (aarch64_debug)
	{
	  debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
			core_addr_to_string_nz (addr), insn,
			*is64 ? 'x' : 'w', *rt, *rn, *imm);
	}
      return 1;
    }
  return 0;
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
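
/* A typical prologue that this analysis recognizes (a sketch; the
   exact code depends on the compiler and options) is:

     sub  sp, sp, #48
     stp  x29, x30, [sp, #32]
     add  x29, sp, #32

   after which x29 is the frame register and the frame size is the
   distance from x29 back to the stack pointer on entry.  */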

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;
      int wback;
      int32_t imm;
      unsigned cond;
      int is64;
      int is_link;
      int is_cbnz;
      int is_tbnz;
      unsigned bit;
      int is_adrp;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_add_sub_imm (start, insn, &rd, &rn, &imm))
	regs[rd] = pv_add_constant (regs[rn], imm);
      else if (aarch64_decode_adr (start, insn, &is_adrp, &rd, &offset)
	       && is_adrp)
	regs[rd] = pv_unknown ();
      else if (aarch64_decode_b (start, insn, &is_link, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (aarch64_decode_bcond (start, insn, &cond, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (aarch64_decode_br (start, insn, &is_link, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (aarch64_decode_cb (start, insn, &is64, &is_cbnz, &rn,
				  &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (aarch64_decode_eret (start, insn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (aarch64_decode_movz (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else if (aarch64_decode_orr_shifted_register_x (start, insn, &rd,
						      &rn, &rm, &imm))
	{
	  if (imm == 0 && rn == 31)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=0x%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (aarch64_decode_ret (start, insn, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (aarch64_decode_stur (start, insn, &is64, &rt, &rn, &offset))
	{
	  pv_area_store (stack, pv_add_constant (regs[rn], offset),
			 is64 ? 8 : 4, regs[rt]);
	}
      else if (aarch64_decode_stp_offset (start, insn, &rt1, &rt2, &rn,
					  &imm, &wback))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);

	  if (wback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (aarch64_decode_tb (start, insn, &is_tbnz, &bit, &rn,
				  &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrarily large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;
      LONGEST saved_fp;
      LONGEST saved_lr;
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
						      AARCH64_SP_REGNUM),
			 get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */
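
/* For example, struct { float x, y, z; } qualifies: it has at most
   four members, all of the same floating-point type.  */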

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);
	if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
	      {
		int i;

		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int regnum = AARCH64_V0_REGNUM + info->nsrn;

      info->argnum++;
      info->nsrn++;

      regcache_cooked_write (regcache, regnum, buf);
      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
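
  /* E.g. pushing a 12-byte argument with 8-byte alignment onto an
     empty stack leaves NSAA at 12, so 4 bytes of padding are pushed
     below to restore alignment.  */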
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
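
  /* E.g. a 16-byte value needs NREGS == 2 and goes in a pair of X
     registers if two of x0-x7 remain free, otherwise entirely on the
     stack.  */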

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, buf);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, buf);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
		    struct type *type,
		    const bfd_byte *buf)
{
  if (!pass_in_v (gdbarch, regcache, info, buf))
    pass_on_stack (info, type, buf);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp, int struct_return,
			 CORE_ADDR struct_addr)
{
  int nstack = 0;
  int argnum;
  int x_argreg;
  int v_argreg;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward; ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
	      || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
			      value_contents (arg));
	  break;

	case TYPE_CODE_COMPLEX:
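	  /* A complex value occupies two consecutive V registers, the
	     real part followed by the imaginary part, hence the
	     NSRN <= 6 check below.  */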
	  if (info.nsrn <= 6)
	    {
	      const bfd_byte *buf = value_contents (arg);
	      struct type *target_type =
		check_typedef (TYPE_TARGET_TYPE (arg_type));

	      pass_in_v (gdbarch, regcache, &info, buf);
	      pass_in_v (gdbarch, regcache, &info,
			 buf + TYPE_LENGTH (target_type));
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, value_contents (arg));
	    }
	  break;

	case TYPE_CODE_FLT:
	  pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
			      value_contents (arg));
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (is_hfa (arg_type))
	    {
	      int elements = TYPE_NFIELDS (arg_type);

	      /* Homogeneous Aggregates.  */
	      if (info.nsrn + elements < 8)
		{
		  int i;

		  for (i = 0; i < elements; i++)
		    {
		      /* We know that we have sufficient registers
			 available; therefore this will never fall back
			 to the stack.  */
		      struct value *field =
			value_primitive_field (arg, 0, i, arg_type);
		      struct type *field_type =
			check_typedef (value_type (field));

		      pass_in_v_or_stack (gdbarch, regcache, &info,
					  field_type,
					  value_contents_writeable (field));
		    }
		}
	      else
		{
		  info.nsrn = 8;
		  pass_on_stack (&info, arg_type, value_contents (arg));
		}
	    }
	  else if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
				  value_contents (arg));
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
				value_contents (arg));
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
			      value_contents (arg));
	  break;
	}
    }

  /* Make sure the stack retains 16-byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
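  /* E.g. an SP of 0x100f is aligned down to 0x1000.  */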
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
\f

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const gdb_byte *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
			    int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straightforward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[X_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */
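
  /* For example, an HFA of two doubles comes back in V registers,
     while a 24-byte struct must be returned in memory through the
     struct-return pointer.  */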

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
1953
1954 /* Write into appropriate registers a function return value of type
1955 TYPE, given in virtual format. */
1956
1957 static void
1958 aarch64_store_return_value (struct type *type, struct regcache *regs,
1959 const gdb_byte *valbuf)
1960 {
1961 struct gdbarch *gdbarch = get_regcache_arch (regs);
1962 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1963
1964 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1965 {
1966 bfd_byte buf[V_REGISTER_SIZE];
1967 int len = TYPE_LENGTH (type);
1968
1969 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1970 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1971 }
1972 else if (TYPE_CODE (type) == TYPE_CODE_INT
1973 || TYPE_CODE (type) == TYPE_CODE_CHAR
1974 || TYPE_CODE (type) == TYPE_CODE_BOOL
1975 || TYPE_CODE (type) == TYPE_CODE_PTR
1976 || TYPE_CODE (type) == TYPE_CODE_REF
1977 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1978 {
1979 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1980 {
1981 /* Values of one word or less are zero/sign-extended and
1982 returned in X0. */
1983 bfd_byte tmpbuf[X_REGISTER_SIZE];
1984 LONGEST val = unpack_long (type, valbuf);
1985
1986 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1987 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1988 }
1989 else
1990 {
1991 /* Integral values greater than one word are stored in
1992 consecutive registers starting with X0. This will always
1993 be a multiple of the register size. */
1994 int len = TYPE_LENGTH (type);
1995 int regno = AARCH64_X0_REGNUM;
1996
1997 while (len > 0)
1998 {
1999 regcache_cooked_write (regs, regno++, valbuf);
2000 len -= X_REGISTER_SIZE;
2001 valbuf += X_REGISTER_SIZE;
2002 }
2003 }
2004 }
2005 else if (is_hfa (type))
2006 {
2007 int elements = TYPE_NFIELDS (type);
2008 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2009 int len = TYPE_LENGTH (member_type);
2010 int i;
2011
2012 for (i = 0; i < elements; i++)
2013 {
2014 int regno = AARCH64_V0_REGNUM + i;
2015 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2016
2017 if (aarch64_debug)
2018 {
2019 debug_printf ("write HFA return value element %d to %s\n",
2020 i + 1,
2021 gdbarch_register_name (gdbarch, regno));
2022 }
2023
2024 memcpy (tmpbuf, valbuf, len);
2025 regcache_cooked_write (regs, regno, tmpbuf);
2026 valbuf += len;
2027 }
2028 }
2029 else
2030 {
2031 /* For a structure or union the behaviour is as if the value had
2032 been stored to word-aligned memory and then loaded into
2033 registers with 64-bit load instruction(s). */
2034 int len = TYPE_LENGTH (type);
2035 int regno = AARCH64_X0_REGNUM;
2036 bfd_byte tmpbuf[X_REGISTER_SIZE];
2037
2038 while (len > 0)
2039 {
2040 memcpy (tmpbuf, valbuf,
2041 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2042 regcache_cooked_write (regs, regno++, tmpbuf);
2043 len -= X_REGISTER_SIZE;
2044 valbuf += X_REGISTER_SIZE;
2045 }
2046 }
2047 }
2048
2049 /* Implement the "return_value" gdbarch method. */
2050
2051 static enum return_value_convention
2052 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2053 struct type *valtype, struct regcache *regcache,
2054 gdb_byte *readbuf, const gdb_byte *writebuf)
2055 {
2057
2058 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2059 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2060 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2061 {
2062 if (aarch64_return_in_memory (gdbarch, valtype))
2063 {
2064 if (aarch64_debug)
2065 debug_printf ("return value in memory\n");
2066 return RETURN_VALUE_STRUCT_CONVENTION;
2067 }
2068 }
2069
2070 if (writebuf)
2071 aarch64_store_return_value (valtype, regcache, writebuf);
2072
2073 if (readbuf)
2074 aarch64_extract_return_value (valtype, regcache, readbuf);
2075
2076 if (aarch64_debug)
2077 debug_printf ("return value in registers\n");
2078
2079 return RETURN_VALUE_REGISTER_CONVENTION;
2080 }
2081
2082 /* Implement the "get_longjmp_target" gdbarch method. */
2083
2084 static int
2085 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2086 {
2087 CORE_ADDR jb_addr;
2088 gdb_byte buf[X_REGISTER_SIZE];
2089 struct gdbarch *gdbarch = get_frame_arch (frame);
2090 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2091 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2092
2093 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2094
2095 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2096 X_REGISTER_SIZE))
2097 return 0;
2098
2099 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2100 return 1;
2101 }
2102
2103 /* Implement the "gen_return_address" gdbarch method. */
2104
2105 static void
2106 aarch64_gen_return_address (struct gdbarch *gdbarch,
2107 struct agent_expr *ax, struct axs_value *value,
2108 CORE_ADDR scope)
2109 {
2110 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2111 value->kind = axs_lvalue_register;
2112 value->u.reg = AARCH64_LR_REGNUM;
2113 }
2114 \f
2115
2116 /* Return the pseudo register name corresponding to register regnum. */
2117
2118 static const char *
2119 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2120 {
2121 static const char *const q_name[] =
2122 {
2123 "q0", "q1", "q2", "q3",
2124 "q4", "q5", "q6", "q7",
2125 "q8", "q9", "q10", "q11",
2126 "q12", "q13", "q14", "q15",
2127 "q16", "q17", "q18", "q19",
2128 "q20", "q21", "q22", "q23",
2129 "q24", "q25", "q26", "q27",
2130 "q28", "q29", "q30", "q31",
2131 };
2132
2133 static const char *const d_name[] =
2134 {
2135 "d0", "d1", "d2", "d3",
2136 "d4", "d5", "d6", "d7",
2137 "d8", "d9", "d10", "d11",
2138 "d12", "d13", "d14", "d15",
2139 "d16", "d17", "d18", "d19",
2140 "d20", "d21", "d22", "d23",
2141 "d24", "d25", "d26", "d27",
2142 "d28", "d29", "d30", "d31",
2143 };
2144
2145 static const char *const s_name[] =
2146 {
2147 "s0", "s1", "s2", "s3",
2148 "s4", "s5", "s6", "s7",
2149 "s8", "s9", "s10", "s11",
2150 "s12", "s13", "s14", "s15",
2151 "s16", "s17", "s18", "s19",
2152 "s20", "s21", "s22", "s23",
2153 "s24", "s25", "s26", "s27",
2154 "s28", "s29", "s30", "s31",
2155 };
2156
2157 static const char *const h_name[] =
2158 {
2159 "h0", "h1", "h2", "h3",
2160 "h4", "h5", "h6", "h7",
2161 "h8", "h9", "h10", "h11",
2162 "h12", "h13", "h14", "h15",
2163 "h16", "h17", "h18", "h19",
2164 "h20", "h21", "h22", "h23",
2165 "h24", "h25", "h26", "h27",
2166 "h28", "h29", "h30", "h31",
2167 };
2168
2169 static const char *const b_name[] =
2170 {
2171 "b0", "b1", "b2", "b3",
2172 "b4", "b5", "b6", "b7",
2173 "b8", "b9", "b10", "b11",
2174 "b12", "b13", "b14", "b15",
2175 "b16", "b17", "b18", "b19",
2176 "b20", "b21", "b22", "b23",
2177 "b24", "b25", "b26", "b27",
2178 "b28", "b29", "b30", "b31",
2179 };
2180
2181 regnum -= gdbarch_num_regs (gdbarch);
2182
2183 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2184 return q_name[regnum - AARCH64_Q0_REGNUM];
2185
2186 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2187 return d_name[regnum - AARCH64_D0_REGNUM];
2188
2189 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2190 return s_name[regnum - AARCH64_S0_REGNUM];
2191
2192 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2193 return h_name[regnum - AARCH64_H0_REGNUM];
2194
2195 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2196 return b_name[regnum - AARCH64_B0_REGNUM];
2197
2198 internal_error (__FILE__, __LINE__,
2199 _("aarch64_pseudo_register_name: bad register number %d"),
2200 regnum);
2201 }
2202
2203 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2204
2205 static struct type *
2206 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2207 {
2208 regnum -= gdbarch_num_regs (gdbarch);
2209
2210 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2211 return aarch64_vnq_type (gdbarch);
2212
2213 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2214 return aarch64_vnd_type (gdbarch);
2215
2216 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2217 return aarch64_vns_type (gdbarch);
2218
2219 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2220 return aarch64_vnh_type (gdbarch);
2221
2222 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2223 return aarch64_vnb_type (gdbarch);
2224
2225 internal_error (__FILE__, __LINE__,
2226 _("aarch64_pseudo_register_type: bad register number %d"),
2227 regnum);
2228 }
2229
2230 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2231
2232 static int
2233 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2234 struct reggroup *group)
2235 {
2236 regnum -= gdbarch_num_regs (gdbarch);
2237
2238 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2239 return group == all_reggroup || group == vector_reggroup;
2240 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2241 return (group == all_reggroup || group == vector_reggroup
2242 || group == float_reggroup);
2243 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2244 return (group == all_reggroup || group == vector_reggroup
2245 || group == float_reggroup);
2246 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2247 return group == all_reggroup || group == vector_reggroup;
2248 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2249 return group == all_reggroup || group == vector_reggroup;
2250
2251 return group == all_reggroup;
2252 }
2253
2254 /* Implement the "pseudo_register_read_value" gdbarch method. */
2255
2256 static struct value *
2257 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2258 struct regcache *regcache,
2259 int regnum)
2260 {
2261 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2262 struct value *result_value;
2263 gdb_byte *buf;
2264
2265 result_value = allocate_value (register_type (gdbarch, regnum));
2266 VALUE_LVAL (result_value) = lval_register;
2267 VALUE_REGNUM (result_value) = regnum;
2268 buf = value_contents_raw (result_value);
2269
2270 regnum -= gdbarch_num_regs (gdbarch);
2271
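/* Each Q/D/S/H/B pseudo register is a view of the low bytes of the
   underlying V register, so fetch the raw V register and copy out
   the pseudo register's width.  */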
2272 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2273 {
2274 enum register_status status;
2275 unsigned v_regnum;
2276
2277 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2278 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2279 if (status != REG_VALID)
2280 mark_value_bytes_unavailable (result_value, 0,
2281 TYPE_LENGTH (value_type (result_value)));
2282 else
2283 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2284 return result_value;
2285 }
2286
2287 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2288 {
2289 enum register_status status;
2290 unsigned v_regnum;
2291
2292 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2293 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2294 if (status != REG_VALID)
2295 mark_value_bytes_unavailable (result_value, 0,
2296 TYPE_LENGTH (value_type (result_value)));
2297 else
2298 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2299 return result_value;
2300 }
2301
2302 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2303 {
2304 enum register_status status;
2305 unsigned v_regnum;
2306
2307 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2308 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2309 if (status != REG_VALID)
2310 mark_value_bytes_unavailable (result_value, 0,
2311 TYPE_LENGTH (value_type (result_value)));
2312 else
2313 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2314 return result_value;
2315 }
2316
2317 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2318 {
2319 enum register_status status;
2320 unsigned v_regnum;
2321
2322 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2323 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2324 if (status != REG_VALID)
2325 mark_value_bytes_unavailable (result_value, 0,
2326 TYPE_LENGTH (value_type (result_value)));
2327 else
2328 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2329 return result_value;
2330 }
2331
2332 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2333 {
2334 enum register_status status;
2335 unsigned v_regnum;
2336
2337 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2338 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2339 if (status != REG_VALID)
2340 mark_value_bytes_unavailable (result_value, 0,
2341 TYPE_LENGTH (value_type (result_value)));
2342 else
2343 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2344 return result_value;
2345 }
2346
2347 gdb_assert_not_reached ("regnum out of bounds");
2348 }
2349
2350 /* Implement the "pseudo_register_write" gdbarch method. */
2351
2352 static void
2353 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2354 int regnum, const gdb_byte *buf)
2355 {
2356 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2357
2358 /* Ensure the register buffer is zeroed; we want GDB writes to the
2359 various 'scalar' pseudo registers to behave like architectural
2360 writes: register-width bytes are written and the remainder is
2361 set to zero. */
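/* For example, writing the 4-byte S0 pseudo register stores those
   bytes into V0 and clears the remaining 12 bytes, matching the
   zero-extension the hardware performs on scalar writes.  */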
2362 memset (reg_buf, 0, sizeof (reg_buf));
2363
2364 regnum -= gdbarch_num_regs (gdbarch);
2365
2366 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2367 {
2368 /* pseudo Q registers */
2369 unsigned v_regnum;
2370
2371 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2372 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2373 regcache_raw_write (regcache, v_regnum, reg_buf);
2374 return;
2375 }
2376
2377 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2378 {
2379 /* pseudo D registers */
2380 unsigned v_regnum;
2381
2382 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2383 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2384 regcache_raw_write (regcache, v_regnum, reg_buf);
2385 return;
2386 }
2387
2388 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2389 {
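/* pseudo S registers */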
2390 unsigned v_regnum;
2391
2392 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2393 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2394 regcache_raw_write (regcache, v_regnum, reg_buf);
2395 return;
2396 }
2397
2398 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2399 {
2400 /* pseudo H registers */
2401 unsigned v_regnum;
2402
2403 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2404 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2405 regcache_raw_write (regcache, v_regnum, reg_buf);
2406 return;
2407 }
2408
2409 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2410 {
2411 /* pseudo B registers */
2412 unsigned v_regnum;
2413
2414 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2415 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2416 regcache_raw_write (regcache, v_regnum, reg_buf);
2417 return;
2418 }
2419
2420 gdb_assert_not_reached ("regnum out of bounds");
2421 }
2422
2423 /* Callback function for user_reg_add. */
2424
2425 static struct value *
2426 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2427 {
2428 const int *reg_p = (const int *) baton;
2429
2430 return value_of_register (*reg_p, frame);
2431 }
2432 \f
2433
2434 /* Implement the "software_single_step" gdbarch method, needed to
2435 single step through atomic sequences on AArch64. */
2436
2437 static int
2438 aarch64_software_single_step (struct frame_info *frame)
2439 {
2440 struct gdbarch *gdbarch = get_frame_arch (frame);
2441 struct address_space *aspace = get_frame_address_space (frame);
2442 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2443 const int insn_size = 4;
2444 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2445 CORE_ADDR pc = get_frame_pc (frame);
2446 CORE_ADDR breaks[2] = { -1, -1 };
2447 CORE_ADDR loc = pc;
2448 CORE_ADDR closing_insn = 0;
2449 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2450 byte_order_for_code);
2451 int index;
2452 int insn_count;
2453 int bc_insn_count = 0; /* Conditional branch instruction count. */
2454 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2455 aarch64_inst inst;
2456
2457 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2458 return 0;
2459
2460 /* Look for a Load Exclusive instruction which begins the sequence; bit 22 is the L flag, set for loads and clear for stores. */
2461 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2462 return 0;
2463
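/* Scan forward over at most atomic_sequence_length instructions for
   the Store Exclusive that closes the sequence, noting any
   conditional branch whose destination will also need a breakpoint.  */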
2464 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2465 {
2466 loc += insn_size;
2467 insn = read_memory_unsigned_integer (loc, insn_size,
2468 byte_order_for_code);
2469
2470 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2471 return 0;
2472 /* Check if the instruction is a conditional branch. */
2473 if (inst.opcode->iclass == condbranch)
2474 {
2475 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2476
2477 if (bc_insn_count >= 1)
2478 return 0;
2479
2480 /* It is, so we'll try to set a breakpoint at the destination. */
2481 breaks[1] = loc + inst.operands[0].imm.value;
2482
2483 bc_insn_count++;
2484 last_breakpoint++;
2485 }
2486
2487 /* Look for the Store Exclusive which closes the atomic sequence. */
2488 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2489 {
2490 closing_insn = loc;
2491 break;
2492 }
2493 }
2494
2495 /* We didn't find a closing Store Exclusive instruction; fall back. */
2496 if (!closing_insn)
2497 return 0;
2498
2499 /* Insert breakpoint after the end of the atomic sequence. */
2500 breaks[0] = loc + insn_size;
2501
2502 /* Check for duplicated breakpoints, and also check that the second
2503 breakpoint is not within the atomic sequence. */
2504 if (last_breakpoint
2505 && (breaks[1] == breaks[0]
2506 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2507 last_breakpoint = 0;
2508
2509 /* Insert the breakpoint at the end of the sequence, and one at the
2510 destination of the conditional branch, if it exists. */
2511 for (index = 0; index <= last_breakpoint; index++)
2512 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2513
2514 return 1;
2515 }
2516
2517 struct displaced_step_closure
2518 {
2519 /* True when a conditional instruction, such as B.COND or TBZ,
2520 is being displaced stepped. */
2521 int cond;
2522
2523 /* PC adjustment offset after displaced stepping. */
2524 int32_t pc_adjust;
2525 };
2526
2527 /* Data when visiting instructions for displaced stepping. */
2528
2529 struct aarch64_displaced_step_data
2530 {
2531 struct aarch64_insn_data base;
2532
2533 /* The address at which the instruction will be executed. */
2534 CORE_ADDR new_addr;
2535 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2536 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2537 /* Number of instructions in INSN_BUF. */
2538 unsigned insn_count;
2539 /* Registers when doing displaced stepping. */
2540 struct regcache *regs;
2541
2542 struct displaced_step_closure *dsc;
2543 };
2544
2545 /* Implementation of aarch64_insn_visitor method "b". */
2546
2547 static void
2548 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2549 struct aarch64_insn_data *data)
2550 {
2551 struct aarch64_displaced_step_data *dsd
2552 = (struct aarch64_displaced_step_data *) data;
2553 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2554
2555 if (can_encode_int32 (new_offset, 28))
2556 {
2557 /* Emit B rather than BL, because executing BL on a new address
2558 will get the wrong address into LR. In order to avoid this,
2559 we emit B, and update LR if the instruction is BL. */
2560 emit_b (dsd->insn_buf, 0, new_offset);
2561 dsd->insn_count++;
2562 }
2563 else
2564 {
2565 /* The offset will not fit in a B instruction; execute a NOP instead and let the fixup phase set the PC to the branch target. */
2566 emit_nop (dsd->insn_buf);
2567 dsd->insn_count++;
2568 dsd->dsc->pc_adjust = offset;
2569 }
2570
2571 if (is_bl)
2572 {
2573 /* Update LR. */
2574 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2575 data->insn_addr + 4);
2576 }
2577 }
2578
2579 /* Implementation of aarch64_insn_visitor method "b_cond". */
2580
2581 static void
2582 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2583 struct aarch64_insn_data *data)
2584 {
2585 struct aarch64_displaced_step_data *dsd
2586 = (struct aarch64_displaced_step_data *) data;
2587 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2588
2589 /* GDB has to fix up the PC after displaced stepping this instruction
2590 differently according to whether the condition is true or false.
2591 Instead of checking COND against the condition flags, we emit
2592 the following sequence, and GDB can tell from the stop PC which
2593 way the branch went.
2594
2595 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2596 INSN1 ;
2597 TAKEN:
2598 INSN2
2599 */
2600
2601 emit_bcond (dsd->insn_buf, cond, 8);
2602 dsd->dsc->cond = 1;
2603 dsd->dsc->pc_adjust = offset;
2604 dsd->insn_count = 1;
2605 }
2606
2607 /* Build an aarch64_register operand from a register number and a
2608 flag saying whether it is the 64-bit (Xn) or the 32-bit (Wn) view
2609 of the register. */
2610
2611 static struct aarch64_register
2612 aarch64_register (unsigned num, int is64)
2613 {
2614 return (struct aarch64_register) { num, is64 };
2615 }
2616
2617 /* Implementation of aarch64_insn_visitor method "cb". */
2618
2619 static void
2620 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2621 const unsigned rn, int is64,
2622 struct aarch64_insn_data *data)
2623 {
2624 struct aarch64_displaced_step_data *dsd
2625 = (struct aarch64_displaced_step_data *) data;
2626 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2627
2628 /* Rather than relocating the compare and branch offset, emit the
2629 following sequence; the stop PC then tells us which way it went:
2630
2631 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2632 INSN1 ;
2633 TAKEN:
2634 INSN2
2635 */
2636 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2637 dsd->insn_count = 1;
2638 dsd->dsc->cond = 1;
2639 dsd->dsc->pc_adjust = offset;
2640 }
2641
2642 /* Implementation of aarch64_insn_visitor method "tb". */
2643
2644 static void
2645 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2646 const unsigned rt, unsigned bit,
2647 struct aarch64_insn_data *data)
2648 {
2649 struct aarch64_displaced_step_data *dsd
2650 = (struct aarch64_displaced_step_data *) data;
2651 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2652
2653 /* Rather than relocating the test bit and branch offset, emit the
2654 following sequence; the stop PC then tells us which way it went:
2655
2656 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2657 INSN1 ;
2658 TAKEN:
2659 INSN2
2660
2661 */
2662 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2663 dsd->insn_count = 1;
2664 dsd->dsc->cond = 1;
2665 dsd->dsc->pc_adjust = offset;
2666 }
2667
2668 /* Implementation of aarch64_insn_visitor method "adr". */
2669
2670 static void
2671 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2672 const int is_adrp, struct aarch64_insn_data *data)
2673 {
2674 struct aarch64_displaced_step_data *dsd
2675 = (struct aarch64_displaced_step_data *) data;
2676 /* We know exactly the address the ADR{P,} instruction will compute.
2677 We can just write it to the destination register. */
2678 CORE_ADDR address = data->insn_addr + offset;
2679
2680 if (is_adrp)
2681 {
2682 /* Clear the lower 12 bits of the address to get its 4 KiB page base. */
2683 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2684 address & ~0xfff);
2685 }
2686 else
2687 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2688 address);
2689
2690 dsd->dsc->pc_adjust = 4;
2691 emit_nop (dsd->insn_buf);
2692 dsd->insn_count = 1;
2693 }
2694
2695 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2696
2697 static void
2698 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2699 const unsigned rt, const int is64,
2700 struct aarch64_insn_data *data)
2701 {
2702 struct aarch64_displaced_step_data *dsd
2703 = (struct aarch64_displaced_step_data *) data;
2704 CORE_ADDR address = data->insn_addr + offset;
2705 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2706
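/* A PC-relative load cannot execute correctly from the scratch pad,
   so write the literal's address into Xt and rewrite the instruction
   as a zero-offset load through Xt.  */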
2707 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2708 address);
2709
2710 if (is_sw)
2711 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2712 aarch64_register (rt, 1), zero);
2713 else
2714 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2715 aarch64_register (rt, 1), zero);
2716
2717 dsd->dsc->pc_adjust = 4;
2718 }
2719
2720 /* Implementation of aarch64_insn_visitor method "others". */
2721
2722 static void
2723 aarch64_displaced_step_others (const uint32_t insn,
2724 struct aarch64_insn_data *data)
2725 {
2726 struct aarch64_displaced_step_data *dsd
2727 = (struct aarch64_displaced_step_data *) data;
2728
2729 aarch64_emit_insn (dsd->insn_buf, insn);
2730 dsd->insn_count = 1;
2731
2732 if ((insn & 0xfffffc1f) == 0xd65f0000)
2733 {
2734 /* RET: executing the relocated RET has already set the PC, so no adjustment is needed. */
2735 dsd->dsc->pc_adjust = 0;
2736 }
2737 else
2738 dsd->dsc->pc_adjust = 4;
2739 }
2740
2741 static const struct aarch64_insn_visitor visitor =
2742 {
2743 aarch64_displaced_step_b,
2744 aarch64_displaced_step_b_cond,
2745 aarch64_displaced_step_cb,
2746 aarch64_displaced_step_tb,
2747 aarch64_displaced_step_adr,
2748 aarch64_displaced_step_ldr_literal,
2749 aarch64_displaced_step_others,
2750 };
2751
2752 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2753
2754 struct displaced_step_closure *
2755 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2756 CORE_ADDR from, CORE_ADDR to,
2757 struct regcache *regs)
2758 {
2759 struct displaced_step_closure *dsc = NULL;
2760 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2761 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2762 struct aarch64_displaced_step_data dsd;
2763
2764 /* Look for a Load Exclusive instruction which begins the sequence. */
2765 if (decode_masked_match (insn, 0x3fc00000, 0x08400000))
2766 {
2767 /* We can't displaced-step atomic sequences. */
2768 return NULL;
2769 }
2770
2771 dsc = XCNEW (struct displaced_step_closure);
2772 dsd.base.insn_addr = from;
2773 dsd.new_addr = to;
2774 dsd.regs = regs;
2775 dsd.dsc = dsc;
2776 dsd.insn_count = 0;
2777 aarch64_relocate_instruction (insn, &visitor,
2778 (struct aarch64_insn_data *) &dsd);
2779 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2780
2781 if (dsd.insn_count != 0)
2782 {
2783 int i;
2784
2785 /* Instruction can be relocated to scratch pad. Copy
2786 relocated instruction(s) there. */
2787 for (i = 0; i < dsd.insn_count; i++)
2788 {
2789 if (debug_displaced)
2790 {
2791 debug_printf ("displaced: writing insn ");
2792 debug_printf ("%.8x", dsd.insn_buf[i]);
2793 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2794 }
2795 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2796 (ULONGEST) dsd.insn_buf[i]);
2797 }
2798 }
2799 else
2800 {
2801 xfree (dsc);
2802 dsc = NULL;
2803 }
2804
2805 return dsc;
2806 }
2807
2808 /* Implement the "displaced_step_fixup" gdbarch method. */
2809
2810 void
2811 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2812 struct displaced_step_closure *dsc,
2813 CORE_ADDR from, CORE_ADDR to,
2814 struct regcache *regs)
2815 {
2816 if (dsc->cond)
2817 {
2818 ULONGEST pc;
2819
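/* The scratch pad held a single B.COND, CBZ/CBNZ or TBZ/TBNZ
   branching 8 bytes forward, so after the single step the PC is
   TO + 8 if the branch was taken and TO + 4 if it fell through.  */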
2820 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2821 if (pc - to == 8)
2822 {
2823 /* Condition is true. */
2824 }
2825 else if (pc - to == 4)
2826 {
2827 /* Condition is false. */
2828 dsc->pc_adjust = 4;
2829 }
2830 else
2831 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2832 }
2833
2834 if (dsc->pc_adjust != 0)
2835 {
2836 if (debug_displaced)
2837 {
2838 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2839 paddress (gdbarch, from), dsc->pc_adjust);
2840 }
2841 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2842 from + dsc->pc_adjust);
2843 }
2844 }
2845
2846 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2847
2848 int
2849 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2850 struct displaced_step_closure *closure)
2851 {
2852 return 1;
2853 }
2854
2855 /* Initialize the current architecture based on INFO. If possible,
2856 re-use an architecture from ARCHES, which is a list of
2857 architectures already created during this debugging session.
2858
2859 Called e.g. at program startup, when reading a core file, and when
2860 reading a binary file. */
2861
2862 static struct gdbarch *
2863 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2864 {
2865 struct gdbarch_tdep *tdep;
2866 struct gdbarch *gdbarch;
2867 struct gdbarch_list *best_arch;
2868 struct tdesc_arch_data *tdesc_data = NULL;
2869 const struct target_desc *tdesc = info.target_desc;
2870 int i;
2872 int valid_p = 1;
2873 const struct tdesc_feature *feature;
2874 int num_regs = 0;
2875 int num_pseudo_regs = 0;
2876
2877 /* Ensure we always have a target descriptor. */
2878 if (!tdesc_has_registers (tdesc))
2879 tdesc = tdesc_aarch64;
2880
2881 gdb_assert (tdesc);
2882
2883 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2884
2885 if (feature == NULL)
2886 return NULL;
2887
2888 tdesc_data = tdesc_data_alloc ();
2889
2890 /* Validate the descriptor provides the mandatory core R registers
2891 and allocate their numbers. */
2892 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2893 valid_p &=
2894 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2895 aarch64_r_register_names[i]);
2896
2897 num_regs = AARCH64_X0_REGNUM + i;
2898
2899 /* Look for the V registers. */
2900 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2901 if (feature)
2902 {
2903 /* Validate the descriptor provides the mandatory V registers
2904 and allocate their numbers. */
2905 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2906 valid_p &=
2907 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2908 aarch64_v_register_names[i]);
2909
2910 num_regs = AARCH64_V0_REGNUM + i;
2911
2912 num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos. */
2913 num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos. */
2914 num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos. */
2915 num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos. */
2916 num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos. */
2917 }
2918
2919 if (!valid_p)
2920 {
2921 tdesc_data_cleanup (tdesc_data);
2922 return NULL;
2923 }
2924
2925 /* AArch64 code is always little-endian. */
2926 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2927
2928 /* If there is already a candidate, use it. */
2929 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2930 best_arch != NULL;
2931 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2932 {
2933 /* Found a match. */
2934 break;
2935 }
2936
2937 if (best_arch != NULL)
2938 {
2939 if (tdesc_data != NULL)
2940 tdesc_data_cleanup (tdesc_data);
2941 return best_arch->gdbarch;
2942 }
2943
2944 tdep = XCNEW (struct gdbarch_tdep);
2945 gdbarch = gdbarch_alloc (&info, tdep);
2946
2947 /* This should be low enough for everything. */
2948 tdep->lowest_pc = 0x20;
2949 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2950 tdep->jb_elt_size = 8;
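/* The OS ABI handler invoked below may enable longjmp decoding by
   setting jb_pc to the slot index of the saved PC within the
   jmp_buf.  */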
2951
2952 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2953 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2954
2955 /* Frame handling. */
2956 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2957 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2958 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2959
2960 /* Advance PC across function entry code. */
2961 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2962
2963 /* The stack grows downward. */
2964 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2965
2966 /* Breakpoint manipulation. */
2967 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2968 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2969 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2970
2971 /* Information about registers, etc. */
2972 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2973 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2974 set_gdbarch_num_regs (gdbarch, num_regs);
2975
2976 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2977 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2978 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2979 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2980 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2981 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2982 aarch64_pseudo_register_reggroup_p);
2983
2984 /* ABI */
2985 set_gdbarch_short_bit (gdbarch, 16);
2986 set_gdbarch_int_bit (gdbarch, 32);
2987 set_gdbarch_float_bit (gdbarch, 32);
2988 set_gdbarch_double_bit (gdbarch, 64);
2989 set_gdbarch_long_double_bit (gdbarch, 128);
2990 set_gdbarch_long_bit (gdbarch, 64);
2991 set_gdbarch_long_long_bit (gdbarch, 64);
2992 set_gdbarch_ptr_bit (gdbarch, 64);
2993 set_gdbarch_char_signed (gdbarch, 0);
2994 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2995 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2996 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2997
2998 /* Internal <-> external register number maps. */
2999 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3000
3001 /* Returning results. */
3002 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3003
3004 /* Disassembly. */
3005 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3006
3007 /* Virtual tables. */
3008 set_gdbarch_vbit_in_delta (gdbarch, 1);
3009
3010 /* Hook in the ABI-specific overrides, if they have been registered. */
3011 info.target_desc = tdesc;
3012 info.tdep_info = (void *) tdesc_data;
3013 gdbarch_init_osabi (info, gdbarch);
3014
3015 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3016
3017 /* Hook in the default unwinders. */
3018 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3019 dwarf2_append_unwinders (gdbarch);
3020 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3021
3022 frame_base_set_default (gdbarch, &aarch64_normal_base);
3023
3024 /* Now we have tuned the configuration, set a few final things,
3025 based on what the OS ABI has told us. */
3026
3027 if (tdep->jb_pc >= 0)
3028 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3029
3030 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3031
3032 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3033
3034 /* Add standard register aliases. */
3035 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3036 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3037 value_of_aarch64_user_reg,
3038 &aarch64_register_aliases[i].regnum);
3039
3040 return gdbarch;
3041 }
3042
3043 static void
3044 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3045 {
3046 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3047
3048 if (tdep == NULL)
3049 return;
3050
3051 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3052 paddress (gdbarch, tdep->lowest_pc));
3053 }
3054
3055 /* Suppress warning from -Wmissing-prototypes. */
3056 extern initialize_file_ftype _initialize_aarch64_tdep;
3057
3058 void
3059 _initialize_aarch64_tdep (void)
3060 {
3061 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3062 aarch64_dump_tdep);
3063
3064 initialize_tdesc_aarch64 ();
3065
3066 /* Debug this file's internals. */
3067 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3068 Set AArch64 debugging."), _("\
3069 Show AArch64 debugging."), _("\
3070 When on, AArch64 specific debugging is enabled."),
3071 NULL,
3072 show_aarch64_debug,
3073 &setdebuglist, &showdebuglist);
3074 }
3075
3076 /* AArch64 process record-replay related structures, defines etc. */
3077
3078 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3079 do \
3080 { \
3081 unsigned int reg_len = LENGTH; \
3082 if (reg_len) \
3083 { \
3084 REGS = XNEWVEC (uint32_t, reg_len); \
3085 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3086 } \
3087 } \
3088 while (0)
3089
3090 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3091 do \
3092 { \
3093 unsigned int mem_len = LENGTH; \
3094 if (mem_len) \
3095 { \
3096 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3097 memcpy (&MEMS->len, &RECORD_BUF[0], \
3098 sizeof (struct aarch64_mem_r) * LENGTH); \
3099 } \
3100 } \
3101 while (0)
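/* MEM_ALLOC depends on RECORD_BUF holding (length, address) pairs
   laid out exactly like struct aarch64_mem_r below.  */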
3102
3103 /* AArch64 record/replay structures and enumerations. */
3104
3105 struct aarch64_mem_r
3106 {
3107 uint64_t len; /* Record length. */
3108 uint64_t addr; /* Memory address. */
3109 };
3110
3111 enum aarch64_record_result
3112 {
3113 AARCH64_RECORD_SUCCESS,
3114 AARCH64_RECORD_FAILURE,
3115 AARCH64_RECORD_UNSUPPORTED,
3116 AARCH64_RECORD_UNKNOWN
3117 };
3118
3119 typedef struct insn_decode_record_t
3120 {
3121 struct gdbarch *gdbarch;
3122 struct regcache *regcache;
3123 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3124 uint32_t aarch64_insn; /* Insn to be recorded. */
3125 uint32_t mem_rec_count; /* Count of memory records. */
3126 uint32_t reg_rec_count; /* Count of register records. */
3127 uint32_t *aarch64_regs; /* Registers to be recorded. */
3128 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3129 } insn_decode_record;
3130
3131 /* Record handler for data processing - register instructions. */
3132
3133 static unsigned int
3134 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3135 {
3136 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3137 uint32_t record_buf[4];
3138
3139 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3140 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3141 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3142
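/* Bit 28 splits the data-processing (register) group: clear for the
   logical (shifted register) and add/subtract forms, set for the
   carry, conditional and one/two/three-source forms.  */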
3143 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3144 {
3145 uint8_t setflags;
3146
3147 /* Logical (shifted register). */
3148 if (insn_bits24_27 == 0x0a)
3149 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3150 /* Add/subtract. */
3151 else if (insn_bits24_27 == 0x0b)
3152 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3153 else
3154 return AARCH64_RECORD_UNKNOWN;
3155
3156 record_buf[0] = reg_rd;
3157 aarch64_insn_r->reg_rec_count = 1;
3158 if (setflags)
3159 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3160 }
3161 else
3162 {
3163 if (insn_bits24_27 == 0x0b)
3164 {
3165 /* Data-processing (3 source). */
3166 record_buf[0] = reg_rd;
3167 aarch64_insn_r->reg_rec_count = 1;
3168 }
3169 else if (insn_bits24_27 == 0x0a)
3170 {
3171 if (insn_bits21_23 == 0x00)
3172 {
3173 /* Add/subtract (with carry). */
3174 record_buf[0] = reg_rd;
3175 aarch64_insn_r->reg_rec_count = 1;
3176 if (bit (aarch64_insn_r->aarch64_insn, 29))
3177 {
3178 record_buf[1] = AARCH64_CPSR_REGNUM;
3179 aarch64_insn_r->reg_rec_count = 2;
3180 }
3181 }
3182 else if (insn_bits21_23 == 0x02)
3183 {
3184 /* Conditional compare (register) and conditional compare
3185 (immediate) instructions. */
3186 record_buf[0] = AARCH64_CPSR_REGNUM;
3187 aarch64_insn_r->reg_rec_count = 1;
3188 }
3189 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3190 {
3191 /* Conditional select. */
3192 /* Data-processing (2 source). */
3193 /* Data-processing (1 source). */
3194 record_buf[0] = reg_rd;
3195 aarch64_insn_r->reg_rec_count = 1;
3196 }
3197 else
3198 return AARCH64_RECORD_UNKNOWN;
3199 }
3200 }
3201
3202 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3203 record_buf);
3204 return AARCH64_RECORD_SUCCESS;
3205 }
3206
3207 /* Record handler for data processing - immediate instructions. */
3208
3209 static unsigned int
3210 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3211 {
3212 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3213 uint32_t record_buf[4];
3214
3215 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3216 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3217 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3218 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3219
3220 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3221 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3222 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3223 {
3224 record_buf[0] = reg_rd;
3225 aarch64_insn_r->reg_rec_count = 1;
3226 }
3227 else if (insn_bits24_27 == 0x01)
3228 {
3229 /* Add/Subtract (immediate). */
3230 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3231 record_buf[0] = reg_rd;
3232 aarch64_insn_r->reg_rec_count = 1;
3233 if (setflags)
3234 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3235 }
3236 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3237 {
3238 /* Logical (immediate). */
3239 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3240 record_buf[0] = reg_rd;
3241 aarch64_insn_r->reg_rec_count = 1;
3242 if (setflags)
3243 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3244 }
3245 else
3246 return AARCH64_RECORD_UNKNOWN;
3247
3248 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3249 record_buf);
3250 return AARCH64_RECORD_SUCCESS;
3251 }
3252
3253 /* Record handler for branch, exception generation and system instructions. */
3254
3255 static unsigned int
3256 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3257 {
3258 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3259 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3260 uint32_t record_buf[4];
3261
3262 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3263 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3264 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3265
3266 if (insn_bits28_31 == 0x0d)
3267 {
3268 /* Exception generation instructions. */
3269 if (insn_bits24_27 == 0x04)
3270 {
3271 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3272 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3273 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3274 {
3275 ULONGEST svc_number;
3276
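/* This is an SVC; under the GNU/Linux ABI the system call number is
   passed in X8 (raw register 8), so read it and hand it to the
   OS-specific syscall record hook.  */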
3277 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3278 &svc_number);
3279 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3280 svc_number);
3281 }
3282 else
3283 return AARCH64_RECORD_UNSUPPORTED;
3284 }
3285 /* System instructions. */
3286 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3287 {
3288 uint32_t reg_rt, reg_crn;
3289
3290 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3291 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3292
3293 /* Record rt in case of sysl and mrs instructions. */
3294 if (bit (aarch64_insn_r->aarch64_insn, 21))
3295 {
3296 record_buf[0] = reg_rt;
3297 aarch64_insn_r->reg_rec_count = 1;
3298 }
3299 /* Record cpsr for hint and msr(immediate) instructions. */
3300 else if (reg_crn == 0x02 || reg_crn == 0x04)
3301 {
3302 record_buf[0] = AARCH64_CPSR_REGNUM;
3303 aarch64_insn_r->reg_rec_count = 1;
3304 }
3305 }
3306 /* Unconditional branch (register). */
3307 else if ((insn_bits24_27 & 0x0e) == 0x06)
3308 {
3309 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3310 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3311 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3312 }
3313 else
3314 return AARCH64_RECORD_UNKNOWN;
3315 }
3316 /* Unconditional branch (immediate). */
3317 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3318 {
3319 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3320 if (bit (aarch64_insn_r->aarch64_insn, 31))
3321 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3322 }
3323 else
3324 /* Compare & branch (immediate), Test & branch (immediate) and
3325 Conditional branch (immediate). */
3326 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3327
3328 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3329 record_buf);
3330 return AARCH64_RECORD_SUCCESS;
3331 }
3332
3333 /* Record handler for advanced SIMD load and store instructions. */
3334
3335 static unsigned int
3336 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3337 {
3338 CORE_ADDR address;
3339 uint64_t addr_offset = 0;
3340 uint32_t record_buf[24];
3341 uint64_t record_buf_mem[24];
3342 uint32_t reg_rn, reg_rt;
3343 uint32_t reg_index = 0, mem_index = 0;
3344 uint8_t opcode_bits, size_bits;
3345
3346 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3347 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3348 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3349 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3350 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3351
3352 if (record_debug)
3353 debug_printf ("Process record: Advanced SIMD load/store\n");
3354
3355 /* Load/store single structure. */
3356 if (bit (aarch64_insn_r->aarch64_insn, 24))
3357 {
3358 uint8_t sindex, scale, selem, esize, replicate = 0;
3359 scale = opcode_bits >> 2;
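/* SELEM, the number of registers in the structure (1 to 4), comes
   from opcode bit 1 and the R bit (bit 21).  */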
3360 selem = ((opcode_bits & 0x02) |
3361 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3362 switch (scale)
3363 {
3364 case 1:
3365 if (size_bits & 0x01)
3366 return AARCH64_RECORD_UNKNOWN;
3367 break;
3368 case 2:
3369 if ((size_bits >> 1) & 0x01)
3370 return AARCH64_RECORD_UNKNOWN;
3371 if (size_bits & 0x01)
3372 {
3373 if (!((opcode_bits >> 1) & 0x01))
3374 scale = 3;
3375 else
3376 return AARCH64_RECORD_UNKNOWN;
3377 }
3378 break;
3379 case 3:
3380 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3381 {
3382 scale = size_bits;
3383 replicate = 1;
3384 break;
3385 }
3386 else
3387 return AARCH64_RECORD_UNKNOWN;
3388 default:
3389 break;
3390 }
3391 esize = 8 << scale;
3392 if (replicate)
3393 for (sindex = 0; sindex < selem; sindex++)
3394 {
3395 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3396 reg_rt = (reg_rt + 1) % 32;
3397 }
3398 else
3399 {
3400 for (sindex = 0; sindex < selem; sindex++)
3401 if (bit (aarch64_insn_r->aarch64_insn, 22))
3402 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3403 else
3404 {
3405 record_buf_mem[mem_index++] = esize / 8;
3406 record_buf_mem[mem_index++] = address + addr_offset;
3407 }
3408 addr_offset = addr_offset + (esize / 8);
3409 reg_rt = (reg_rt + 1) % 32;
3410 }
3411 }
3412 /* Load/store multiple structure. */
3413 else
3414 {
3415 uint8_t selem, esize, rpt, elements;
3416 uint8_t eindex, rindex;
3417
3418 esize = 8 << size_bits;
3419 if (bit (aarch64_insn_r->aarch64_insn, 30))
3420 elements = 128 / esize;
3421 else
3422 elements = 64 / esize;
3423
3424 switch (opcode_bits)
3425 {
3426 /* LD/ST4 (4 registers). */
3427 case 0:
3428 rpt = 1;
3429 selem = 4;
3430 break;
3431 /* LD/ST1 (4 registers). */
3432 case 2:
3433 rpt = 4;
3434 selem = 1;
3435 break;
3436 /* LD/ST3 (3 registers). */
3437 case 4:
3438 rpt = 1;
3439 selem = 3;
3440 break;
3441 /* LD/ST1 (3 registers). */
3442 case 6:
3443 rpt = 3;
3444 selem = 1;
3445 break;
3446 /* LD/ST1 (1 register). */
3447 case 7:
3448 rpt = 1;
3449 selem = 1;
3450 break;
3451 /* LD/ST2 (2 registers). */
3452 case 8:
3453 rpt = 1;
3454 selem = 2;
3455 break;
3456 /* LD/ST1 (2 registers). */
3457 case 10:
3458 rpt = 2;
3459 selem = 1;
3460 break;
3461 default:
3462 return AARCH64_RECORD_UNSUPPORTED;
3463 break;
3464 }
3465 for (rindex = 0; rindex < rpt; rindex++)
3466 for (eindex = 0; eindex < elements; eindex++)
3467 {
3468 uint8_t reg_tt, sindex;
3469 reg_tt = (reg_rt + rindex) % 32;
3470 for (sindex = 0; sindex < selem; sindex++)
3471 {
3472 if (bit (aarch64_insn_r->aarch64_insn, 22))
3473 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3474 else
3475 {
3476 record_buf_mem[mem_index++] = esize / 8;
3477 record_buf_mem[mem_index++] = address + addr_offset;
3478 }
3479 addr_offset = addr_offset + (esize / 8);
3480 reg_tt = (reg_tt + 1) % 32;
3481 }
3482 }
3483 }
3484
3485 if (bit (aarch64_insn_r->aarch64_insn, 23))
3486 record_buf[reg_index++] = reg_rn;
3487
3488 aarch64_insn_r->reg_rec_count = reg_index;
3489 aarch64_insn_r->mem_rec_count = mem_index / 2;
3490 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3491 record_buf_mem);
3492 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3493 record_buf);
3494 return AARCH64_RECORD_SUCCESS;
3495 }
3496
3497 /* Record handler for load and store instructions. */
3498
3499 static unsigned int
3500 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3501 {
3502 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3503 uint8_t insn_bit23, insn_bit21;
3504 uint8_t opc, size_bits, ld_flag, vector_flag;
3505 uint32_t reg_rn, reg_rt, reg_rt2;
3506 uint64_t datasize, offset;
3507 uint32_t record_buf[8];
3508 uint64_t record_buf_mem[8];
3509 CORE_ADDR address;
3510
3511 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3512 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3513 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3514 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3515 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3516 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3517 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3518 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3519 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3520 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3521 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3522
3523 /* Load/store exclusive. */
3524 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3525 {
3526 if (record_debug)
3527 debug_printf ("Process record: load/store exclusive\n");
3528
3529 if (ld_flag)
3530 {
3531 record_buf[0] = reg_rt;
3532 aarch64_insn_r->reg_rec_count = 1;
3533 if (insn_bit21)
3534 {
3535 record_buf[1] = reg_rt2;
3536 aarch64_insn_r->reg_rec_count = 2;
3537 }
3538 }
3539 else
3540 {
3541 if (insn_bit21)
3542 datasize = (8 << size_bits) * 2;
3543 else
3544 datasize = (8 << size_bits);
3545 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3546 &address);
3547 record_buf_mem[0] = datasize / 8;
3548 record_buf_mem[1] = address;
3549 aarch64_insn_r->mem_rec_count = 1;
3550 if (!insn_bit23)
3551 {
3552 /* Save register rs. */
3553 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3554 aarch64_insn_r->reg_rec_count = 1;
3555 }
3556 }
3557 }
3558 /* Load register (literal) instruction decoding. */
3559 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3560 {
3561 if (record_debug)
3562 debug_printf ("Process record: load register (literal)\n");
3563 if (vector_flag)
3564 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3565 else
3566 record_buf[0] = reg_rt;
3567 aarch64_insn_r->reg_rec_count = 1;
3568 }
3569 /* All types of load/store pair instruction decoding. */
3570 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3571 {
3572 if (record_debug)
3573 debug_printf ("Process record: load/store pair\n");
3574
3575 if (ld_flag)
3576 {
3577 if (vector_flag)
3578 {
3579 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3580 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3581 }
3582 else
3583 {
3584 record_buf[0] = reg_rt;
3585 record_buf[1] = reg_rt2;
3586 }
3587 aarch64_insn_r->reg_rec_count = 2;
3588 }
3589 else
3590 {
3591 uint16_t imm7_off;
3592 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3593 if (!vector_flag)
3594 size_bits = size_bits >> 1;
3595 datasize = 8 << (2 + size_bits);
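/* imm7 is a signed field; bit 6 is its sign bit, so take the two's
   complement by hand before scaling it by the access size.  */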
3596 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3597 offset = offset << (2 + size_bits);
3598 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3599 &address);
3600 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3601 {
3602 if (imm7_off & 0x40)
3603 address = address - offset;
3604 else
3605 address = address + offset;
3606 }
3607
3608 record_buf_mem[0] = datasize / 8;
3609 record_buf_mem[1] = address;
3610 record_buf_mem[2] = datasize / 8;
3611 record_buf_mem[3] = address + (datasize / 8);
3612 aarch64_insn_r->mem_rec_count = 2;
3613 }
3614 if (bit (aarch64_insn_r->aarch64_insn, 23))
3615 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3616 }
3617 /* Load/store register (unsigned immediate) instructions. */
3618 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3619 {
3620 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
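/* opc<1> clear means a plain store (00) or load (01); opc<1> set
   means a sign-extending load, except that size 11 is the PRFM
   prefetch encoding, which is left unhandled here.  */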
3621 if (!(opc >> 1))
3622 if (opc & 0x01)
3623 ld_flag = 0x01;
3624 else
3625 ld_flag = 0x0;
3626 else
3627 if (size_bits != 0x03)
3628 ld_flag = 0x01;
3629 else
3630 return AARCH64_RECORD_UNKNOWN;
3631
3632 if (record_debug)
3633 {
3634 debug_printf ("Process record: load/store (unsigned immediate):"
3635 " size %x V %d opc %x\n", size_bits, vector_flag,
3636 opc);
3637 }
3638
3639 if (!ld_flag)
3640 {
3641 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3642 datasize = 8 << size_bits;
3643 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3644 &address);
3645 offset = offset << size_bits;
3646 address = address + offset;
3647
3648 record_buf_mem[0] = datasize >> 3;
3649 record_buf_mem[1] = address;
3650 aarch64_insn_r->mem_rec_count = 1;
3651 }
3652 else
3653 {
3654 if (vector_flag)
3655 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3656 else
3657 record_buf[0] = reg_rt;
3658 aarch64_insn_r->reg_rec_count = 1;
3659 }
3660 }
3661 /* Load/store register (register offset) instructions. */
3662 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3663 && insn_bits10_11 == 0x02 && insn_bit21)
3664 {
3665 if (record_debug)
3666 debug_printf ("Process record: load/store (register offset)\n");
3667 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3668 if (!(opc >> 1))
3669 if (opc & 0x01)
3670 ld_flag = 0x01;
3671 else
3672 ld_flag = 0x0;
3673 else
3674 if (size_bits != 0x03)
3675 ld_flag = 0x01;
3676 else
3677 return AARCH64_RECORD_UNKNOWN;
3678
3679 if (!ld_flag)
3680 {
3681 uint64_t reg_rm_val;
3682 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3683 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3684 if (bit (aarch64_insn_r->aarch64_insn, 12))
3685 offset = reg_rm_val << size_bits;
3686 else
3687 offset = reg_rm_val;
3688 datasize = 8 << size_bits;
3689 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3690 &address);
3691 address = address + offset;
3692 record_buf_mem[0] = datasize >> 3;
3693 record_buf_mem[1] = address;
3694 aarch64_insn_r->mem_rec_count = 1;
3695 }
3696 else
3697 {
3698 if (vector_flag)
3699 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3700 else
3701 record_buf[0] = reg_rt;
3702 aarch64_insn_r->reg_rec_count = 1;
3703 }
3704 }
3705 /* Load/store register (immediate and unprivileged) instructions. */
3706 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3707 && !insn_bit21)
3708 {
3709 if (record_debug)
3710 {
3711 debug_printf ("Process record: load/store "
3712 "(immediate and unprivileged)\n");
3713 }
3714 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3715 if (!(opc >> 1))
3716 if (opc & 0x01)
3717 ld_flag = 0x01;
3718 else
3719 ld_flag = 0x0;
3720 else
3721 if (size_bits != 0x03)
3722 ld_flag = 0x01;
3723 else
3724 return AARCH64_RECORD_UNKNOWN;
3725
3726 if (!ld_flag)
3727 {
3728 uint16_t imm9_off;
3729 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3730 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3731 datasize = 8 << size_bits;
3732 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3733 &address);
3734 if (insn_bits10_11 != 0x01)
3735 {
3736 if (imm9_off & 0x0100)
3737 address = address - offset;
3738 else
3739 address = address + offset;
3740 }
3741 record_buf_mem[0] = datasize >> 3;
3742 record_buf_mem[1] = address;
3743 aarch64_insn_r->mem_rec_count = 1;
3744 }
3745 else
3746 {
3747 if (vector_flag)
3748 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3749 else
3750 record_buf[0] = reg_rt;
3751 aarch64_insn_r->reg_rec_count = 1;
3752 }
3753 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3754 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3755 }
3756 /* Advanced SIMD load/store instructions. */
3757 else
3758 return aarch64_record_asimd_load_store (aarch64_insn_r);
3759
3760 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3761 record_buf_mem);
3762 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3763 record_buf);
3764 return AARCH64_RECORD_SUCCESS;
3765 }
3766
3767 /* Record handler for data processing SIMD and floating point instructions. */
3768
3769 static unsigned int
3770 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3771 {
3772 uint8_t insn_bit21, opcode, rmode, reg_rd;
3773 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3774 uint8_t insn_bits11_14;
3775 uint32_t record_buf[2];
3776
3777 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3778 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3779 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3780 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3781 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3782 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3783 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3784 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3785 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3786
3787 if (record_debug)
3788 debug_printf ("Process record: data processing SIMD/FP: ");
3789
3790 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3791 {
3792 /* Floating point - fixed point conversion instructions. */
3793 if (!insn_bit21)
3794 {
3795 if (record_debug)
3796 debug_printf ("FP - fixed point conversion");
3797
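/* Conversions from floating point to a fixed-point integer (opcode 00x
   with rmode 11) write a general register; the remaining fixed-point
   forms write a SIMD/FP register.  */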
3798 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3799 record_buf[0] = reg_rd;
3800 else
3801 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3802 }
3803 /* Floating point - conditional compare instructions. */
3804 else if (insn_bits10_11 == 0x01)
3805 {
3806 if (record_debug)
3807 debug_printf ("FP - conditional compare");
3808
3809 record_buf[0] = AARCH64_CPSR_REGNUM;
3810 }
3811 /* Floating point - data processing (2-source) and
3812 conditional select instructions. */
3813 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3814 {
3815 if (record_debug)
3816 debug_printf ("FP - DP (2-source) / conditional select");
3817
3818 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3819 }
3820 else if (insn_bits10_11 == 0x00)
3821 {
3822 /* Floating point - immediate instructions. */
3823 if ((insn_bits12_15 & 0x01) == 0x01
3824 || (insn_bits12_15 & 0x07) == 0x04)
3825 {
3826 if (record_debug)
3827 debug_printf ("FP - immediate");
3828 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3829 }
3830 /* Floating point - compare instructions. */
3831 else if ((insn_bits12_15 & 0x03) == 0x02)
3832 {
3833 if (record_debug)
3834 debug_printf ("FP - compare");
3835 record_buf[0] = AARCH64_CPSR_REGNUM;
3836 }
3837 /* Floating point - integer conversion instructions. */
3838 else if (insn_bits12_15 == 0x00)
3839 {
3840 /* Convert float to integer instruction. */
3841 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3842 {
3843 if (record_debug)
3844 debug_printf ("float to int conversion");
3845
3846 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3847 }
3848 /* Convert integer to float instruction. */
3849 else if ((opcode >> 1) == 0x01 && !rmode)
3850 {
3851 if (record_debug)
3852 debug_printf ("int to float conversion");
3853
3854 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3855 }
3856 /* Move float to integer instruction. */
3857 else if ((opcode >> 1) == 0x03)
3858 {
3859 if (record_debug)
3860 debug_printf ("move float to int");
3861
3862 if (!(opcode & 0x01))
3863 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3864 else
3865 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3866 }
3867 else
3868 return AARCH64_RECORD_UNKNOWN;
3869 }
3870 else
3871 return AARCH64_RECORD_UNKNOWN;
3872 }
3873 else
3874 return AARCH64_RECORD_UNKNOWN;
3875 }
3876 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3877 {
3878 if (record_debug)
3879 debug_printf ("SIMD copy");
3880
3881 /* Advanced SIMD copy instructions. */
3882 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3883 && !bit (aarch64_insn_r->aarch64_insn, 15)
3884 && bit (aarch64_insn_r->aarch64_insn, 10))
3885 {
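/* SMOV and UMOV (insn_bits11_14 == 0101 or 0111) copy to a general
   register; every other copy form writes a SIMD register.  */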
3886 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3887 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3888 else
3889 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3890 }
3891 else
3892 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3893 }
3894 /* All remaining floating point or advanced SIMD instructions. */
3895 else
3896 {
3897 if (record_debug)
3898 debug_printf ("all remaining");
3899
3900 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3901 }
3902
3903 if (record_debug)
3904 debug_printf ("\n");
3905
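/* Every path above records exactly one destination register.  */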
3906 aarch64_insn_r->reg_rec_count++;
3907 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3908 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3909 record_buf);
3910 return AARCH64_RECORD_SUCCESS;
3911 }
3912
3913 /* Decode an instruction's type and invoke its record handler. */
3914
3915 static unsigned int
3916 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3917 {
3918 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3919
3920 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3921 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3922 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3923 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3924
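/* Top-level A64 instruction groups, selected on bits 28:25
   (x = don't care):
     100x - data processing (immediate)
     101x - branch, exception generation and system
     x1x0 - loads and stores
     x101 - data processing (register)
     x111 - data processing (SIMD and floating point).  */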
3925 /* Data processing - immediate instructions. */
3926 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3927 return aarch64_record_data_proc_imm (aarch64_insn_r);
3928
3929 /* Branch, exception generation and system instructions. */
3930 if (ins_bit26 && !ins_bit27 && ins_bit28)
3931 return aarch64_record_branch_except_sys (aarch64_insn_r);
3932
3933 /* Load and store instructions. */
3934 if (!ins_bit25 && ins_bit27)
3935 return aarch64_record_load_store (aarch64_insn_r);
3936
3937 /* Data processing - register instructions. */
3938 if (ins_bit25 && !ins_bit26 && ins_bit27)
3939 return aarch64_record_data_proc_reg (aarch64_insn_r);
3940
3941 /* Data processing - SIMD and floating point instructions. */
3942 if (ins_bit25 && ins_bit26 && ins_bit27)
3943 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3944
3945 return AARCH64_RECORD_UNSUPPORTED;
3946 }
3947
3948 /* Frees the register and memory record buffers allocated for the
instruction. */
3949
3950 static void
3951 deallocate_reg_mem (insn_decode_record *record)
3952 {
3953 xfree (record->aarch64_regs);
3954 xfree (record->aarch64_mems);
3955 }
3956
3957 /* Parse the current instruction and record the values of the registers
3958 and memory that will be changed by it to record_arch_list.  Return -1
3959 if something is wrong. */
3960
3961 int
3962 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3963 CORE_ADDR insn_addr)
3964 {
3965 uint32_t rec_no = 0;
3966 uint8_t insn_size = 4;
3967 uint32_t ret = 0;
3968 ULONGEST t_bit = 0, insn_id = 0;
3969 gdb_byte buf[insn_size];
3970 insn_decode_record aarch64_record;
3971
3972 memset (&buf[0], 0, insn_size);
3973 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3974 target_read_memory (insn_addr, &buf[0], insn_size);
3975 aarch64_record.aarch64_insn
3976 = (uint32_t) extract_unsigned_integer (&buf[0],
3977 insn_size,
3978 gdbarch_byte_order (gdbarch));
3979 aarch64_record.regcache = regcache;
3980 aarch64_record.this_addr = insn_addr;
3981 aarch64_record.gdbarch = gdbarch;
3982
3983 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3984 if (ret == AARCH64_RECORD_UNSUPPORTED)
3985 {
3986 printf_unfiltered (_("Process record does not support instruction "
3987 "0x%0x at address %s.\n"),
3988 aarch64_record.aarch64_insn,
3989 paddress (gdbarch, insn_addr));
3990 ret = -1;
3991 }
3992
3993 if (0 == ret)
3994 {
3995 /* Record registers. */
3996 record_full_arch_list_add_reg (aarch64_record.regcache,
3997 AARCH64_PC_REGNUM);
3998 /* Always record register CPSR. */
3999 record_full_arch_list_add_reg (aarch64_record.regcache,
4000 AARCH64_CPSR_REGNUM);
4001 if (aarch64_record.aarch64_regs)
4002 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4003 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4004 aarch64_record.aarch64_regs[rec_no]))
4005 ret = -1;
4006
4007 /* Record memories. */
4008 if (aarch64_record.aarch64_mems)
4009 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4010 if (record_full_arch_list_add_mem
4011 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4012 aarch64_record.aarch64_mems[rec_no].len))
4013 ret = -1;
4014
4015 if (record_full_arch_list_add_end ())
4016 ret = -1;
4017 }
4018
4019 deallocate_reg_mem (&aarch64_record);
4020 return ret;
4021 }
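/* aarch64_process_record is installed as the gdbarch process_record
   hook (via set_gdbarch_process_record) in this file's gdbarch
   initialisation code.  */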