1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "doublest.h"
31 #include "value.h"
32 #include "arch-utils.h"
33 #include "osabi.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
37 #include "objfiles.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45 #include "ax.h"
46 #include "ax-gdb.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57
58 #include "features/aarch64.c"
59
60 #include "arch/aarch64-insn.h"
61
62 /* Pseudo register base numbers. */
63 #define AARCH64_Q0_REGNUM 0
64 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
65 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
66 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
67 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
68
69 /* The standard register names, and all the valid aliases for them. */
70 static const struct
71 {
72 const char *const name;
73 int regnum;
74 } aarch64_register_aliases[] =
75 {
76 /* 64-bit register names. */
77 {"fp", AARCH64_FP_REGNUM},
78 {"lr", AARCH64_LR_REGNUM},
79 {"sp", AARCH64_SP_REGNUM},
80
81 /* 32-bit register names. */
82 {"w0", AARCH64_X0_REGNUM + 0},
83 {"w1", AARCH64_X0_REGNUM + 1},
84 {"w2", AARCH64_X0_REGNUM + 2},
85 {"w3", AARCH64_X0_REGNUM + 3},
86 {"w4", AARCH64_X0_REGNUM + 4},
87 {"w5", AARCH64_X0_REGNUM + 5},
88 {"w6", AARCH64_X0_REGNUM + 6},
89 {"w7", AARCH64_X0_REGNUM + 7},
90 {"w8", AARCH64_X0_REGNUM + 8},
91 {"w9", AARCH64_X0_REGNUM + 9},
92 {"w10", AARCH64_X0_REGNUM + 10},
93 {"w11", AARCH64_X0_REGNUM + 11},
94 {"w12", AARCH64_X0_REGNUM + 12},
95 {"w13", AARCH64_X0_REGNUM + 13},
96 {"w14", AARCH64_X0_REGNUM + 14},
97 {"w15", AARCH64_X0_REGNUM + 15},
98 {"w16", AARCH64_X0_REGNUM + 16},
99 {"w17", AARCH64_X0_REGNUM + 17},
100 {"w18", AARCH64_X0_REGNUM + 18},
101 {"w19", AARCH64_X0_REGNUM + 19},
102 {"w20", AARCH64_X0_REGNUM + 20},
103 {"w21", AARCH64_X0_REGNUM + 21},
104 {"w22", AARCH64_X0_REGNUM + 22},
105 {"w23", AARCH64_X0_REGNUM + 23},
106 {"w24", AARCH64_X0_REGNUM + 24},
107 {"w25", AARCH64_X0_REGNUM + 25},
108 {"w26", AARCH64_X0_REGNUM + 26},
109 {"w27", AARCH64_X0_REGNUM + 27},
110 {"w28", AARCH64_X0_REGNUM + 28},
111 {"w29", AARCH64_X0_REGNUM + 29},
112 {"w30", AARCH64_X0_REGNUM + 30},
113
114 /* specials */
115 {"ip0", AARCH64_X0_REGNUM + 16},
116 {"ip1", AARCH64_X0_REGNUM + 17}
117 };
118
119 /* The required core 'R' registers. */
120 static const char *const aarch64_r_register_names[] =
121 {
122 /* These registers must appear in consecutive RAW register number
123 order and they must begin with AARCH64_X0_REGNUM! */
124 "x0", "x1", "x2", "x3",
125 "x4", "x5", "x6", "x7",
126 "x8", "x9", "x10", "x11",
127 "x12", "x13", "x14", "x15",
128 "x16", "x17", "x18", "x19",
129 "x20", "x21", "x22", "x23",
130 "x24", "x25", "x26", "x27",
131 "x28", "x29", "x30", "sp",
132 "pc", "cpsr"
133 };
134
135 /* The FP/SIMD 'V' registers. */
136 static const char *const aarch64_v_register_names[] =
137 {
138 /* These registers must appear in consecutive RAW register number
139 order and they must begin with AARCH64_V0_REGNUM! */
140 "v0", "v1", "v2", "v3",
141 "v4", "v5", "v6", "v7",
142 "v8", "v9", "v10", "v11",
143 "v12", "v13", "v14", "v15",
144 "v16", "v17", "v18", "v19",
145 "v20", "v21", "v22", "v23",
146 "v24", "v25", "v26", "v27",
147 "v28", "v29", "v30", "v31",
148 "fpsr",
149 "fpcr"
150 };
151
152 /* AArch64 prologue cache structure. */
153 struct aarch64_prologue_cache
154 {
155 /* The program counter at the start of the function. It is used to
156 identify this frame as a prologue frame. */
157 CORE_ADDR func;
158
159 /* The program counter at the time this frame was created; i.e. where
160 this function was called from. It is used to identify this frame as a
161 stub frame. */
162 CORE_ADDR prev_pc;
163
164 /* The stack pointer at the time this frame was created; i.e. the
165 caller's stack pointer when this function was called. It is used
166 to identify this frame. */
167 CORE_ADDR prev_sp;
168
169 /* Is the target available to read from? */
170 int available_p;
171
172 /* The frame base for this frame is just prev_sp - frame size.
173 FRAMESIZE is the distance from the frame pointer to the
174 initial stack pointer. */
175 int framesize;
176
177 /* The register used to hold the frame pointer for this frame. */
178 int framereg;
179
180 /* Saved register offsets. */
181 struct trad_frame_saved_reg *saved_regs;
182 };
183
184 static void
185 show_aarch64_debug (struct ui_file *file, int from_tty,
186 struct cmd_list_element *c, const char *value)
187 {
188 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
189 }
190
191 /* Extract a signed value from a bit field within an instruction
192 encoding.
193
194 INSN is the instruction opcode.
195
196 WIDTH specifies the width of the bit field to extract (in bits).
197
198 OFFSET specifies the least significant bit of the field where bits
199 are numbered zero counting from least to most significant. */
200
201 static int32_t
202 extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
203 {
204 unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
205 unsigned shift_r = sizeof (int32_t) * 8 - width;
206
207 return ((int32_t) insn << shift_l) >> shift_r;
208 }
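/* Worked example (illustrative, not part of the upstream sources):
   extracting the 7-bit signed immediate at bit 15 of the pre-index STP
   opcode 0xa9bd7bfd ("stp x29, x30, [sp, #-48]!"):

     shift_l = 32 - (15 + 7) = 10;  insn << 10 moves bits [21:15] to [31:25]
     shift_r = 32 - 7 = 25;         arithmetic >> 25 then sign-extends

   Bits [21:15] are 0b1111010, i.e. -6 as a 7-bit two's complement value;
   scaled by 8 (see aarch64_decode_stp_offset_wb below) this gives the
   expected byte offset of -48.  */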
209
210 /* Determine whether specified bits within an instruction opcode match a
211 specific pattern.
212
213 INSN is the instruction opcode.
214
215 MASK specifies the bits within the opcode that are to be tested
216 against PATTERN for a match. */
217
218 static int
219 decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
220 {
221 return (insn & mask) == pattern;
222 }
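/* Example (illustrative only): the RET encoding is matched with
   MASK = 0xfffffc1f and PATTERN = 0xd65f0000, leaving only the 'rn'
   field (bits [9:5]) free to vary.  For a plain "ret" (0xd65f03c0):

     0xd65f03c0 & 0xfffffc1f == 0xd65f0000   => match, rn = 30 (x30).  */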
223
224 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
225
226 ADDR specifies the address of the opcode.
227 INSN specifies the opcode to test.
228 RD receives the 'rd' field from the decoded instruction.
229 RN receives the 'rn' field from the decoded instruction.
IMM receives the 'imm' field from the decoded instruction.
230
231 Return 1 if the opcode matches and is decoded, otherwise 0. */
232 static int
233 aarch64_decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd,
234 unsigned *rn, int32_t *imm)
235 {
236 if ((insn & 0x9f000000) == 0x91000000)
237 {
238 unsigned shift;
239 unsigned op_is_sub;
240
241 *rd = (insn >> 0) & 0x1f;
242 *rn = (insn >> 5) & 0x1f;
243 *imm = (insn >> 10) & 0xfff;
244 shift = (insn >> 22) & 0x3;
245 op_is_sub = (insn >> 30) & 0x1;
246
247 switch (shift)
248 {
249 case 0:
250 break;
251 case 1:
252 *imm <<= 12;
253 break;
254 default:
255 /* UNDEFINED */
256 return 0;
257 }
258
259 if (op_is_sub)
260 *imm = -*imm;
261
262 if (aarch64_debug)
263 {
264 debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
265 core_addr_to_string_nz (addr), insn, *rd, *rn,
266 *imm);
267 }
268 return 1;
269 }
270 return 0;
271 }
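/* Worked example (illustrative, not from the upstream sources): the
   opcode 0xd10083ff ("sub sp, sp, #32") satisfies
   (insn & 0x9f000000) == 0x91000000 and decodes to rd = rn = 31 (sp),
   imm12 = 32, shift = 0 and op_is_sub = 1, so *IMM is returned as -32.
   The prologue analyzer below treats this as sp := sp - 32.  */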
272
273 /* Decode an opcode if it represents a branch via register instruction.
274
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
277 IS_BLR receives the 'op' bit from the decoded instruction.
278 RN receives the 'rn' field from the decoded instruction.
279
280 Return 1 if the opcode matches and is decoded, otherwise 0. */
281
282 static int
283 aarch64_decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr,
284 unsigned *rn)
285 {
286 /* Bit positions: 28 24 20 16 12 8 4 0 */
287 /* blr 110101100011111100000000000rrrrr */
288 /* br 110101100001111100000000000rrrrr */
289 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
290 {
291 *is_blr = (insn >> 21) & 1;
292 *rn = (insn >> 5) & 0x1f;
293
294 if (aarch64_debug)
295 {
296 debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
297 core_addr_to_string_nz (addr), insn,
298 *is_blr ? "blr" : "br", *rn);
299 }
300
301 return 1;
302 }
303 return 0;
304 }
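/* Example (illustrative): "blr x16" encodes as 0xd63f0200.  The opcode
   matches the mask/pattern above, bit 21 is set so *IS_BLR = 1, and
   bits [9:5] give *RN = 16.  */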
305
306 /* Decode an opcode if it represents an ERET instruction.
307
308 ADDR specifies the address of the opcode.
309 INSN specifies the opcode to test.
310
311 Return 1 if the opcode matches and is decoded, otherwise 0. */
312
313 static int
314 aarch64_decode_eret (CORE_ADDR addr, uint32_t insn)
315 {
316 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
317 if (insn == 0xd69f03e0)
318 {
319 if (aarch64_debug)
320 {
321 debug_printf ("decode: 0x%s 0x%x eret\n",
322 core_addr_to_string_nz (addr), insn);
323 }
324 return 1;
325 }
326 return 0;
327 }
328
329 /* Decode an opcode if it represents a MOVZ instruction.
330
331 ADDR specifies the address of the opcode.
332 INSN specifies the opcode to test.
333 RD receives the 'rd' field from the decoded instruction.
334
335 Return 1 if the opcode matches and is decoded, otherwise 0. */
336
337 static int
338 aarch64_decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
339 {
340 if (decode_masked_match (insn, 0xff800000, 0x52800000))
341 {
342 *rd = (insn >> 0) & 0x1f;
343
344 if (aarch64_debug)
345 {
346 debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
347 core_addr_to_string_nz (addr), insn, *rd);
348 }
349 return 1;
350 }
351 return 0;
352 }
353
354 /* Decode an opcode if it represents an ORR (shifted register)
355 instruction.
356
357 ADDR specifies the address of the opcode.
358 INSN specifies the opcode to test.
359 RD receives the 'rd' field from the decoded instruction.
360 RN receives the 'rn' field from the decoded instruction.
361 RM receives the 'rm' field from the decoded instruction.
362 IMM receives the 'imm6' field from the decoded instruction.
363
364 Return 1 if the opcode matches and is decoded, otherwise 0. */
365
366 static int
367 aarch64_decode_orr_shifted_register_x (CORE_ADDR addr, uint32_t insn,
368 unsigned *rd, unsigned *rn,
369 unsigned *rm, int32_t *imm)
370 {
371 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
372 {
373 *rd = (insn >> 0) & 0x1f;
374 *rn = (insn >> 5) & 0x1f;
375 *rm = (insn >> 16) & 0x1f;
376 *imm = (insn >> 10) & 0x3f;
377
378 if (aarch64_debug)
379 {
380 debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
381 core_addr_to_string_nz (addr), insn, *rd, *rn,
382 *rm, *imm);
383 }
384 return 1;
385 }
386 return 0;
387 }
388
389 /* Decode an opcode if it represents a RET instruction.
390
391 ADDR specifies the address of the opcode.
392 INSN specifies the opcode to test.
393 RN receives the 'rn' field from the decoded instruction.
394
395 Return 1 if the opcode matches and is decoded, otherwise 0. */
396
397 static int
398 aarch64_decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
399 {
400 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
401 {
402 *rn = (insn >> 5) & 0x1f;
403 if (aarch64_debug)
404 {
405 debug_printf ("decode: 0x%s 0x%x ret x%u\n",
406 core_addr_to_string_nz (addr), insn, *rn);
407 }
408 return 1;
409 }
410 return 0;
411 }
412
413 /* Decode an opcode if it represents the following instruction:
414 STP rt, rt2, [rn, #imm]
415
416 ADDR specifies the address of the opcode.
417 INSN specifies the opcode to test.
418 RT1 receives the 'rt' field from the decoded instruction.
419 RT2 receives the 'rt2' field from the decoded instruction.
420 RN receives the 'rn' field from the decoded instruction.
421 IMM receives the 'imm' field from the decoded instruction.
422
423 Return 1 if the opcode matches and is decoded, otherwise 0. */
424
425 static int
426 aarch64_decode_stp_offset (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
427 unsigned *rt2, unsigned *rn, int32_t *imm)
428 {
429 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
430 {
431 *rt1 = (insn >> 0) & 0x1f;
432 *rn = (insn >> 5) & 0x1f;
433 *rt2 = (insn >> 10) & 0x1f;
434 *imm = extract_signed_bitfield (insn, 7, 15);
435 *imm <<= 3;
436
437 if (aarch64_debug)
438 {
439 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
440 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
441 *rn, *imm);
442 }
443 return 1;
444 }
445 return 0;
446 }
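/* Worked example (illustrative, not part of the upstream sources): the
   opcode 0xa90153f3 ("stp x19, x20, [sp, #16]") matches the
   mask/pattern above and decodes to rt1 = 19, rt2 = 20, rn = 31 (sp)
   and a scaled immediate of 2 << 3 = 16.  */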
447
448 /* Decode an opcode if it represents the following instruction:
449 STP rt, rt2, [rn, #imm]!
450
451 ADDR specifies the address of the opcode.
452 INSN specifies the opcode to test.
453 RT1 receives the 'rt' field from the decoded instruction.
454 RT2 receives the 'rt2' field from the decoded instruction.
455 RN receives the 'rn' field from the decoded instruction.
456 IMM receives the 'imm' field from the decoded instruction.
457
458 Return 1 if the opcode matches and is decoded, otherwise 0. */
459
460 static int
461 aarch64_decode_stp_offset_wb (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
462 unsigned *rt2, unsigned *rn, int32_t *imm)
463 {
464 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
465 {
466 *rt1 = (insn >> 0) & 0x1f;
467 *rn = (insn >> 5) & 0x1f;
468 *rt2 = (insn >> 10) & 0x1f;
469 *imm = extract_signed_bitfield (insn, 7, 15);
470 *imm <<= 3;
471
472 if (aarch64_debug)
473 {
474 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
475 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
476 *rn, *imm);
477 }
478 return 1;
479 }
480 return 0;
481 }
482
483 /* Decode an opcode if it represents the following instruction:
484 STUR rt, [rn, #imm]
485
486 ADDR specifies the address of the opcode.
487 INSN specifies the opcode to test.
488 IS64 receives the size field from the decoded instruction.
489 RT receives the 'rt' field from the decoded instruction.
490 RN receives the 'rn' field from the decoded instruction.
491 IMM receives the 'imm' field from the decoded instruction.
492
493 Return 1 if the opcode matches and is decoded, otherwise 0. */
494
495 static int
496 aarch64_decode_stur (CORE_ADDR addr, uint32_t insn, int *is64,
497 unsigned *rt, unsigned *rn, int32_t *imm)
498 {
499 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
500 {
501 *is64 = (insn >> 30) & 1;
502 *rt = (insn >> 0) & 0x1f;
503 *rn = (insn >> 5) & 0x1f;
504 *imm = extract_signed_bitfield (insn, 9, 12);
505
506 if (aarch64_debug)
507 {
508 debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
509 core_addr_to_string_nz (addr), insn,
510 *is64 ? 'x' : 'w', *rt, *rn, *imm);
511 }
512 return 1;
513 }
514 return 0;
515 }
516
517 /* Analyze a prologue, looking for a recognizable stack frame
518 and frame pointer. Scan until we encounter a store that could
519 clobber the stack frame unexpectedly, or an unknown instruction. */
520
521 static CORE_ADDR
522 aarch64_analyze_prologue (struct gdbarch *gdbarch,
523 CORE_ADDR start, CORE_ADDR limit,
524 struct aarch64_prologue_cache *cache)
525 {
526 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
527 int i;
528 pv_t regs[AARCH64_X_REGISTER_COUNT];
529 struct pv_area *stack;
530 struct cleanup *back_to;
531
532 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
533 regs[i] = pv_register (i, 0);
534 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
535 back_to = make_cleanup_free_pv_area (stack);
536
537 for (; start < limit; start += 4)
538 {
539 uint32_t insn;
540 unsigned rd;
541 unsigned rn;
542 unsigned rm;
543 unsigned rt;
544 unsigned rt1;
545 unsigned rt2;
546 int op_is_sub;
547 int32_t imm;
548 unsigned cond;
549 int is64;
550 int is_link;
551 int is_cbnz;
552 int is_tbnz;
553 unsigned bit;
554 int32_t offset;
555
556 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
557
558 if (aarch64_decode_add_sub_imm (start, insn, &rd, &rn, &imm))
559 regs[rd] = pv_add_constant (regs[rn], imm);
560 else if (aarch64_decode_adrp (start, insn, &rd))
561 regs[rd] = pv_unknown ();
562 else if (aarch64_decode_b (start, insn, &is_link, &offset))
563 {
564 /* Stop analysis on branch. */
565 break;
566 }
567 else if (aarch64_decode_bcond (start, insn, &cond, &offset))
568 {
569 /* Stop analysis on branch. */
570 break;
571 }
572 else if (aarch64_decode_br (start, insn, &is_link, &rn))
573 {
574 /* Stop analysis on branch. */
575 break;
576 }
577 else if (aarch64_decode_cb (start, insn, &is64, &is_cbnz, &rn,
578 &offset))
579 {
580 /* Stop analysis on branch. */
581 break;
582 }
583 else if (aarch64_decode_eret (start, insn))
584 {
585 /* Stop analysis on branch. */
586 break;
587 }
588 else if (aarch64_decode_movz (start, insn, &rd))
589 regs[rd] = pv_unknown ();
590 else if (aarch64_decode_orr_shifted_register_x (start, insn, &rd,
591 &rn, &rm, &imm))
592 {
593 if (imm == 0 && rn == 31)
594 regs[rd] = regs[rm];
595 else
596 {
597 if (aarch64_debug)
598 {
599 debug_printf ("aarch64: prologue analysis gave up "
600 "addr=0x%s opcode=0x%x (orr x register)\n",
601 core_addr_to_string_nz (start), insn);
602 }
603 break;
604 }
605 }
606 else if (aarch64_decode_ret (start, insn, &rn))
607 {
608 /* Stop analysis on branch. */
609 break;
610 }
611 else if (aarch64_decode_stur (start, insn, &is64, &rt, &rn, &offset))
612 {
613 pv_area_store (stack, pv_add_constant (regs[rn], offset),
614 is64 ? 8 : 4, regs[rt]);
615 }
616 else if (aarch64_decode_stp_offset (start, insn, &rt1, &rt2, &rn,
617 &imm))
618 {
619 /* If recording this store would invalidate the store area
620 (perhaps because rn is not known) then we should abandon
621 further prologue analysis. */
622 if (pv_area_store_would_trash (stack,
623 pv_add_constant (regs[rn], imm)))
624 break;
625
626 if (pv_area_store_would_trash (stack,
627 pv_add_constant (regs[rn], imm + 8)))
628 break;
629
630 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
631 regs[rt1]);
632 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
633 regs[rt2]);
634 }
635 else if (aarch64_decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn,
636 &imm))
637 {
638 /* If recording this store would invalidate the store area
639 (perhaps because rn is not known) then we should abandon
640 further prologue analysis. */
641 if (pv_area_store_would_trash (stack,
642 pv_add_constant (regs[rn], imm)))
643 break;
644
645 if (pv_area_store_would_trash (stack,
646 pv_add_constant (regs[rn], imm + 8)))
647 break;
648
649 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
650 regs[rt1]);
651 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
652 regs[rt2]);
653 regs[rn] = pv_add_constant (regs[rn], imm);
654 }
655 else if (aarch64_decode_tb (start, insn, &is_tbnz, &bit, &rn,
656 &offset))
657 {
658 /* Stop analysis on branch. */
659 break;
660 }
661 else
662 {
663 if (aarch64_debug)
664 {
665 debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
666 " opcode=0x%x\n",
667 core_addr_to_string_nz (start), insn);
668 }
669 break;
670 }
671 }
672
673 if (cache == NULL)
674 {
675 do_cleanups (back_to);
676 return start;
677 }
678
679 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
680 {
681 /* Frame pointer is fp. Frame size is constant. */
682 cache->framereg = AARCH64_FP_REGNUM;
683 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
684 }
685 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
686 {
687 /* Try the stack pointer. */
688 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
689 cache->framereg = AARCH64_SP_REGNUM;
690 }
691 else
692 {
693 /* We're just out of luck. We don't know where the frame is. */
694 cache->framereg = -1;
695 cache->framesize = 0;
696 }
697
698 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
699 {
700 CORE_ADDR offset;
701
702 if (pv_area_find_reg (stack, gdbarch, i, &offset))
703 cache->saved_regs[i].addr = offset;
704 }
705
706 do_cleanups (back_to);
707 return start;
708 }
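/* For illustration (a hypothetical but typical GCC prologue, not part
   of the upstream sources), the analysis above proceeds as follows:

     stp x29, x30, [sp, #-32]!   aarch64_decode_stp_offset_wb: x29/x30
                                 saved at <sp0>-32 and <sp0>-24, then
                                 sp := <sp0>-32
     mov x29, sp                 aarch64_decode_add_sub_imm (this is
                                 "add x29, sp, #0"): x29 := <sp0>-32

   On exit regs[x29] is <sp0>-32, so FRAMEREG is the FP and FRAMESIZE
   is 32; pv_area_find_reg records x29 and x30 at offsets -32 and -24,
   which aarch64_make_prologue_cache_1 later rebases onto PREV_SP.  */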
709
710 /* Implement the "skip_prologue" gdbarch method. */
711
712 static CORE_ADDR
713 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
714 {
715 unsigned long inst;
716 CORE_ADDR skip_pc;
717 CORE_ADDR func_addr, limit_pc;
718 struct symtab_and_line sal;
719
720 /* See if we can determine the end of the prologue via the symbol
721 table. If so, then return either PC, or the PC after the
722 prologue, whichever is greater. */
723 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
724 {
725 CORE_ADDR post_prologue_pc
726 = skip_prologue_using_sal (gdbarch, func_addr);
727
728 if (post_prologue_pc != 0)
729 return max (pc, post_prologue_pc);
730 }
731
732 /* Can't determine prologue from the symbol table, need to examine
733 instructions. */
734
735 /* Find an upper limit on the function prologue using the debug
736 information. If the debug information could not be used to
737 provide that bound, then use an arbitrary large number as the
738 upper bound. */
739 limit_pc = skip_prologue_using_sal (gdbarch, pc);
740 if (limit_pc == 0)
741 limit_pc = pc + 128; /* Magic. */
742
743 /* Try disassembling prologue. */
744 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
745 }
746
747 /* Scan the function prologue for THIS_FRAME and populate the prologue
748 cache CACHE. */
749
750 static void
751 aarch64_scan_prologue (struct frame_info *this_frame,
752 struct aarch64_prologue_cache *cache)
753 {
754 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
755 CORE_ADDR prologue_start;
756 CORE_ADDR prologue_end;
757 CORE_ADDR prev_pc = get_frame_pc (this_frame);
758 struct gdbarch *gdbarch = get_frame_arch (this_frame);
759
760 cache->prev_pc = prev_pc;
761
762 /* Assume we do not find a frame. */
763 cache->framereg = -1;
764 cache->framesize = 0;
765
766 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
767 &prologue_end))
768 {
769 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
770
771 if (sal.line == 0)
772 {
773 /* No line info so use the current PC. */
774 prologue_end = prev_pc;
775 }
776 else if (sal.end < prologue_end)
777 {
778 /* The next line begins after the function end. */
779 prologue_end = sal.end;
780 }
781
782 prologue_end = min (prologue_end, prev_pc);
783 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
784 }
785 else
786 {
787 CORE_ADDR frame_loc;
788 LONGEST saved_fp;
789 LONGEST saved_lr;
790 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
791
792 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
793 if (frame_loc == 0)
794 return;
795
796 cache->framereg = AARCH64_FP_REGNUM;
797 cache->framesize = 16;
798 cache->saved_regs[29].addr = 0;
799 cache->saved_regs[30].addr = 8;
800 }
801 }
802
803 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
804 function may throw an exception if the inferior's registers or memory is
805 not available. */
806
807 static void
808 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
809 struct aarch64_prologue_cache *cache)
810 {
811 CORE_ADDR unwound_fp;
812 int reg;
813
814 aarch64_scan_prologue (this_frame, cache);
815
816 if (cache->framereg == -1)
817 return;
818
819 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
820 if (unwound_fp == 0)
821 return;
822
823 cache->prev_sp = unwound_fp + cache->framesize;
824
825 /* Calculate actual addresses of saved registers using offsets
826 determined by aarch64_analyze_prologue. */
827 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
828 if (trad_frame_addr_p (cache->saved_regs, reg))
829 cache->saved_regs[reg].addr += cache->prev_sp;
830
831 cache->func = get_frame_func (this_frame);
832
833 cache->available_p = 1;
834 }
835
836 /* Allocate and fill in *THIS_CACHE with information about the prologue of
837 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
838 Return a pointer to the current aarch64_prologue_cache in
839 *THIS_CACHE. */
840
841 static struct aarch64_prologue_cache *
842 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
843 {
844 struct aarch64_prologue_cache *cache;
845
846 if (*this_cache != NULL)
847 return *this_cache;
848
849 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
850 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
851 *this_cache = cache;
852
853 TRY
854 {
855 aarch64_make_prologue_cache_1 (this_frame, cache);
856 }
857 CATCH (ex, RETURN_MASK_ERROR)
858 {
859 if (ex.error != NOT_AVAILABLE_ERROR)
860 throw_exception (ex);
861 }
862 END_CATCH
863
864 return cache;
865 }
866
867 /* Implement the "stop_reason" frame_unwind method. */
868
869 static enum unwind_stop_reason
870 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
871 void **this_cache)
872 {
873 struct aarch64_prologue_cache *cache
874 = aarch64_make_prologue_cache (this_frame, this_cache);
875
876 if (!cache->available_p)
877 return UNWIND_UNAVAILABLE;
878
879 /* Halt the backtrace at "_start". */
880 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
881 return UNWIND_OUTERMOST;
882
883 /* We've hit a wall, stop. */
884 if (cache->prev_sp == 0)
885 return UNWIND_OUTERMOST;
886
887 return UNWIND_NO_REASON;
888 }
889
890 /* Our frame ID for a normal frame is the current function's starting
891 PC and the caller's SP when we were called. */
892
893 static void
894 aarch64_prologue_this_id (struct frame_info *this_frame,
895 void **this_cache, struct frame_id *this_id)
896 {
897 struct aarch64_prologue_cache *cache
898 = aarch64_make_prologue_cache (this_frame, this_cache);
899
900 if (!cache->available_p)
901 *this_id = frame_id_build_unavailable_stack (cache->func);
902 else
903 *this_id = frame_id_build (cache->prev_sp, cache->func);
904 }
905
906 /* Implement the "prev_register" frame_unwind method. */
907
908 static struct value *
909 aarch64_prologue_prev_register (struct frame_info *this_frame,
910 void **this_cache, int prev_regnum)
911 {
912 struct gdbarch *gdbarch = get_frame_arch (this_frame);
913 struct aarch64_prologue_cache *cache
914 = aarch64_make_prologue_cache (this_frame, this_cache);
915
916 /* If we are asked to unwind the PC, then we need to return the LR
917 instead. The prologue may save PC, but it will point into this
918 frame's prologue, not the next frame's resume location. */
919 if (prev_regnum == AARCH64_PC_REGNUM)
920 {
921 CORE_ADDR lr;
922
923 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
924 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
925 }
926
927 /* SP is generally not saved to the stack, but this frame is
928 identified by the next frame's stack pointer at the time of the
929 call. The value was already reconstructed into PREV_SP. */
930 /*
931 +----------+ ^
932 | saved lr | |
933 +->| saved fp |--+
934 | | |
935 | | | <- Previous SP
936 | +----------+
937 | | saved lr |
938 +--| saved fp |<- FP
939 | |
940 | |<- SP
941 +----------+ */
942 if (prev_regnum == AARCH64_SP_REGNUM)
943 return frame_unwind_got_constant (this_frame, prev_regnum,
944 cache->prev_sp);
945
946 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
947 prev_regnum);
948 }
949
950 /* AArch64 prologue unwinder. */
951 struct frame_unwind aarch64_prologue_unwind =
952 {
953 NORMAL_FRAME,
954 aarch64_prologue_frame_unwind_stop_reason,
955 aarch64_prologue_this_id,
956 aarch64_prologue_prev_register,
957 NULL,
958 default_frame_sniffer
959 };
960
961 /* Allocate and fill in *THIS_CACHE with information about the prologue of
962 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
963 Return a pointer to the current aarch64_prologue_cache in
964 *THIS_CACHE. */
965
966 static struct aarch64_prologue_cache *
967 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
968 {
969 struct aarch64_prologue_cache *cache;
970
971 if (*this_cache != NULL)
972 return *this_cache;
973
974 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
975 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
976 *this_cache = cache;
977
978 TRY
979 {
980 cache->prev_sp = get_frame_register_unsigned (this_frame,
981 AARCH64_SP_REGNUM);
982 cache->prev_pc = get_frame_pc (this_frame);
983 cache->available_p = 1;
984 }
985 CATCH (ex, RETURN_MASK_ERROR)
986 {
987 if (ex.error != NOT_AVAILABLE_ERROR)
988 throw_exception (ex);
989 }
990 END_CATCH
991
992 return cache;
993 }
994
995 /* Implement the "stop_reason" frame_unwind method. */
996
997 static enum unwind_stop_reason
998 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
999 void **this_cache)
1000 {
1001 struct aarch64_prologue_cache *cache
1002 = aarch64_make_stub_cache (this_frame, this_cache);
1003
1004 if (!cache->available_p)
1005 return UNWIND_UNAVAILABLE;
1006
1007 return UNWIND_NO_REASON;
1008 }
1009
1010 /* Our frame ID for a stub frame is the current SP and LR. */
1011
1012 static void
1013 aarch64_stub_this_id (struct frame_info *this_frame,
1014 void **this_cache, struct frame_id *this_id)
1015 {
1016 struct aarch64_prologue_cache *cache
1017 = aarch64_make_stub_cache (this_frame, this_cache);
1018
1019 if (cache->available_p)
1020 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1021 else
1022 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1023 }
1024
1025 /* Implement the "sniffer" frame_unwind method. */
1026
1027 static int
1028 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1029 struct frame_info *this_frame,
1030 void **this_prologue_cache)
1031 {
1032 CORE_ADDR addr_in_block;
1033 gdb_byte dummy[4];
1034
1035 addr_in_block = get_frame_address_in_block (this_frame);
1036 if (in_plt_section (addr_in_block)
1037 /* We also use the stub unwinder if the target memory is unreadable
1038 to avoid having the prologue unwinder trying to read it. */
1039 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1040 return 1;
1041
1042 return 0;
1043 }
1044
1045 /* AArch64 stub unwinder. */
1046 struct frame_unwind aarch64_stub_unwind =
1047 {
1048 NORMAL_FRAME,
1049 aarch64_stub_frame_unwind_stop_reason,
1050 aarch64_stub_this_id,
1051 aarch64_prologue_prev_register,
1052 NULL,
1053 aarch64_stub_unwind_sniffer
1054 };
1055
1056 /* Return the frame base address of *THIS_FRAME. */
1057
1058 static CORE_ADDR
1059 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1060 {
1061 struct aarch64_prologue_cache *cache
1062 = aarch64_make_prologue_cache (this_frame, this_cache);
1063
1064 return cache->prev_sp - cache->framesize;
1065 }
1066
1067 /* AArch64 default frame base information. */
1068 struct frame_base aarch64_normal_base =
1069 {
1070 &aarch64_prologue_unwind,
1071 aarch64_normal_frame_base,
1072 aarch64_normal_frame_base,
1073 aarch64_normal_frame_base
1074 };
1075
1076 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1077 dummy frame. The frame ID's base needs to match the TOS value
1078 saved by save_dummy_frame_tos () and returned from
1079 aarch64_push_dummy_call, and the PC needs to match the dummy
1080 frame's breakpoint. */
1081
1082 static struct frame_id
1083 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1084 {
1085 return frame_id_build (get_frame_register_unsigned (this_frame,
1086 AARCH64_SP_REGNUM),
1087 get_frame_pc (this_frame));
1088 }
1089
1090 /* Implement the "unwind_pc" gdbarch method. */
1091
1092 static CORE_ADDR
1093 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1094 {
1095 CORE_ADDR pc
1096 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1097
1098 return pc;
1099 }
1100
1101 /* Implement the "unwind_sp" gdbarch method. */
1102
1103 static CORE_ADDR
1104 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1105 {
1106 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1107 }
1108
1109 /* Return the value of the REGNUM register in the previous frame of
1110 *THIS_FRAME. */
1111
1112 static struct value *
1113 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1114 void **this_cache, int regnum)
1115 {
1116 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1117 CORE_ADDR lr;
1118
1119 switch (regnum)
1120 {
1121 case AARCH64_PC_REGNUM:
1122 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1123 return frame_unwind_got_constant (this_frame, regnum, lr);
1124
1125 default:
1126 internal_error (__FILE__, __LINE__,
1127 _("Unexpected register %d"), regnum);
1128 }
1129 }
1130
1131 /* Implement the "init_reg" dwarf2_frame_ops method. */
1132
1133 static void
1134 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1135 struct dwarf2_frame_state_reg *reg,
1136 struct frame_info *this_frame)
1137 {
1138 switch (regnum)
1139 {
1140 case AARCH64_PC_REGNUM:
1141 reg->how = DWARF2_FRAME_REG_FN;
1142 reg->loc.fn = aarch64_dwarf2_prev_register;
1143 break;
1144 case AARCH64_SP_REGNUM:
1145 reg->how = DWARF2_FRAME_REG_CFA;
1146 break;
1147 }
1148 }
1149
1150 /* When arguments must be pushed onto the stack, they go on in reverse
1151 order. The code below implements a FILO (stack) to do this. */
1152
1153 typedef struct
1154 {
1155 /* Value to pass on stack. */
1156 const void *data;
1157
1158 /* Size in bytes of value to pass on stack. */
1159 int len;
1160 } stack_item_t;
1161
1162 DEF_VEC_O (stack_item_t);
1163
1164 /* Return the alignment (in bytes) of the given type. */
1165
1166 static int
1167 aarch64_type_align (struct type *t)
1168 {
1169 int n;
1170 int align;
1171 int falign;
1172
1173 t = check_typedef (t);
1174 switch (TYPE_CODE (t))
1175 {
1176 default:
1177 /* Should never happen. */
1178 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1179 return 4;
1180
1181 case TYPE_CODE_PTR:
1182 case TYPE_CODE_ENUM:
1183 case TYPE_CODE_INT:
1184 case TYPE_CODE_FLT:
1185 case TYPE_CODE_SET:
1186 case TYPE_CODE_RANGE:
1187 case TYPE_CODE_BITSTRING:
1188 case TYPE_CODE_REF:
1189 case TYPE_CODE_CHAR:
1190 case TYPE_CODE_BOOL:
1191 return TYPE_LENGTH (t);
1192
1193 case TYPE_CODE_ARRAY:
1194 case TYPE_CODE_COMPLEX:
1195 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1196
1197 case TYPE_CODE_STRUCT:
1198 case TYPE_CODE_UNION:
1199 align = 1;
1200 for (n = 0; n < TYPE_NFIELDS (t); n++)
1201 {
1202 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1203 if (falign > align)
1204 align = falign;
1205 }
1206 return align;
1207 }
1208 }
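/* Example (illustrative): for "struct { char c; double d; }" the loop
   above returns max (1, 8) = 8, the alignment of the most-aligned
   member; for a complex double the element type's alignment, 8, is
   returned.  */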
1209
1210 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1211 defined in the AAPCS64 ABI document; otherwise return 0. */
1212
1213 static int
1214 is_hfa (struct type *ty)
1215 {
1216 switch (TYPE_CODE (ty))
1217 {
1218 case TYPE_CODE_ARRAY:
1219 {
1220 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1221 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1222 return 1;
1223 break;
1224 }
1225
1226 case TYPE_CODE_UNION:
1227 case TYPE_CODE_STRUCT:
1228 {
1229 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1230 {
1231 struct type *member0_type;
1232
1233 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1234 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1235 {
1236 int i;
1237
1238 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1239 {
1240 struct type *member1_type;
1241
1242 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1243 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1244 || (TYPE_LENGTH (member0_type)
1245 != TYPE_LENGTH (member1_type)))
1246 return 0;
1247 }
1248 return 1;
1249 }
1250 }
1251 return 0;
1252 }
1253
1254 default:
1255 break;
1256 }
1257
1258 return 0;
1259 }
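/* Example (illustrative): "struct { float x, y, z; }" is an HFA --
   one to four members, all of the same floating-point type -- and is
   therefore passed and returned in consecutive V registers, whereas
   "struct { float x; double y; }" fails the member comparison loop
   above and is not.  */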
1260
1261 /* AArch64 function call information structure. */
1262 struct aarch64_call_info
1263 {
1264 /* The current argument number. */
1265 unsigned argnum;
1266
1267 /* The next general purpose register number, equivalent to NGRN as
1268 described in the AArch64 Procedure Call Standard. */
1269 unsigned ngrn;
1270
1271 /* The next SIMD and floating point register number, equivalent to
1272 NSRN as described in the AArch64 Procedure Call Standard. */
1273 unsigned nsrn;
1274
1275 /* The next stacked argument address, equivalent to NSAA as
1276 described in the AArch64 Procedure Call Standard. */
1277 unsigned nsaa;
1278
1279 /* Stack item vector. */
1280 VEC(stack_item_t) *si;
1281 };
1282
1283 /* Pass a value in a sequence of consecutive X registers. The caller
1284 is responsible for ensuring sufficient registers are available. */
1285
1286 static void
1287 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1288 struct aarch64_call_info *info, struct type *type,
1289 const bfd_byte *buf)
1290 {
1291 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1292 int len = TYPE_LENGTH (type);
1293 enum type_code typecode = TYPE_CODE (type);
1294 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1295
1296 info->argnum++;
1297
1298 while (len > 0)
1299 {
1300 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1301 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1302 byte_order);
1303
1304
1305 /* Adjust sub-word struct/union args when big-endian. */
1306 if (byte_order == BFD_ENDIAN_BIG
1307 && partial_len < X_REGISTER_SIZE
1308 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1309 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1310
1311 if (aarch64_debug)
1312 {
1313 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1314 gdbarch_register_name (gdbarch, regnum),
1315 phex (regval, X_REGISTER_SIZE));
1316 }
1317 regcache_cooked_write_unsigned (regcache, regnum, regval);
1318 len -= partial_len;
1319 buf += partial_len;
1320 regnum++;
1321 }
1322 }
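/* Example (illustrative): a 12-byte structure passed by value occupies
   two X registers; the second write covers the remaining 4 bytes.  On
   a big-endian target those 4 bytes are shifted left by 32 bits so the
   data sits at the most significant end of the register, matching the
   in-memory layout the callee expects.  */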
1323
1324 /* Attempt to marshall a value in a V register. Return 1 if
1325 successful, or 0 if insufficient registers are available. This
1326 function, unlike the equivalent pass_in_x function, does not
1327 handle arguments spread across multiple registers. */
1328
1329 static int
1330 pass_in_v (struct gdbarch *gdbarch,
1331 struct regcache *regcache,
1332 struct aarch64_call_info *info,
1333 const bfd_byte *buf)
1334 {
1335 if (info->nsrn < 8)
1336 {
1337 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1338 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1339
1340 info->argnum++;
1341 info->nsrn++;
1342
1343 regcache_cooked_write (regcache, regnum, buf);
1344 if (aarch64_debug)
1345 {
1346 debug_printf ("arg %d in %s\n", info->argnum,
1347 gdbarch_register_name (gdbarch, regnum));
1348 }
1349 return 1;
1350 }
1351 info->nsrn = 8;
1352 return 0;
1353 }
1354
1355 /* Marshall an argument onto the stack. */
1356
1357 static void
1358 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1359 const bfd_byte *buf)
1360 {
1361 int len = TYPE_LENGTH (type);
1362 int align;
1363 stack_item_t item;
1364
1365 info->argnum++;
1366
1367 align = aarch64_type_align (type);
1368
1369 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1370 natural alignment of the argument's type. */
1371 align = align_up (align, 8);
1372
1373 /* The AArch64 PCS requires at most doubleword alignment. */
1374 if (align > 16)
1375 align = 16;
1376
1377 if (aarch64_debug)
1378 {
1379 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1380 info->nsaa);
1381 }
1382
1383 item.len = len;
1384 item.data = buf;
1385 VEC_safe_push (stack_item_t, info->si, &item);
1386
1387 info->nsaa += len;
1388 if (info->nsaa & (align - 1))
1389 {
1390 /* Push stack alignment padding. */
1391 int pad = align - (info->nsaa & (align - 1));
1392
1393 item.len = pad;
1394 item.data = buf;
1395
1396 VEC_safe_push (stack_item_t, info->si, &item);
1397 info->nsaa += pad;
1398 }
1399 }
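/* Worked example (illustrative, not from the upstream sources):
   pushing a 12-byte structure of ints when NSAA is 0: LEN = 12, the
   natural alignment of 4 is rounded up to the 8-byte PCS minimum,
   NSAA advances to 12, and 4 bytes of padding are then pushed to
   restore 8-byte alignment, leaving NSAA = 16.  */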
1400
1401 /* Marshall an argument into a sequence of one or more consecutive X
1402 registers or, if insufficient X registers are available then onto
1403 the stack. */
1404
1405 static void
1406 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1407 struct aarch64_call_info *info, struct type *type,
1408 const bfd_byte *buf)
1409 {
1410 int len = TYPE_LENGTH (type);
1411 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1412
1413 /* PCS C.13 - Pass in registers if we have enough spare. */
1414 if (info->ngrn + nregs <= 8)
1415 {
1416 pass_in_x (gdbarch, regcache, info, type, buf);
1417 info->ngrn += nregs;
1418 }
1419 else
1420 {
1421 info->ngrn = 8;
1422 pass_on_stack (info, type, buf);
1423 }
1424 }
1425
1426 /* Pass a value in a V register, or on the stack if insufficient are
1427 available. */
1428
1429 static void
1430 pass_in_v_or_stack (struct gdbarch *gdbarch,
1431 struct regcache *regcache,
1432 struct aarch64_call_info *info,
1433 struct type *type,
1434 const bfd_byte *buf)
1435 {
1436 if (!pass_in_v (gdbarch, regcache, info, buf))
1437 pass_on_stack (info, type, buf);
1438 }
1439
1440 /* Implement the "push_dummy_call" gdbarch method. */
1441
1442 static CORE_ADDR
1443 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1444 struct regcache *regcache, CORE_ADDR bp_addr,
1445 int nargs,
1446 struct value **args, CORE_ADDR sp, int struct_return,
1447 CORE_ADDR struct_addr)
1448 {
1449 int nstack = 0;
1450 int argnum;
1451 int x_argreg;
1452 int v_argreg;
1453 struct aarch64_call_info info;
1454 struct type *func_type;
1455 struct type *return_type;
1456 int lang_struct_return;
1457
1458 memset (&info, 0, sizeof (info));
1459
1460 /* We need to know what the type of the called function is in order
1461 to determine the number of named/anonymous arguments for the
1462 actual argument placement, and the return type in order to handle
1463 return value correctly.
1464
1465 The generic code above us views the decision of return in memory
1466 or return in registers as a two-stage process. The language
1467 handler is consulted first and may decide to return in memory (e.g.
1468 a class with a copy constructor returned by value); this will cause
1469 the generic code to allocate space AND insert an initial leading
1470 argument.
1471
1472 If the language code does not decide to pass in memory then the
1473 target code is consulted.
1474
1475 If the language code decides to pass in memory we want to move
1476 the pointer inserted as the initial argument from the argument
1477 list and into X8, the conventional AArch64 struct return pointer
1478 register.
1479
1480 This is slightly awkward; ideally the flag "lang_struct_return"
1481 would be passed to the target's implementation of push_dummy_call.
1482 Rather than change the target interface we call the language code
1483 directly ourselves. */
1484
1485 func_type = check_typedef (value_type (function));
1486
1487 /* Dereference function pointer types. */
1488 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1489 func_type = TYPE_TARGET_TYPE (func_type);
1490
1491 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1492 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1493
1494 /* If language_pass_by_reference () returned true we will have been
1495 given an additional initial argument, a hidden pointer to the
1496 return slot in memory. */
1497 return_type = TYPE_TARGET_TYPE (func_type);
1498 lang_struct_return = language_pass_by_reference (return_type);
1499
1500 /* Set the return address. For the AArch64, the return breakpoint
1501 is always at BP_ADDR. */
1502 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1503
1504 /* If we were given an initial argument for the return slot because
1505 lang_struct_return was true, lose it. */
1506 if (lang_struct_return)
1507 {
1508 args++;
1509 nargs--;
1510 }
1511
1512 /* The struct_return pointer occupies X8. */
1513 if (struct_return || lang_struct_return)
1514 {
1515 if (aarch64_debug)
1516 {
1517 debug_printf ("struct return in %s = 0x%s\n",
1518 gdbarch_register_name (gdbarch,
1519 AARCH64_STRUCT_RETURN_REGNUM),
1520 paddress (gdbarch, struct_addr));
1521 }
1522 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1523 struct_addr);
1524 }
1525
1526 for (argnum = 0; argnum < nargs; argnum++)
1527 {
1528 struct value *arg = args[argnum];
1529 struct type *arg_type;
1530 int len;
1531
1532 arg_type = check_typedef (value_type (arg));
1533 len = TYPE_LENGTH (arg_type);
1534
1535 switch (TYPE_CODE (arg_type))
1536 {
1537 case TYPE_CODE_INT:
1538 case TYPE_CODE_BOOL:
1539 case TYPE_CODE_CHAR:
1540 case TYPE_CODE_RANGE:
1541 case TYPE_CODE_ENUM:
1542 if (len < 4)
1543 {
1544 /* Promote to 32 bit integer. */
1545 if (TYPE_UNSIGNED (arg_type))
1546 arg_type = builtin_type (gdbarch)->builtin_uint32;
1547 else
1548 arg_type = builtin_type (gdbarch)->builtin_int32;
1549 arg = value_cast (arg_type, arg);
1550 }
1551 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1552 value_contents (arg));
1553 break;
1554
1555 case TYPE_CODE_COMPLEX:
1556 if (info.nsrn <= 6)
1557 {
1558 const bfd_byte *buf = value_contents (arg);
1559 struct type *target_type =
1560 check_typedef (TYPE_TARGET_TYPE (arg_type));
1561
1562 pass_in_v (gdbarch, regcache, &info, buf);
1563 pass_in_v (gdbarch, regcache, &info,
1564 buf + TYPE_LENGTH (target_type));
1565 }
1566 else
1567 {
1568 info.nsrn = 8;
1569 pass_on_stack (&info, arg_type, value_contents (arg));
1570 }
1571 break;
1572 case TYPE_CODE_FLT:
1573 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1574 value_contents (arg));
1575 break;
1576
1577 case TYPE_CODE_STRUCT:
1578 case TYPE_CODE_ARRAY:
1579 case TYPE_CODE_UNION:
1580 if (is_hfa (arg_type))
1581 {
1582 int elements = TYPE_NFIELDS (arg_type);
1583
1584 /* Homogeneous Aggregates */
1585 if (info.nsrn + elements < 8)
1586 {
1587 int i;
1588
1589 for (i = 0; i < elements; i++)
1590 {
1591 /* We know that we have sufficient registers
1592 available; therefore this will never fall back
1593 to the stack. */
1594 struct value *field =
1595 value_primitive_field (arg, 0, i, arg_type);
1596 struct type *field_type =
1597 check_typedef (value_type (field));
1598
1599 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1600 value_contents_writeable (field));
1601 }
1602 }
1603 else
1604 {
1605 info.nsrn = 8;
1606 pass_on_stack (&info, arg_type, value_contents (arg));
1607 }
1608 }
1609 else if (len > 16)
1610 {
1611 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1612 invisible reference. */
1613
1614 /* Allocate aligned storage. */
1615 sp = align_down (sp - len, 16);
1616
1617 /* Write the real data into the stack. */
1618 write_memory (sp, value_contents (arg), len);
1619
1620 /* Construct the indirection. */
1621 arg_type = lookup_pointer_type (arg_type);
1622 arg = value_from_pointer (arg_type, sp);
1623 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1624 value_contents (arg));
1625 }
1626 else
1627 /* PCS C.15 / C.18 multiple values pass. */
1628 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1629 value_contents (arg));
1630 break;
1631
1632 default:
1633 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1634 value_contents (arg));
1635 break;
1636 }
1637 }
1638
1639 /* Make sure stack retains 16 byte alignment. */
1640 if (info.nsaa & 15)
1641 sp -= 16 - (info.nsaa & 15);
1642
1643 while (!VEC_empty (stack_item_t, info.si))
1644 {
1645 stack_item_t *si = VEC_last (stack_item_t, info.si);
1646
1647 sp -= si->len;
1648 write_memory (sp, si->data, si->len);
1649 VEC_pop (stack_item_t, info.si);
1650 }
1651
1652 VEC_free (stack_item_t, info.si);
1653
1654 /* Finally, update the SP register. */
1655 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1656
1657 return sp;
1658 }
1659
1660 /* Implement the "frame_align" gdbarch method. */
1661
1662 static CORE_ADDR
1663 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1664 {
1665 /* Align the stack to sixteen bytes. */
1666 return sp & ~(CORE_ADDR) 15;
1667 }
1668
1669 /* Return the type for an AdvSISD Q register. */
1670
1671 static struct type *
1672 aarch64_vnq_type (struct gdbarch *gdbarch)
1673 {
1674 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1675
1676 if (tdep->vnq_type == NULL)
1677 {
1678 struct type *t;
1679 struct type *elem;
1680
1681 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1682 TYPE_CODE_UNION);
1683
1684 elem = builtin_type (gdbarch)->builtin_uint128;
1685 append_composite_type_field (t, "u", elem);
1686
1687 elem = builtin_type (gdbarch)->builtin_int128;
1688 append_composite_type_field (t, "s", elem);
1689
1690 tdep->vnq_type = t;
1691 }
1692
1693 return tdep->vnq_type;
1694 }
1695
1696 /* Return the type for an AdvSISD D register. */
1697
1698 static struct type *
1699 aarch64_vnd_type (struct gdbarch *gdbarch)
1700 {
1701 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1702
1703 if (tdep->vnd_type == NULL)
1704 {
1705 struct type *t;
1706 struct type *elem;
1707
1708 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1709 TYPE_CODE_UNION);
1710
1711 elem = builtin_type (gdbarch)->builtin_double;
1712 append_composite_type_field (t, "f", elem);
1713
1714 elem = builtin_type (gdbarch)->builtin_uint64;
1715 append_composite_type_field (t, "u", elem);
1716
1717 elem = builtin_type (gdbarch)->builtin_int64;
1718 append_composite_type_field (t, "s", elem);
1719
1720 tdep->vnd_type = t;
1721 }
1722
1723 return tdep->vnd_type;
1724 }
1725
1726 /* Return the type for an AdvSISD S register. */
1727
1728 static struct type *
1729 aarch64_vns_type (struct gdbarch *gdbarch)
1730 {
1731 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1732
1733 if (tdep->vns_type == NULL)
1734 {
1735 struct type *t;
1736 struct type *elem;
1737
1738 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1739 TYPE_CODE_UNION);
1740
1741 elem = builtin_type (gdbarch)->builtin_float;
1742 append_composite_type_field (t, "f", elem);
1743
1744 elem = builtin_type (gdbarch)->builtin_uint32;
1745 append_composite_type_field (t, "u", elem);
1746
1747 elem = builtin_type (gdbarch)->builtin_int32;
1748 append_composite_type_field (t, "s", elem);
1749
1750 tdep->vns_type = t;
1751 }
1752
1753 return tdep->vns_type;
1754 }
1755
1756 /* Return the type for an AdvSISD H register. */
1757
1758 static struct type *
1759 aarch64_vnh_type (struct gdbarch *gdbarch)
1760 {
1761 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1762
1763 if (tdep->vnh_type == NULL)
1764 {
1765 struct type *t;
1766 struct type *elem;
1767
1768 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1769 TYPE_CODE_UNION);
1770
1771 elem = builtin_type (gdbarch)->builtin_uint16;
1772 append_composite_type_field (t, "u", elem);
1773
1774 elem = builtin_type (gdbarch)->builtin_int16;
1775 append_composite_type_field (t, "s", elem);
1776
1777 tdep->vnh_type = t;
1778 }
1779
1780 return tdep->vnh_type;
1781 }
1782
1783 /* Return the type for an AdvSISD B register. */
1784
1785 static struct type *
1786 aarch64_vnb_type (struct gdbarch *gdbarch)
1787 {
1788 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1789
1790 if (tdep->vnb_type == NULL)
1791 {
1792 struct type *t;
1793 struct type *elem;
1794
1795 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1796 TYPE_CODE_UNION);
1797
1798 elem = builtin_type (gdbarch)->builtin_uint8;
1799 append_composite_type_field (t, "u", elem);
1800
1801 elem = builtin_type (gdbarch)->builtin_int8;
1802 append_composite_type_field (t, "s", elem);
1803
1804 tdep->vnb_type = t;
1805 }
1806
1807 return tdep->vnb_type;
1808 }
1809
1810 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1811
1812 static int
1813 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1814 {
1815 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1816 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1817
1818 if (reg == AARCH64_DWARF_SP)
1819 return AARCH64_SP_REGNUM;
1820
1821 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1822 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1823
1824 return -1;
1825 }
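/* Example (illustrative): per the AArch64 DWARF register numbering
   used by the constants above, DWARF numbers 0-30 map to x0-x30,
   31 maps to sp, and 64-95 map to v0-v31; any other number yields
   -1.  */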
1826 \f
1827
1828 /* Implement the "print_insn" gdbarch method. */
1829
1830 static int
1831 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1832 {
1833 info->symbols = NULL;
1834 return print_insn_aarch64 (memaddr, info);
1835 }
1836
1837 /* AArch64 BRK software debug mode instruction.
1838 Note that AArch64 code is always little-endian.
1839 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1840 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1841
1842 /* Implement the "breakpoint_from_pc" gdbarch method. */
1843
1844 static const gdb_byte *
1845 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1846 int *lenptr)
1847 {
1848 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1849
1850 *lenptr = sizeof (aarch64_default_breakpoint);
1851 return aarch64_default_breakpoint;
1852 }
1853
1854 /* Extract from the register cache REGS a function return value of
1855 type TYPE, and copy that, in virtual format, into
1856 VALBUF. */
1857
1858 static void
1859 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1860 gdb_byte *valbuf)
1861 {
1862 struct gdbarch *gdbarch = get_regcache_arch (regs);
1863 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1864
1865 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1866 {
1867 bfd_byte buf[V_REGISTER_SIZE];
1868 int len = TYPE_LENGTH (type);
1869
1870 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1871 memcpy (valbuf, buf, len);
1872 }
1873 else if (TYPE_CODE (type) == TYPE_CODE_INT
1874 || TYPE_CODE (type) == TYPE_CODE_CHAR
1875 || TYPE_CODE (type) == TYPE_CODE_BOOL
1876 || TYPE_CODE (type) == TYPE_CODE_PTR
1877 || TYPE_CODE (type) == TYPE_CODE_REF
1878 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1879 {
1880 /* If the type is a plain integer, then the access is
1881 straightforward. Otherwise we have to play around a bit
1882 more. */
1883 int len = TYPE_LENGTH (type);
1884 int regno = AARCH64_X0_REGNUM;
1885 ULONGEST tmp;
1886
1887 while (len > 0)
1888 {
1889 /* By using store_unsigned_integer we avoid having to do
1890 anything special for small big-endian values. */
1891 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1892 store_unsigned_integer (valbuf,
1893 (len > X_REGISTER_SIZE
1894 ? X_REGISTER_SIZE : len), byte_order, tmp);
1895 len -= X_REGISTER_SIZE;
1896 valbuf += X_REGISTER_SIZE;
1897 }
1898 }
1899 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1900 {
1901 int regno = AARCH64_V0_REGNUM;
1902 bfd_byte buf[V_REGISTER_SIZE];
1903 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1904 int len = TYPE_LENGTH (target_type);
1905
1906 regcache_cooked_read (regs, regno, buf);
1907 memcpy (valbuf, buf, len);
1908 valbuf += len;
1909 regcache_cooked_read (regs, regno + 1, buf);
1910 memcpy (valbuf, buf, len);
1911 valbuf += len;
1912 }
1913 else if (is_hfa (type))
1914 {
1915 int elements = TYPE_NFIELDS (type);
1916 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1917 int len = TYPE_LENGTH (member_type);
1918 int i;
1919
1920 for (i = 0; i < elements; i++)
1921 {
1922 int regno = AARCH64_V0_REGNUM + i;
1923 bfd_byte buf[X_REGISTER_SIZE];
1924
1925 if (aarch64_debug)
1926 {
1927 debug_printf ("read HFA return value element %d from %s\n",
1928 i + 1,
1929 gdbarch_register_name (gdbarch, regno));
1930 }
1931 regcache_cooked_read (regs, regno, buf);
1932
1933 memcpy (valbuf, buf, len);
1934 valbuf += len;
1935 }
1936 }
1937 else
1938 {
1939 /* For a structure or union the behaviour is as if the value had
1940 been stored to word-aligned memory and then loaded into
1941 registers with 64-bit load instruction(s). */
1942 int len = TYPE_LENGTH (type);
1943 int regno = AARCH64_X0_REGNUM;
1944 bfd_byte buf[X_REGISTER_SIZE];
1945
1946 while (len > 0)
1947 {
1948 regcache_cooked_read (regs, regno++, buf);
1949 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1950 len -= X_REGISTER_SIZE;
1951 valbuf += X_REGISTER_SIZE;
1952 }
1953 }
1954 }
1955
1956
1957 /* Will a function return an aggregate type in memory or in a
1958 register? Return 0 if an aggregate type can be returned in a
1959 register, 1 if it must be returned in memory. */
1960
1961 static int
1962 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1963 {
1964 int nRc;
1965 enum type_code code;
1966
1967 type = check_typedef (type);
1968
1969 /* In the AArch64 ABI, integer-like aggregate types are returned
1970 in registers. For an aggregate type to be integer like, its size
1971 must be less than or equal to 4 * X_REGISTER_SIZE. */
1972
1973 if (is_hfa (type))
1974 {
1975 /* PCS B.5 If the argument is a Named HFA, then the argument is
1976 used unmodified. */
1977 return 0;
1978 }
1979
1980 if (TYPE_LENGTH (type) > 16)
1981 {
1982 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1983 invisible reference. */
1984
1985 return 1;
1986 }
1987
1988 return 0;
1989 }
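/* A minimal standalone sketch of the decision above, assuming only
   the two rules implemented here: HFAs always come back in registers
   (PCS B.5), and anything larger than two X registers comes back via
   an invisible reference (PCS B.6). The helper name is hypothetical
   and not used elsewhere in this file. */

#if 0 /* Illustration only. */
static int
aapcs64_aggregate_in_memory_p (unsigned long length, int is_hfa_p)
{
  if (is_hfa_p)
    return 0;				/* PCS B.5: V registers. */
  return length > 2 * X_REGISTER_SIZE;	/* PCS B.6: memory. */
}
#endif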
1990
1991 /* Write into appropriate registers a function return value of type
1992 TYPE, given in virtual format. */
1993
1994 static void
1995 aarch64_store_return_value (struct type *type, struct regcache *regs,
1996 const gdb_byte *valbuf)
1997 {
1998 struct gdbarch *gdbarch = get_regcache_arch (regs);
1999 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2000
2001 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2002 {
2003 bfd_byte buf[V_REGISTER_SIZE];
2004 int len = TYPE_LENGTH (type);
2005
2006 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2007 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2008 }
2009 else if (TYPE_CODE (type) == TYPE_CODE_INT
2010 || TYPE_CODE (type) == TYPE_CODE_CHAR
2011 || TYPE_CODE (type) == TYPE_CODE_BOOL
2012 || TYPE_CODE (type) == TYPE_CODE_PTR
2013 || TYPE_CODE (type) == TYPE_CODE_REF
2014 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2015 {
2016 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2017 {
2018 /* Values of one word or less are zero/sign-extended and
2019 returned in X0. */
2020 bfd_byte tmpbuf[X_REGISTER_SIZE];
2021 LONGEST val = unpack_long (type, valbuf);
2022
2023 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2024 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2025 }
2026 else
2027 {
2028 /* Integral values greater than one word are stored in
2029 consecutive registers starting with X0. This will always
2030 be a multiple of the register size. */
2031 int len = TYPE_LENGTH (type);
2032 int regno = AARCH64_X0_REGNUM;
2033
2034 while (len > 0)
2035 {
2036 regcache_cooked_write (regs, regno++, valbuf);
2037 len -= X_REGISTER_SIZE;
2038 valbuf += X_REGISTER_SIZE;
2039 }
2040 }
2041 }
2042 else if (is_hfa (type))
2043 {
2044 int elements = TYPE_NFIELDS (type);
2045 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2046 int len = TYPE_LENGTH (member_type);
2047 int i;
2048
2049 for (i = 0; i < elements; i++)
2050 {
2051 int regno = AARCH64_V0_REGNUM + i;
2052 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2053
2054 if (aarch64_debug)
2055 {
2056 debug_printf ("write HFA return value element %d to %s\n",
2057 i + 1,
2058 gdbarch_register_name (gdbarch, regno));
2059 }
2060
2061 memcpy (tmpbuf, valbuf, len);
2062 regcache_cooked_write (regs, regno, tmpbuf);
2063 valbuf += len;
2064 }
2065 }
2066 else
2067 {
2068 /* For a structure or union the behaviour is as if the value had
2069 been stored to word-aligned memory and then loaded into
2070 registers with 64-bit load instruction(s). */
2071 int len = TYPE_LENGTH (type);
2072 int regno = AARCH64_X0_REGNUM;
2073 bfd_byte tmpbuf[X_REGISTER_SIZE];
2074
2075 while (len > 0)
2076 {
2077 memcpy (tmpbuf, valbuf,
2078 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2079 regcache_cooked_write (regs, regno++, tmpbuf);
2080 len -= X_REGISTER_SIZE;
2081 valbuf += X_REGISTER_SIZE;
2082 }
2083 }
2084 }
2085
2086 /* Implement the "return_value" gdbarch method. */
2087
2088 static enum return_value_convention
2089 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2090 struct type *valtype, struct regcache *regcache,
2091 gdb_byte *readbuf, const gdb_byte *writebuf)
2092 {
2095 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2096 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2097 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2098 {
2099 if (aarch64_return_in_memory (gdbarch, valtype))
2100 {
2101 if (aarch64_debug)
2102 debug_printf ("return value in memory\n");
2103 return RETURN_VALUE_STRUCT_CONVENTION;
2104 }
2105 }
2106
2107 if (writebuf)
2108 aarch64_store_return_value (valtype, regcache, writebuf);
2109
2110 if (readbuf)
2111 aarch64_extract_return_value (valtype, regcache, readbuf);
2112
2113 if (aarch64_debug)
2114 debug_printf ("return value in registers\n");
2115
2116 return RETURN_VALUE_REGISTER_CONVENTION;
2117 }
2118
2119 /* Implement the "get_longjmp_target" gdbarch method. */
2120
2121 static int
2122 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2123 {
2124 CORE_ADDR jb_addr;
2125 gdb_byte buf[X_REGISTER_SIZE];
2126 struct gdbarch *gdbarch = get_frame_arch (frame);
2127 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2128 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2129
2130 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2131
2132 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2133 X_REGISTER_SIZE))
2134 return 0;
2135
2136 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2137 return 1;
2138 }
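/* Worked example, assuming for illustration the values the GNU/Linux
   OS ABI code installs (tdep->jb_pc = 11, tdep->jb_elt_size = 8):
   the saved PC would be read from jb_addr + 11 * 8, i.e. 88 bytes
   into the jmp_buf that X0 points at on entry to longjmp. */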
2139
2140 /* Implement the "gen_return_address" gdbarch method. */
2141
2142 static void
2143 aarch64_gen_return_address (struct gdbarch *gdbarch,
2144 struct agent_expr *ax, struct axs_value *value,
2145 CORE_ADDR scope)
2146 {
2147 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2148 value->kind = axs_lvalue_register;
2149 value->u.reg = AARCH64_LR_REGNUM;
2150 }
2151 \f
2152
2153 /* Return the pseudo register name corresponding to register regnum. */
2154
2155 static const char *
2156 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2157 {
2158 static const char *const q_name[] =
2159 {
2160 "q0", "q1", "q2", "q3",
2161 "q4", "q5", "q6", "q7",
2162 "q8", "q9", "q10", "q11",
2163 "q12", "q13", "q14", "q15",
2164 "q16", "q17", "q18", "q19",
2165 "q20", "q21", "q22", "q23",
2166 "q24", "q25", "q26", "q27",
2167 "q28", "q29", "q30", "q31",
2168 };
2169
2170 static const char *const d_name[] =
2171 {
2172 "d0", "d1", "d2", "d3",
2173 "d4", "d5", "d6", "d7",
2174 "d8", "d9", "d10", "d11",
2175 "d12", "d13", "d14", "d15",
2176 "d16", "d17", "d18", "d19",
2177 "d20", "d21", "d22", "d23",
2178 "d24", "d25", "d26", "d27",
2179 "d28", "d29", "d30", "d31",
2180 };
2181
2182 static const char *const s_name[] =
2183 {
2184 "s0", "s1", "s2", "s3",
2185 "s4", "s5", "s6", "s7",
2186 "s8", "s9", "s10", "s11",
2187 "s12", "s13", "s14", "s15",
2188 "s16", "s17", "s18", "s19",
2189 "s20", "s21", "s22", "s23",
2190 "s24", "s25", "s26", "s27",
2191 "s28", "s29", "s30", "s31",
2192 };
2193
2194 static const char *const h_name[] =
2195 {
2196 "h0", "h1", "h2", "h3",
2197 "h4", "h5", "h6", "h7",
2198 "h8", "h9", "h10", "h11",
2199 "h12", "h13", "h14", "h15",
2200 "h16", "h17", "h18", "h19",
2201 "h20", "h21", "h22", "h23",
2202 "h24", "h25", "h26", "h27",
2203 "h28", "h29", "h30", "h31",
2204 };
2205
2206 static const char *const b_name[] =
2207 {
2208 "b0", "b1", "b2", "b3",
2209 "b4", "b5", "b6", "b7",
2210 "b8", "b9", "b10", "b11",
2211 "b12", "b13", "b14", "b15",
2212 "b16", "b17", "b18", "b19",
2213 "b20", "b21", "b22", "b23",
2214 "b24", "b25", "b26", "b27",
2215 "b28", "b29", "b30", "b31",
2216 };
2217
2218 regnum -= gdbarch_num_regs (gdbarch);
2219
2220 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2221 return q_name[regnum - AARCH64_Q0_REGNUM];
2222
2223 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2224 return d_name[regnum - AARCH64_D0_REGNUM];
2225
2226 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2227 return s_name[regnum - AARCH64_S0_REGNUM];
2228
2229 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2230 return h_name[regnum - AARCH64_H0_REGNUM];
2231
2232 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2233 return b_name[regnum - AARCH64_B0_REGNUM];
2234
2235 internal_error (__FILE__, __LINE__,
2236 _("aarch64_pseudo_register_name: bad register number %d"),
2237 regnum);
2238 }
2239
2240 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2241
2242 static struct type *
2243 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2244 {
2245 regnum -= gdbarch_num_regs (gdbarch);
2246
2247 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2248 return aarch64_vnq_type (gdbarch);
2249
2250 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2251 return aarch64_vnd_type (gdbarch);
2252
2253 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2254 return aarch64_vns_type (gdbarch);
2255
2256 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2257 return aarch64_vnh_type (gdbarch);
2258
2259 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2260 return aarch64_vnb_type (gdbarch);
2261
2262 internal_error (__FILE__, __LINE__,
2263 _("aarch64_pseudo_register_type: bad register number %d"),
2264 regnum);
2265 }
2266
2267 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2268
2269 static int
2270 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2271 struct reggroup *group)
2272 {
2273 regnum -= gdbarch_num_regs (gdbarch);
2274
2275 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2276 return group == all_reggroup || group == vector_reggroup;
2277 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2278 return (group == all_reggroup || group == vector_reggroup
2279 || group == float_reggroup);
2280 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2281 return (group == all_reggroup || group == vector_reggroup
2282 || group == float_reggroup);
2283 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2284 return group == all_reggroup || group == vector_reggroup;
2285 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2286 return group == all_reggroup || group == vector_reggroup;
2287
2288 return group == all_reggroup;
2289 }
2290
2291 /* Implement the "pseudo_register_read_value" gdbarch method. */
2292
2293 static struct value *
2294 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2295 struct regcache *regcache,
2296 int regnum)
2297 {
2298 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2299 struct value *result_value;
2300 gdb_byte *buf;
2301
2302 result_value = allocate_value (register_type (gdbarch, regnum));
2303 VALUE_LVAL (result_value) = lval_register;
2304 VALUE_REGNUM (result_value) = regnum;
2305 buf = value_contents_raw (result_value);
2306
2307 regnum -= gdbarch_num_regs (gdbarch);
2308
2309 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2310 {
2311 enum register_status status;
2312 unsigned v_regnum;
2313
2314 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2315 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2316 if (status != REG_VALID)
2317 mark_value_bytes_unavailable (result_value, 0,
2318 TYPE_LENGTH (value_type (result_value)));
2319 else
2320 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2321 return result_value;
2322 }
2323
2324 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2325 {
2326 enum register_status status;
2327 unsigned v_regnum;
2328
2329 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2330 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2331 if (status != REG_VALID)
2332 mark_value_bytes_unavailable (result_value, 0,
2333 TYPE_LENGTH (value_type (result_value)));
2334 else
2335 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2336 return result_value;
2337 }
2338
2339 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2340 {
2341 enum register_status status;
2342 unsigned v_regnum;
2343
2344 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2345 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2346 if (status != REG_VALID)
2347 mark_value_bytes_unavailable (result_value, 0,
2348 TYPE_LENGTH (value_type (result_value)));
2349 else
2350 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2351 return result_value;
2352 }
2353
2354 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2355 {
2356 enum register_status status;
2357 unsigned v_regnum;
2358
2359 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2360 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2361 if (status != REG_VALID)
2362 mark_value_bytes_unavailable (result_value, 0,
2363 TYPE_LENGTH (value_type (result_value)));
2364 else
2365 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2366 return result_value;
2367 }
2368
2369 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2370 {
2371 enum register_status status;
2372 unsigned v_regnum;
2373
2374 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2375 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2376 if (status != REG_VALID)
2377 mark_value_bytes_unavailable (result_value, 0,
2378 TYPE_LENGTH (value_type (result_value)));
2379 else
2380 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2381 return result_value;
2382 }
2383
2384 gdb_assert_not_reached ("regnum out of bounds");
2385 }
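/* The five branches above share one pattern: each scalar pseudo
   register Qn/Dn/Sn/Hn/Bn aliases the low bytes of the raw Vn
   register. Given the pseudo base numbers used in this file (Q, D,
   S, H and B in consecutive 32-register bands starting at 0), the
   dispatch condenses to the sketch below; the helper is hypothetical
   and only illustrates the arithmetic. */

#if 0 /* Illustration only. */
static int
pseudo_to_v_offset (int regnum, int *copy_size)
{
  static const int sizes[] = { 16, 8, 4, 2, 1 };	/* Q D S H B */

  *copy_size = sizes[regnum / 32];	/* Bytes to copy from Vn. */
  return regnum % 32;			/* Index of the raw Vn register. */
}
#endif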
2386
2387 /* Implement the "pseudo_register_write" gdbarch method. */
2388
2389 static void
2390 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2391 int regnum, const gdb_byte *buf)
2392 {
2393 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2394
2395 /* Ensure the register buffer is zero. We want gdb writes of the
2396 various 'scalar' pseudo registers to behave like architectural
2397 writes: register width bytes are written and the remainder is set
2398 to zero. */
2399 memset (reg_buf, 0, sizeof (reg_buf));
2400
2401 regnum -= gdbarch_num_regs (gdbarch);
2402
2403 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2404 {
2405 /* Pseudo Q registers. */
2406 unsigned v_regnum;
2407
2408 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2409 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2410 regcache_raw_write (regcache, v_regnum, reg_buf);
2411 return;
2412 }
2413
2414 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2415 {
2416 /* Pseudo D registers. */
2417 unsigned v_regnum;
2418
2419 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2420 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2421 regcache_raw_write (regcache, v_regnum, reg_buf);
2422 return;
2423 }
2424
2425 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2426 {
2427 unsigned v_regnum;
2428
2429 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2430 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2431 regcache_raw_write (regcache, v_regnum, reg_buf);
2432 return;
2433 }
2434
2435 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2436 {
2437 /* Pseudo H registers. */
2438 unsigned v_regnum;
2439
2440 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2441 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2442 regcache_raw_write (regcache, v_regnum, reg_buf);
2443 return;
2444 }
2445
2446 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2447 {
2448 /* Pseudo B registers. */
2449 unsigned v_regnum;
2450
2451 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2452 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2453 regcache_raw_write (regcache, v_regnum, reg_buf);
2454 return;
2455 }
2456
2457 gdb_assert_not_reached ("regnum out of bounds");
2458 }
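/* Because of the memset above, a pseudo write zero-extends into the
   full V register: writing 0x2a to b0, for example, leaves v0 as
   {0x2a, 0, 0, ..., 0} rather than preserving its upper 15 bytes. */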
2459
2460 /* Callback function for user_reg_add. */
2461
2462 static struct value *
2463 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2464 {
2465 const int *reg_p = baton;
2466
2467 return value_of_register (*reg_p, frame);
2468 }
2469 \f
2470
2471 /* Implement the "software_single_step" gdbarch method, needed to
2472 single step through atomic sequences on AArch64. */
2473
2474 static int
2475 aarch64_software_single_step (struct frame_info *frame)
2476 {
2477 struct gdbarch *gdbarch = get_frame_arch (frame);
2478 struct address_space *aspace = get_frame_address_space (frame);
2479 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2480 const int insn_size = 4;
2481 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2482 CORE_ADDR pc = get_frame_pc (frame);
2483 CORE_ADDR breaks[2] = { -1, -1 };
2484 CORE_ADDR loc = pc;
2485 CORE_ADDR closing_insn = 0;
2486 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2487 byte_order_for_code);
2488 int index;
2489 int insn_count;
2490 int bc_insn_count = 0; /* Conditional branch instruction count. */
2491 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2492
2493 /* Look for a Load Exclusive instruction which begins the sequence. */
2494 if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
2495 return 0;
2496
2497 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2498 {
2499 int32_t offset;
2500 unsigned cond;
2501
2502 loc += insn_size;
2503 insn = read_memory_unsigned_integer (loc, insn_size,
2504 byte_order_for_code);
2505
2506 /* Check if the instruction is a conditional branch. */
2507 if (aarch64_decode_bcond (loc, insn, &cond, &offset))
2508 {
2509 if (bc_insn_count >= 1)
2510 return 0;
2511
2512 /* It is, so we'll try to set a breakpoint at the destination. */
2513 breaks[1] = loc + offset;
2514
2515 bc_insn_count++;
2516 last_breakpoint++;
2517 }
2518
2519 /* Look for the Store Exclusive which closes the atomic sequence. */
2520 if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
2521 {
2522 closing_insn = loc;
2523 break;
2524 }
2525 }
2526
2527 /* We didn't find a closing Store Exclusive instruction, fall back. */
2528 if (!closing_insn)
2529 return 0;
2530
2531 /* Insert breakpoint after the end of the atomic sequence. */
2532 breaks[0] = loc + insn_size;
2533
2534 /* Check for duplicated breakpoints, and also check that the second
2535 breakpoint is not within the atomic sequence. */
2536 if (last_breakpoint
2537 && (breaks[1] == breaks[0]
2538 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2539 last_breakpoint = 0;
2540
2541 /* Insert the breakpoint at the end of the sequence, and one at the
2542 destination of the conditional branch, if it exists. */
2543 for (index = 0; index <= last_breakpoint; index++)
2544 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2545
2546 return 1;
2547 }
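/* The kind of sequence the scanner above matches (illustrative
   assembly, not anything GDB emits):

	retry:
	  ldaxr	w1, [x0]	// load-exclusive opens the sequence
	  add	w1, w1, #1
	  stlxr	w2, w1, [x0]	// store-exclusive closes it
	  cbnz	w2, retry

   Stopping inside the sequence would clear the exclusive monitor and
   make the store-exclusive fail forever, so the step breakpoint goes
   after the closing instruction, plus one at the target of any B.cond
   branch found inside the sequence. */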
2548
2549 /* Initialize the current architecture based on INFO. If possible,
2550 re-use an architecture from ARCHES, which is a list of
2551 architectures already created during this debugging session.
2552
2553 Called e.g. at program startup, when reading a core file, and when
2554 reading a binary file. */
2555
2556 static struct gdbarch *
2557 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2558 {
2559 struct gdbarch_tdep *tdep;
2560 struct gdbarch *gdbarch;
2561 struct gdbarch_list *best_arch;
2562 struct tdesc_arch_data *tdesc_data = NULL;
2563 const struct target_desc *tdesc = info.target_desc;
2564 int i;
2566 int valid_p = 1;
2567 const struct tdesc_feature *feature;
2568 int num_regs = 0;
2569 int num_pseudo_regs = 0;
2570
2571 /* Ensure we always have a target descriptor. */
2572 if (!tdesc_has_registers (tdesc))
2573 tdesc = tdesc_aarch64;
2574
2575 gdb_assert (tdesc);
2576
2577 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2578
2579 if (feature == NULL)
2580 return NULL;
2581
2582 tdesc_data = tdesc_data_alloc ();
2583
2584 /* Validate that the descriptor provides the mandatory core R registers
2585 and allocate their numbers. */
2586 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2587 valid_p &=
2588 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2589 aarch64_r_register_names[i]);
2590
2591 num_regs = AARCH64_X0_REGNUM + i;
2592
2593 /* Look for the V registers. */
2594 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2595 if (feature)
2596 {
2597 /* Validate that the descriptor provides the mandatory V registers
2598 and allocate their numbers. */
2599 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2600 valid_p &=
2601 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2602 aarch64_v_register_names[i]);
2603
2604 num_regs = AARCH64_V0_REGNUM + i;
2605
2606 num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos. */
2607 num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos. */
2608 num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos. */
2609 num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos. */
2610 num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos. */
2611 }
2612
2613 if (!valid_p)
2614 {
2615 tdesc_data_cleanup (tdesc_data);
2616 return NULL;
2617 }
2618
2619 /* AArch64 code is always little-endian. */
2620 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2621
2622 /* If there is already a candidate, use it. */
2623 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2624 best_arch != NULL;
2625 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2626 {
2627 /* Found a match. */
2628 break;
2629 }
2630
2631 if (best_arch != NULL)
2632 {
2633 if (tdesc_data != NULL)
2634 tdesc_data_cleanup (tdesc_data);
2635 return best_arch->gdbarch;
2636 }
2637
2638 tdep = XCNEW (struct gdbarch_tdep);
2639 gdbarch = gdbarch_alloc (&info, tdep);
2640
2641 /* This should be low enough for everything. */
2642 tdep->lowest_pc = 0x20;
2643 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2644 tdep->jb_elt_size = 8;
2645
2646 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2647 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2648
2649 /* Frame handling. */
2650 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2651 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2652 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2653
2654 /* Advance PC across function entry code. */
2655 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2656
2657 /* The stack grows downward. */
2658 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2659
2660 /* Breakpoint manipulation. */
2661 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2662 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2663 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2664
2665 /* Information about registers, etc. */
2666 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2667 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2668 set_gdbarch_num_regs (gdbarch, num_regs);
2669
2670 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2671 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2672 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2673 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2674 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2675 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2676 aarch64_pseudo_register_reggroup_p);
2677
2678 /* ABI */
2679 set_gdbarch_short_bit (gdbarch, 16);
2680 set_gdbarch_int_bit (gdbarch, 32);
2681 set_gdbarch_float_bit (gdbarch, 32);
2682 set_gdbarch_double_bit (gdbarch, 64);
2683 set_gdbarch_long_double_bit (gdbarch, 128);
2684 set_gdbarch_long_bit (gdbarch, 64);
2685 set_gdbarch_long_long_bit (gdbarch, 64);
2686 set_gdbarch_ptr_bit (gdbarch, 64);
2687 set_gdbarch_char_signed (gdbarch, 0);
2688 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2689 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2690 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2691
2692 /* Internal <-> external register number maps. */
2693 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2694
2695 /* Returning results. */
2696 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2697
2698 /* Disassembly. */
2699 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2700
2701 /* Virtual tables. */
2702 set_gdbarch_vbit_in_delta (gdbarch, 1);
2703
2704 /* Hook in the ABI-specific overrides, if they have been registered. */
2705 info.target_desc = tdesc;
2706 info.tdep_info = (void *) tdesc_data;
2707 gdbarch_init_osabi (info, gdbarch);
2708
2709 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2710
2711 /* Add some default predicates. */
2712 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2713 dwarf2_append_unwinders (gdbarch);
2714 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2715
2716 frame_base_set_default (gdbarch, &aarch64_normal_base);
2717
2718 /* Now that we have tuned the configuration, set a few final things,
2719 based on what the OS ABI has told us. */
2720
2721 if (tdep->jb_pc >= 0)
2722 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2723
2724 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2725
2726 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2727
2728 /* Add standard register aliases. */
2729 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2730 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2731 value_of_aarch64_user_reg,
2732 &aarch64_register_aliases[i].regnum);
2733
2734 return gdbarch;
2735 }
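/* For reference, a target description satisfying the feature lookups
   above carries XML of roughly this shape (abridged; see the GDB
   manual's AArch64 Features section):

     <feature name="org.gnu.gdb.aarch64.core">
       <reg name="x0" bitsize="64"/> ... <reg name="cpsr" bitsize="32"/>
     </feature>
     <feature name="org.gnu.gdb.aarch64.fpu">
       <reg name="v0" bitsize="128"/> ... <reg name="fpcr" bitsize="32"/>
     </feature>

   Only the core feature is mandatory; without the fpu feature no V
   registers and no scalar pseudo registers are created. */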
2736
2737 static void
2738 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2739 {
2740 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2741
2742 if (tdep == NULL)
2743 return;
2744
2745 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
2746 paddress (gdbarch, tdep->lowest_pc));
2747 }
2748
2749 /* Suppress warning from -Wmissing-prototypes. */
2750 extern initialize_file_ftype _initialize_aarch64_tdep;
2751
2752 void
2753 _initialize_aarch64_tdep (void)
2754 {
2755 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2756 aarch64_dump_tdep);
2757
2758 initialize_tdesc_aarch64 ();
2759
2760 /* Debug this file's internals. */
2761 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2762 Set AArch64 debugging."), _("\
2763 Show AArch64 debugging."), _("\
2764 When on, AArch64 specific debugging is enabled."),
2765 NULL,
2766 show_aarch64_debug,
2767 &setdebuglist, &showdebuglist);
2768 }
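/* The command pair registered above is driven from the CLI, e.g.:

     (gdb) set debug aarch64 on
     (gdb) show debug aarch64

   Turning it on makes the debug_printf calls throughout this file
   visible. */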
2769
2770 /* AArch64 process record-replay related structures, defines, etc. */
2771
2772 #define submask(x) ((1L << ((x) + 1)) - 1)
2773 #define bit(obj,st) (((obj) >> (st)) & 1)
2774 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2775
2776 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2777 do \
2778 { \
2779 unsigned int reg_len = LENGTH; \
2780 if (reg_len) \
2781 { \
2782 REGS = XNEWVEC (uint32_t, reg_len); \
2783 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
2784 } \
2785 } \
2786 while (0)
2787
2788 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2789 do \
2790 { \
2791 unsigned int mem_len = LENGTH; \
2792 if (mem_len) \
2793 { \
2794 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2795 memcpy (&MEMS->len, &RECORD_BUF[0], \
2796 sizeof (struct aarch64_mem_r) * LENGTH); \
2797 } \
2798 } \
2799 while (0)
2800
2801 /* AArch64 record/replay structures and enumerations. */
2802
2803 struct aarch64_mem_r
2804 {
2805 uint64_t len; /* Record length. */
2806 uint64_t addr; /* Memory address. */
2807 };
2808
2809 enum aarch64_record_result
2810 {
2811 AARCH64_RECORD_SUCCESS,
2812 AARCH64_RECORD_FAILURE,
2813 AARCH64_RECORD_UNSUPPORTED,
2814 AARCH64_RECORD_UNKNOWN
2815 };
2816
2817 typedef struct insn_decode_record_t
2818 {
2819 struct gdbarch *gdbarch;
2820 struct regcache *regcache;
2821 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2822 uint32_t aarch64_insn; /* Insn to be recorded. */
2823 uint32_t mem_rec_count; /* Count of memory records. */
2824 uint32_t reg_rec_count; /* Count of register records. */
2825 uint32_t *aarch64_regs; /* Registers to be recorded. */
2826 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2827 } insn_decode_record;
2828
2829 /* Record handler for data processing - register instructions. */
2830
2831 static unsigned int
2832 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2833 {
2834 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2835 uint32_t record_buf[4];
2836
2837 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2838 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2839 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2840
2841 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2842 {
2843 uint8_t setflags;
2844
2845 /* Logical (shifted register). */
2846 if (insn_bits24_27 == 0x0a)
2847 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2848 /* Add/subtract. */
2849 else if (insn_bits24_27 == 0x0b)
2850 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2851 else
2852 return AARCH64_RECORD_UNKNOWN;
2853
2854 record_buf[0] = reg_rd;
2855 aarch64_insn_r->reg_rec_count = 1;
2856 if (setflags)
2857 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2858 }
2859 else
2860 {
2861 if (insn_bits24_27 == 0x0b)
2862 {
2863 /* Data-processing (3 source). */
2864 record_buf[0] = reg_rd;
2865 aarch64_insn_r->reg_rec_count = 1;
2866 }
2867 else if (insn_bits24_27 == 0x0a)
2868 {
2869 if (insn_bits21_23 == 0x00)
2870 {
2871 /* Add/subtract (with carry). */
2872 record_buf[0] = reg_rd;
2873 aarch64_insn_r->reg_rec_count = 1;
2874 if (bit (aarch64_insn_r->aarch64_insn, 29))
2875 {
2876 record_buf[1] = AARCH64_CPSR_REGNUM;
2877 aarch64_insn_r->reg_rec_count = 2;
2878 }
2879 }
2880 else if (insn_bits21_23 == 0x02)
2881 {
2882 /* Conditional compare (register) and conditional compare
2883 (immediate) instructions. */
2884 record_buf[0] = AARCH64_CPSR_REGNUM;
2885 aarch64_insn_r->reg_rec_count = 1;
2886 }
2887 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2888 {
2889 /* Conditional select. */
2890 /* Data-processing (2 source). */
2891 /* Data-processing (1 source). */
2892 record_buf[0] = reg_rd;
2893 aarch64_insn_r->reg_rec_count = 1;
2894 }
2895 else
2896 return AARCH64_RECORD_UNKNOWN;
2897 }
2898 }
2899
2900 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2901 record_buf);
2902 return AARCH64_RECORD_SUCCESS;
2903 }
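/* Worked example for the decoder above: ADDS x1, x2, x3 encodes as
   0xab030041, so

     bits (insn, 0, 4)   = 0x01  -> Rd, recorded in record_buf[0]
     bit  (insn, 28)     = 0     -> shifted-register group
     bits (insn, 24, 27) = 0x0b  -> add/subtract
     bit  (insn, 29)     = 1     -> S flag set, so CPSR is recorded too. */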
2904
2905 /* Record handler for data processing - immediate instructions. */
2906
2907 static unsigned int
2908 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2909 {
2910 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2911 uint32_t record_buf[4];
2912
2913 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2914 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2915 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2916 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2917
2918 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2919 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2920 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2921 {
2922 record_buf[0] = reg_rd;
2923 aarch64_insn_r->reg_rec_count = 1;
2924 }
2925 else if (insn_bits24_27 == 0x01)
2926 {
2927 /* Add/Subtract (immediate). */
2928 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2929 record_buf[0] = reg_rd;
2930 aarch64_insn_r->reg_rec_count = 1;
2931 if (setflags)
2932 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2933 }
2934 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2935 {
2936 /* Logical (immediate). */
2937 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2938 record_buf[0] = reg_rd;
2939 aarch64_insn_r->reg_rec_count = 1;
2940 if (setflags)
2941 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2942 }
2943 else
2944 return AARCH64_RECORD_UNKNOWN;
2945
2946 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2947 record_buf);
2948 return AARCH64_RECORD_SUCCESS;
2949 }
2950
2951 /* Record handler for branch, exception generation and system instructions. */
2952
2953 static unsigned int
2954 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
2955 {
2956 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
2957 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
2958 uint32_t record_buf[4];
2959
2960 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2961 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
2962 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
2963
2964 if (insn_bits28_31 == 0x0d)
2965 {
2966 /* Exception generation instructions. */
2967 if (insn_bits24_27 == 0x04)
2968 {
2969 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
2970 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
2971 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
2972 {
2973 ULONGEST svc_number;
2974
2975 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
2976 &svc_number);
2977 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
2978 svc_number);
2979 }
2980 else
2981 return AARCH64_RECORD_UNSUPPORTED;
2982 }
2983 /* System instructions. */
2984 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
2985 {
2986 uint32_t reg_rt, reg_crn;
2987
2988 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2989 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
2990
2991 /* Record rt in case of sysl and mrs instructions. */
2992 if (bit (aarch64_insn_r->aarch64_insn, 21))
2993 {
2994 record_buf[0] = reg_rt;
2995 aarch64_insn_r->reg_rec_count = 1;
2996 }
2997 /* Record cpsr for hint and msr(immediate) instructions. */
2998 else if (reg_crn == 0x02 || reg_crn == 0x04)
2999 {
3000 record_buf[0] = AARCH64_CPSR_REGNUM;
3001 aarch64_insn_r->reg_rec_count = 1;
3002 }
3003 }
3004 /* Unconditional branch (register). */
3005 else if ((insn_bits24_27 & 0x0e) == 0x06)
3006 {
3007 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3008 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3009 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3010 }
3011 else
3012 return AARCH64_RECORD_UNKNOWN;
3013 }
3014 /* Unconditional branch (immediate). */
3015 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3016 {
3017 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3018 if (bit (aarch64_insn_r->aarch64_insn, 31))
3019 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3020 }
3021 else
3022 /* Compare & branch (immediate), Test & branch (immediate) and
3023 Conditional branch (immediate). */
3024 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3025
3026 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3027 record_buf);
3028 return AARCH64_RECORD_SUCCESS;
3029 }
3030
3031 /* Record handler for advanced SIMD load and store instructions. */
3032
3033 static unsigned int
3034 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3035 {
3036 CORE_ADDR address;
3037 uint64_t addr_offset = 0;
3038 uint32_t record_buf[24];
3039 uint64_t record_buf_mem[24];
3040 uint32_t reg_rn, reg_rt;
3041 uint32_t reg_index = 0, mem_index = 0;
3042 uint8_t opcode_bits, size_bits;
3043
3044 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3045 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3046 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3047 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3048 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3049
3050 if (record_debug)
3051 debug_printf ("Process record: Advanced SIMD load/store\n");
3052
3053 /* Load/store single structure. */
3054 if (bit (aarch64_insn_r->aarch64_insn, 24))
3055 {
3056 uint8_t sindex, scale, selem, esize, replicate = 0;
3057 scale = opcode_bits >> 2;
3058 selem = ((opcode_bits & 0x02) |
3059 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3060 switch (scale)
3061 {
3062 case 1:
3063 if (size_bits & 0x01)
3064 return AARCH64_RECORD_UNKNOWN;
3065 break;
3066 case 2:
3067 if ((size_bits >> 1) & 0x01)
3068 return AARCH64_RECORD_UNKNOWN;
3069 if (size_bits & 0x01)
3070 {
3071 if (!((opcode_bits >> 1) & 0x01))
3072 scale = 3;
3073 else
3074 return AARCH64_RECORD_UNKNOWN;
3075 }
3076 break;
3077 case 3:
3078 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3079 {
3080 scale = size_bits;
3081 replicate = 1;
3082 break;
3083 }
3084 else
3085 return AARCH64_RECORD_UNKNOWN;
3086 default:
3087 break;
3088 }
3089 esize = 8 << scale;
3090 if (replicate)
3091 for (sindex = 0; sindex < selem; sindex++)
3092 {
3093 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3094 reg_rt = (reg_rt + 1) % 32;
3095 }
3096 else
3097 {
3098 for (sindex = 0; sindex < selem; sindex++)
3099 if (bit (aarch64_insn_r->aarch64_insn, 22))
3100 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3101 else
3102 {
3103 record_buf_mem[mem_index++] = esize / 8;
3104 record_buf_mem[mem_index++] = address + addr_offset;
3105 }
3106 addr_offset = addr_offset + (esize / 8);
3107 reg_rt = (reg_rt + 1) % 32;
3108 }
3109 }
3110 /* Load/store multiple structure. */
3111 else
3112 {
3113 uint8_t selem, esize, rpt, elements;
3114 uint8_t eindex, rindex;
3115
3116 esize = 8 << size_bits;
3117 if (bit (aarch64_insn_r->aarch64_insn, 30))
3118 elements = 128 / esize;
3119 else
3120 elements = 64 / esize;
3121
3122 switch (opcode_bits)
3123 {
3124 /* LD/ST4 (4 Registers). */
3125 case 0:
3126 rpt = 1;
3127 selem = 4;
3128 break;
3129 /* LD/ST1 (4 Registers). */
3130 case 2:
3131 rpt = 4;
3132 selem = 1;
3133 break;
3134 /* LD/ST3 (3 Registers). */
3135 case 4:
3136 rpt = 1;
3137 selem = 3;
3138 break;
3139 /* LD/ST1 (3 Registers). */
3140 case 6:
3141 rpt = 3;
3142 selem = 1;
3143 break;
3144 /* LD/ST1 (1 Register). */
3145 case 7:
3146 rpt = 1;
3147 selem = 1;
3148 break;
3149 /* LD/ST2 (2 Registers). */
3150 case 8:
3151 rpt = 1;
3152 selem = 2;
3153 break;
3154 /* LD/ST1 (2 Registers). */
3155 case 10:
3156 rpt = 2;
3157 selem = 1;
3158 break;
3159 default:
3160 return AARCH64_RECORD_UNSUPPORTED;
3162 }
3163 for (rindex = 0; rindex < rpt; rindex++)
3164 for (eindex = 0; eindex < elements; eindex++)
3165 {
3166 uint8_t reg_tt, sindex;
3167 reg_tt = (reg_rt + rindex) % 32;
3168 for (sindex = 0; sindex < selem; sindex++)
3169 {
3170 if (bit (aarch64_insn_r->aarch64_insn, 22))
3171 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3172 else
3173 {
3174 record_buf_mem[mem_index++] = esize / 8;
3175 record_buf_mem[mem_index++] = address + addr_offset;
3176 }
3177 addr_offset = addr_offset + (esize / 8);
3178 reg_tt = (reg_tt + 1) % 32;
3179 }
3180 }
3181 }
3182
3183 if (bit (aarch64_insn_r->aarch64_insn, 23))
3184 record_buf[reg_index++] = reg_rn;
3185
3186 aarch64_insn_r->reg_rec_count = reg_index;
3187 aarch64_insn_r->mem_rec_count = mem_index / 2;
3188 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3189 record_buf_mem);
3190 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3191 record_buf);
3192 return AARCH64_RECORD_SUCCESS;
3193 }
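/* Worked example: LD4 {v0.4s-v3.4s}, [x0] is a load/store multiple
   structure form with opcode_bits = 0 (rpt = 1, selem = 4),
   size_bits = 2 (esize = 32) and the Q bit set (elements = 4), so
   the loops record v0..v3 once per element, 16 entries in all; with
   no writeback (bit 23 clear), x0 itself is not recorded. */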
3194
3195 /* Record handler for load and store instructions. */
3196
3197 static unsigned int
3198 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3199 {
3200 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3201 uint8_t insn_bit23, insn_bit21;
3202 uint8_t opc, size_bits, ld_flag, vector_flag;
3203 uint32_t reg_rn, reg_rt, reg_rt2;
3204 uint64_t datasize, offset;
3205 uint32_t record_buf[8];
3206 uint64_t record_buf_mem[8];
3207 CORE_ADDR address;
3208
3209 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3210 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3211 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3212 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3213 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3214 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3215 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3216 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3217 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3218 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3219 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3220
3221 /* Load/store exclusive. */
3222 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3223 {
3224 if (record_debug)
3225 debug_printf ("Process record: load/store exclusive\n");
3226
3227 if (ld_flag)
3228 {
3229 record_buf[0] = reg_rt;
3230 aarch64_insn_r->reg_rec_count = 1;
3231 if (insn_bit21)
3232 {
3233 record_buf[1] = reg_rt2;
3234 aarch64_insn_r->reg_rec_count = 2;
3235 }
3236 }
3237 else
3238 {
3239 if (insn_bit21)
3240 datasize = (8 << size_bits) * 2;
3241 else
3242 datasize = (8 << size_bits);
3243 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3244 &address);
3245 record_buf_mem[0] = datasize / 8;
3246 record_buf_mem[1] = address;
3247 aarch64_insn_r->mem_rec_count = 1;
3248 if (!insn_bit23)
3249 {
3250 /* Save register rs. */
3251 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3252 aarch64_insn_r->reg_rec_count = 1;
3253 }
3254 }
3255 }
3256 /* Load register (literal) instructions decoding. */
3257 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3258 {
3259 if (record_debug)
3260 debug_printf ("Process record: load register (literal)\n");
3261 if (vector_flag)
3262 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3263 else
3264 record_buf[0] = reg_rt;
3265 aarch64_insn_r->reg_rec_count = 1;
3266 }
3267 /* All types of load/store pair instructions decoding. */
3268 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3269 {
3270 if (record_debug)
3271 debug_printf ("Process record: load/store pair\n");
3272
3273 if (ld_flag)
3274 {
3275 if (vector_flag)
3276 {
3277 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3278 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3279 }
3280 else
3281 {
3282 record_buf[0] = reg_rt;
3283 record_buf[1] = reg_rt2;
3284 }
3285 aarch64_insn_r->reg_rec_count = 2;
3286 }
3287 else
3288 {
3289 uint16_t imm7_off;
3290 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3291 if (!vector_flag)
3292 size_bits = size_bits >> 1;
3293 datasize = 8 << (2 + size_bits);
3294 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3295 offset = offset << (2 + size_bits);
3296 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3297 &address);
3298 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3299 {
3300 if (imm7_off & 0x40)
3301 address = address - offset;
3302 else
3303 address = address + offset;
3304 }
3305
3306 record_buf_mem[0] = datasize / 8;
3307 record_buf_mem[1] = address;
3308 record_buf_mem[2] = datasize / 8;
3309 record_buf_mem[3] = address + (datasize / 8);
3310 aarch64_insn_r->mem_rec_count = 2;
3311 }
3312 if (bit (aarch64_insn_r->aarch64_insn, 23))
3313 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3314 }
3315 /* Load/store register (unsigned immediate) instructions. */
3316 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3317 {
3318 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3319 if (!(opc >> 1))
3320 if (opc & 0x01)
3321 ld_flag = 0x01;
3322 else
3323 ld_flag = 0x0;
3324 else
3325 if (size_bits != 0x03)
3326 ld_flag = 0x01;
3327 else
3328 return AARCH64_RECORD_UNKNOWN;
3329
3330 if (record_debug)
3331 {
3332 debug_printf ("Process record: load/store (unsigned immediate):"
3333 " size %x V %d opc %x\n", size_bits, vector_flag,
3334 opc);
3335 }
3336
3337 if (!ld_flag)
3338 {
3339 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3340 datasize = 8 << size_bits;
3341 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3342 &address);
3343 offset = offset << size_bits;
3344 address = address + offset;
3345
3346 record_buf_mem[0] = datasize >> 3;
3347 record_buf_mem[1] = address;
3348 aarch64_insn_r->mem_rec_count = 1;
3349 }
3350 else
3351 {
3352 if (vector_flag)
3353 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3354 else
3355 record_buf[0] = reg_rt;
3356 aarch64_insn_r->reg_rec_count = 1;
3357 }
3358 }
3359 /* Load/store register (register offset) instructions. */
3360 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3361 && insn_bits10_11 == 0x02 && insn_bit21)
3362 {
3363 if (record_debug)
3364 debug_printf ("Process record: load/store (register offset)\n");
3365 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3366 if (!(opc >> 1))
3367 if (opc & 0x01)
3368 ld_flag = 0x01;
3369 else
3370 ld_flag = 0x0;
3371 else
3372 if (size_bits != 0x03)
3373 ld_flag = 0x01;
3374 else
3375 return AARCH64_RECORD_UNKNOWN;
3376
3377 if (!ld_flag)
3378 {
3379 uint64_t reg_rm_val;
3380 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3381 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3382 if (bit (aarch64_insn_r->aarch64_insn, 12))
3383 offset = reg_rm_val << size_bits;
3384 else
3385 offset = reg_rm_val;
3386 datasize = 8 << size_bits;
3387 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3388 &address);
3389 address = address + offset;
3390 record_buf_mem[0] = datasize >> 3;
3391 record_buf_mem[1] = address;
3392 aarch64_insn_r->mem_rec_count = 1;
3393 }
3394 else
3395 {
3396 if (vector_flag)
3397 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3398 else
3399 record_buf[0] = reg_rt;
3400 aarch64_insn_r->reg_rec_count = 1;
3401 }
3402 }
3403 /* Load/store register (immediate and unprivileged) instructions. */
3404 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3405 && !insn_bit21)
3406 {
3407 if (record_debug)
3408 {
3409 debug_printf ("Process record: load/store "
3410 "(immediate and unprivileged)\n");
3411 }
3412 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3413 if (!(opc >> 1))
3414 if (opc & 0x01)
3415 ld_flag = 0x01;
3416 else
3417 ld_flag = 0x0;
3418 else
3419 if (size_bits != 0x03)
3420 ld_flag = 0x01;
3421 else
3422 return AARCH64_RECORD_UNKNOWN;
3423
3424 if (!ld_flag)
3425 {
3426 uint16_t imm9_off;
3427 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3428 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3429 datasize = 8 << size_bits;
3430 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3431 &address);
3432 if (insn_bits10_11 != 0x01)
3433 {
3434 if (imm9_off & 0x0100)
3435 address = address - offset;
3436 else
3437 address = address + offset;
3438 }
3439 record_buf_mem[0] = datasize >> 3;
3440 record_buf_mem[1] = address;
3441 aarch64_insn_r->mem_rec_count = 1;
3442 }
3443 else
3444 {
3445 if (vector_flag)
3446 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3447 else
3448 record_buf[0] = reg_rt;
3449 aarch64_insn_r->reg_rec_count = 1;
3450 }
3451 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3452 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3453 }
3454 /* Advanced SIMD load/store instructions. */
3455 else
3456 return aarch64_record_asimd_load_store (aarch64_insn_r);
3457
3458 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3459 record_buf_mem);
3460 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3461 record_buf);
3462 return AARCH64_RECORD_SUCCESS;
3463 }
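/* Worked example for the pair path above: STP x19, x20, [sp, #-32]!
   encodes imm7 = 0x7c, so offset = ((~0x7c & 0x7f) + 1) << 3 = 32,
   the two 8-byte stores are recorded at sp - 32 and sp - 24, and the
   writeback bit (23) causes the base register sp to be recorded as
   well. */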
3464
3465 /* Record handler for data processing SIMD and floating point instructions. */
3466
3467 static unsigned int
3468 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3469 {
3470 uint8_t insn_bit21, opcode, rmode, reg_rd;
3471 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3472 uint8_t insn_bits11_14;
3473 uint32_t record_buf[2];
3474
3475 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3476 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3477 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3478 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3479 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3480 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3481 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3482 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3483 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3484
3485 if (record_debug)
3486 debug_printf ("Process record: data processing SIMD/FP: ");
3487
3488 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3489 {
3490 /* Floating point - fixed point conversion instructions. */
3491 if (!insn_bit21)
3492 {
3493 if (record_debug)
3494 debug_printf ("FP - fixed point conversion");
3495
3496 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3497 record_buf[0] = reg_rd;
3498 else
3499 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3500 }
3501 /* Floating point - conditional compare instructions. */
3502 else if (insn_bits10_11 == 0x01)
3503 {
3504 if (record_debug)
3505 debug_printf ("FP - conditional compare");
3506
3507 record_buf[0] = AARCH64_CPSR_REGNUM;
3508 }
3509 /* Floating point - data processing (2-source) and
3510 conditional select instructions. */
3511 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3512 {
3513 if (record_debug)
3514 debug_printf ("FP - DP (2-source)");
3515
3516 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3517 }
3518 else if (insn_bits10_11 == 0x00)
3519 {
3520 /* Floating point - immediate instructions. */
3521 if ((insn_bits12_15 & 0x01) == 0x01
3522 || (insn_bits12_15 & 0x07) == 0x04)
3523 {
3524 if (record_debug)
3525 debug_printf ("FP - immediate");
3526 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3527 }
3528 /* Floating point - compare instructions. */
3529 else if ((insn_bits12_15 & 0x03) == 0x02)
3530 {
3531 if (record_debug)
3532 debug_printf ("FP - immediate");
3533 record_buf[0] = AARCH64_CPSR_REGNUM;
3534 }
3535 /* Floating point - integer conversions instructions. */
3536 else if (insn_bits12_15 == 0x00)
3537 {
3538 /* Convert float to integer instruction. */
3539 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3540 {
3541 if (record_debug)
3542 debug_printf ("float to int conversion");
3543
3544 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3545 }
3546 /* Convert integer to float instruction. */
3547 else if ((opcode >> 1) == 0x01 && !rmode)
3548 {
3549 if (record_debug)
3550 debug_printf ("int to float conversion");
3551
3552 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3553 }
3554 /* Move float to integer instruction. */
3555 else if ((opcode >> 1) == 0x03)
3556 {
3557 if (record_debug)
3558 debug_printf ("move float to int");
3559
3560 if (!(opcode & 0x01))
3561 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3562 else
3563 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3564 }
3565 else
3566 return AARCH64_RECORD_UNKNOWN;
3567 }
3568 else
3569 return AARCH64_RECORD_UNKNOWN;
3570 }
3571 else
3572 return AARCH64_RECORD_UNKNOWN;
3573 }
3574 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3575 {
3576 if (record_debug)
3577 debug_printf ("SIMD copy");
3578
3579 /* Advanced SIMD copy instructions. */
3580 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3581 && !bit (aarch64_insn_r->aarch64_insn, 15)
3582 && bit (aarch64_insn_r->aarch64_insn, 10))
3583 {
3584 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3585 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3586 else
3587 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3588 }
3589 else
3590 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3591 }
3592 /* All remaining floating point or advanced SIMD instructions. */
3593 else
3594 {
3595 if (record_debug)
3596 debug_printf ("all remain");
3597
3598 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3599 }
3600
3601 if (record_debug)
3602 debug_printf ("\n");
3603
3604 aarch64_insn_r->reg_rec_count++;
3605 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3606 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3607 record_buf);
3608 return AARCH64_RECORD_SUCCESS;
3609 }
3610
3611 /* Decode the type of the instruction and invoke its record handler. */
3612
3613 static unsigned int
3614 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3615 {
3616 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3617
3618 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3619 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3620 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3621 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3622
3623 /* Data processing - immediate instructions. */
3624 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3625 return aarch64_record_data_proc_imm (aarch64_insn_r);
3626
3627 /* Branch, exception generation and system instructions. */
3628 if (ins_bit26 && !ins_bit27 && ins_bit28)
3629 return aarch64_record_branch_except_sys (aarch64_insn_r);
3630
3631 /* Load and store instructions. */
3632 if (!ins_bit25 && ins_bit27)
3633 return aarch64_record_load_store (aarch64_insn_r);
3634
3635 /* Data processing - register instructions. */
3636 if (ins_bit25 && !ins_bit26 && ins_bit27)
3637 return aarch64_record_data_proc_reg (aarch64_insn_r);
3638
3639 /* Data processing - SIMD and floating point instructions. */
3640 if (ins_bit25 && ins_bit26 && ins_bit27)
3641 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3642
3643 return AARCH64_RECORD_UNSUPPORTED;
3644 }
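/* The dispatch above keys on bits 25-28, the major opcode groups of
   the A64 encoding. For example, ADD x1, x1, #1 = 0x91000421 has
   bit 28 set and bits 26-27 clear, so it is routed to
   aarch64_record_data_proc_imm. */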
3645
3646 /* Cleans up local record registers and memory allocations. */
3647
3648 static void
3649 deallocate_reg_mem (insn_decode_record *record)
3650 {
3651 xfree (record->aarch64_regs);
3652 xfree (record->aarch64_mems);
3653 }
3654
3655 /* Parse the current instruction and record the values of the registers
3656 and memory that it will change to record_arch_list.
3657 Return -1 if something goes wrong. */
3658
3659 int
3660 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3661 CORE_ADDR insn_addr)
3662 {
3663 uint32_t rec_no = 0;
3664 uint8_t insn_size = 4;
3665 uint32_t ret = 0;
3667 gdb_byte buf[insn_size];
3668 insn_decode_record aarch64_record;
3669
3670 memset (&buf[0], 0, insn_size);
3671 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3672 target_read_memory (insn_addr, &buf[0], insn_size);
3673 aarch64_record.aarch64_insn
3674 = (uint32_t) extract_unsigned_integer (&buf[0],
3675 insn_size,
3676 gdbarch_byte_order (gdbarch));
3677 aarch64_record.regcache = regcache;
3678 aarch64_record.this_addr = insn_addr;
3679 aarch64_record.gdbarch = gdbarch;
3680
3681 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3682 if (ret == AARCH64_RECORD_UNSUPPORTED)
3683 {
3684 printf_unfiltered (_("Process record does not support instruction "
3685 "0x%0x at address %s.\n"),
3686 aarch64_record.aarch64_insn,
3687 paddress (gdbarch, insn_addr));
3688 ret = -1;
3689 }
3690
3691 if (0 == ret)
3692 {
3693 /* Record registers. */
3694 record_full_arch_list_add_reg (aarch64_record.regcache,
3695 AARCH64_PC_REGNUM);
3696 /* Always record register CPSR. */
3697 record_full_arch_list_add_reg (aarch64_record.regcache,
3698 AARCH64_CPSR_REGNUM);
3699 if (aarch64_record.aarch64_regs)
3700 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3701 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3702 aarch64_record.aarch64_regs[rec_no]))
3703 ret = -1;
3704
3705 /* Record memories. */
3706 if (aarch64_record.aarch64_mems)
3707 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3708 if (record_full_arch_list_add_mem
3709 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3710 aarch64_record.aarch64_mems[rec_no].len))
3711 ret = -1;
3712
3713 if (record_full_arch_list_add_end ())
3714 ret = -1;
3715 }
3716
3717 deallocate_reg_mem (&aarch64_record);
3718 return ret;
3719 }
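/* This function is not hooked up here: the OS ABI code (for example
   aarch64-linux-tdep.c) is expected to install it with
   set_gdbarch_process_record and to supply the
   tdep->aarch64_syscall_record callback consulted by
   aarch64_record_branch_except_sys. */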