[AArch64] Track FP registers in prologue analyzer
gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
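
/* For illustration only (a hypothetical compiler output, not taken
   from the source), a prologue this analyzer fully recognizes might
   look like:

	sub	sp, sp, #48
	stp	x29, x30, [sp, #32]
	add	x29, sp, #32
	stp	d8, d9, [sp, #16]

   The ADD/SUB, the STP of X registers, and (with this change) the STP
   of D registers are all tracked, so the frame size, the frame
   register (x29) and the stack slots holding x29, x30, d8 and d9 are
   all recovered.  */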

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

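  /* The bottom 64 bits of each V register were tracked above in REGS
     slots AARCH64_X_REGISTER_COUNT .. AARCH64_X_REGISTER_COUNT + 31;
     map any that were saved back to the D pseudo registers, which are
     numbered after all of the raw registers.  */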
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
                            &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	{
	  /* Use the natural alignment for vector types (the same as for
	     scalar types), but the maximum alignment is 128-bit.  */
	  if (TYPE_LENGTH (t) > 16)
	    return 16;
	  else
	    return TYPE_LENGTH (t);
	}
      else
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
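
/* For example (illustrative only, not from the source): this returns
   8 for "struct { char c; double d; }" because the largest field
   alignment wins, and 16 for any vector longer than 16 bytes, the
   maximum alignment allowed.  */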

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);

	if (TYPE_VECTOR (ty))
	  return 0;

	if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
	    && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
		|| (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
		    && TYPE_VECTOR (target_ty))))
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	/* HFA or HVA has at most four members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
		|| (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
		    && TYPE_VECTOR (member0_type)))
	      {
		int i;

		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
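
/* For example (illustrative only, not from the source):
   "struct { float x, y, z; }" is an HFA of three floats, while
   "struct { float x; double y; }" is not, because the member types
   differ in size.  */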

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
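
/* For example (illustrative only, not from the source): a 16-byte
   struct passed when INFO->ngrn is 0 is split across x0 and x1, eight
   bytes apiece; the caller is then expected to advance NGRN by
   two.  */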

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
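
/* For example (illustrative only, not from the source): a 12-byte
   aggregate with 4-byte natural alignment is rounded up to the 8-byte
   minimum stack alignment, so the 12 data bytes are followed by a
   4-byte padding item and NSAA advances by 16 in total.  */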

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
		    struct type *type,
		    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
		  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp, int struct_return,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward; ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
	      || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_COMPLEX:
	  if (info.nsrn <= 6)
	    {
	      const bfd_byte *buf = value_contents (arg);
	      struct type *target_type =
		check_typedef (TYPE_TARGET_TYPE (arg_type));

	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type), buf);
	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type),
			 buf + TYPE_LENGTH (target_type));
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  break;
	case TYPE_CODE_FLT:
	  pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (is_hfa_or_hva (arg_type))
	    {
	      int elements = TYPE_NFIELDS (arg_type);

	      /* Homogeneous Aggregates */
	      if (info.nsrn + elements < 8)
		{
		  int i;

		  for (i = 0; i < elements; i++)
		    {
		      /* We know that we have sufficient registers
			 available, therefore this will never fall back
			 to the stack.  */
		      struct value *field =
			value_primitive_field (arg, 0, i, arg_type);
		      struct type *field_type =
			check_typedef (value_type (field));

		      pass_in_v_or_stack (gdbarch, regcache, &info,
					  field_type, field);
		    }
		}
	      else
		{
		  info.nsrn = 8;
		  pass_on_stack (&info, arg_type, arg);
		}
	    }
	  else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
		   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
	    {
	      /* Short vector types are passed in V registers.  */
	      pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
	write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
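
/* For example (illustrative only, not from the source): for a call
   such as "f (1.0, 2.0, big)" where BIG is a 24-byte struct, the two
   doubles land in v0 and v1 via pass_in_v_or_stack, while BIG is
   copied below SP and passed by invisible reference in x0
   (PCS B.7).  */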

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}
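
/* A usage sketch (not part of this file): these union types are what
   GDB uses for the D/S/H/B pseudo register views, so at the CLI a
   command like "print $d0" displays something of the form
   "{f = ..., u = ..., s = ...}" and "print $d0.f" selects the double
   view of the register.  */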

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
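
/* For example (illustrative only): DWARF register AARCH64_DWARF_X0 + 5
   maps to GDB's x5, AARCH64_DWARF_SP maps to sp, and
   AARCH64_DWARF_V0 + 1 maps to v1; any other DWARF number yields
   -1.  */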
\f

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const gdb_byte *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
			    int *lenptr)
{
  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straightforward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[V_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
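
/* For example (illustrative only, not from the source): a function
   returning "struct { long a, b; }" has its value reconstructed from
   x0 and x1 by the final branch above, while "struct { float x, y; }"
   is an HFA and is read back from the bottom of v0 and v1.  */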

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
	 for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}

/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in X0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with X0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA or HVA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}

/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  if (aarch64_debug)
	    debug_printf ("return value in memory\n");
	  return RETURN_VALUE_STRUCT_CONVENTION;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}

/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
			    struct agent_expr *ax, struct axs_value *value,
			    CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
\f

/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  regnum);
}

/* Implement the "pseudo_register_type" tdesc_arch_data method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_type: bad register number %d"),
		  regnum);
}

/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
				    struct reggroup *group)
{
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
2040 || group == float_reggroup);
2041 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2042 return (group == all_reggroup || group == vector_reggroup
2043 || group == float_reggroup);
2044 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2045 return group == all_reggroup || group == vector_reggroup;
2046 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2047 return group == all_reggroup || group == vector_reggroup;
2048
2049 return group == all_reggroup;
2050 }
2051
2052 /* Implement the "pseudo_register_read_value" gdbarch method. */
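/* Each Qn/Dn/Sn/Hn/Bn pseudo register is a view of the low 16, 8, 4,
   2 or 1 bytes of the corresponding Vn raw register, so a pseudo read
   fetches the whole V register and copies out the low bytes.  */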
2053
2054 static struct value *
2055 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2056 struct regcache *regcache,
2057 int regnum)
2058 {
2059 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2060 struct value *result_value;
2061 gdb_byte *buf;
2062
2063 result_value = allocate_value (register_type (gdbarch, regnum));
2064 VALUE_LVAL (result_value) = lval_register;
2065 VALUE_REGNUM (result_value) = regnum;
2066 buf = value_contents_raw (result_value);
2067
2068 regnum -= gdbarch_num_regs (gdbarch);
2069
2070 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2071 {
2072 enum register_status status;
2073 unsigned v_regnum;
2074
2075 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2076 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2077 if (status != REG_VALID)
2078 mark_value_bytes_unavailable (result_value, 0,
2079 TYPE_LENGTH (value_type (result_value)));
2080 else
2081 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2082 return result_value;
2083 }
2084
2085 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2086 {
2087 enum register_status status;
2088 unsigned v_regnum;
2089
2090 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2091 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2092 if (status != REG_VALID)
2093 mark_value_bytes_unavailable (result_value, 0,
2094 TYPE_LENGTH (value_type (result_value)));
2095 else
2096 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2097 return result_value;
2098 }
2099
2100 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2101 {
2102 enum register_status status;
2103 unsigned v_regnum;
2104
2105 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2106 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2107 if (status != REG_VALID)
2108 mark_value_bytes_unavailable (result_value, 0,
2109 TYPE_LENGTH (value_type (result_value)));
2110 else
2111 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2112 return result_value;
2113 }
2114
2115 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2116 {
2117 enum register_status status;
2118 unsigned v_regnum;
2119
2120 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2121 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2122 if (status != REG_VALID)
2123 mark_value_bytes_unavailable (result_value, 0,
2124 TYPE_LENGTH (value_type (result_value)));
2125 else
2126 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2127 return result_value;
2128 }
2129
2130 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2131 {
2132 enum register_status status;
2133 unsigned v_regnum;
2134
2135 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2136 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2137 if (status != REG_VALID)
2138 mark_value_bytes_unavailable (result_value, 0,
2139 TYPE_LENGTH (value_type (result_value)));
2140 else
2141 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2142 return result_value;
2143 }
2144
  gdb_assert_not_reached ("regnum out of bounds");
2146 }
2147
2148 /* Implement the "pseudo_register_write" gdbarch method. */
2149
2150 static void
2151 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2152 int regnum, const gdb_byte *buf)
2153 {
2154 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2155
  /* Ensure the register buffer is zero: we want GDB writes of the
     various 'scalar' pseudo registers to behave like architectural
     writes, where register-width bytes are written and the remainder
     is set to zero.  */
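  /* For example, writing the 4-byte S0 pseudo register stores those
     four bytes to V0 and clears bytes 4 through 15 of V0, just as an
     architectural write to S0 would.  */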
2160 memset (reg_buf, 0, sizeof (reg_buf));
2161
2162 regnum -= gdbarch_num_regs (gdbarch);
2163
2164 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2165 {
2166 /* pseudo Q registers */
2167 unsigned v_regnum;
2168
2169 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2170 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2171 regcache_raw_write (regcache, v_regnum, reg_buf);
2172 return;
2173 }
2174
2175 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2176 {
2177 /* pseudo D registers */
2178 unsigned v_regnum;
2179
2180 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2181 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2182 regcache_raw_write (regcache, v_regnum, reg_buf);
2183 return;
2184 }
2185
2186 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2187 {
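      /* pseudo S registers */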
2188 unsigned v_regnum;
2189
2190 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2191 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2192 regcache_raw_write (regcache, v_regnum, reg_buf);
2193 return;
2194 }
2195
2196 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2197 {
2198 /* pseudo H registers */
2199 unsigned v_regnum;
2200
2201 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2202 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2203 regcache_raw_write (regcache, v_regnum, reg_buf);
2204 return;
2205 }
2206
2207 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2208 {
2209 /* pseudo B registers */
2210 unsigned v_regnum;
2211
2212 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2213 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2214 regcache_raw_write (regcache, v_regnum, reg_buf);
2215 return;
2216 }
2217
  gdb_assert_not_reached ("regnum out of bounds");
2219 }
2220
2221 /* Callback function for user_reg_add. */
2222
2223 static struct value *
2224 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2225 {
2226 const int *reg_p = (const int *) baton;
2227
2228 return value_of_register (*reg_p, frame);
2229 }
2230 \f
2231
2232 /* Implement the "software_single_step" gdbarch method, needed to
2233 single step through atomic sequences on AArch64. */
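/* A typical atomic sequence might look like:

     retry:
       ldaxr  w1, [x0]        ; load exclusive opens the sequence
       add    w1, w1, #1
       stlxr  w2, w1, [x0]    ; store exclusive closes the sequence
       cbnz   w2, retry       ; loop back if the store lost exclusivity

   Stepping through it one instruction at a time would clear the
   exclusive monitor and the sequence could never succeed, so instead
   we place a breakpoint past the store exclusive (and one at the
   target of any conditional branch inside the sequence) and resume.  */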
2234
2235 static int
2236 aarch64_software_single_step (struct frame_info *frame)
2237 {
2238 struct gdbarch *gdbarch = get_frame_arch (frame);
2239 struct address_space *aspace = get_frame_address_space (frame);
2240 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2241 const int insn_size = 4;
2242 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2243 CORE_ADDR pc = get_frame_pc (frame);
2244 CORE_ADDR breaks[2] = { -1, -1 };
2245 CORE_ADDR loc = pc;
2246 CORE_ADDR closing_insn = 0;
2247 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2248 byte_order_for_code);
2249 int index;
2250 int insn_count;
2251 int bc_insn_count = 0; /* Conditional branch instruction count. */
2252 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2253 aarch64_inst inst;
2254
2255 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2256 return 0;
2257
2258 /* Look for a Load Exclusive instruction which begins the sequence. */
2259 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2260 return 0;
2261
2262 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2263 {
2264 loc += insn_size;
2265 insn = read_memory_unsigned_integer (loc, insn_size,
2266 byte_order_for_code);
2267
2268 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2269 return 0;
2270 /* Check if the instruction is a conditional branch. */
2271 if (inst.opcode->iclass == condbranch)
2272 {
2273 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2274
2275 if (bc_insn_count >= 1)
2276 return 0;
2277
2278 /* It is, so we'll try to set a breakpoint at the destination. */
2279 breaks[1] = loc + inst.operands[0].imm.value;
2280
2281 bc_insn_count++;
2282 last_breakpoint++;
2283 }
2284
2285 /* Look for the Store Exclusive which closes the atomic sequence. */
2286 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2287 {
2288 closing_insn = loc;
2289 break;
2290 }
2291 }
2292
2293 /* We didn't find a closing Store Exclusive instruction, fall back. */
2294 if (!closing_insn)
2295 return 0;
2296
2297 /* Insert breakpoint after the end of the atomic sequence. */
2298 breaks[0] = loc + insn_size;
2299
2300 /* Check for duplicated breakpoints, and also check that the second
2301 breakpoint is not within the atomic sequence. */
2302 if (last_breakpoint
2303 && (breaks[1] == breaks[0]
2304 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2305 last_breakpoint = 0;
2306
2307 /* Insert the breakpoint at the end of the sequence, and one at the
2308 destination of the conditional branch, if it exists. */
2309 for (index = 0; index <= last_breakpoint; index++)
2310 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2311
2312 return 1;
2313 }
2314
2315 struct displaced_step_closure
2316 {
  /* Non-zero when a conditional instruction, such as B.COND, CBZ or
     TBZ, is being displaced stepped.  */
2319 int cond;
2320
2321 /* PC adjustment offset after displaced stepping. */
2322 int32_t pc_adjust;
2323 };
2324
2325 /* Data when visiting instructions for displaced stepping. */
2326
2327 struct aarch64_displaced_step_data
2328 {
2329 struct aarch64_insn_data base;
2330
  /* The address at which the instruction will be executed.  */
2332 CORE_ADDR new_addr;
2333 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2334 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2335 /* Number of instructions in INSN_BUF. */
2336 unsigned insn_count;
2337 /* Registers when doing displaced stepping. */
2338 struct regcache *regs;
2339
2340 struct displaced_step_closure *dsc;
2341 };
2342
2343 /* Implementation of aarch64_insn_visitor method "b". */
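/* A B or BL copied from INSN_ADDR to the scratch pad at NEW_ADDR must
   have its offset rebiased by INSN_ADDR - NEW_ADDR so that the copy
   still reaches the original target.  If the rebiased offset no longer
   fits in the branch immediate, a NOP is emitted instead and the fixup
   phase adjusts the PC.  BL additionally needs LR written by hand,
   since executing the copy would record the scratch pad address.  */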
2344
2345 static void
2346 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2347 struct aarch64_insn_data *data)
2348 {
2349 struct aarch64_displaced_step_data *dsd
2350 = (struct aarch64_displaced_step_data *) data;
2351 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2352
2353 if (can_encode_int32 (new_offset, 28))
2354 {
2355 /* Emit B rather than BL, because executing BL on a new address
2356 will get the wrong address into LR. In order to avoid this,
2357 we emit B, and update LR if the instruction is BL. */
2358 emit_b (dsd->insn_buf, 0, new_offset);
2359 dsd->insn_count++;
2360 }
2361 else
2362 {
2363 /* Write NOP. */
2364 emit_nop (dsd->insn_buf);
2365 dsd->insn_count++;
2366 dsd->dsc->pc_adjust = offset;
2367 }
2368
2369 if (is_bl)
2370 {
2371 /* Update LR. */
2372 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2373 data->insn_addr + 4);
2374 }
2375 }
2376
2377 /* Implementation of aarch64_insn_visitor method "b_cond". */
2378
2379 static void
2380 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2381 struct aarch64_insn_data *data)
2382 {
2383 struct aarch64_displaced_step_data *dsd
2384 = (struct aarch64_displaced_step_data *) data;
2385
  /* GDB has to fix up the PC after displaced stepping this instruction
     differently according to whether the condition is true or false.
     Instead of checking COND against the condition flags, we can emit
     the following instructions, and then GDB can tell how to fix up
     the PC from the resulting PC value.
2391
2392 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2393 INSN1 ;
2394 TAKEN:
2395 INSN2
2396 */
2397
2398 emit_bcond (dsd->insn_buf, cond, 8);
2399 dsd->dsc->cond = 1;
2400 dsd->dsc->pc_adjust = offset;
2401 dsd->insn_count = 1;
2402 }
2403
/* Build an aarch64_register operand dynamically.  If the register is
   known statically, it should be made a global as above instead of
   using this helper function.  */
2407
2408 static struct aarch64_register
2409 aarch64_register (unsigned num, int is64)
2410 {
2411 return (struct aarch64_register) { num, is64 };
2412 }
2413
2414 /* Implementation of aarch64_insn_visitor method "cb". */
2415
2416 static void
2417 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2418 const unsigned rn, int is64,
2419 struct aarch64_insn_data *data)
2420 {
2421 struct aarch64_displaced_step_data *dsd
2422 = (struct aarch64_displaced_step_data *) data;
2423
2424 /* The offset is out of range for a compare and branch
2425 instruction. We can use the following instructions instead:
2426
2427 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2428 INSN1 ;
2429 TAKEN:
2430 INSN2
2431 */
2432 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2433 dsd->insn_count = 1;
2434 dsd->dsc->cond = 1;
2435 dsd->dsc->pc_adjust = offset;
2436 }
2437
2438 /* Implementation of aarch64_insn_visitor method "tb". */
2439
2440 static void
2441 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2442 const unsigned rt, unsigned bit,
2443 struct aarch64_insn_data *data)
2444 {
2445 struct aarch64_displaced_step_data *dsd
2446 = (struct aarch64_displaced_step_data *) data;
2447
  /* The offset is out of range for a test bit and branch
     instruction.  We can use the following instructions instead:
2450
2451 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2452 INSN1 ;
2453 TAKEN:
2454 INSN2
  */
2457 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2458 dsd->insn_count = 1;
2459 dsd->dsc->cond = 1;
2460 dsd->dsc->pc_adjust = offset;
2461 }
2462
2463 /* Implementation of aarch64_insn_visitor method "adr". */
2464
2465 static void
2466 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2467 const int is_adrp, struct aarch64_insn_data *data)
2468 {
2469 struct aarch64_displaced_step_data *dsd
2470 = (struct aarch64_displaced_step_data *) data;
2471 /* We know exactly the address the ADR{P,} instruction will compute.
2472 We can just write it to the destination register. */
2473 CORE_ADDR address = data->insn_addr + offset;
2474
2475 if (is_adrp)
2476 {
2477 /* Clear the lower 12 bits of the offset to get the 4K page. */
2478 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2479 address & ~0xfff);
2480 }
2481 else
2482 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2483 address);
2484
2485 dsd->dsc->pc_adjust = 4;
2486 emit_nop (dsd->insn_buf);
2487 dsd->insn_count = 1;
2488 }
2489
2490 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2491
2492 static void
2493 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2494 const unsigned rt, const int is64,
2495 struct aarch64_insn_data *data)
2496 {
2497 struct aarch64_displaced_step_data *dsd
2498 = (struct aarch64_displaced_step_data *) data;
2499 CORE_ADDR address = data->insn_addr + offset;
2500 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2501
2502 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2503 address);
2504
2505 if (is_sw)
2506 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2507 aarch64_register (rt, 1), zero);
2508 else
2509 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2510 aarch64_register (rt, 1), zero);
2511
2512 dsd->dsc->pc_adjust = 4;
2513 }
2514
2515 /* Implementation of aarch64_insn_visitor method "others". */
2516
2517 static void
2518 aarch64_displaced_step_others (const uint32_t insn,
2519 struct aarch64_insn_data *data)
2520 {
2521 struct aarch64_displaced_step_data *dsd
2522 = (struct aarch64_displaced_step_data *) data;
2523
2524 aarch64_emit_insn (dsd->insn_buf, insn);
2525 dsd->insn_count = 1;
2526
2527 if ((insn & 0xfffffc1f) == 0xd65f0000)
2528 {
2529 /* RET */
2530 dsd->dsc->pc_adjust = 0;
2531 }
2532 else
2533 dsd->dsc->pc_adjust = 4;
2534 }
2535
2536 static const struct aarch64_insn_visitor visitor =
2537 {
2538 aarch64_displaced_step_b,
2539 aarch64_displaced_step_b_cond,
2540 aarch64_displaced_step_cb,
2541 aarch64_displaced_step_tb,
2542 aarch64_displaced_step_adr,
2543 aarch64_displaced_step_ldr_literal,
2544 aarch64_displaced_step_others,
2545 };
2546
2547 /* Implement the "displaced_step_copy_insn" gdbarch method. */
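/* Copy the instruction at FROM to the scratch pad at TO, relocating
   any PC-relative operands via the visitor callbacks above, and return
   a closure describing how to fix up the PC afterwards, or NULL if the
   instruction cannot be displaced stepped.  */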
2548
2549 struct displaced_step_closure *
2550 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2551 CORE_ADDR from, CORE_ADDR to,
2552 struct regcache *regs)
2553 {
2554 struct displaced_step_closure *dsc = NULL;
2555 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2556 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2557 struct aarch64_displaced_step_data dsd;
2558 aarch64_inst inst;
2559
2560 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2561 return NULL;
2562
2563 /* Look for a Load Exclusive instruction which begins the sequence. */
2564 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2565 {
      /* Displaced stepping can't be used on atomic sequences.  */
2567 return NULL;
2568 }
2569
2570 dsc = XCNEW (struct displaced_step_closure);
2571 dsd.base.insn_addr = from;
2572 dsd.new_addr = to;
2573 dsd.regs = regs;
2574 dsd.dsc = dsc;
2575 dsd.insn_count = 0;
2576 aarch64_relocate_instruction (insn, &visitor,
2577 (struct aarch64_insn_data *) &dsd);
2578 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2579
2580 if (dsd.insn_count != 0)
2581 {
2582 int i;
2583
      /* The instruction can be relocated to the scratch pad.  Copy
	 the relocated instruction(s) there.  */
2586 for (i = 0; i < dsd.insn_count; i++)
2587 {
2588 if (debug_displaced)
2589 {
2590 debug_printf ("displaced: writing insn ");
2591 debug_printf ("%.8x", dsd.insn_buf[i]);
2592 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2593 }
2594 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2595 (ULONGEST) dsd.insn_buf[i]);
2596 }
2597 }
2598 else
2599 {
2600 xfree (dsc);
2601 dsc = NULL;
2602 }
2603
2604 return dsc;
2605 }
2606
2607 /* Implement the "displaced_step_fixup" gdbarch method. */
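/* For a conditional instruction the scratch pad was filled with a
   B.COND/CBZ/TBZ that skips one slot, so after execution the PC is
   either TO + 8 (condition true) or TO + 4 (condition false); that
   difference tells us which PC adjustment to apply.  */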
2608
2609 void
2610 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2611 struct displaced_step_closure *dsc,
2612 CORE_ADDR from, CORE_ADDR to,
2613 struct regcache *regs)
2614 {
2615 if (dsc->cond)
2616 {
2617 ULONGEST pc;
2618
2619 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2620 if (pc - to == 8)
2621 {
2622 /* Condition is true. */
2623 }
2624 else if (pc - to == 4)
2625 {
2626 /* Condition is false. */
2627 dsc->pc_adjust = 4;
2628 }
2629 else
2630 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2631 }
2632
2633 if (dsc->pc_adjust != 0)
2634 {
2635 if (debug_displaced)
2636 {
2637 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2638 paddress (gdbarch, from), dsc->pc_adjust);
2639 }
2640 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2641 from + dsc->pc_adjust);
2642 }
2643 }
2644
2645 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2646
2647 int
2648 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2649 struct displaced_step_closure *closure)
2650 {
2651 return 1;
2652 }
2653
2654 /* Initialize the current architecture based on INFO. If possible,
2655 re-use an architecture from ARCHES, which is a list of
2656 architectures already created during this debugging session.
2657
2658 Called e.g. at program startup, when reading a core file, and when
2659 reading a binary file. */
2660
2661 static struct gdbarch *
2662 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2663 {
2664 struct gdbarch_tdep *tdep;
2665 struct gdbarch *gdbarch;
2666 struct gdbarch_list *best_arch;
2667 struct tdesc_arch_data *tdesc_data = NULL;
2668 const struct target_desc *tdesc = info.target_desc;
2669 int i;
2670 int valid_p = 1;
2671 const struct tdesc_feature *feature;
2672 int num_regs = 0;
2673 int num_pseudo_regs = 0;
2674
2675 /* Ensure we always have a target descriptor. */
2676 if (!tdesc_has_registers (tdesc))
2677 tdesc = tdesc_aarch64;
2678
2679 gdb_assert (tdesc);
2680
2681 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2682
2683 if (feature == NULL)
2684 return NULL;
2685
2686 tdesc_data = tdesc_data_alloc ();
2687
  /* Validate that the descriptor provides the mandatory core R
     registers and allocate their numbers.  */
2690 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2691 valid_p &=
2692 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2693 aarch64_r_register_names[i]);
2694
2695 num_regs = AARCH64_X0_REGNUM + i;
2696
2697 /* Look for the V registers. */
2698 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2699 if (feature)
2700 {
      /* Validate that the descriptor provides the mandatory V registers
	 and allocate their numbers.  */
2703 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2704 valid_p &=
2705 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2706 aarch64_v_register_names[i]);
2707
2708 num_regs = AARCH64_V0_REGNUM + i;
2709
2710 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2711 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2712 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2713 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2714 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2715 }
2716
2717 if (!valid_p)
2718 {
2719 tdesc_data_cleanup (tdesc_data);
2720 return NULL;
2721 }
2722
2723 /* AArch64 code is always little-endian. */
2724 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2725
2726 /* If there is already a candidate, use it. */
2727 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2728 best_arch != NULL;
2729 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2730 {
2731 /* Found a match. */
2732 break;
2733 }
2734
2735 if (best_arch != NULL)
2736 {
2737 if (tdesc_data != NULL)
2738 tdesc_data_cleanup (tdesc_data);
2739 return best_arch->gdbarch;
2740 }
2741
2742 tdep = XCNEW (struct gdbarch_tdep);
2743 gdbarch = gdbarch_alloc (&info, tdep);
2744
2745 /* This should be low enough for everything. */
2746 tdep->lowest_pc = 0x20;
2747 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2748 tdep->jb_elt_size = 8;
2749
2750 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2751 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2752
2753 /* Frame handling. */
2754 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2755 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2756 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2757
2758 /* Advance PC across function entry code. */
2759 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2760
2761 /* The stack grows downward. */
2762 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2763
2764 /* Breakpoint manipulation. */
2765 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2766 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2767 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2768
2769 /* Information about registers, etc. */
2770 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2771 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2772 set_gdbarch_num_regs (gdbarch, num_regs);
2773
2774 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2775 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2776 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2777 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2778 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2779 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2780 aarch64_pseudo_register_reggroup_p);
2781
2782 /* ABI */
2783 set_gdbarch_short_bit (gdbarch, 16);
2784 set_gdbarch_int_bit (gdbarch, 32);
2785 set_gdbarch_float_bit (gdbarch, 32);
2786 set_gdbarch_double_bit (gdbarch, 64);
2787 set_gdbarch_long_double_bit (gdbarch, 128);
2788 set_gdbarch_long_bit (gdbarch, 64);
2789 set_gdbarch_long_long_bit (gdbarch, 64);
2790 set_gdbarch_ptr_bit (gdbarch, 64);
2791 set_gdbarch_char_signed (gdbarch, 0);
2792 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2793 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2794 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2795
2796 /* Internal <-> external register number maps. */
2797 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2798
2799 /* Returning results. */
2800 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2801
2802 /* Disassembly. */
2803 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2804
2805 /* Virtual tables. */
2806 set_gdbarch_vbit_in_delta (gdbarch, 1);
2807
2808 /* Hook in the ABI-specific overrides, if they have been registered. */
2809 info.target_desc = tdesc;
2810 info.tdep_info = (void *) tdesc_data;
2811 gdbarch_init_osabi (info, gdbarch);
2812
2813 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2814
2815 /* Add some default predicates. */
2816 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2817 dwarf2_append_unwinders (gdbarch);
2818 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2819
2820 frame_base_set_default (gdbarch, &aarch64_normal_base);
2821
  /* Now that we have tuned the configuration, set a few final things
     based on what the OS ABI has told us.  */
2824
2825 if (tdep->jb_pc >= 0)
2826 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2827
2828 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2829
2830 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2831
2832 /* Add standard register aliases. */
2833 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2834 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2835 value_of_aarch64_user_reg,
2836 &aarch64_register_aliases[i].regnum);
2837
2838 return gdbarch;
2839 }
2840
2841 static void
2842 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2843 {
2844 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2845
2846 if (tdep == NULL)
2847 return;
2848
2849 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2850 paddress (gdbarch, tdep->lowest_pc));
2851 }
2852
2853 /* Suppress warning from -Wmissing-prototypes. */
2854 extern initialize_file_ftype _initialize_aarch64_tdep;
2855
2856 void
2857 _initialize_aarch64_tdep (void)
2858 {
2859 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2860 aarch64_dump_tdep);
2861
2862 initialize_tdesc_aarch64 ();
2863
2864 /* Debug this file's internals. */
2865 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2866 Set AArch64 debugging."), _("\
2867 Show AArch64 debugging."), _("\
2868 When on, AArch64 specific debugging is enabled."),
2869 NULL,
2870 show_aarch64_debug,
2871 &setdebuglist, &showdebuglist);
2872 }
2873
2874 /* AArch64 process record-replay related structures, defines etc. */
2875
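/* Allocate an array of LENGTH uint32_t register numbers and copy them
   into it from RECORD_BUF; a no-op when LENGTH is zero.  */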
2876 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2877 do \
2878 { \
2879 unsigned int reg_len = LENGTH; \
2880 if (reg_len) \
2881 { \
2882 REGS = XNEWVEC (uint32_t, reg_len); \
        memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
2884 } \
2885 } \
2886 while (0)
2887
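/* Likewise for memory records: allocate LENGTH aarch64_mem_r entries
   and copy them in from RECORD_BUF.  */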
2888 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2889 do \
2890 { \
2891 unsigned int mem_len = LENGTH; \
2892 if (mem_len) \
2893 { \
2894 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
        memcpy (&MEMS->len, &RECORD_BUF[0], \
                sizeof (struct aarch64_mem_r) * LENGTH); \
2897 } \
2898 } \
2899 while (0)
2900
2901 /* AArch64 record/replay structures and enumerations. */
2902
2903 struct aarch64_mem_r
2904 {
2905 uint64_t len; /* Record length. */
2906 uint64_t addr; /* Memory address. */
2907 };
2908
2909 enum aarch64_record_result
2910 {
2911 AARCH64_RECORD_SUCCESS,
2912 AARCH64_RECORD_FAILURE,
2913 AARCH64_RECORD_UNSUPPORTED,
2914 AARCH64_RECORD_UNKNOWN
2915 };
2916
2917 typedef struct insn_decode_record_t
2918 {
2919 struct gdbarch *gdbarch;
2920 struct regcache *regcache;
2921 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2922 uint32_t aarch64_insn; /* Insn to be recorded. */
2923 uint32_t mem_rec_count; /* Count of memory records. */
2924 uint32_t reg_rec_count; /* Count of register records. */
2925 uint32_t *aarch64_regs; /* Registers to be recorded. */
2926 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2927 } insn_decode_record;
2928
2929 /* Record handler for data processing - register instructions. */
2930
2931 static unsigned int
2932 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2933 {
2934 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2935 uint32_t record_buf[4];
2936
2937 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2938 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2939 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2940
2941 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2942 {
2943 uint8_t setflags;
2944
2945 /* Logical (shifted register). */
2946 if (insn_bits24_27 == 0x0a)
2947 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2948 /* Add/subtract. */
2949 else if (insn_bits24_27 == 0x0b)
2950 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2951 else
2952 return AARCH64_RECORD_UNKNOWN;
2953
2954 record_buf[0] = reg_rd;
2955 aarch64_insn_r->reg_rec_count = 1;
2956 if (setflags)
2957 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2958 }
2959 else
2960 {
2961 if (insn_bits24_27 == 0x0b)
2962 {
2963 /* Data-processing (3 source). */
2964 record_buf[0] = reg_rd;
2965 aarch64_insn_r->reg_rec_count = 1;
2966 }
2967 else if (insn_bits24_27 == 0x0a)
2968 {
2969 if (insn_bits21_23 == 0x00)
2970 {
2971 /* Add/subtract (with carry). */
2972 record_buf[0] = reg_rd;
2973 aarch64_insn_r->reg_rec_count = 1;
2974 if (bit (aarch64_insn_r->aarch64_insn, 29))
2975 {
2976 record_buf[1] = AARCH64_CPSR_REGNUM;
2977 aarch64_insn_r->reg_rec_count = 2;
2978 }
2979 }
2980 else if (insn_bits21_23 == 0x02)
2981 {
2982 /* Conditional compare (register) and conditional compare
2983 (immediate) instructions. */
2984 record_buf[0] = AARCH64_CPSR_REGNUM;
2985 aarch64_insn_r->reg_rec_count = 1;
2986 }
2987 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2988 {
	      /* Conditional select. */
2990 /* Data-processing (2 source). */
2991 /* Data-processing (1 source). */
2992 record_buf[0] = reg_rd;
2993 aarch64_insn_r->reg_rec_count = 1;
2994 }
2995 else
2996 return AARCH64_RECORD_UNKNOWN;
2997 }
2998 }
2999
3000 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3001 record_buf);
3002 return AARCH64_RECORD_SUCCESS;
3003 }
3004
3005 /* Record handler for data processing - immediate instructions. */
3006
3007 static unsigned int
3008 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3009 {
3010 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3011 uint32_t record_buf[4];
3012
3013 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3014 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3015 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3016
3017 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3018 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3019 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3020 {
3021 record_buf[0] = reg_rd;
3022 aarch64_insn_r->reg_rec_count = 1;
3023 }
3024 else if (insn_bits24_27 == 0x01)
3025 {
3026 /* Add/Subtract (immediate). */
3027 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3028 record_buf[0] = reg_rd;
3029 aarch64_insn_r->reg_rec_count = 1;
3030 if (setflags)
3031 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3032 }
3033 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3034 {
3035 /* Logical (immediate). */
3036 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3037 record_buf[0] = reg_rd;
3038 aarch64_insn_r->reg_rec_count = 1;
3039 if (setflags)
3040 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3041 }
3042 else
3043 return AARCH64_RECORD_UNKNOWN;
3044
3045 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3046 record_buf);
3047 return AARCH64_RECORD_SUCCESS;
3048 }
3049
3050 /* Record handler for branch, exception generation and system instructions. */
3051
3052 static unsigned int
3053 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3054 {
3055 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3056 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3057 uint32_t record_buf[4];
3058
3059 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3060 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3061 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3062
3063 if (insn_bits28_31 == 0x0d)
3064 {
3065 /* Exception generation instructions. */
3066 if (insn_bits24_27 == 0x04)
3067 {
3068 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3069 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3070 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3071 {
3072 ULONGEST svc_number;
3073
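	      /* The syscall number is expected in x8; the OS-ABI
		 specific record hook decides what the syscall
		 changes.  */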
3074 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3075 &svc_number);
3076 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3077 svc_number);
3078 }
3079 else
3080 return AARCH64_RECORD_UNSUPPORTED;
3081 }
3082 /* System instructions. */
3083 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3084 {
3085 uint32_t reg_rt, reg_crn;
3086
3087 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3088 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3089
	  /* Record Rt for SYSL and MRS instructions. */
3091 if (bit (aarch64_insn_r->aarch64_insn, 21))
3092 {
3093 record_buf[0] = reg_rt;
3094 aarch64_insn_r->reg_rec_count = 1;
3095 }
	  /* Record CPSR for HINT and MSR (immediate) instructions. */
3097 else if (reg_crn == 0x02 || reg_crn == 0x04)
3098 {
3099 record_buf[0] = AARCH64_CPSR_REGNUM;
3100 aarch64_insn_r->reg_rec_count = 1;
3101 }
3102 }
3103 /* Unconditional branch (register). */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
3105 {
3106 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3107 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3108 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3109 }
3110 else
3111 return AARCH64_RECORD_UNKNOWN;
3112 }
3113 /* Unconditional branch (immediate). */
3114 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3115 {
3116 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3117 if (bit (aarch64_insn_r->aarch64_insn, 31))
3118 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3119 }
3120 else
3121 /* Compare & branch (immediate), Test & branch (immediate) and
3122 Conditional branch (immediate). */
3123 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3124
3125 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3126 record_buf);
3127 return AARCH64_RECORD_SUCCESS;
3128 }
3129
3130 /* Record handler for advanced SIMD load and store instructions. */
3131
3132 static unsigned int
3133 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3134 {
3135 CORE_ADDR address;
3136 uint64_t addr_offset = 0;
3137 uint32_t record_buf[24];
3138 uint64_t record_buf_mem[24];
3139 uint32_t reg_rn, reg_rt;
3140 uint32_t reg_index = 0, mem_index = 0;
3141 uint8_t opcode_bits, size_bits;
3142
3143 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3144 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3145 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3146 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3147 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3148
3149 if (record_debug)
3150 debug_printf ("Process record: Advanced SIMD load/store\n");
3151
3152 /* Load/store single structure. */
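  /* Here the element size is 8 << scale bits and SELEM is the number
     of consecutive registers accessed; an LD2 to one lane, for
     instance, transfers one element each into two consecutive V
     registers.  */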
3153 if (bit (aarch64_insn_r->aarch64_insn, 24))
3154 {
3155 uint8_t sindex, scale, selem, esize, replicate = 0;
3156 scale = opcode_bits >> 2;
3157 selem = ((opcode_bits & 0x02) |
3158 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3159 switch (scale)
3160 {
3161 case 1:
3162 if (size_bits & 0x01)
3163 return AARCH64_RECORD_UNKNOWN;
3164 break;
3165 case 2:
3166 if ((size_bits >> 1) & 0x01)
3167 return AARCH64_RECORD_UNKNOWN;
3168 if (size_bits & 0x01)
3169 {
3170 if (!((opcode_bits >> 1) & 0x01))
3171 scale = 3;
3172 else
3173 return AARCH64_RECORD_UNKNOWN;
3174 }
3175 break;
3176 case 3:
3177 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3178 {
3179 scale = size_bits;
3180 replicate = 1;
3181 break;
3182 }
3183 else
3184 return AARCH64_RECORD_UNKNOWN;
3185 default:
3186 break;
3187 }
3188 esize = 8 << scale;
3189 if (replicate)
3190 for (sindex = 0; sindex < selem; sindex++)
3191 {
3192 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3193 reg_rt = (reg_rt + 1) % 32;
3194 }
3195 else
3196 {
3197 for (sindex = 0; sindex < selem; sindex++)
3198 {
3199 if (bit (aarch64_insn_r->aarch64_insn, 22))
3200 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3201 else
3202 {
3203 record_buf_mem[mem_index++] = esize / 8;
3204 record_buf_mem[mem_index++] = address + addr_offset;
3205 }
3206 addr_offset = addr_offset + (esize / 8);
3207 reg_rt = (reg_rt + 1) % 32;
3208 }
3209 }
3210 }
3211 /* Load/store multiple structure. */
3212 else
3213 {
3214 uint8_t selem, esize, rpt, elements;
3215 uint8_t eindex, rindex;
3216
3217 esize = 8 << size_bits;
3218 if (bit (aarch64_insn_r->aarch64_insn, 30))
3219 elements = 128 / esize;
3220 else
3221 elements = 64 / esize;
3222
3223 switch (opcode_bits)
3224 {
	/* LD/ST4 (4 Registers). */
3226 case 0:
3227 rpt = 1;
3228 selem = 4;
3229 break;
	/* LD/ST1 (4 Registers). */
3231 case 2:
3232 rpt = 4;
3233 selem = 1;
3234 break;
	/* LD/ST3 (3 Registers). */
3236 case 4:
3237 rpt = 1;
3238 selem = 3;
3239 break;
	/* LD/ST1 (3 Registers). */
3241 case 6:
3242 rpt = 3;
3243 selem = 1;
3244 break;
	/* LD/ST1 (1 Register). */
3246 case 7:
3247 rpt = 1;
3248 selem = 1;
3249 break;
	/* LD/ST2 (2 Registers). */
3251 case 8:
3252 rpt = 1;
3253 selem = 2;
3254 break;
	/* LD/ST1 (2 Registers). */
3256 case 10:
3257 rpt = 2;
3258 selem = 1;
3259 break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
3263 }
3264 for (rindex = 0; rindex < rpt; rindex++)
3265 for (eindex = 0; eindex < elements; eindex++)
3266 {
3267 uint8_t reg_tt, sindex;
3268 reg_tt = (reg_rt + rindex) % 32;
3269 for (sindex = 0; sindex < selem; sindex++)
3270 {
3271 if (bit (aarch64_insn_r->aarch64_insn, 22))
3272 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3273 else
3274 {
3275 record_buf_mem[mem_index++] = esize / 8;
3276 record_buf_mem[mem_index++] = address + addr_offset;
3277 }
3278 addr_offset = addr_offset + (esize / 8);
3279 reg_tt = (reg_tt + 1) % 32;
3280 }
3281 }
3282 }
3283
3284 if (bit (aarch64_insn_r->aarch64_insn, 23))
3285 record_buf[reg_index++] = reg_rn;
3286
3287 aarch64_insn_r->reg_rec_count = reg_index;
3288 aarch64_insn_r->mem_rec_count = mem_index / 2;
3289 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3290 record_buf_mem);
3291 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3292 record_buf);
3293 return AARCH64_RECORD_SUCCESS;
3294 }
3295
3296 /* Record handler for load and store instructions. */
3297
3298 static unsigned int
3299 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3300 {
3301 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3302 uint8_t insn_bit23, insn_bit21;
3303 uint8_t opc, size_bits, ld_flag, vector_flag;
3304 uint32_t reg_rn, reg_rt, reg_rt2;
3305 uint64_t datasize, offset;
3306 uint32_t record_buf[8];
3307 uint64_t record_buf_mem[8];
3308 CORE_ADDR address;
3309
3310 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3311 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3312 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3313 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3314 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3315 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3316 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3317 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3318 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3319 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3320 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3321
3322 /* Load/store exclusive. */
3323 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3324 {
3325 if (record_debug)
3326 debug_printf ("Process record: load/store exclusive\n");
3327
3328 if (ld_flag)
3329 {
3330 record_buf[0] = reg_rt;
3331 aarch64_insn_r->reg_rec_count = 1;
3332 if (insn_bit21)
3333 {
3334 record_buf[1] = reg_rt2;
3335 aarch64_insn_r->reg_rec_count = 2;
3336 }
3337 }
3338 else
3339 {
3340 if (insn_bit21)
3341 datasize = (8 << size_bits) * 2;
3342 else
3343 datasize = (8 << size_bits);
3344 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3345 &address);
3346 record_buf_mem[0] = datasize / 8;
3347 record_buf_mem[1] = address;
3348 aarch64_insn_r->mem_rec_count = 1;
3349 if (!insn_bit23)
3350 {
3351 /* Save register rs. */
3352 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3353 aarch64_insn_r->reg_rec_count = 1;
3354 }
3355 }
3356 }
  /* Decode load register (literal) instructions. */
3358 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3359 {
3360 if (record_debug)
3361 debug_printf ("Process record: load register (literal)\n");
3362 if (vector_flag)
3363 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3364 else
3365 record_buf[0] = reg_rt;
3366 aarch64_insn_r->reg_rec_count = 1;
3367 }
  /* Decode all types of load/store pair instructions. */
3369 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3370 {
3371 if (record_debug)
3372 debug_printf ("Process record: load/store pair\n");
3373
3374 if (ld_flag)
3375 {
3376 if (vector_flag)
3377 {
3378 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3379 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3380 }
3381 else
3382 {
3383 record_buf[0] = reg_rt;
3384 record_buf[1] = reg_rt2;
3385 }
3386 aarch64_insn_r->reg_rec_count = 2;
3387 }
3388 else
3389 {
3390 uint16_t imm7_off;
3391 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3392 if (!vector_flag)
3393 size_bits = size_bits >> 1;
3394 datasize = 8 << (2 + size_bits);
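	  /* IMM7 is a signed, scaled offset: sign-extend it from bit 6
	     and scale it by the size of one register of the pair.  */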
3395 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3396 offset = offset << (2 + size_bits);
3397 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3398 &address);
3399 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3400 {
3401 if (imm7_off & 0x40)
3402 address = address - offset;
3403 else
3404 address = address + offset;
3405 }
3406
3407 record_buf_mem[0] = datasize / 8;
3408 record_buf_mem[1] = address;
3409 record_buf_mem[2] = datasize / 8;
3410 record_buf_mem[3] = address + (datasize / 8);
3411 aarch64_insn_r->mem_rec_count = 2;
3412 }
3413 if (bit (aarch64_insn_r->aarch64_insn, 23))
3414 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3415 }
3416 /* Load/store register (unsigned immediate) instructions. */
3417 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3418 {
3419 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else if (size_bits != 0x03)
	ld_flag = 0x01;
      else
	return AARCH64_RECORD_UNKNOWN;
3430
3431 if (record_debug)
3432 {
3433 debug_printf ("Process record: load/store (unsigned immediate):"
3434 " size %x V %d opc %x\n", size_bits, vector_flag,
3435 opc);
3436 }
3437
3438 if (!ld_flag)
3439 {
3440 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3441 datasize = 8 << size_bits;
3442 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3443 &address);
3444 offset = offset << size_bits;
3445 address = address + offset;
3446
3447 record_buf_mem[0] = datasize >> 3;
3448 record_buf_mem[1] = address;
3449 aarch64_insn_r->mem_rec_count = 1;
3450 }
3451 else
3452 {
3453 if (vector_flag)
3454 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3455 else
3456 record_buf[0] = reg_rt;
3457 aarch64_insn_r->reg_rec_count = 1;
3458 }
3459 }
3460 /* Load/store register (register offset) instructions. */
3461 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3462 && insn_bits10_11 == 0x02 && insn_bit21)
3463 {
3464 if (record_debug)
3465 debug_printf ("Process record: load/store (register offset)\n");
3466 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else if (size_bits != 0x03)
	ld_flag = 0x01;
      else
	return AARCH64_RECORD_UNKNOWN;
3477
3478 if (!ld_flag)
3479 {
3480 ULONGEST reg_rm_val;
3481
3482 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3483 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3484 if (bit (aarch64_insn_r->aarch64_insn, 12))
3485 offset = reg_rm_val << size_bits;
3486 else
3487 offset = reg_rm_val;
3488 datasize = 8 << size_bits;
3489 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3490 &address);
3491 address = address + offset;
3492 record_buf_mem[0] = datasize >> 3;
3493 record_buf_mem[1] = address;
3494 aarch64_insn_r->mem_rec_count = 1;
3495 }
3496 else
3497 {
3498 if (vector_flag)
3499 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3500 else
3501 record_buf[0] = reg_rt;
3502 aarch64_insn_r->reg_rec_count = 1;
3503 }
3504 }
3505 /* Load/store register (immediate and unprivileged) instructions. */
3506 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3507 && !insn_bit21)
3508 {
3509 if (record_debug)
3510 {
3511 debug_printf ("Process record: load/store "
3512 "(immediate and unprivileged)\n");
3513 }
3514 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else if (size_bits != 0x03)
	ld_flag = 0x01;
      else
	return AARCH64_RECORD_UNKNOWN;
3525
3526 if (!ld_flag)
3527 {
3528 uint16_t imm9_off;
3529 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3530 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3531 datasize = 8 << size_bits;
3532 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3533 &address);
3534 if (insn_bits10_11 != 0x01)
3535 {
3536 if (imm9_off & 0x0100)
3537 address = address - offset;
3538 else
3539 address = address + offset;
3540 }
3541 record_buf_mem[0] = datasize >> 3;
3542 record_buf_mem[1] = address;
3543 aarch64_insn_r->mem_rec_count = 1;
3544 }
3545 else
3546 {
3547 if (vector_flag)
3548 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3549 else
3550 record_buf[0] = reg_rt;
3551 aarch64_insn_r->reg_rec_count = 1;
3552 }
3553 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3554 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3555 }
3556 /* Advanced SIMD load/store instructions. */
3557 else
3558 return aarch64_record_asimd_load_store (aarch64_insn_r);
3559
3560 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3561 record_buf_mem);
3562 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3563 record_buf);
3564 return AARCH64_RECORD_SUCCESS;
3565 }
3566
3567 /* Record handler for data processing SIMD and floating point instructions. */
3568
3569 static unsigned int
3570 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3571 {
3572 uint8_t insn_bit21, opcode, rmode, reg_rd;
3573 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3574 uint8_t insn_bits11_14;
3575 uint32_t record_buf[2];
3576
3577 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3578 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3579 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3580 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3581 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3582 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3583 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3584 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3585 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3586
3587 if (record_debug)
3588 debug_printf ("Process record: data processing SIMD/FP: ");
3589
3590 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3591 {
3592 /* Floating point - fixed point conversion instructions. */
3593 if (!insn_bit21)
3594 {
3595 if (record_debug)
3596 debug_printf ("FP - fixed point conversion");
3597
3598 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3599 record_buf[0] = reg_rd;
3600 else
3601 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3602 }
3603 /* Floating point - conditional compare instructions. */
3604 else if (insn_bits10_11 == 0x01)
3605 {
3606 if (record_debug)
3607 debug_printf ("FP - conditional compare");
3608
3609 record_buf[0] = AARCH64_CPSR_REGNUM;
3610 }
3611 /* Floating point - data processing (2-source) and
3612 conditional select instructions. */
3613 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3614 {
3615 if (record_debug)
3616 debug_printf ("FP - DP (2-source)");
3617
3618 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3619 }
3620 else if (insn_bits10_11 == 0x00)
3621 {
3622 /* Floating point - immediate instructions. */
3623 if ((insn_bits12_15 & 0x01) == 0x01
3624 || (insn_bits12_15 & 0x07) == 0x04)
3625 {
3626 if (record_debug)
3627 debug_printf ("FP - immediate");
3628 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3629 }
3630 /* Floating point - compare instructions. */
3631 else if ((insn_bits12_15 & 0x03) == 0x02)
3632 {
3633 if (record_debug)
	    debug_printf ("FP - compare");
3635 record_buf[0] = AARCH64_CPSR_REGNUM;
3636 }
3637 /* Floating point - integer conversions instructions. */
3638 else if (insn_bits12_15 == 0x00)
3639 {
3640 /* Convert float to integer instruction. */
3641 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3642 {
3643 if (record_debug)
3644 debug_printf ("float to int conversion");
3645
3646 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3647 }
3648 /* Convert integer to float instruction. */
3649 else if ((opcode >> 1) == 0x01 && !rmode)
3650 {
3651 if (record_debug)
3652 debug_printf ("int to float conversion");
3653
3654 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3655 }
3656 /* Move float to integer instruction. */
3657 else if ((opcode >> 1) == 0x03)
3658 {
3659 if (record_debug)
3660 debug_printf ("move float to int");
3661
3662 if (!(opcode & 0x01))
3663 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3664 else
3665 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3666 }
3667 else
3668 return AARCH64_RECORD_UNKNOWN;
3669 }
3670 else
3671 return AARCH64_RECORD_UNKNOWN;
3672 }
3673 else
3674 return AARCH64_RECORD_UNKNOWN;
3675 }
3676 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3677 {
3678 if (record_debug)
3679 debug_printf ("SIMD copy");
3680
3681 /* Advanced SIMD copy instructions. */
3682 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3683 && !bit (aarch64_insn_r->aarch64_insn, 15)
3684 && bit (aarch64_insn_r->aarch64_insn, 10))
3685 {
3686 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3687 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3688 else
3689 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3690 }
3691 else
3692 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3693 }
3694 /* All remaining floating point or advanced SIMD instructions. */
3695 else
3696 {
3697 if (record_debug)
	debug_printf ("all remaining FP/SIMD");
3699
3700 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3701 }
3702
3703 if (record_debug)
3704 debug_printf ("\n");
3705
3706 aarch64_insn_r->reg_rec_count++;
3707 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3708 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3709 record_buf);
3710 return AARCH64_RECORD_SUCCESS;
3711 }
3712
/* Decode the instruction type and invoke the matching record handler. */
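/* The dispatch below keys on bits 25-28, which select the top-level
   encoding group of an A64 instruction (data processing immediate,
   branch/exception/system, loads and stores, data processing register,
   or SIMD and floating point).  */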
3714
3715 static unsigned int
3716 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3717 {
3718 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3719
3720 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3721 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3722 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3723 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3724
3725 /* Data processing - immediate instructions. */
3726 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3727 return aarch64_record_data_proc_imm (aarch64_insn_r);
3728
3729 /* Branch, exception generation and system instructions. */
3730 if (ins_bit26 && !ins_bit27 && ins_bit28)
3731 return aarch64_record_branch_except_sys (aarch64_insn_r);
3732
3733 /* Load and store instructions. */
3734 if (!ins_bit25 && ins_bit27)
3735 return aarch64_record_load_store (aarch64_insn_r);
3736
3737 /* Data processing - register instructions. */
3738 if (ins_bit25 && !ins_bit26 && ins_bit27)
3739 return aarch64_record_data_proc_reg (aarch64_insn_r);
3740
3741 /* Data processing - SIMD and floating point instructions. */
3742 if (ins_bit25 && ins_bit26 && ins_bit27)
3743 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3744
3745 return AARCH64_RECORD_UNSUPPORTED;
3746 }
3747
/* Clean up the register and memory allocations made for a record. */
3749
3750 static void
3751 deallocate_reg_mem (insn_decode_record *record)
3752 {
3753 xfree (record->aarch64_regs);
3754 xfree (record->aarch64_mems);
3755 }
3756
/* Parse the current instruction and record the values of the registers
   and memory locations that it will change, adding them to
   record_arch_list.  Return -1 if something goes wrong.  */
3760
3761 int
3762 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3763 CORE_ADDR insn_addr)
3764 {
3765 uint32_t rec_no = 0;
3766 uint8_t insn_size = 4;
3767 uint32_t ret = 0;
3768 gdb_byte buf[insn_size];
3769 insn_decode_record aarch64_record;
3770
3771 memset (&buf[0], 0, insn_size);
3772 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3773 target_read_memory (insn_addr, &buf[0], insn_size);
3774 aarch64_record.aarch64_insn
3775 = (uint32_t) extract_unsigned_integer (&buf[0],
3776 insn_size,
3777 gdbarch_byte_order (gdbarch));
3778 aarch64_record.regcache = regcache;
3779 aarch64_record.this_addr = insn_addr;
3780 aarch64_record.gdbarch = gdbarch;
3781
3782 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3783 if (ret == AARCH64_RECORD_UNSUPPORTED)
3784 {
3785 printf_unfiltered (_("Process record does not support instruction "
3786 "0x%0x at address %s.\n"),
3787 aarch64_record.aarch64_insn,
3788 paddress (gdbarch, insn_addr));
3789 ret = -1;
3790 }
3791
3792 if (0 == ret)
3793 {
3794 /* Record registers. */
3795 record_full_arch_list_add_reg (aarch64_record.regcache,
3796 AARCH64_PC_REGNUM);
3797 /* Always record register CPSR. */
3798 record_full_arch_list_add_reg (aarch64_record.regcache,
3799 AARCH64_CPSR_REGNUM);
3800 if (aarch64_record.aarch64_regs)
3801 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3802 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3803 aarch64_record.aarch64_regs[rec_no]))
3804 ret = -1;
3805
3806 /* Record memories. */
3807 if (aarch64_record.aarch64_mems)
3808 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3809 if (record_full_arch_list_add_mem
3810 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3811 aarch64_record.aarch64_mems[rec_no].len))
3812 ret = -1;
3813
3814 if (record_full_arch_list_add_end ())
3815 ret = -1;
3816 }
3817
3818 deallocate_reg_mem (&aarch64_record);
3819 return ret;
3820 }