1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "features/arm-with-m.c"
59 #include "features/arm-with-iwmmxt.c"
60 #include "features/arm-with-vfpv2.c"
61 #include "features/arm-with-vfpv3.c"
62 #include "features/arm-with-neon.c"
63
64 static int arm_debug;
65
66 /* Macros for setting and testing a bit in a minimal symbol that marks
67 it as a Thumb function. The MSB of the minimal symbol's "info" field
68 is used for this purpose.
69
70 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
71 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
72
73 #define MSYMBOL_SET_SPECIAL(msym) \
74 MSYMBOL_TARGET_FLAG_1 (msym) = 1
75
76 #define MSYMBOL_IS_SPECIAL(msym) \
77 MSYMBOL_TARGET_FLAG_1 (msym)
78
79 /* Per-objfile data used for mapping symbols. */
80 static const struct objfile_data *arm_objfile_data_key;
81
82 struct arm_mapping_symbol
83 {
84 bfd_vma value;
85 char type;
86 };
87 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
88 DEF_VEC_O(arm_mapping_symbol_s);
89
90 struct arm_per_objfile
91 {
92 VEC(arm_mapping_symbol_s) **section_maps;
93 };
94
95 /* The list of available "set arm ..." and "show arm ..." commands. */
96 static struct cmd_list_element *setarmcmdlist = NULL;
97 static struct cmd_list_element *showarmcmdlist = NULL;
98
99 /* The type of floating-point to use. Keep this in sync with enum
100 arm_float_model, and the help string in _initialize_arm_tdep. */
101 static const char *fp_model_strings[] =
102 {
103 "auto",
104 "softfpa",
105 "fpa",
106 "softvfp",
107 "vfp",
108 NULL
109 };
110
111 /* A variable that can be configured by the user. */
112 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
113 static const char *current_fp_model = "auto";
114
115 /* The ABI to use. Keep this in sync with arm_abi_kind. */
116 static const char *arm_abi_strings[] =
117 {
118 "auto",
119 "APCS",
120 "AAPCS",
121 NULL
122 };
123
124 /* A variable that can be configured by the user. */
125 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
126 static const char *arm_abi_string = "auto";
127
128 /* The execution mode to assume. */
129 static const char *arm_mode_strings[] =
130 {
131 "auto",
132 "arm",
133 "thumb",
134 NULL
135 };
136
137 static const char *arm_fallback_mode_string = "auto";
138 static const char *arm_force_mode_string = "auto";
139
140 /* Internal override of the execution mode. -1 means no override,
141 0 means override to ARM mode, 1 means override to Thumb mode.
142 The effect is the same as if arm_force_mode has been set by the
143 user (except the internal override has precedence over a user's
144 arm_force_mode override). */
145 static int arm_override_mode = -1;
146
147 /* Number of different reg name sets (options). */
148 static int num_disassembly_options;
149
150 /* The standard register names, and all the valid aliases for them. Note
151 that `fp', `sp' and `pc' are not added to this alias list, because they
152 have been added as builtin user registers in
153 std-regs.c:_initialize_frame_reg. */
154 static const struct
155 {
156 const char *name;
157 int regnum;
158 } arm_register_aliases[] = {
159 /* Basic register numbers. */
160 { "r0", 0 },
161 { "r1", 1 },
162 { "r2", 2 },
163 { "r3", 3 },
164 { "r4", 4 },
165 { "r5", 5 },
166 { "r6", 6 },
167 { "r7", 7 },
168 { "r8", 8 },
169 { "r9", 9 },
170 { "r10", 10 },
171 { "r11", 11 },
172 { "r12", 12 },
173 { "r13", 13 },
174 { "r14", 14 },
175 { "r15", 15 },
176 /* Synonyms (argument and variable registers). */
177 { "a1", 0 },
178 { "a2", 1 },
179 { "a3", 2 },
180 { "a4", 3 },
181 { "v1", 4 },
182 { "v2", 5 },
183 { "v3", 6 },
184 { "v4", 7 },
185 { "v5", 8 },
186 { "v6", 9 },
187 { "v7", 10 },
188 { "v8", 11 },
189 /* Other platform-specific names for r9. */
190 { "sb", 9 },
191 { "tr", 9 },
192 /* Special names. */
193 { "ip", 12 },
194 { "lr", 14 },
195 /* Names used by GCC (not listed in the ARM EABI). */
196 { "sl", 10 },
197 /* A special name from the older ATPCS. */
198 { "wr", 7 },
199 };
200
201 static const char *const arm_register_names[] =
202 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
203 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
204 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
205 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
206 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
207 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
208 "fps", "cpsr" }; /* 24 25 */
209
210 /* Valid register name styles. */
211 static const char **valid_disassembly_styles;
212
213 /* Disassembly style to use. Default to "std" register names. */
214 static const char *disassembly_style;
215
216 /* This is used to keep the bfd arch_info in sync with the disassembly
217 style. */
218 static void set_disassembly_style_sfunc(char *, int,
219 struct cmd_list_element *);
220 static void set_disassembly_style (void);
221
222 static void convert_from_extended (const struct floatformat *, const void *,
223 void *, int);
224 static void convert_to_extended (const struct floatformat *, void *,
225 const void *, int);
226
227 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
228 struct regcache *regcache,
229 int regnum, gdb_byte *buf);
230 static void arm_neon_quad_write (struct gdbarch *gdbarch,
231 struct regcache *regcache,
232 int regnum, const gdb_byte *buf);
233
234 struct arm_prologue_cache
235 {
236 /* The stack pointer at the time this frame was created; i.e. the
237 caller's stack pointer when this function was called. It is used
238 to identify this frame. */
239 CORE_ADDR prev_sp;
240
241 /* The frame base for this frame is just prev_sp - frame size.
242 FRAMESIZE is the distance from the frame pointer to the
243 initial stack pointer. */
244
245 int framesize;
246
247 /* The register used to hold the frame pointer for this frame. */
248 int framereg;
249
250 /* Saved register offsets. */
251 struct trad_frame_saved_reg *saved_regs;
252 };
253
254 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
255 CORE_ADDR prologue_start,
256 CORE_ADDR prologue_end,
257 struct arm_prologue_cache *cache);
258
259 /* Architecture version for displaced stepping. This affects the behaviour of
260 certain instructions, and really should not be hard-wired. */
261
262 #define DISPLACED_STEPPING_ARCH_VERSION 5
263
264 /* Addresses for calling Thumb functions have bit 0 set.
265 Here are some macros to test, set, or clear bit 0 of addresses. */
266 #define IS_THUMB_ADDR(addr) ((addr) & 1)
267 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
268 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
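/* For example (illustrative values, not taken from the original source):
   MAKE_THUMB_ADDR (0x8000) yields 0x8001, IS_THUMB_ADDR (0x8001) is true,
   and UNMAKE_THUMB_ADDR (0x8001) recovers the plain address 0x8000.  */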
269
270 /* Set to true if the 32-bit mode is in use. */
271
272 int arm_apcs_32 = 1;
273
274 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
275
276 int
277 arm_psr_thumb_bit (struct gdbarch *gdbarch)
278 {
279 if (gdbarch_tdep (gdbarch)->is_m)
280 return XPSR_T;
281 else
282 return CPSR_T;
283 }
284
285 /* Determine if FRAME is executing in Thumb mode. */
286
287 int
288 arm_frame_is_thumb (struct frame_info *frame)
289 {
290 CORE_ADDR cpsr;
291 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
292
293 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
294 directly (from a signal frame or dummy frame) or by interpreting
295 the saved LR (from a prologue or DWARF frame). So consult it and
296 trust the unwinders. */
297 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
298
299 return (cpsr & t_bit) != 0;
300 }
301
302 /* Callback for VEC_lower_bound. */
303
304 static inline int
305 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
306 const struct arm_mapping_symbol *rhs)
307 {
308 return lhs->value < rhs->value;
309 }
310
311 /* Search for the mapping symbol covering MEMADDR. If one is found,
312 return its type. Otherwise, return 0. If START is non-NULL,
313 set *START to the location of the mapping symbol. */
314
315 static char
316 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
317 {
318 struct obj_section *sec;
319
320 /* If there are mapping symbols, consult them. */
321 sec = find_pc_section (memaddr);
322 if (sec != NULL)
323 {
324 struct arm_per_objfile *data;
325 VEC(arm_mapping_symbol_s) *map;
326 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
327 0 };
328 unsigned int idx;
329
330 data = objfile_data (sec->objfile, arm_objfile_data_key);
331 if (data != NULL)
332 {
333 map = data->section_maps[sec->the_bfd_section->index];
334 if (!VEC_empty (arm_mapping_symbol_s, map))
335 {
336 struct arm_mapping_symbol *map_sym;
337
338 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
339 arm_compare_mapping_symbols);
340
341 /* VEC_lower_bound finds the earliest ordered insertion
342 point. If the following symbol starts at this exact
343 address, we use that; otherwise, the preceding
344 mapping symbol covers this address. */
345 if (idx < VEC_length (arm_mapping_symbol_s, map))
346 {
347 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
348 if (map_sym->value == map_key.value)
349 {
350 if (start)
351 *start = map_sym->value + obj_section_addr (sec);
352 return map_sym->type;
353 }
354 }
355
356 if (idx > 0)
357 {
358 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
359 if (start)
360 *start = map_sym->value + obj_section_addr (sec);
361 return map_sym->type;
362 }
363 }
364 }
365 }
366
367 return 0;
368 }
369
370 /* Determine if the program counter specified in MEMADDR is in a Thumb
371 function. This function should be called for addresses unrelated to
372 any executing frame; otherwise, prefer arm_frame_is_thumb. */
373
374 int
375 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
376 {
377 struct obj_section *sec;
378 struct minimal_symbol *sym;
379 char type;
380 struct displaced_step_closure* dsc
381 = get_displaced_step_closure_by_addr(memaddr);
382
383 /* If checking the mode of a displaced instruction in the copy area, the mode
384 should be determined by the instruction at the original address. */
385 if (dsc)
386 {
387 if (debug_displaced)
388 fprintf_unfiltered (gdb_stdlog,
389 "displaced: check mode of %.8lx instead of %.8lx\n",
390 (unsigned long) dsc->insn_addr,
391 (unsigned long) memaddr);
392 memaddr = dsc->insn_addr;
393 }
394
395 /* If bit 0 of the address is set, assume this is a Thumb address. */
396 if (IS_THUMB_ADDR (memaddr))
397 return 1;
398
399 /* Respect internal mode override if active. */
400 if (arm_override_mode != -1)
401 return arm_override_mode;
402
403 /* If the user wants to override the symbol table, let them. */
404 if (strcmp (arm_force_mode_string, "arm") == 0)
405 return 0;
406 if (strcmp (arm_force_mode_string, "thumb") == 0)
407 return 1;
408
409 /* ARM v6-M and v7-M are always in Thumb mode. */
410 if (gdbarch_tdep (gdbarch)->is_m)
411 return 1;
412
413 /* If there are mapping symbols, consult them. */
414 type = arm_find_mapping_symbol (memaddr, NULL);
415 if (type)
416 return type == 't';
417
418 /* Thumb functions have a "special" bit set in minimal symbols. */
419 sym = lookup_minimal_symbol_by_pc (memaddr);
420 if (sym)
421 return (MSYMBOL_IS_SPECIAL (sym));
422
423 /* If the user wants to override the fallback mode, let them. */
424 if (strcmp (arm_fallback_mode_string, "arm") == 0)
425 return 0;
426 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
427 return 1;
428
429 /* If we couldn't find any symbol, but we're talking to a running
430 target, then trust the current value of $cpsr. This lets
431 "display/i $pc" always show the correct mode (though if there is
432 a symbol table we will not reach here, so the code still may not be
433 displayed in the mode in which it will be executed). */
434 if (target_has_registers)
435 return arm_frame_is_thumb (get_current_frame ());
436
437 /* Otherwise we're out of luck; we assume ARM. */
438 return 0;
439 }
440
441 /* Remove useless bits from addresses in a running program. */
442 static CORE_ADDR
443 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
444 {
445 if (arm_apcs_32)
446 return UNMAKE_THUMB_ADDR (val);
447 else
448 return (val & 0x03fffffc);
449 }
450
451 /* When reading symbols, we need to zap the low bit of the address,
452 which may be set to 1 for Thumb functions. */
453 static CORE_ADDR
454 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
455 {
456 return val & ~1;
457 }
458
459 /* Return 1 if PC is the start of a compiler helper function which
460 can be safely ignored during prologue skipping. IS_THUMB is true
461 if the function is known to be a Thumb function due to the way it
462 is being called. */
463 static int
464 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
465 {
466 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
467 struct minimal_symbol *msym;
468
469 msym = lookup_minimal_symbol_by_pc (pc);
470 if (msym != NULL
471 && SYMBOL_VALUE_ADDRESS (msym) == pc
472 && SYMBOL_LINKAGE_NAME (msym) != NULL)
473 {
474 const char *name = SYMBOL_LINKAGE_NAME (msym);
475
476 /* The GNU linker's Thumb call stub to foo is named
477 __foo_from_thumb. */
478 if (strstr (name, "_from_thumb") != NULL)
479 name += 2;
480
481 /* On soft-float targets, __truncdfsf2 is called to convert promoted
482 arguments to their argument types in non-prototyped
483 functions. */
484 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
485 return 1;
486 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
487 return 1;
488
489 /* Internal functions related to thread-local storage. */
490 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
491 return 1;
492 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
493 return 1;
494 }
495 else
496 {
497 /* If we run against a stripped glibc, we may be unable to identify
498 special functions by name. Check for one important case,
499 __aeabi_read_tp, by comparing the *code* against the default
500 implementation (this is hand-written ARM assembler in glibc). */
501
502 if (!is_thumb
503 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
504 == 0xe3e00a0f /* mov r0, #0xffff0fff */
505 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
506 == 0xe240f01f) /* sub pc, r0, #31 */
507 return 1;
508 }
509
510 return 0;
511 }
512
513 /* Support routines for instruction parsing. */
514 #define submask(x) ((1L << ((x) + 1)) - 1)
515 #define bit(obj,st) (((obj) >> (st)) & 1)
516 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
517 #define sbits(obj,st,fn) \
518 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
519 #define BranchDest(addr,instr) \
520 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
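/* Illustrative examples of the macros above (values chosen for this comment,
   not taken from the original source): bits (0xe92d4800, 12, 15) == 0x4;
   sbits (0x00ffffff, 0, 23) == -1, i.e. a sign-extended 24-bit field; and
   BranchDest (0x1000, insn) for an insn whose 24-bit offset field is 1
   yields 0x1000 + 8 + (1 << 2) == 0x100c.  */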
521
522 /* Extract the immediate from a movw/movt instruction of encoding T. INSN1 is
523 the first 16 bits of the instruction, and INSN2 is the second 16 bits of
524 the instruction. */
525 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
526 ((bits ((insn1), 0, 3) << 12) \
527 | (bits ((insn1), 10, 10) << 11) \
528 | (bits ((insn2), 12, 14) << 8) \
529 | bits ((insn2), 0, 7))
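/* The 16-bit immediate is the concatenation imm4:i:imm3:imm8 taken from the
   two halfwords.  As an illustration worked out for this comment (not from
   the original source), "movw r0, #0x1234" encodes as the halfwords 0xf241
   and 0x2034, and EXTRACT_MOVW_MOVT_IMM_T (0xf241, 0x2034) == 0x1234.  */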
530
531 /* Extract the immediate from a movw/movt instruction of encoding A. INSN is
532 the 32-bit instruction. */
533 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
534 ((bits ((insn), 16, 19) << 12) \
535 | bits ((insn), 0, 11))
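/* Likewise, for encoding A the immediate is imm4:imm12.  As an illustration
   worked out for this comment, "movw r0, #0x1234" encodes as 0xe3010234,
   and EXTRACT_MOVW_MOVT_IMM_A (0xe3010234) == 0x1234.  */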
536
537 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
538
539 static unsigned int
540 thumb_expand_immediate (unsigned int imm)
541 {
542 unsigned int count = imm >> 7;
543
544 if (count < 8)
545 switch (count / 2)
546 {
547 case 0:
548 return imm & 0xff;
549 case 1:
550 return (imm & 0xff) | ((imm & 0xff) << 16);
551 case 2:
552 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
553 case 3:
554 return (imm & 0xff) | ((imm & 0xff) << 8)
555 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
556 }
557
558 return (0x80 | (imm & 0x7f)) << (32 - count);
559 }
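/* Illustrative values (not from the original source): an encoded immediate of
   0x0ab expands to 0x000000ab (case 0 above), 0x1ab expands to 0x00ab00ab
   (case 1), and 0x4ff takes the rotated form (0x80 | 0x7f) << (32 - 9),
   i.e. 0x7f800000.  */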
560
561 /* Return 1 if the 16-bit Thumb instruction INST might change
562 control flow, 0 otherwise. */
563
564 static int
565 thumb_instruction_changes_pc (unsigned short inst)
566 {
567 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
568 return 1;
569
570 if ((inst & 0xf000) == 0xd000) /* conditional branch */
571 return 1;
572
573 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
574 return 1;
575
576 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
577 return 1;
578
579 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
580 return 1;
581
582 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
583 return 1;
584
585 return 0;
586 }
587
588 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
589 might change control flow, 0 otherwise. */
590
591 static int
592 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
593 {
594 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
595 {
596 /* Branches and miscellaneous control instructions. */
597
598 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
599 {
600 /* B, BL, BLX. */
601 return 1;
602 }
603 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
604 {
605 /* SUBS PC, LR, #imm8. */
606 return 1;
607 }
608 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
609 {
610 /* Conditional branch. */
611 return 1;
612 }
613
614 return 0;
615 }
616
617 if ((inst1 & 0xfe50) == 0xe810)
618 {
619 /* Load multiple or RFE. */
620
621 if (bit (inst1, 7) && !bit (inst1, 8))
622 {
623 /* LDMIA or POP */
624 if (bit (inst2, 15))
625 return 1;
626 }
627 else if (!bit (inst1, 7) && bit (inst1, 8))
628 {
629 /* LDMDB */
630 if (bit (inst2, 15))
631 return 1;
632 }
633 else if (bit (inst1, 7) && bit (inst1, 8))
634 {
635 /* RFEIA */
636 return 1;
637 }
638 else if (!bit (inst1, 7) && !bit (inst1, 8))
639 {
640 /* RFEDB */
641 return 1;
642 }
643
644 return 0;
645 }
646
647 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
648 {
649 /* MOV PC or MOVS PC. */
650 return 1;
651 }
652
653 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
654 {
655 /* LDR PC. */
656 if (bits (inst1, 0, 3) == 15)
657 return 1;
658 if (bit (inst1, 7))
659 return 1;
660 if (bit (inst2, 11))
661 return 1;
662 if ((inst2 & 0x0fc0) == 0x0000)
663 return 1;
664
665 return 0;
666 }
667
668 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
669 {
670 /* TBB. */
671 return 1;
672 }
673
674 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
675 {
676 /* TBH. */
677 return 1;
678 }
679
680 return 0;
681 }
682
683 /* Analyze a Thumb prologue, looking for a recognizable stack frame
684 and frame pointer. Scan until we encounter a store that could
685 clobber the stack frame unexpectedly, or an unknown instruction.
686 Return the last address which is definitely safe to skip for an
687 initial breakpoint. */
688
689 static CORE_ADDR
690 thumb_analyze_prologue (struct gdbarch *gdbarch,
691 CORE_ADDR start, CORE_ADDR limit,
692 struct arm_prologue_cache *cache)
693 {
694 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
695 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
696 int i;
697 pv_t regs[16];
698 struct pv_area *stack;
699 struct cleanup *back_to;
700 CORE_ADDR offset;
701 CORE_ADDR unrecognized_pc = 0;
702
703 for (i = 0; i < 16; i++)
704 regs[i] = pv_register (i, 0);
705 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
706 back_to = make_cleanup_free_pv_area (stack);
707
708 while (start < limit)
709 {
710 unsigned short insn;
711
712 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
713
714 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
715 {
716 int regno;
717 int mask;
718
719 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
720 break;
721
722 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
723 whether to save LR (R14). */
724 mask = (insn & 0xff) | ((insn & 0x100) << 6);
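/* For example (illustrative): "push {r4-r7, lr}" is encoded as 0xb5f0,
   giving mask == 0xf0 | 0x4000, i.e. bits for r4-r7 plus bit 14 (LR).  */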
725
726 /* Calculate offsets of saved R0-R7 and LR. */
727 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
728 if (mask & (1 << regno))
729 {
730 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
731 -4);
732 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
733 }
734 }
735 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
736 sub sp, #simm */
737 {
738 offset = (insn & 0x7f) << 2; /* get scaled offset */
739 if (insn & 0x80) /* Check for SUB. */
740 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
741 -offset);
742 else
743 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
744 offset);
745 }
746 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
747 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
748 (insn & 0xff) << 2);
749 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
750 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
751 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
752 bits (insn, 6, 8));
753 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
754 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
755 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
756 bits (insn, 0, 7));
757 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
758 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
759 && pv_is_constant (regs[bits (insn, 3, 5)]))
760 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
761 regs[bits (insn, 6, 8)]);
762 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
763 && pv_is_constant (regs[bits (insn, 3, 6)]))
764 {
765 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
766 int rm = bits (insn, 3, 6);
767 regs[rd] = pv_add (regs[rd], regs[rm]);
768 }
769 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
770 {
771 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
772 int src_reg = (insn & 0x78) >> 3;
773 regs[dst_reg] = regs[src_reg];
774 }
775 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
776 {
777 /* Handle stores to the stack. Normally pushes are used,
778 but with GCC -mtpcs-frame, there may be other stores
779 in the prologue to create the frame. */
780 int regno = (insn >> 8) & 0x7;
781 pv_t addr;
782
783 offset = (insn & 0xff) << 2;
784 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
785
786 if (pv_area_store_would_trash (stack, addr))
787 break;
788
789 pv_area_store (stack, addr, 4, regs[regno]);
790 }
791 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
792 {
793 int rd = bits (insn, 0, 2);
794 int rn = bits (insn, 3, 5);
795 pv_t addr;
796
797 offset = bits (insn, 6, 10) << 2;
798 addr = pv_add_constant (regs[rn], offset);
799
800 if (pv_area_store_would_trash (stack, addr))
801 break;
802
803 pv_area_store (stack, addr, 4, regs[rd]);
804 }
805 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
806 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
807 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
808 /* Ignore stores of argument registers to the stack. */
809 ;
810 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
811 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
812 /* Ignore block loads from the stack, potentially copying
813 parameters from memory. */
814 ;
815 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
816 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
817 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
818 /* Similarly ignore single loads from the stack. */
819 ;
820 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
821 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
822 /* Skip register copies, i.e. saves to another register
823 instead of the stack. */
824 ;
825 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
826 /* Recognize constant loads; even with small stacks these are necessary
827 on Thumb. */
828 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
829 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
830 {
831 /* Constant pool loads, for the same reason. */
832 unsigned int constant;
833 CORE_ADDR loc;
834
835 loc = start + 4 + bits (insn, 0, 7) * 4;
836 constant = read_memory_unsigned_integer (loc, 4, byte_order);
837 regs[bits (insn, 8, 10)] = pv_constant (constant);
838 }
839 else if ((insn & 0xe000) == 0xe000)
840 {
841 unsigned short inst2;
842
843 inst2 = read_memory_unsigned_integer (start + 2, 2,
844 byte_order_for_code);
845
846 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
847 {
848 /* BL, BLX. Allow some special function calls when
849 skipping the prologue; GCC generates these before
850 storing arguments to the stack. */
851 CORE_ADDR nextpc;
852 int j1, j2, imm1, imm2;
853
854 imm1 = sbits (insn, 0, 10);
855 imm2 = bits (inst2, 0, 10);
856 j1 = bit (inst2, 13);
857 j2 = bit (inst2, 11);
858
859 offset = ((imm1 << 12) + (imm2 << 1));
860 offset ^= ((!j2) << 22) | ((!j1) << 23);
861
862 nextpc = start + 4 + offset;
863 /* For BLX make sure to clear the low bits. */
864 if (bit (inst2, 12) == 0)
865 nextpc = nextpc & 0xfffffffc;
866
867 if (!skip_prologue_function (gdbarch, nextpc,
868 bit (inst2, 12) != 0))
869 break;
870 }
871
872 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
873 { registers } */
874 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
875 {
876 pv_t addr = regs[bits (insn, 0, 3)];
877 int regno;
878
879 if (pv_area_store_would_trash (stack, addr))
880 break;
881
882 /* Calculate offsets of saved registers. */
883 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
884 if (inst2 & (1 << regno))
885 {
886 addr = pv_add_constant (addr, -4);
887 pv_area_store (stack, addr, 4, regs[regno]);
888 }
889
890 if (insn & 0x0020)
891 regs[bits (insn, 0, 3)] = addr;
892 }
893
894 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
895 [Rn, #+/-imm]{!} */
896 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
897 {
898 int regno1 = bits (inst2, 12, 15);
899 int regno2 = bits (inst2, 8, 11);
900 pv_t addr = regs[bits (insn, 0, 3)];
901
902 offset = inst2 & 0xff;
903 if (insn & 0x0080)
904 addr = pv_add_constant (addr, offset);
905 else
906 addr = pv_add_constant (addr, -offset);
907
908 if (pv_area_store_would_trash (stack, addr))
909 break;
910
911 pv_area_store (stack, addr, 4, regs[regno1]);
912 pv_area_store (stack, pv_add_constant (addr, 4),
913 4, regs[regno2]);
914
915 if (insn & 0x0020)
916 regs[bits (insn, 0, 3)] = addr;
917 }
918
919 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
920 && (inst2 & 0x0c00) == 0x0c00
921 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
922 {
923 int regno = bits (inst2, 12, 15);
924 pv_t addr = regs[bits (insn, 0, 3)];
925
926 offset = inst2 & 0xff;
927 if (inst2 & 0x0200)
928 addr = pv_add_constant (addr, offset);
929 else
930 addr = pv_add_constant (addr, -offset);
931
932 if (pv_area_store_would_trash (stack, addr))
933 break;
934
935 pv_area_store (stack, addr, 4, regs[regno]);
936
937 if (inst2 & 0x0100)
938 regs[bits (insn, 0, 3)] = addr;
939 }
940
941 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
942 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
943 {
944 int regno = bits (inst2, 12, 15);
945 pv_t addr;
946
947 offset = inst2 & 0xfff;
948 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
949
950 if (pv_area_store_would_trash (stack, addr))
951 break;
952
953 pv_area_store (stack, addr, 4, regs[regno]);
954 }
955
956 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
957 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
958 /* Ignore stores of argument registers to the stack. */
959 ;
960
961 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
962 && (inst2 & 0x0d00) == 0x0c00
963 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
964 /* Ignore stores of argument registers to the stack. */
965 ;
966
967 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
968 { registers } */
969 && (inst2 & 0x8000) == 0x0000
970 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
971 /* Ignore block loads from the stack, potentially copying
972 parameters from memory. */
973 ;
974
975 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
976 [Rn, #+/-imm] */
977 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
978 /* Similarly ignore dual loads from the stack. */
979 ;
980
981 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
982 && (inst2 & 0x0d00) == 0x0c00
983 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
984 /* Similarly ignore single loads from the stack. */
985 ;
986
987 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
988 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
989 /* Similarly ignore single loads from the stack. */
990 ;
991
992 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
993 && (inst2 & 0x8000) == 0x0000)
994 {
995 unsigned int imm = ((bits (insn, 10, 10) << 11)
996 | (bits (inst2, 12, 14) << 8)
997 | bits (inst2, 0, 7));
998
999 regs[bits (inst2, 8, 11)]
1000 = pv_add_constant (regs[bits (insn, 0, 3)],
1001 thumb_expand_immediate (imm));
1002 }
1003
1004 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1005 && (inst2 & 0x8000) == 0x0000)
1006 {
1007 unsigned int imm = ((bits (insn, 10, 10) << 11)
1008 | (bits (inst2, 12, 14) << 8)
1009 | bits (inst2, 0, 7));
1010
1011 regs[bits (inst2, 8, 11)]
1012 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1013 }
1014
1015 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1016 && (inst2 & 0x8000) == 0x0000)
1017 {
1018 unsigned int imm = ((bits (insn, 10, 10) << 11)
1019 | (bits (inst2, 12, 14) << 8)
1020 | bits (inst2, 0, 7));
1021
1022 regs[bits (inst2, 8, 11)]
1023 = pv_add_constant (regs[bits (insn, 0, 3)],
1024 - (CORE_ADDR) thumb_expand_immediate (imm));
1025 }
1026
1027 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1028 && (inst2 & 0x8000) == 0x0000)
1029 {
1030 unsigned int imm = ((bits (insn, 10, 10) << 11)
1031 | (bits (inst2, 12, 14) << 8)
1032 | bits (inst2, 0, 7));
1033
1034 regs[bits (inst2, 8, 11)]
1035 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1036 }
1037
1038 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1039 {
1040 unsigned int imm = ((bits (insn, 10, 10) << 11)
1041 | (bits (inst2, 12, 14) << 8)
1042 | bits (inst2, 0, 7));
1043
1044 regs[bits (inst2, 8, 11)]
1045 = pv_constant (thumb_expand_immediate (imm));
1046 }
1047
1048 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1049 {
1050 unsigned int imm
1051 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1052
1053 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1054 }
1055
1056 else if (insn == 0xea5f /* mov.w Rd,Rm */
1057 && (inst2 & 0xf0f0) == 0)
1058 {
1059 int dst_reg = (inst2 & 0x0f00) >> 8;
1060 int src_reg = inst2 & 0xf;
1061 regs[dst_reg] = regs[src_reg];
1062 }
1063
1064 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1065 {
1066 /* Constant pool loads. */
1067 unsigned int constant;
1068 CORE_ADDR loc;
1069
1070 offset = bits (insn, 0, 11);
1071 if (insn & 0x0080)
1072 loc = start + 4 + offset;
1073 else
1074 loc = start + 4 - offset;
1075
1076 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1077 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1078 }
1079
1080 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1081 {
1082 /* Constant pool loads. */
1083 unsigned int constant;
1084 CORE_ADDR loc;
1085
1086 offset = bits (insn, 0, 7) << 2;
1087 if (insn & 0x0080)
1088 loc = start + 4 + offset;
1089 else
1090 loc = start + 4 - offset;
1091
1092 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1093 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1094
1095 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1096 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1097 }
1098
1099 else if (thumb2_instruction_changes_pc (insn, inst2))
1100 {
1101 /* Don't scan past anything that might change control flow. */
1102 break;
1103 }
1104 else
1105 {
1106 /* The optimizer might shove anything into the prologue,
1107 so we just skip what we don't recognize. */
1108 unrecognized_pc = start;
1109 }
1110
1111 start += 2;
1112 }
1113 else if (thumb_instruction_changes_pc (insn))
1114 {
1115 /* Don't scan past anything that might change control flow. */
1116 break;
1117 }
1118 else
1119 {
1120 /* The optimizer might shove anything into the prologue,
1121 so we just skip what we don't recognize. */
1122 unrecognized_pc = start;
1123 }
1124
1125 start += 2;
1126 }
1127
1128 if (arm_debug)
1129 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1130 paddress (gdbarch, start));
1131
1132 if (unrecognized_pc == 0)
1133 unrecognized_pc = start;
1134
1135 if (cache == NULL)
1136 {
1137 do_cleanups (back_to);
1138 return unrecognized_pc;
1139 }
1140
1141 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1142 {
1143 /* Frame pointer is fp. Frame size is constant. */
1144 cache->framereg = ARM_FP_REGNUM;
1145 cache->framesize = -regs[ARM_FP_REGNUM].k;
1146 }
1147 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1148 {
1149 /* Frame pointer is r7. Frame size is constant. */
1150 cache->framereg = THUMB_FP_REGNUM;
1151 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1152 }
1153 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1154 {
1155 /* Try the stack pointer... this is a bit desperate. */
1156 cache->framereg = ARM_SP_REGNUM;
1157 cache->framesize = -regs[ARM_SP_REGNUM].k;
1158 }
1159 else
1160 {
1161 /* We're just out of luck. We don't know where the frame is. */
1162 cache->framereg = -1;
1163 cache->framesize = 0;
1164 }
1165
1166 for (i = 0; i < 16; i++)
1167 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1168 cache->saved_regs[i].addr = offset;
1169
1170 do_cleanups (back_to);
1171 return unrecognized_pc;
1172 }
1173
1174
1175 /* Try to analyze the instructions starting from PC, which load the symbol
1176 __stack_chk_guard. Return the address of the instruction after this load,
1177 set the destination register number in *DESTREG, and set the size in bytes
1178 of the loading instructions in *OFFSET. Return 0 if the instructions are
1179 not recognized. */
1180
1181 static CORE_ADDR
1182 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1183 unsigned int *destreg, int *offset)
1184 {
1185 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1186 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1187 unsigned int low, high, address;
1188
1189 address = 0;
1190 if (is_thumb)
1191 {
1192 unsigned short insn1
1193 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1194
1195 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1196 {
1197 *destreg = bits (insn1, 8, 10);
1198 *offset = 2;
1199 address = bits (insn1, 0, 7);
1200 }
1201 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1202 {
1203 unsigned short insn2
1204 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1205
1206 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1207
1208 insn1
1209 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1210 insn2
1211 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1212
1213 /* movt Rd, #const */
1214 if ((insn1 & 0xfbc0) == 0xf2c0)
1215 {
1216 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1217 *destreg = bits (insn2, 8, 11);
1218 *offset = 8;
1219 address = (high << 16 | low);
1220 }
1221 }
1222 }
1223 else
1224 {
1225 unsigned int insn
1226 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1227
1228 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1229 {
1230 address = bits (insn, 0, 11);
1231 *destreg = bits (insn, 12, 15);
1232 *offset = 4;
1233 }
1234 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1235 {
1236 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1237
1238 insn
1239 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1240
1241 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1242 {
1243 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1244 *destreg = bits (insn, 12, 15);
1245 *offset = 8;
1246 address = (high << 16 | low);
1247 }
1248 }
1249 }
1250
1251 return address;
1252 }
1253
1254 /* Try to skip a sequence of instructions used for stack protector. If PC
1255 points to the first instruction of this sequence, return the address of
1256 the first instruction after this sequence; otherwise, return the original PC.
1257
1258 On ARM, this sequence of instructions is composed of three main steps:
1259 Step 1: load symbol __stack_chk_guard,
1260 Step 2: load from address of __stack_chk_guard,
1261 Step 3: store it to somewhere else.
1262
1263 Usually, the instructions in step 2 and step 3 are the same across ARM
1264 architectures. In step 2, it is one instruction, 'ldr Rx, [Rn, #0]', and
1265 in step 3, it is also one instruction, 'str Rx, [r7, #immd]'. However,
1266 the instructions in step 1 vary across different ARM architectures. On ARMv7,
1267 they are,
1268
1269 movw Rn, #:lower16:__stack_chk_guard
1270 movt Rn, #:upper16:__stack_chk_guard
1271
1272 On ARMv5t, it is,
1273
1274 ldr Rn, .Label
1275 ....
1276 .Label:
1277 .word __stack_chk_guard
1278
1279 Since ldr/str are very common instructions, we can't use them as the
1280 'fingerprint' or 'signature' of a stack protector sequence. Here we choose
1281 the sequence {movw/movt, ldr}/ldr/str plus the symbol __stack_chk_guard, if not
1282 stripped, as the 'fingerprint' of a stack protector code sequence. */
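/* Put together, a typical ARMv7 fingerprint therefore looks like the
   following (register choices are illustrative, not from the original
   source):
     movw r3, #:lower16:__stack_chk_guard
     movt r3, #:upper16:__stack_chk_guard
     ldr  r3, [r3, #0]
     str  r3, [r7, #8]  */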
1283
1284 static CORE_ADDR
1285 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1286 {
1287 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1288 unsigned int address, basereg;
1289 struct minimal_symbol *stack_chk_guard;
1290 int offset;
1291 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1292 CORE_ADDR addr;
1293
1294 /* Try to parse the instructions in Step 1. */
1295 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1296 &basereg, &offset);
1297 if (!addr)
1298 return pc;
1299
1300 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1301 /* If the name of the symbol doesn't start with '__stack_chk_guard', this
1302 instruction sequence is not for a stack protector. If the symbol has
1303 been stripped, we conservatively assume the sequence is for a stack protector. */
1304 if (stack_chk_guard
1305 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
1306 strlen ("__stack_chk_guard")) != 0)
1307 return pc;
1308
1309 if (is_thumb)
1310 {
1311 unsigned int destreg;
1312 unsigned short insn
1313 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1314
1315 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1316 if ((insn & 0xf800) != 0x6800)
1317 return pc;
1318 if (bits (insn, 3, 5) != basereg)
1319 return pc;
1320 destreg = bits (insn, 0, 2);
1321
1322 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1323 byte_order_for_code);
1324 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1325 if ((insn & 0xf800) != 0x6000)
1326 return pc;
1327 if (destreg != bits (insn, 0, 2))
1328 return pc;
1329 }
1330 else
1331 {
1332 unsigned int destreg;
1333 unsigned int insn
1334 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1335
1336 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1337 if ((insn & 0x0e500000) != 0x04100000)
1338 return pc;
1339 if (bits (insn, 16, 19) != basereg)
1340 return pc;
1341 destreg = bits (insn, 12, 15);
1342 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1343 insn = read_memory_unsigned_integer (pc + offset + 4,
1344 4, byte_order_for_code);
1345 if ((insn & 0x0e500000) != 0x04000000)
1346 return pc;
1347 if (bits (insn, 12, 15) != destreg)
1348 return pc;
1349 }
1350 /* The total size of the two ldr/str instructions is 4 on Thumb-2, while it
1351 is 8 on ARM. */
1352 if (is_thumb)
1353 return pc + offset + 4;
1354 else
1355 return pc + offset + 8;
1356 }
1357
1358 /* Advance the PC across any function entry prologue instructions to
1359 reach some "real" code.
1360
1361 The APCS (ARM Procedure Call Standard) defines the following
1362 prologue:
1363
1364 mov ip, sp
1365 [stmfd sp!, {a1,a2,a3,a4}]
1366 stmfd sp!, {...,fp,ip,lr,pc}
1367 [stfe f7, [sp, #-12]!]
1368 [stfe f6, [sp, #-12]!]
1369 [stfe f5, [sp, #-12]!]
1370 [stfe f4, [sp, #-12]!]
1371 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1372
1373 static CORE_ADDR
1374 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1375 {
1376 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1377 unsigned long inst;
1378 CORE_ADDR skip_pc;
1379 CORE_ADDR func_addr, limit_pc;
1380 struct symtab_and_line sal;
1381
1382 /* See if we can determine the end of the prologue via the symbol table.
1383 If so, then return either PC, or the PC after the prologue, whichever
1384 is greater. */
1385 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1386 {
1387 CORE_ADDR post_prologue_pc
1388 = skip_prologue_using_sal (gdbarch, func_addr);
1389 struct symtab *s = find_pc_symtab (func_addr);
1390
1391 if (post_prologue_pc)
1392 post_prologue_pc
1393 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1394
1395
1396 /* GCC always emits a line note before the prologue and another
1397 one after, even if the two are at the same address or on the
1398 same line. Take advantage of this so that we do not need to
1399 know every instruction that might appear in the prologue. We
1400 will have producer information for most binaries; if it is
1401 missing (e.g. for -gstabs), assume the GNU tools. */
1402 if (post_prologue_pc
1403 && (s == NULL
1404 || s->producer == NULL
1405 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1406 return post_prologue_pc;
1407
1408 if (post_prologue_pc != 0)
1409 {
1410 CORE_ADDR analyzed_limit;
1411
1412 /* For non-GCC compilers, make sure the entire line is an
1413 acceptable prologue; GDB will round this function's
1414 return value up to the end of the following line so we
1415 can not skip just part of a line (and we do not want to).
1416
1417 RealView does not treat the prologue specially, but does
1418 associate prologue code with the opening brace; so this
1419 lets us skip the first line if we think it is the opening
1420 brace. */
1421 if (arm_pc_is_thumb (gdbarch, func_addr))
1422 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1423 post_prologue_pc, NULL);
1424 else
1425 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1426 post_prologue_pc, NULL);
1427
1428 if (analyzed_limit != post_prologue_pc)
1429 return func_addr;
1430
1431 return post_prologue_pc;
1432 }
1433 }
1434
1435 /* Can't determine prologue from the symbol table, need to examine
1436 instructions. */
1437
1438 /* Find an upper limit on the function prologue using the debug
1439 information. If the debug information could not be used to provide
1440 that bound, then use an arbitrary large number as the upper bound. */
1441 /* Like arm_scan_prologue, stop no later than pc + 64. */
1442 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1443 if (limit_pc == 0)
1444 limit_pc = pc + 64; /* Magic. */
1445
1446
1447 /* Check if this is Thumb code. */
1448 if (arm_pc_is_thumb (gdbarch, pc))
1449 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1450
1451 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1452 {
1453 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1454
1455 /* "mov ip, sp" is no longer a required part of the prologue. */
1456 if (inst == 0xe1a0c00d) /* mov ip, sp */
1457 continue;
1458
1459 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1460 continue;
1461
1462 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1463 continue;
1464
1465 /* Some prologues begin with "str lr, [sp, #-4]!". */
1466 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1467 continue;
1468
1469 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1470 continue;
1471
1472 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1473 continue;
1474
1475 /* Any insns after this point may float into the code, if it makes
1476 for better instruction scheduling, so we skip them only if we
1477 find them, but still consider the function to be frame-ful. */
1478
1479 /* We may have either one sfmfd instruction here, or several stfe
1480 insns, depending on the version of floating point code we
1481 support. */
1482 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1483 continue;
1484
1485 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1486 continue;
1487
1488 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1489 continue;
1490
1491 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1492 continue;
1493
1494 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1495 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1496 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1497 continue;
1498
1499 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1500 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1501 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1502 continue;
1503
1504 /* Un-recognized instruction; stop scanning. */
1505 break;
1506 }
1507
1508 return skip_pc; /* End of prologue. */
1509 }
1510
1511 /* *INDENT-OFF* */
1512 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1513 This function decodes a Thumb function prologue to determine:
1514 1) the size of the stack frame
1515 2) which registers are saved on it
1516 3) the offsets of saved regs
1517 4) the offset from the stack pointer to the frame pointer
1518
1519 A typical Thumb function prologue would create this stack frame
1520 (offsets relative to FP)
1521 old SP -> 24 stack parameters
1522 20 LR
1523 16 R7
1524 R7 -> 0 local variables (16 bytes)
1525 SP -> -12 additional stack space (12 bytes)
1526 The frame size would thus be 36 bytes, and the frame offset would be
1527 12 bytes. The frame register is R7.
1528
1529 The comments for thumb_skip_prolog() describe the algorithm we use
1530 to detect the end of the prolog. */
1531 /* *INDENT-ON* */
1532
1533 static void
1534 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1535 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1536 {
1537 CORE_ADDR prologue_start;
1538 CORE_ADDR prologue_end;
1539 CORE_ADDR current_pc;
1540
1541 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1542 &prologue_end))
1543 {
1544 /* See comment in arm_scan_prologue for an explanation of
1545 this heuristic. */
1546 if (prologue_end > prologue_start + 64)
1547 {
1548 prologue_end = prologue_start + 64;
1549 }
1550 }
1551 else
1552 /* We're in the boondocks: we have no idea where the start of the
1553 function is. */
1554 return;
1555
1556 prologue_end = min (prologue_end, prev_pc);
1557
1558 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1559 }
1560
1561 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1562
1563 static int
1564 arm_instruction_changes_pc (uint32_t this_instr)
1565 {
1566 if (bits (this_instr, 28, 31) == INST_NV)
1567 /* Unconditional instructions. */
1568 switch (bits (this_instr, 24, 27))
1569 {
1570 case 0xa:
1571 case 0xb:
1572 /* Branch with Link and change to Thumb. */
1573 return 1;
1574 case 0xc:
1575 case 0xd:
1576 case 0xe:
1577 /* Coprocessor register transfer. */
1578 if (bits (this_instr, 12, 15) == 15)
1579 error (_("Invalid update to pc in instruction"));
1580 return 0;
1581 default:
1582 return 0;
1583 }
1584 else
1585 switch (bits (this_instr, 25, 27))
1586 {
1587 case 0x0:
1588 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1589 {
1590 /* Multiplies and extra load/stores. */
1591 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1592 /* Neither multiplies nor extension load/stores are allowed
1593 to modify PC. */
1594 return 0;
1595
1596 /* Otherwise, miscellaneous instructions. */
1597
1598 /* BX <reg>, BXJ <reg>, BLX <reg> */
1599 if (bits (this_instr, 4, 27) == 0x12fff1
1600 || bits (this_instr, 4, 27) == 0x12fff2
1601 || bits (this_instr, 4, 27) == 0x12fff3)
1602 return 1;
1603
1604 /* Other miscellaneous instructions are unpredictable if they
1605 modify PC. */
1606 return 0;
1607 }
1608 /* Data processing instruction. Fall through. */
1609
1610 case 0x1:
1611 if (bits (this_instr, 12, 15) == 15)
1612 return 1;
1613 else
1614 return 0;
1615
1616 case 0x2:
1617 case 0x3:
1618 /* Media instructions and architecturally undefined instructions. */
1619 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1620 return 0;
1621
1622 /* Stores. */
1623 if (bit (this_instr, 20) == 0)
1624 return 0;
1625
1626 /* Loads. */
1627 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1628 return 1;
1629 else
1630 return 0;
1631
1632 case 0x4:
1633 /* Load/store multiple. */
1634 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1635 return 1;
1636 else
1637 return 0;
1638
1639 case 0x5:
1640 /* Branch and branch with link. */
1641 return 1;
1642
1643 case 0x6:
1644 case 0x7:
1645 /* Coprocessor transfers or SWIs can not affect PC. */
1646 return 0;
1647
1648 default:
1649 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1650 }
1651 }
1652
1653 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1654 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1655 fill it in. Return the first address not recognized as a prologue
1656 instruction.
1657
1658 We recognize all the instructions typically found in ARM prologues,
1659 plus harmless instructions which can be skipped (either for analysis
1660 purposes, or a more restrictive set that can be skipped when finding
1661 the end of the prologue). */
1662
1663 static CORE_ADDR
1664 arm_analyze_prologue (struct gdbarch *gdbarch,
1665 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1666 struct arm_prologue_cache *cache)
1667 {
1668 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1669 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1670 int regno;
1671 CORE_ADDR offset, current_pc;
1672 pv_t regs[ARM_FPS_REGNUM];
1673 struct pv_area *stack;
1674 struct cleanup *back_to;
1675 int framereg, framesize;
1676 CORE_ADDR unrecognized_pc = 0;
1677
1678 /* Search the prologue looking for instructions that set up the
1679 frame pointer, adjust the stack pointer, and save registers.
1680
1681 Be careful, however, and if it doesn't look like a prologue,
1682 don't try to scan it. If, for instance, a frameless function
1683 begins with stmfd sp!, then we will tell ourselves there is
1684 a frame, which will confuse stack traceback, as well as "finish"
1685 and other operations that rely on a knowledge of the stack
1686 traceback. */
1687
1688 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1689 regs[regno] = pv_register (regno, 0);
1690 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1691 back_to = make_cleanup_free_pv_area (stack);
1692
1693 for (current_pc = prologue_start;
1694 current_pc < prologue_end;
1695 current_pc += 4)
1696 {
1697 unsigned int insn
1698 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1699
1700 if (insn == 0xe1a0c00d) /* mov ip, sp */
1701 {
1702 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1703 continue;
1704 }
1705 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1706 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1707 {
1708 unsigned imm = insn & 0xff; /* immediate value */
1709 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1710 int rd = bits (insn, 12, 15);
1711 imm = (imm >> rot) | (imm << (32 - rot));
1712 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1713 continue;
1714 }
1715 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1716 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1717 {
1718 unsigned imm = insn & 0xff; /* immediate value */
1719 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1720 int rd = bits (insn, 12, 15);
1721 imm = (imm >> rot) | (imm << (32 - rot));
1722 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1723 continue;
1724 }
1725 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1726 [sp, #-4]! */
1727 {
1728 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1729 break;
1730 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1731 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1732 regs[bits (insn, 12, 15)]);
1733 continue;
1734 }
1735 else if ((insn & 0xffff0000) == 0xe92d0000)
1736 /* stmfd sp!, {..., fp, ip, lr, pc}
1737 or
1738 stmfd sp!, {a1, a2, a3, a4} */
1739 {
1740 int mask = insn & 0xffff;
1741
1742 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1743 break;
1744
1745 /* Calculate offsets of saved registers. */
1746 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1747 if (mask & (1 << regno))
1748 {
1749 regs[ARM_SP_REGNUM]
1750 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1751 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1752 }
1753 }
1754 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1755 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1756 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1757 {
1758 /* No need to add this to saved_regs -- it's just an arg reg. */
1759 continue;
1760 }
1761 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1762 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1763 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1764 {
1765 /* No need to add this to saved_regs -- it's just an arg reg. */
1766 continue;
1767 }
1768 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1769 { registers } */
1770 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1771 {
1772 /* No need to add this to saved_regs -- it's just arg regs. */
1773 continue;
1774 }
1775 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1776 {
1777 unsigned imm = insn & 0xff; /* immediate value */
1778 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1779 imm = (imm >> rot) | (imm << (32 - rot));
1780 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1781 }
1782 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1783 {
1784 unsigned imm = insn & 0xff; /* immediate value */
1785 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1786 imm = (imm >> rot) | (imm << (32 - rot));
1787 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1788 }
1789 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1790 [sp, -#c]! */
1791 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1792 {
1793 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1794 break;
1795
1796 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1797 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1798 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1799 }
1800 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1801 [sp!] */
1802 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1803 {
1804 int n_saved_fp_regs;
1805 unsigned int fp_start_reg, fp_bound_reg;
1806
1807 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1808 break;
1809
1810 if ((insn & 0x800) == 0x800) /* N0 is set */
1811 {
1812 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1813 n_saved_fp_regs = 3;
1814 else
1815 n_saved_fp_regs = 1;
1816 }
1817 else
1818 {
1819 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1820 n_saved_fp_regs = 2;
1821 else
1822 n_saved_fp_regs = 4;
1823 }
1824
1825 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1826 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1827 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1828 {
1829 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1830 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1831 regs[fp_start_reg]);
1832 }
1833 }
1834 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1835 {
1836 /* Allow some special function calls when skipping the
1837 prologue; GCC generates these before storing arguments to
1838 the stack. */
1839 CORE_ADDR dest = BranchDest (current_pc, insn);
1840
1841 if (skip_prologue_function (gdbarch, dest, 0))
1842 continue;
1843 else
1844 break;
1845 }
1846 else if ((insn & 0xf0000000) != 0xe0000000)
1847 break; /* Condition not true, exit early. */
1848 else if (arm_instruction_changes_pc (insn))
1849 /* Don't scan past anything that might change control flow. */
1850 break;
1851 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1852 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1853 /* Ignore block loads from the stack, potentially copying
1854 parameters from memory. */
1855 continue;
1856 else if ((insn & 0xfc500000) == 0xe4100000
1857 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1858 /* Similarly ignore single loads from the stack. */
1859 continue;
1860 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1861 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1862 register instead of the stack. */
1863 continue;
1864 else
1865 {
1866 /* The optimizer might shove anything into the prologue,
1867 so we just skip what we don't recognize. */
1868 unrecognized_pc = current_pc;
1869 continue;
1870 }
1871 }
1872
1873 if (unrecognized_pc == 0)
1874 unrecognized_pc = current_pc;
1875
1876 /* The frame size is just the distance from the frame register
1877 to the original stack pointer. */
1878 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1879 {
1880 /* Frame pointer is fp. */
1881 framereg = ARM_FP_REGNUM;
1882 framesize = -regs[ARM_FP_REGNUM].k;
1883 }
1884 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1885 {
1886 /* Try the stack pointer... this is a bit desperate. */
1887 framereg = ARM_SP_REGNUM;
1888 framesize = -regs[ARM_SP_REGNUM].k;
1889 }
1890 else
1891 {
1892 /* We're just out of luck. We don't know where the frame is. */
1893 framereg = -1;
1894 framesize = 0;
1895 }
1896
1897 if (cache)
1898 {
1899 cache->framereg = framereg;
1900 cache->framesize = framesize;
1901
1902 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1903 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1904 cache->saved_regs[regno].addr = offset;
1905 }
1906
1907 if (arm_debug)
1908 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1909 paddress (gdbarch, unrecognized_pc));
1910
1911 do_cleanups (back_to);
1912 return unrecognized_pc;
1913 }
1914
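/* Illustrative sketch, not called by GDB: the ARM "modified immediate"
   decode used repeatedly by arm_analyze_prologue above.  An 8-bit value
   is rotated right by twice the 4-bit rotate field.  For example, the
   encoding of "sub sp, sp, #0x1000" holds imm8 = 0x01 and rotate = 10,
   and 0x01 rotated right by 20 bits yields 0x1000.  */

static unsigned int
arm_expand_immediate_example (unsigned int insn)
{
  unsigned int imm = insn & 0xff;		/* Immediate value.  */
  unsigned int rot = (insn & 0xf00) >> 7;	/* Twice the rotate field.  */

  return (imm >> rot) | (imm << (32 - rot));
}
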
1915 static void
1916 arm_scan_prologue (struct frame_info *this_frame,
1917 struct arm_prologue_cache *cache)
1918 {
1919 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1921 int regno;
1922 CORE_ADDR prologue_start, prologue_end, current_pc;
1923 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1924 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1925 pv_t regs[ARM_FPS_REGNUM];
1926 struct pv_area *stack;
1927 struct cleanup *back_to;
1928 CORE_ADDR offset;
1929
1930 /* Assume there is no frame until proven otherwise. */
1931 cache->framereg = ARM_SP_REGNUM;
1932 cache->framesize = 0;
1933
1934 /* Check for Thumb prologue. */
1935 if (arm_frame_is_thumb (this_frame))
1936 {
1937 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1938 return;
1939 }
1940
1941 /* Find the function prologue. If we can't find the function in
1942 the symbol table, peek in the stack frame to find the PC. */
1943 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1944 &prologue_end))
1945 {
1946 /* One way to find the end of the prologue (which works well
1947 for unoptimized code) is to do the following:
1948
1949 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1950
1951 if (sal.line == 0)
1952 prologue_end = prev_pc;
1953 else if (sal.end < prologue_end)
1954 prologue_end = sal.end;
1955
1956 This mechanism is very accurate so long as the optimizer
1957 doesn't move any instructions from the function body into the
1958 prologue. If this happens, sal.end will be the last
1959 instruction in the first hunk of prologue code just before
1960 the first instruction that the scheduler has moved from
1961 the body to the prologue.
1962
1963 In order to make sure that we scan all of the prologue
1964 instructions, we use a slightly less accurate mechanism which
1965 may scan more than necessary. To help compensate for this
1966 lack of accuracy, the prologue scanning loop below contains
1967 several clauses which will cause the loop to terminate early if
1968 an implausible prologue instruction is encountered.
1969
1970 The expression
1971
1972 prologue_start + 64
1973
1974 is a suitable endpoint since it accounts for the largest
1975 possible prologue plus up to five instructions inserted by
1976 the scheduler. */
1977
1978 if (prologue_end > prologue_start + 64)
1979 {
1980 prologue_end = prologue_start + 64; /* See above. */
1981 }
1982 }
1983 else
1984 {
1985 /* We have no symbol information. Our only option is to assume this
1986 function has a standard stack frame and the normal frame register.
1987 Then, we can find the value of our frame pointer on entrance to
1988 the callee (or at the present moment if this is the innermost frame).
1989 The value stored there should be the address of the stmfd + 8. */
1990 CORE_ADDR frame_loc;
1991 LONGEST return_value;
1992
1993 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1994 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1995 return;
1996 else
1997 {
1998 prologue_start = gdbarch_addr_bits_remove
1999 (gdbarch, return_value) - 8;
2000 prologue_end = prologue_start + 64; /* See above. */
2001 }
2002 }
2003
2004 if (prev_pc < prologue_end)
2005 prologue_end = prev_pc;
2006
2007 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2008 }
2009
2010 static struct arm_prologue_cache *
2011 arm_make_prologue_cache (struct frame_info *this_frame)
2012 {
2013 int reg;
2014 struct arm_prologue_cache *cache;
2015 CORE_ADDR unwound_fp;
2016
2017 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2018 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2019
2020 arm_scan_prologue (this_frame, cache);
2021
2022 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2023 if (unwound_fp == 0)
2024 return cache;
2025
2026 cache->prev_sp = unwound_fp + cache->framesize;
2027
2028 /* Calculate actual addresses of saved registers using offsets
2029 determined by arm_scan_prologue. */
2030 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2031 if (trad_frame_addr_p (cache->saved_regs, reg))
2032 cache->saved_regs[reg].addr += cache->prev_sp;
2033
2034 return cache;
2035 }
2036
2037 /* Our frame ID for a normal frame is the current function's starting PC
2038 and the caller's SP when we were called. */
2039
2040 static void
2041 arm_prologue_this_id (struct frame_info *this_frame,
2042 void **this_cache,
2043 struct frame_id *this_id)
2044 {
2045 struct arm_prologue_cache *cache;
2046 struct frame_id id;
2047 CORE_ADDR pc, func;
2048
2049 if (*this_cache == NULL)
2050 *this_cache = arm_make_prologue_cache (this_frame);
2051 cache = *this_cache;
2052
2053 /* This is meant to halt the backtrace at "_start". */
2054 pc = get_frame_pc (this_frame);
2055 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2056 return;
2057
2058 /* If we've hit a wall, stop. */
2059 if (cache->prev_sp == 0)
2060 return;
2061
2062 /* Use function start address as part of the frame ID. If we cannot
2063 identify the start address (due to missing symbol information),
2064 fall back to just using the current PC. */
2065 func = get_frame_func (this_frame);
2066 if (!func)
2067 func = pc;
2068
2069 id = frame_id_build (cache->prev_sp, func);
2070 *this_id = id;
2071 }
2072
2073 static struct value *
2074 arm_prologue_prev_register (struct frame_info *this_frame,
2075 void **this_cache,
2076 int prev_regnum)
2077 {
2078 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2079 struct arm_prologue_cache *cache;
2080
2081 if (*this_cache == NULL)
2082 *this_cache = arm_make_prologue_cache (this_frame);
2083 cache = *this_cache;
2084
2085 /* If we are asked to unwind the PC, then we need to return the LR
2086 instead. The prologue may save PC, but it will point into this
2087 frame's prologue, not the next frame's resume location. Also
2088 strip the saved T bit. A valid LR may have the low bit set, but
2089 a valid PC never does. */
2090 if (prev_regnum == ARM_PC_REGNUM)
2091 {
2092 CORE_ADDR lr;
2093
2094 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2095 return frame_unwind_got_constant (this_frame, prev_regnum,
2096 arm_addr_bits_remove (gdbarch, lr));
2097 }
2098
2099 /* SP is generally not saved to the stack, but this frame is
2100 identified by the next frame's stack pointer at the time of the call.
2101 The value was already reconstructed into PREV_SP. */
2102 if (prev_regnum == ARM_SP_REGNUM)
2103 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2104
2105 /* The CPSR may have been changed by the call instruction and by the
2106 called function. The only bit we can reconstruct is the T bit,
2107 by checking the low bit of LR as of the call. This is a reliable
2108 indicator of Thumb-ness except for some ARM v4T pre-interworking
2109 Thumb code, which could get away with a clear low bit as long as
2110 the called function did not use bx. Guess that all other
2111 bits are unchanged; the condition flags are presumably lost,
2112 but the processor status is likely valid. */
2113 if (prev_regnum == ARM_PS_REGNUM)
2114 {
2115 CORE_ADDR lr, cpsr;
2116 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2117
2118 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2119 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2120 if (IS_THUMB_ADDR (lr))
2121 cpsr |= t_bit;
2122 else
2123 cpsr &= ~t_bit;
2124 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2125 }
2126
2127 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2128 prev_regnum);
2129 }
2130
2131 struct frame_unwind arm_prologue_unwind = {
2132 NORMAL_FRAME,
2133 default_frame_unwind_stop_reason,
2134 arm_prologue_this_id,
2135 arm_prologue_prev_register,
2136 NULL,
2137 default_frame_sniffer
2138 };
2139
2140 /* Maintain a list of ARM exception table entries per objfile, similar to the
2141 list of mapping symbols. We only cache entries for standard ARM-defined
2142 personality routines; the cache will contain only the frame unwinding
2143 instructions associated with the entry (not the descriptors). */
2144
2145 static const struct objfile_data *arm_exidx_data_key;
2146
2147 struct arm_exidx_entry
2148 {
2149 bfd_vma addr;
2150 gdb_byte *entry;
2151 };
2152 typedef struct arm_exidx_entry arm_exidx_entry_s;
2153 DEF_VEC_O(arm_exidx_entry_s);
2154
2155 struct arm_exidx_data
2156 {
2157 VEC(arm_exidx_entry_s) **section_maps;
2158 };
2159
2160 static void
2161 arm_exidx_data_free (struct objfile *objfile, void *arg)
2162 {
2163 struct arm_exidx_data *data = arg;
2164 unsigned int i;
2165
2166 for (i = 0; i < objfile->obfd->section_count; i++)
2167 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2168 }
2169
2170 static inline int
2171 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2172 const struct arm_exidx_entry *rhs)
2173 {
2174 return lhs->addr < rhs->addr;
2175 }
2176
2177 static struct obj_section *
2178 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2179 {
2180 struct obj_section *osect;
2181
2182 ALL_OBJFILE_OSECTIONS (objfile, osect)
2183 if (bfd_get_section_flags (objfile->obfd,
2184 osect->the_bfd_section) & SEC_ALLOC)
2185 {
2186 bfd_vma start, size;
2187 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2188 size = bfd_get_section_size (osect->the_bfd_section);
2189
2190 if (start <= vma && vma < start + size)
2191 return osect;
2192 }
2193
2194 return NULL;
2195 }
2196
2197 /* Parse contents of exception table and exception index sections
2198 of OBJFILE, and fill in the exception table entry cache.
2199
2200 For each entry that refers to a standard ARM-defined personality
2201 routine, extract the frame unwinding instructions (from either
2202 the index or the table section). The unwinding instructions
2203 are normalized by:
2204 - extracting them from the rest of the table data
2205 - converting to host endianness
2206 - appending the implicit 0xb0 ("Finish") code
2207
2208 The extracted and normalized instructions are stored for later
2209 retrieval by the arm_find_exidx_entry routine. */
2210
2211 static void
2212 arm_exidx_new_objfile (struct objfile *objfile)
2213 {
2214 struct cleanup *cleanups;
2215 struct arm_exidx_data *data;
2216 asection *exidx, *extab;
2217 bfd_vma exidx_vma = 0, extab_vma = 0;
2218 bfd_size_type exidx_size = 0, extab_size = 0;
2219 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2220 LONGEST i;
2221
2222 /* If we've already touched this file, do nothing. */
2223 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2224 return;
2225 cleanups = make_cleanup (null_cleanup, NULL);
2226
2227 /* Read contents of exception table and index. */
2228 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2229 if (exidx)
2230 {
2231 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2232 exidx_size = bfd_get_section_size (exidx);
2233 exidx_data = xmalloc (exidx_size);
2234 make_cleanup (xfree, exidx_data);
2235
2236 if (!bfd_get_section_contents (objfile->obfd, exidx,
2237 exidx_data, 0, exidx_size))
2238 {
2239 do_cleanups (cleanups);
2240 return;
2241 }
2242 }
2243
2244 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2245 if (extab)
2246 {
2247 extab_vma = bfd_section_vma (objfile->obfd, extab);
2248 extab_size = bfd_get_section_size (extab);
2249 extab_data = xmalloc (extab_size);
2250 make_cleanup (xfree, extab_data);
2251
2252 if (!bfd_get_section_contents (objfile->obfd, extab,
2253 extab_data, 0, extab_size))
2254 {
2255 do_cleanups (cleanups);
2256 return;
2257 }
2258 }
2259
2260 /* Allocate exception table data structure. */
2261 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2262 set_objfile_data (objfile, arm_exidx_data_key, data);
2263 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2264 objfile->obfd->section_count,
2265 VEC(arm_exidx_entry_s) *);
2266
2267 /* Fill in exception table. */
2268 for (i = 0; i < exidx_size / 8; i++)
2269 {
2270 struct arm_exidx_entry new_exidx_entry;
2271 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2272 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2273 bfd_vma addr = 0, word = 0;
2274 int n_bytes = 0, n_words = 0;
2275 struct obj_section *sec;
2276 gdb_byte *entry = NULL;
2277
2278 /* Extract address of start of function. */
2279 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2280 idx += exidx_vma + i * 8;
2281
2282 /* Find section containing function and compute section offset. */
2283 sec = arm_obj_section_from_vma (objfile, idx);
2284 if (sec == NULL)
2285 continue;
2286 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2287
2288 /* Determine address of exception table entry. */
2289 if (val == 1)
2290 {
2291 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2292 }
2293 else if ((val & 0xff000000) == 0x80000000)
2294 {
2295 /* Exception table entry embedded in .ARM.exidx
2296 -- must be short form. */
2297 word = val;
2298 n_bytes = 3;
2299 }
2300 else if (!(val & 0x80000000))
2301 {
2302 /* Exception table entry in .ARM.extab. */
2303 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2304 addr += exidx_vma + i * 8 + 4;
2305
2306 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2307 {
2308 word = bfd_h_get_32 (objfile->obfd,
2309 extab_data + addr - extab_vma);
2310 addr += 4;
2311
2312 if ((word & 0xff000000) == 0x80000000)
2313 {
2314 /* Short form. */
2315 n_bytes = 3;
2316 }
2317 else if ((word & 0xff000000) == 0x81000000
2318 || (word & 0xff000000) == 0x82000000)
2319 {
2320 /* Long form. */
2321 n_bytes = 2;
2322 n_words = ((word >> 16) & 0xff);
2323 }
2324 else if (!(word & 0x80000000))
2325 {
2326 bfd_vma pers;
2327 struct obj_section *pers_sec;
2328 int gnu_personality = 0;
2329
2330 /* Custom personality routine. */
2331 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2332 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2333
2334 /* Check whether we've got one of the variants of the
2335 GNU personality routines. */
2336 pers_sec = arm_obj_section_from_vma (objfile, pers);
2337 if (pers_sec)
2338 {
2339 static const char *personality[] =
2340 {
2341 "__gcc_personality_v0",
2342 "__gxx_personality_v0",
2343 "__gcj_personality_v0",
2344 "__gnu_objc_personality_v0",
2345 NULL
2346 };
2347
2348 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2349 int k;
2350
2351 for (k = 0; personality[k]; k++)
2352 if (lookup_minimal_symbol_by_pc_name
2353 (pc, personality[k], objfile))
2354 {
2355 gnu_personality = 1;
2356 break;
2357 }
2358 }
2359
2360 /* If so, the next word contains a word count in the high
2361 byte, followed by the same unwind instructions as the
2362 pre-defined forms. */
2363 if (gnu_personality
2364 && addr + 4 <= extab_vma + extab_size)
2365 {
2366 word = bfd_h_get_32 (objfile->obfd,
2367 extab_data + addr - extab_vma);
2368 addr += 4;
2369 n_bytes = 3;
2370 n_words = ((word >> 24) & 0xff);
2371 }
2372 }
2373 }
2374 }
2375
2376 /* Sanity check address. */
2377 if (n_words)
2378 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2379 n_words = n_bytes = 0;
2380
2381 /* The unwind instructions reside in WORD (only the N_BYTES least
2382 significant bytes are valid), followed by N_WORDS words in the
2383 extab section starting at ADDR. */
2384 if (n_bytes || n_words)
2385 {
2386 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2387 n_bytes + n_words * 4 + 1);
2388
2389 while (n_bytes--)
2390 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2391
2392 while (n_words--)
2393 {
2394 word = bfd_h_get_32 (objfile->obfd,
2395 extab_data + addr - extab_vma);
2396 addr += 4;
2397
2398 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2399 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2400 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2401 *p++ = (gdb_byte) (word & 0xff);
2402 }
2403
2404 /* Implied "Finish" to terminate the list. */
2405 *p++ = 0xb0;
2406 }
2407
2408 /* Push the entry onto the vector. Entries are guaranteed to appear
2409 in order of increasing addresses. */
2410 new_exidx_entry.addr = idx;
2411 new_exidx_entry.entry = entry;
2412 VEC_safe_push (arm_exidx_entry_s,
2413 data->section_maps[sec->the_bfd_section->index],
2414 &new_exidx_entry);
2415 }
2416
2417 do_cleanups (cleanups);
2418 }
2419
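/* Illustrative sketch (helper name is hypothetical, not part of GDB):
   the "prel31" fields decoded above.  Each .ARM.exidx word stores a
   31-bit offset relative to the word's own address; bit 30 is the sign
   bit, and the expression ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000
   sign-extends it before the field's address is added back in.  */

static bfd_vma
arm_prel31_to_addr_example (bfd_vma prel31, bfd_vma field_addr)
{
  bfd_vma offset = ((prel31 & 0x7fffffff) ^ 0x40000000) - 0x40000000;

  return field_addr + offset;
}
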
2420 /* Search for the exception table entry covering MEMADDR. If one is found,
2421 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2422 set *START to the start of the region covered by this entry. */
2423
2424 static gdb_byte *
2425 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2426 {
2427 struct obj_section *sec;
2428
2429 sec = find_pc_section (memaddr);
2430 if (sec != NULL)
2431 {
2432 struct arm_exidx_data *data;
2433 VEC(arm_exidx_entry_s) *map;
2434 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2435 unsigned int idx;
2436
2437 data = objfile_data (sec->objfile, arm_exidx_data_key);
2438 if (data != NULL)
2439 {
2440 map = data->section_maps[sec->the_bfd_section->index];
2441 if (!VEC_empty (arm_exidx_entry_s, map))
2442 {
2443 struct arm_exidx_entry *map_sym;
2444
2445 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2446 arm_compare_exidx_entries);
2447
2448 /* VEC_lower_bound finds the earliest ordered insertion
2449 point. If the following symbol starts at this exact
2450 address, we use that; otherwise, the preceding
2451 exception table entry covers this address. */
2452 if (idx < VEC_length (arm_exidx_entry_s, map))
2453 {
2454 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2455 if (map_sym->addr == map_key.addr)
2456 {
2457 if (start)
2458 *start = map_sym->addr + obj_section_addr (sec);
2459 return map_sym->entry;
2460 }
2461 }
2462
2463 if (idx > 0)
2464 {
2465 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2466 if (start)
2467 *start = map_sym->addr + obj_section_addr (sec);
2468 return map_sym->entry;
2469 }
2470 }
2471 }
2472 }
2473
2474 return NULL;
2475 }
2476
2477 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2478 instruction list from the ARM exception table entry ENTRY, allocate and
2479 return a prologue cache structure describing how to unwind this frame.
2480
2481 Return NULL if the unwinding instruction list contains a "spare",
2482 "reserved" or "refuse to unwind" instruction as defined in section
2483 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2484 for the ARM Architecture" document. */
2485
2486 static struct arm_prologue_cache *
2487 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2488 {
2489 CORE_ADDR vsp = 0;
2490 int vsp_valid = 0;
2491
2492 struct arm_prologue_cache *cache;
2493 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2494 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2495
2496 for (;;)
2497 {
2498 gdb_byte insn;
2499
2500 /* Whenever we reload SP, we have to retrieve its actual value
2501 in the current frame. */
2502 if (!vsp_valid)
2503 {
2504 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2505 {
2506 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2507 vsp = get_frame_register_unsigned (this_frame, reg);
2508 }
2509 else
2510 {
2511 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2512 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2513 }
2514
2515 vsp_valid = 1;
2516 }
2517
2518 /* Decode next unwind instruction. */
2519 insn = *entry++;
2520
2521 if ((insn & 0xc0) == 0)
2522 {
2523 int offset = insn & 0x3f;
2524 vsp += (offset << 2) + 4;
2525 }
2526 else if ((insn & 0xc0) == 0x40)
2527 {
2528 int offset = insn & 0x3f;
2529 vsp -= (offset << 2) + 4;
2530 }
2531 else if ((insn & 0xf0) == 0x80)
2532 {
2533 int mask = ((insn & 0xf) << 8) | *entry++;
2534 int i;
2535
2536 /* The special case of an all-zero mask identifies
2537 "Refuse to unwind". We return NULL to fall back
2538 to the prologue analyzer. */
2539 if (mask == 0)
2540 return NULL;
2541
2542 /* Pop registers r4..r15 under mask. */
2543 for (i = 0; i < 12; i++)
2544 if (mask & (1 << i))
2545 {
2546 cache->saved_regs[4 + i].addr = vsp;
2547 vsp += 4;
2548 }
2549
2550 /* Special-case popping SP -- we need to reload vsp. */
2551 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2552 vsp_valid = 0;
2553 }
2554 else if ((insn & 0xf0) == 0x90)
2555 {
2556 int reg = insn & 0xf;
2557
2558 /* Reserved cases. */
2559 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2560 return NULL;
2561
2562 /* Set SP from another register and mark VSP for reload. */
2563 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2564 vsp_valid = 0;
2565 }
2566 else if ((insn & 0xf0) == 0xa0)
2567 {
2568 int count = insn & 0x7;
2569 int pop_lr = (insn & 0x8) != 0;
2570 int i;
2571
2572 /* Pop r4..r[4+count]. */
2573 for (i = 0; i <= count; i++)
2574 {
2575 cache->saved_regs[4 + i].addr = vsp;
2576 vsp += 4;
2577 }
2578
2579 /* If indicated by flag, pop LR as well. */
2580 if (pop_lr)
2581 {
2582 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2583 vsp += 4;
2584 }
2585 }
2586 else if (insn == 0xb0)
2587 {
2588 /* We could only have updated PC by popping into it; if so, it
2589 will show up as an address. Otherwise, copy LR into PC. */
2590 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2591 cache->saved_regs[ARM_PC_REGNUM]
2592 = cache->saved_regs[ARM_LR_REGNUM];
2593
2594 /* We're done. */
2595 break;
2596 }
2597 else if (insn == 0xb1)
2598 {
2599 int mask = *entry++;
2600 int i;
2601
2602 /* An all-zero mask, or a mask >= 16, is "spare". */
2603 if (mask == 0 || mask >= 16)
2604 return NULL;
2605
2606 /* Pop r0..r3 under mask. */
2607 for (i = 0; i < 4; i++)
2608 if (mask & (1 << i))
2609 {
2610 cache->saved_regs[i].addr = vsp;
2611 vsp += 4;
2612 }
2613 }
2614 else if (insn == 0xb2)
2615 {
2616 ULONGEST offset = 0;
2617 unsigned shift = 0;
2618
2619 do
2620 {
2621 offset |= (*entry & 0x7f) << shift;
2622 shift += 7;
2623 }
2624 while (*entry++ & 0x80);
2625
2626 vsp += 0x204 + (offset << 2);
2627 }
2628 else if (insn == 0xb3)
2629 {
2630 int start = *entry >> 4;
2631 int count = (*entry++) & 0xf;
2632 int i;
2633
2634 /* Only registers D0..D15 are valid here. */
2635 if (start + count >= 16)
2636 return NULL;
2637
2638 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2639 for (i = 0; i <= count; i++)
2640 {
2641 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2642 vsp += 8;
2643 }
2644
2645 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2646 vsp += 4;
2647 }
2648 else if ((insn & 0xf8) == 0xb8)
2649 {
2650 int count = insn & 0x7;
2651 int i;
2652
2653 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2654 for (i = 0; i <= count; i++)
2655 {
2656 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2657 vsp += 8;
2658 }
2659
2660 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2661 vsp += 4;
2662 }
2663 else if (insn == 0xc6)
2664 {
2665 int start = *entry >> 4;
2666 int count = (*entry++) & 0xf;
2667 int i;
2668
2669 /* Only registers WR0..WR15 are valid. */
2670 if (start + count >= 16)
2671 return NULL;
2672
2673 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2674 for (i = 0; i <= count; i++)
2675 {
2676 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2677 vsp += 8;
2678 }
2679 }
2680 else if (insn == 0xc7)
2681 {
2682 int mask = *entry++;
2683 int i;
2684
2685 /* An all-zero mask, or a mask >= 16, is "spare". */
2686 if (mask == 0 || mask >= 16)
2687 return NULL;
2688
2689 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2690 for (i = 0; i < 4; i++)
2691 if (mask & (1 << i))
2692 {
2693 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2694 vsp += 4;
2695 }
2696 }
2697 else if ((insn & 0xf8) == 0xc0)
2698 {
2699 int count = insn & 0x7;
2700 int i;
2701
2702 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2703 for (i = 0; i <= count; i++)
2704 {
2705 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2706 vsp += 8;
2707 }
2708 }
2709 else if (insn == 0xc8)
2710 {
2711 int start = *entry >> 4;
2712 int count = (*entry++) & 0xf;
2713 int i;
2714
2715 /* Only registers D0..D31 are valid. */
2716 if (start + count >= 16)
2717 return NULL;
2718
2719 /* Pop VFP double-precision registers
2720 D[16+start]..D[16+start+count]. */
2721 for (i = 0; i <= count; i++)
2722 {
2723 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2724 vsp += 8;
2725 }
2726 }
2727 else if (insn == 0xc9)
2728 {
2729 int start = *entry >> 4;
2730 int count = (*entry++) & 0xf;
2731 int i;
2732
2733 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2734 for (i = 0; i <= count; i++)
2735 {
2736 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2737 vsp += 8;
2738 }
2739 }
2740 else if ((insn & 0xf8) == 0xd0)
2741 {
2742 int count = insn & 0x7;
2743 int i;
2744
2745 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2746 for (i = 0; i <= count; i++)
2747 {
2748 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2749 vsp += 8;
2750 }
2751 }
2752 else
2753 {
2754 /* Everything else is "spare". */
2755 return NULL;
2756 }
2757 }
2758
2759 /* If we restore SP from a register, assume this was the frame register.
2760 Otherwise just fall back to SP as frame register. */
2761 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2762 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2763 else
2764 cache->framereg = ARM_SP_REGNUM;
2765
2766 /* Determine offset to previous frame. */
2767 cache->framesize
2768 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2769
2770 /* We already got the previous SP. */
2771 cache->prev_sp = vsp;
2772
2773 return cache;
2774 }
2775
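/* Illustrative data, not referenced by GDB: a typical short-form unwind
   sequence and how arm_exidx_fill_cache above interprets it, assuming a
   frame set up by "push {r4, lr}; sub sp, #12".  */

static const gdb_byte arm_exidx_example_insns[] =
{
  0x02,		/* vsp += (2 << 2) + 4 = 12: undo "sub sp, #12".  */
  0xa8,		/* Pop r4 at vsp and LR at vsp + 4; vsp += 8.  */
  0xb0		/* Finish: PC was not popped, so it is copied from LR.  */
};
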
2776 /* Unwinding via ARM exception table entries. Note that the sniffer
2777 already computes a filled-in prologue cache, which is then used
2778 with the same arm_prologue_this_id and arm_prologue_prev_register
2779 routines also used for prologue-parsing based unwinding. */
2780
2781 static int
2782 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2783 struct frame_info *this_frame,
2784 void **this_prologue_cache)
2785 {
2786 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2787 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2788 CORE_ADDR addr_in_block, exidx_region, func_start;
2789 struct arm_prologue_cache *cache;
2790 gdb_byte *entry;
2791
2792 /* See if we have an ARM exception table entry covering this address. */
2793 addr_in_block = get_frame_address_in_block (this_frame);
2794 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2795 if (!entry)
2796 return 0;
2797
2798 /* The ARM exception table does not describe unwind information
2799 for arbitrary PC values, but is guaranteed to be correct only
2800 at call sites. We have to decide here whether we want to use
2801 ARM exception table information for this frame, or fall back
2802 to using prologue parsing. (Note that if we have DWARF CFI,
2803 this sniffer isn't even called -- CFI is always preferred.)
2804
2805 Before we make this decision, however, we check whether we
2806 actually have *symbol* information for the current frame.
2807 If not, prologue parsing would not work anyway, so we might
2808 as well use the exception table and hope for the best. */
2809 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2810 {
2811 int exc_valid = 0;
2812
2813 /* If the next frame is "normal", we are at a call site in this
2814 frame, so exception information is guaranteed to be valid. */
2815 if (get_next_frame (this_frame)
2816 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2817 exc_valid = 1;
2818
2819 /* We also assume exception information is valid if we're currently
2820 blocked in a system call. The system library is supposed to
2821 ensure this, so that e.g. pthread cancellation works. */
2822 if (arm_frame_is_thumb (this_frame))
2823 {
2824 LONGEST insn;
2825
2826 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2827 byte_order_for_code, &insn)
2828 && (insn & 0xff00) == 0xdf00 /* svc */)
2829 exc_valid = 1;
2830 }
2831 else
2832 {
2833 LONGEST insn;
2834
2835 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2836 byte_order_for_code, &insn)
2837 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2838 exc_valid = 1;
2839 }
2840
2841 /* Bail out if we don't know that exception information is valid. */
2842 if (!exc_valid)
2843 return 0;
2844
2845 /* The ARM exception index does not mark the *end* of the region
2846 covered by the entry, and some functions will not have any entry.
2847 To correctly recognize the end of the covered region, the linker
2848 should have inserted dummy records with a CANTUNWIND marker.
2849
2850 Unfortunately, current versions of GNU ld do not reliably do
2851 this, and thus we may have found an incorrect entry above.
2852 As a (temporary) sanity check, we only use the entry if it
2853 lies *within* the bounds of the function. Note that this check
2854 might reject perfectly valid entries that just happen to cover
2855 multiple functions; therefore this check ought to be removed
2856 once the linker is fixed. */
2857 if (func_start > exidx_region)
2858 return 0;
2859 }
2860
2861 /* Decode the list of unwinding instructions into a prologue cache.
2862 Note that this may fail due to e.g. a "refuse to unwind" code. */
2863 cache = arm_exidx_fill_cache (this_frame, entry);
2864 if (!cache)
2865 return 0;
2866
2867 *this_prologue_cache = cache;
2868 return 1;
2869 }
2870
2871 struct frame_unwind arm_exidx_unwind = {
2872 NORMAL_FRAME,
2873 default_frame_unwind_stop_reason,
2874 arm_prologue_this_id,
2875 arm_prologue_prev_register,
2876 NULL,
2877 arm_exidx_unwind_sniffer
2878 };
2879
2880 static struct arm_prologue_cache *
2881 arm_make_stub_cache (struct frame_info *this_frame)
2882 {
2883 struct arm_prologue_cache *cache;
2884
2885 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2886 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2887
2888 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2889
2890 return cache;
2891 }
2892
2893 /* Our frame ID for a stub frame is the current SP and LR. */
2894
2895 static void
2896 arm_stub_this_id (struct frame_info *this_frame,
2897 void **this_cache,
2898 struct frame_id *this_id)
2899 {
2900 struct arm_prologue_cache *cache;
2901
2902 if (*this_cache == NULL)
2903 *this_cache = arm_make_stub_cache (this_frame);
2904 cache = *this_cache;
2905
2906 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2907 }
2908
2909 static int
2910 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2911 struct frame_info *this_frame,
2912 void **this_prologue_cache)
2913 {
2914 CORE_ADDR addr_in_block;
2915 gdb_byte dummy[4];
2916
2917 addr_in_block = get_frame_address_in_block (this_frame);
2918 if (in_plt_section (addr_in_block, NULL)
2919 /* We also use the stub unwinder if the target memory is unreadable,
2920 to avoid having the prologue unwinder try to read it. */
2921 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2922 return 1;
2923
2924 return 0;
2925 }
2926
2927 struct frame_unwind arm_stub_unwind = {
2928 NORMAL_FRAME,
2929 default_frame_unwind_stop_reason,
2930 arm_stub_this_id,
2931 arm_prologue_prev_register,
2932 NULL,
2933 arm_stub_unwind_sniffer
2934 };
2935
2936 static CORE_ADDR
2937 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2938 {
2939 struct arm_prologue_cache *cache;
2940
2941 if (*this_cache == NULL)
2942 *this_cache = arm_make_prologue_cache (this_frame);
2943 cache = *this_cache;
2944
2945 return cache->prev_sp - cache->framesize;
2946 }
2947
2948 struct frame_base arm_normal_base = {
2949 &arm_prologue_unwind,
2950 arm_normal_frame_base,
2951 arm_normal_frame_base,
2952 arm_normal_frame_base
2953 };
2954
2955 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2956 dummy frame. The frame ID's base needs to match the TOS value
2957 saved by save_dummy_frame_tos() and returned from
2958 arm_push_dummy_call, and the PC needs to match the dummy frame's
2959 breakpoint. */
2960
2961 static struct frame_id
2962 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2963 {
2964 return frame_id_build (get_frame_register_unsigned (this_frame,
2965 ARM_SP_REGNUM),
2966 get_frame_pc (this_frame));
2967 }
2968
2969 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2970 be used to construct the previous frame's ID, after looking up the
2971 containing function). */
2972
2973 static CORE_ADDR
2974 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2975 {
2976 CORE_ADDR pc;
2977 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2978 return arm_addr_bits_remove (gdbarch, pc);
2979 }
2980
2981 static CORE_ADDR
2982 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2983 {
2984 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2985 }
2986
2987 static struct value *
2988 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2989 int regnum)
2990 {
2991 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2992 CORE_ADDR lr, cpsr;
2993 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2994
2995 switch (regnum)
2996 {
2997 case ARM_PC_REGNUM:
2998 /* The PC is normally copied from the return column, which
2999 describes saves of LR. However, that version may have an
3000 extra bit set to indicate Thumb state. The bit is not
3001 part of the PC. */
3002 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3003 return frame_unwind_got_constant (this_frame, regnum,
3004 arm_addr_bits_remove (gdbarch, lr));
3005
3006 case ARM_PS_REGNUM:
3007 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3008 cpsr = get_frame_register_unsigned (this_frame, regnum);
3009 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3010 if (IS_THUMB_ADDR (lr))
3011 cpsr |= t_bit;
3012 else
3013 cpsr &= ~t_bit;
3014 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3015
3016 default:
3017 internal_error (__FILE__, __LINE__,
3018 _("Unexpected register %d"), regnum);
3019 }
3020 }
3021
3022 static void
3023 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3024 struct dwarf2_frame_state_reg *reg,
3025 struct frame_info *this_frame)
3026 {
3027 switch (regnum)
3028 {
3029 case ARM_PC_REGNUM:
3030 case ARM_PS_REGNUM:
3031 reg->how = DWARF2_FRAME_REG_FN;
3032 reg->loc.fn = arm_dwarf2_prev_register;
3033 break;
3034 case ARM_SP_REGNUM:
3035 reg->how = DWARF2_FRAME_REG_CFA;
3036 break;
3037 }
3038 }
3039
3040 /* Return true if we are in the function's epilogue, i.e. after the
3041 instruction that destroyed the function's stack frame. */
3042
3043 static int
3044 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3045 {
3046 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3047 unsigned int insn, insn2;
3048 int found_return = 0, found_stack_adjust = 0;
3049 CORE_ADDR func_start, func_end;
3050 CORE_ADDR scan_pc;
3051 gdb_byte buf[4];
3052
3053 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3054 return 0;
3055
3056 /* The epilogue is a sequence of instructions along the following lines:
3057
3058 - add stack frame size to SP or FP
3059 - [if frame pointer used] restore SP from FP
3060 - restore registers from SP [may include PC]
3061 - a return-type instruction [if PC wasn't already restored]
3062
3063 In a first pass, we scan forward from the current PC and verify the
3064 instructions we find as compatible with this sequence, ending in a
3065 return instruction.
3066
3067 However, this is not sufficient to distinguish indirect function calls
3068 within a function from indirect tail calls in the epilogue in some cases.
3069 Therefore, if we didn't already find any SP-changing instruction during
3070 forward scan, we add a backward scanning heuristic to ensure we actually
3071 are in the epilogue. */
3072
3073 scan_pc = pc;
3074 while (scan_pc < func_end && !found_return)
3075 {
3076 if (target_read_memory (scan_pc, buf, 2))
3077 break;
3078
3079 scan_pc += 2;
3080 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3081
3082 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3083 found_return = 1;
3084 else if (insn == 0x46f7) /* mov pc, lr */
3085 found_return = 1;
3086 else if (insn == 0x46bd) /* mov sp, r7 */
3087 found_stack_adjust = 1;
3088 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3089 found_stack_adjust = 1;
3090 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3091 {
3092 found_stack_adjust = 1;
3093 if (insn & 0x0100) /* <registers> include PC. */
3094 found_return = 1;
3095 }
3096 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
3097 {
3098 if (target_read_memory (scan_pc, buf, 2))
3099 break;
3100
3101 scan_pc += 2;
3102 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3103
3104 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3105 {
3106 found_stack_adjust = 1;
3107 if (insn2 & 0x8000) /* <registers> include PC. */
3108 found_return = 1;
3109 }
3110 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3111 && (insn2 & 0x0fff) == 0x0b04)
3112 {
3113 found_stack_adjust = 1;
3114 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3115 found_return = 1;
3116 }
3117 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3118 && (insn2 & 0x0e00) == 0x0a00)
3119 found_stack_adjust = 1;
3120 else
3121 break;
3122 }
3123 else
3124 break;
3125 }
3126
3127 if (!found_return)
3128 return 0;
3129
3130 /* Since any instruction in the epilogue sequence, with the possible
3131 exception of return itself, updates the stack pointer, we need to
3132 scan backwards for at most one instruction. Try either a 16-bit or
3133 a 32-bit instruction. This is just a heuristic, so we do not worry
3134 too much about false positives. */
3135
3136 if (!found_stack_adjust)
3137 {
3138 if (pc - 4 < func_start)
3139 return 0;
3140 if (target_read_memory (pc - 4, buf, 4))
3141 return 0;
3142
3143 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3144 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3145
3146 if (insn2 == 0x46bd) /* mov sp, r7 */
3147 found_stack_adjust = 1;
3148 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3149 found_stack_adjust = 1;
3150 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3151 found_stack_adjust = 1;
3152 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3153 found_stack_adjust = 1;
3154 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3155 && (insn2 & 0x0fff) == 0x0b04)
3156 found_stack_adjust = 1;
3157 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3158 && (insn2 & 0x0e00) == 0x0a00)
3159 found_stack_adjust = 1;
3160 }
3161
3162 return found_stack_adjust;
3163 }
3164
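/* Illustrative data, not referenced by GDB: a Thumb epilogue that the
   forward scan above recognizes.  The "pop" both adjusts the stack and
   restores PC, so a single halfword sets found_stack_adjust as well as
   found_return.  */

static const unsigned short thumb_epilogue_example[] =
{
  0xb002,	/* add sp, #8 -- matches (insn & 0xff00) == 0xb000.  */
  0xbd90	/* pop {r4, r7, pc} -- matches (insn & 0xfe00) == 0xbc00.  */
};
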
3165 /* Return true if we are in the function's epilogue, i.e. after the
3166 instruction that destroyed the function's stack frame. */
3167
3168 static int
3169 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3170 {
3171 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3172 unsigned int insn;
3173 int found_return, found_stack_adjust;
3174 CORE_ADDR func_start, func_end;
3175
3176 if (arm_pc_is_thumb (gdbarch, pc))
3177 return thumb_in_function_epilogue_p (gdbarch, pc);
3178
3179 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3180 return 0;
3181
3182 /* We are in the epilogue if the previous instruction was a stack
3183 adjustment and the next instruction is a possible return (bx, mov
3184 pc, or pop). We could have to scan backwards to find the stack
3185 adjustment, or forwards to find the return, but this is a decent
3186 approximation. First scan forwards. */
3187
3188 found_return = 0;
3189 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3190 if (bits (insn, 28, 31) != INST_NV)
3191 {
3192 if ((insn & 0x0ffffff0) == 0x012fff10)
3193 /* BX. */
3194 found_return = 1;
3195 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3196 /* MOV PC. */
3197 found_return = 1;
3198 else if ((insn & 0x0fff0000) == 0x08bd0000
3199 && (insn & 0x0000c000) != 0)
3200 /* POP (LDMIA), including PC or LR. */
3201 found_return = 1;
3202 }
3203
3204 if (!found_return)
3205 return 0;
3206
3207 /* Scan backwards. This is just a heuristic, so do not worry about
3208 false positives from mode changes. */
3209
3210 if (pc < func_start + 4)
3211 return 0;
3212
3213 found_stack_adjust = 0;
3214 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3215 if (bits (insn, 28, 31) != INST_NV)
3216 {
3217 if ((insn & 0x0df0f000) == 0x0080d000)
3218 /* ADD SP (register or immediate). */
3219 found_stack_adjust = 1;
3220 else if ((insn & 0x0df0f000) == 0x0040d000)
3221 /* SUB SP (register or immediate). */
3222 found_stack_adjust = 1;
3223 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3224 /* MOV SP. */
3225 found_stack_adjust = 1;
3226 else if ((insn & 0x0fff0000) == 0x08bd0000)
3227 /* POP (LDMIA). */
3228 found_stack_adjust = 1;
3229 }
3230
3231 if (found_stack_adjust)
3232 return 1;
3233
3234 return 0;
3235 }
3236
3237
3238 /* When arguments must be pushed onto the stack, they go on in reverse
3239 order. The code below implements a FILO (stack) to do this. */
3240
3241 struct stack_item
3242 {
3243 int len;
3244 struct stack_item *prev;
3245 void *data;
3246 };
3247
3248 static struct stack_item *
3249 push_stack_item (struct stack_item *prev, const void *contents, int len)
3250 {
3251 struct stack_item *si;
3252 si = xmalloc (sizeof (struct stack_item));
3253 si->data = xmalloc (len);
3254 si->len = len;
3255 si->prev = prev;
3256 memcpy (si->data, contents, len);
3257 return si;
3258 }
3259
3260 static struct stack_item *
3261 pop_stack_item (struct stack_item *si)
3262 {
3263 struct stack_item *dead = si;
3264 si = si->prev;
3265 xfree (dead->data);
3266 xfree (dead);
3267 return si;
3268 }
3269
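/* Minimal usage sketch (illustrative only, not called anywhere): items
   pushed onto a stack_item list come back off in reverse order, which is
   what arm_push_dummy_call below relies on when laying out stack
   arguments.  */

static void
arm_stack_item_example (void)
{
  static const gdb_byte first[4] = { 1, 0, 0, 0 };
  static const gdb_byte second[4] = { 2, 0, 0, 0 };
  struct stack_item *si = NULL;

  si = push_stack_item (si, first, 4);
  si = push_stack_item (si, second, 4);

  /* The list head now holds SECOND; popping twice frees both items
     and returns NULL.  */
  si = pop_stack_item (si);
  si = pop_stack_item (si);
}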
3270
3271 /* Return the alignment (in bytes) of the given type. */
3272
3273 static int
3274 arm_type_align (struct type *t)
3275 {
3276 int n;
3277 int align;
3278 int falign;
3279
3280 t = check_typedef (t);
3281 switch (TYPE_CODE (t))
3282 {
3283 default:
3284 /* Should never happen. */
3285 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3286 return 4;
3287
3288 case TYPE_CODE_PTR:
3289 case TYPE_CODE_ENUM:
3290 case TYPE_CODE_INT:
3291 case TYPE_CODE_FLT:
3292 case TYPE_CODE_SET:
3293 case TYPE_CODE_RANGE:
3294 case TYPE_CODE_BITSTRING:
3295 case TYPE_CODE_REF:
3296 case TYPE_CODE_CHAR:
3297 case TYPE_CODE_BOOL:
3298 return TYPE_LENGTH (t);
3299
3300 case TYPE_CODE_ARRAY:
3301 case TYPE_CODE_COMPLEX:
3302 /* TODO: What about vector types? */
3303 return arm_type_align (TYPE_TARGET_TYPE (t));
3304
3305 case TYPE_CODE_STRUCT:
3306 case TYPE_CODE_UNION:
3307 align = 1;
3308 for (n = 0; n < TYPE_NFIELDS (t); n++)
3309 {
3310 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3311 if (falign > align)
3312 align = falign;
3313 }
3314 return align;
3315 }
3316 }
3317
3318 /* Possible base types for a candidate for passing and returning in
3319 VFP registers. */
3320
3321 enum arm_vfp_cprc_base_type
3322 {
3323 VFP_CPRC_UNKNOWN,
3324 VFP_CPRC_SINGLE,
3325 VFP_CPRC_DOUBLE,
3326 VFP_CPRC_VEC64,
3327 VFP_CPRC_VEC128
3328 };
3329
3330 /* The length of one element of base type B. */
3331
3332 static unsigned
3333 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3334 {
3335 switch (b)
3336 {
3337 case VFP_CPRC_SINGLE:
3338 return 4;
3339 case VFP_CPRC_DOUBLE:
3340 return 8;
3341 case VFP_CPRC_VEC64:
3342 return 8;
3343 case VFP_CPRC_VEC128:
3344 return 16;
3345 default:
3346 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3347 (int) b);
3348 }
3349 }
3350
3351 /* The character ('s', 'd' or 'q') for the type of VFP register used
3352 for passing base type B. */
3353
3354 static int
3355 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3356 {
3357 switch (b)
3358 {
3359 case VFP_CPRC_SINGLE:
3360 return 's';
3361 case VFP_CPRC_DOUBLE:
3362 return 'd';
3363 case VFP_CPRC_VEC64:
3364 return 'd';
3365 case VFP_CPRC_VEC128:
3366 return 'q';
3367 default:
3368 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3369 (int) b);
3370 }
3371 }
3372
3373 /* Determine whether T may be part of a candidate for passing and
3374 returning in VFP registers, ignoring the limit on the total number
3375 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3376 classification of the first valid component found; if it is not
3377 VFP_CPRC_UNKNOWN, all components must have the same classification
3378 as *BASE_TYPE. If it is found that T contains a type not permitted
3379 for passing and returning in VFP registers, a type differently
3380 classified from *BASE_TYPE, or two types differently classified
3381 from each other, return -1, otherwise return the total number of
3382 base-type elements found (possibly 0 in an empty structure or
3383 array). Vectors and complex types are not currently supported,
3384 matching the generic AAPCS support. */
3385
3386 static int
3387 arm_vfp_cprc_sub_candidate (struct type *t,
3388 enum arm_vfp_cprc_base_type *base_type)
3389 {
3390 t = check_typedef (t);
3391 switch (TYPE_CODE (t))
3392 {
3393 case TYPE_CODE_FLT:
3394 switch (TYPE_LENGTH (t))
3395 {
3396 case 4:
3397 if (*base_type == VFP_CPRC_UNKNOWN)
3398 *base_type = VFP_CPRC_SINGLE;
3399 else if (*base_type != VFP_CPRC_SINGLE)
3400 return -1;
3401 return 1;
3402
3403 case 8:
3404 if (*base_type == VFP_CPRC_UNKNOWN)
3405 *base_type = VFP_CPRC_DOUBLE;
3406 else if (*base_type != VFP_CPRC_DOUBLE)
3407 return -1;
3408 return 1;
3409
3410 default:
3411 return -1;
3412 }
3413 break;
3414
3415 case TYPE_CODE_ARRAY:
3416 {
3417 int count;
3418 unsigned unitlen;
3419 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3420 if (count == -1)
3421 return -1;
3422 if (TYPE_LENGTH (t) == 0)
3423 {
3424 gdb_assert (count == 0);
3425 return 0;
3426 }
3427 else if (count == 0)
3428 return -1;
3429 unitlen = arm_vfp_cprc_unit_length (*base_type);
3430 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3431 return TYPE_LENGTH (t) / unitlen;
3432 }
3433 break;
3434
3435 case TYPE_CODE_STRUCT:
3436 {
3437 int count = 0;
3438 unsigned unitlen;
3439 int i;
3440 for (i = 0; i < TYPE_NFIELDS (t); i++)
3441 {
3442 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3443 base_type);
3444 if (sub_count == -1)
3445 return -1;
3446 count += sub_count;
3447 }
3448 if (TYPE_LENGTH (t) == 0)
3449 {
3450 gdb_assert (count == 0);
3451 return 0;
3452 }
3453 else if (count == 0)
3454 return -1;
3455 unitlen = arm_vfp_cprc_unit_length (*base_type);
3456 if (TYPE_LENGTH (t) != unitlen * count)
3457 return -1;
3458 return count;
3459 }
3460
3461 case TYPE_CODE_UNION:
3462 {
3463 int count = 0;
3464 unsigned unitlen;
3465 int i;
3466 for (i = 0; i < TYPE_NFIELDS (t); i++)
3467 {
3468 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3469 base_type);
3470 if (sub_count == -1)
3471 return -1;
3472 count = (count > sub_count ? count : sub_count);
3473 }
3474 if (TYPE_LENGTH (t) == 0)
3475 {
3476 gdb_assert (count == 0);
3477 return 0;
3478 }
3479 else if (count == 0)
3480 return -1;
3481 unitlen = arm_vfp_cprc_unit_length (*base_type);
3482 if (TYPE_LENGTH (t) != unitlen * count)
3483 return -1;
3484 return count;
3485 }
3486
3487 default:
3488 break;
3489 }
3490
3491 return -1;
3492 }
3493
3494 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3495 if passed to or returned from a non-variadic function with the VFP
3496 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3497 *BASE_TYPE to the base type for T and *COUNT to the number of
3498 elements of that base type before returning. */
3499
3500 static int
3501 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3502 int *count)
3503 {
3504 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3505 int c = arm_vfp_cprc_sub_candidate (t, &b);
3506 if (c <= 0 || c > 4)
3507 return 0;
3508 *base_type = b;
3509 *count = c;
3510 return 1;
3511 }
3512
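/* Worked example (hypothetical helper, illustrative only): for a C type
   such as "struct vec2 { float x; float y; }", the test above sets the
   base type to VFP_CPRC_SINGLE with a count of 2, so under the VFP ABI
   the value travels in s0/s1.  Adding an "int" member, or a fifth float,
   makes the type fail the candidate test and fall back to the base
   AAPCS rules.  */

static int
arm_vfp_example_is_two_float_hfa (struct type *vec2_type)
{
  enum arm_vfp_cprc_base_type base;
  int count;

  if (!arm_vfp_call_candidate (vec2_type, &base, &count))
    return 0;

  return base == VFP_CPRC_SINGLE && count == 2;
}
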
3513 /* Return 1 if the VFP ABI should be used for passing arguments to and
3514 returning values from a function of type FUNC_TYPE, 0
3515 otherwise. */
3516
3517 static int
3518 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3519 {
3520 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3521 /* Variadic functions always use the base ABI. Assume that functions
3522 without debug info are not variadic. */
3523 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3524 return 0;
3525 /* The VFP ABI is only supported as a variant of AAPCS. */
3526 if (tdep->arm_abi != ARM_ABI_AAPCS)
3527 return 0;
3528 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3529 }
3530
3531 /* We currently support passing parameters in integer registers, which
3532 conforms with GCC's default model, as well as VFP argument passing
3533 following the VFP variant of the AAPCS. Several other variants exist,
3534 and we should probably support some of them based on the selected ABI. */
3535
3536 static CORE_ADDR
3537 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3538 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3539 struct value **args, CORE_ADDR sp, int struct_return,
3540 CORE_ADDR struct_addr)
3541 {
3542 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3543 int argnum;
3544 int argreg;
3545 int nstack;
3546 struct stack_item *si = NULL;
3547 int use_vfp_abi;
3548 struct type *ftype;
3549 unsigned vfp_regs_free = (1 << 16) - 1;
3550
3551 /* Determine the type of this function and whether the VFP ABI
3552 applies. */
3553 ftype = check_typedef (value_type (function));
3554 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3555 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3556 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3557
3558 /* Set the return address. For the ARM, the return breakpoint is
3559 always at BP_ADDR. */
3560 if (arm_pc_is_thumb (gdbarch, bp_addr))
3561 bp_addr |= 1;
3562 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3563
3564 /* Walk through the list of args and determine how large a temporary
3565 stack is required. Need to take care here as structs may be
3566 passed on the stack, and we have to push them. */
3567 nstack = 0;
3568
3569 argreg = ARM_A1_REGNUM;
3570 nstack = 0;
3571
3572 /* The struct_return pointer occupies the first parameter
3573 passing register. */
3574 if (struct_return)
3575 {
3576 if (arm_debug)
3577 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3578 gdbarch_register_name (gdbarch, argreg),
3579 paddress (gdbarch, struct_addr));
3580 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3581 argreg++;
3582 }
3583
3584 for (argnum = 0; argnum < nargs; argnum++)
3585 {
3586 int len;
3587 struct type *arg_type;
3588 struct type *target_type;
3589 enum type_code typecode;
3590 const bfd_byte *val;
3591 int align;
3592 enum arm_vfp_cprc_base_type vfp_base_type;
3593 int vfp_base_count;
3594 int may_use_core_reg = 1;
3595
3596 arg_type = check_typedef (value_type (args[argnum]));
3597 len = TYPE_LENGTH (arg_type);
3598 target_type = TYPE_TARGET_TYPE (arg_type);
3599 typecode = TYPE_CODE (arg_type);
3600 val = value_contents (args[argnum]);
3601
3602 align = arm_type_align (arg_type);
3603 /* Round alignment up to a whole number of words. */
3604 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3605 /* Different ABIs have different maximum alignments. */
3606 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3607 {
3608 /* The APCS ABI only requires word alignment. */
3609 align = INT_REGISTER_SIZE;
3610 }
3611 else
3612 {
3613 /* The AAPCS requires at most doubleword alignment. */
3614 if (align > INT_REGISTER_SIZE * 2)
3615 align = INT_REGISTER_SIZE * 2;
3616 }
3617
3618 if (use_vfp_abi
3619 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3620 &vfp_base_count))
3621 {
3622 int regno;
3623 int unit_length;
3624 int shift;
3625 unsigned mask;
3626
3627 /* Because this is a CPRC it cannot go in a core register or
3628 cause a core register to be skipped for alignment.
3629 Either it goes in VFP registers and the rest of this loop
3630 iteration is skipped for this argument, or it goes on the
3631 stack (and the stack alignment code is correct for this
3632 case). */
3633 may_use_core_reg = 0;
3634
3635 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3636 shift = unit_length / 4;
3637 mask = (1 << (shift * vfp_base_count)) - 1;
3638 for (regno = 0; regno < 16; regno += shift)
3639 if (((vfp_regs_free >> regno) & mask) == mask)
3640 break;
3641
3642 if (regno < 16)
3643 {
3644 int reg_char;
3645 int reg_scaled;
3646 int i;
3647
3648 vfp_regs_free &= ~(mask << regno);
3649 reg_scaled = regno / shift;
3650 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3651 for (i = 0; i < vfp_base_count; i++)
3652 {
3653 char name_buf[4];
3654 int regnum;
3655 if (reg_char == 'q')
3656 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3657 val + i * unit_length);
3658 else
3659 {
3660 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3661 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3662 strlen (name_buf));
3663 regcache_cooked_write (regcache, regnum,
3664 val + i * unit_length);
3665 }
3666 }
3667 continue;
3668 }
3669 else
3670 {
3671 /* This CPRC could not go in VFP registers, so all VFP
3672 registers are now marked as used. */
3673 vfp_regs_free = 0;
3674 }
3675 }
3676
3677 /* Push stack padding for doubleword alignment. */
3678 if (nstack & (align - 1))
3679 {
3680 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3681 nstack += INT_REGISTER_SIZE;
3682 }
3683
3684 /* Doubleword aligned quantities must go in even register pairs. */
3685 if (may_use_core_reg
3686 && argreg <= ARM_LAST_ARG_REGNUM
3687 && align > INT_REGISTER_SIZE
3688 && argreg & 1)
3689 argreg++;
3690
3691 /* If the argument is a pointer to a function, and it is a
3692 Thumb function, create a LOCAL copy of the value and set
3693 the THUMB bit in it. */
3694 if (TYPE_CODE_PTR == typecode
3695 && target_type != NULL
3696 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3697 {
3698 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3699 if (arm_pc_is_thumb (gdbarch, regval))
3700 {
3701 bfd_byte *copy = alloca (len);
3702 store_unsigned_integer (copy, len, byte_order,
3703 MAKE_THUMB_ADDR (regval));
3704 val = copy;
3705 }
3706 }
3707
3708 /* Copy the argument to general registers or the stack in
3709 register-sized pieces. Large arguments are split between
3710 registers and stack. */
3711 while (len > 0)
3712 {
3713 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3714
3715 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3716 {
3717 /* The argument is being passed in a general purpose
3718 register. */
3719 CORE_ADDR regval
3720 = extract_unsigned_integer (val, partial_len, byte_order);
3721 if (byte_order == BFD_ENDIAN_BIG)
3722 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3723 if (arm_debug)
3724 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3725 argnum,
3726 gdbarch_register_name
3727 (gdbarch, argreg),
3728 phex (regval, INT_REGISTER_SIZE));
3729 regcache_cooked_write_unsigned (regcache, argreg, regval);
3730 argreg++;
3731 }
3732 else
3733 {
3734 /* Push the arguments onto the stack. */
3735 if (arm_debug)
3736 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3737 argnum, nstack);
3738 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3739 nstack += INT_REGISTER_SIZE;
3740 }
3741
3742 len -= partial_len;
3743 val += partial_len;
3744 }
3745 }
3746 /* If we have an odd number of words to push, then decrement the stack
3747 by one word now, so that the first stack argument will be doubleword aligned. */
3748 if (nstack & 4)
3749 sp -= 4;
3750
3751 while (si)
3752 {
3753 sp -= si->len;
3754 write_memory (sp, si->data, si->len);
3755 si = pop_stack_item (si);
3756 }
3757
3758 /* Finally, update the SP register. */
3759 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3760
3761 return sp;
3762 }
3763
3764
3765 /* Always align the frame to an 8-byte boundary. This is required on
3766 some platforms and harmless on the rest. */
3767
3768 static CORE_ADDR
3769 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3770 {
3771 /* Align the stack to eight bytes. */
3772 return sp & ~ (CORE_ADDR) 7;
3773 }
3774
3775 static void
3776 print_fpu_flags (int flags)
3777 {
3778 if (flags & (1 << 0))
3779 fputs ("IVO ", stdout);
3780 if (flags & (1 << 1))
3781 fputs ("DVZ ", stdout);
3782 if (flags & (1 << 2))
3783 fputs ("OFL ", stdout);
3784 if (flags & (1 << 3))
3785 fputs ("UFL ", stdout);
3786 if (flags & (1 << 4))
3787 fputs ("INX ", stdout);
3788 putchar ('\n');
3789 }
3790
3791 /* Print interesting information about the floating point processor
3792 (if present) or emulator. */
3793 static void
3794 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3795 struct frame_info *frame, const char *args)
3796 {
3797 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3798 int type;
3799
3800 type = (status >> 24) & 127;
3801 if (status & (1 << 31))
3802 printf (_("Hardware FPU type %d\n"), type);
3803 else
3804 printf (_("Software FPU type %d\n"), type);
3805 /* i18n: [floating point unit] mask */
3806 fputs (_("mask: "), stdout);
3807 print_fpu_flags (status >> 16);
3808 /* i18n: [floating point unit] flags */
3809 fputs (_("flags: "), stdout);
3810 print_fpu_flags (status);
3811 }
3812
3813 /* Construct the ARM extended floating point type. */
3814 static struct type *
3815 arm_ext_type (struct gdbarch *gdbarch)
3816 {
3817 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3818
3819 if (!tdep->arm_ext_type)
3820 tdep->arm_ext_type
3821 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3822 floatformats_arm_ext);
3823
3824 return tdep->arm_ext_type;
3825 }
3826
3827 static struct type *
3828 arm_neon_double_type (struct gdbarch *gdbarch)
3829 {
3830 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3831
3832 if (tdep->neon_double_type == NULL)
3833 {
3834 struct type *t, *elem;
3835
3836 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3837 TYPE_CODE_UNION);
3838 elem = builtin_type (gdbarch)->builtin_uint8;
3839 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3840 elem = builtin_type (gdbarch)->builtin_uint16;
3841 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3842 elem = builtin_type (gdbarch)->builtin_uint32;
3843 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3844 elem = builtin_type (gdbarch)->builtin_uint64;
3845 append_composite_type_field (t, "u64", elem);
3846 elem = builtin_type (gdbarch)->builtin_float;
3847 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3848 elem = builtin_type (gdbarch)->builtin_double;
3849 append_composite_type_field (t, "f64", elem);
3850
3851 TYPE_VECTOR (t) = 1;
3852 TYPE_NAME (t) = "neon_d";
3853 tdep->neon_double_type = t;
3854 }
3855
3856 return tdep->neon_double_type;
3857 }
3858
3859 /* FIXME: The vector types are not correctly ordered on big-endian
3860 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3861 bits of d0 - regardless of what unit size is being held in d0. So
3862 the offset of the first uint8 in d0 is 7, but the offset of the
3863 first float is 4. This code works as-is for little-endian
3864 targets. */
3865
3866 static struct type *
3867 arm_neon_quad_type (struct gdbarch *gdbarch)
3868 {
3869 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3870
3871 if (tdep->neon_quad_type == NULL)
3872 {
3873 struct type *t, *elem;
3874
3875 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3876 TYPE_CODE_UNION);
3877 elem = builtin_type (gdbarch)->builtin_uint8;
3878 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3879 elem = builtin_type (gdbarch)->builtin_uint16;
3880 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3881 elem = builtin_type (gdbarch)->builtin_uint32;
3882 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3883 elem = builtin_type (gdbarch)->builtin_uint64;
3884 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3885 elem = builtin_type (gdbarch)->builtin_float;
3886 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3887 elem = builtin_type (gdbarch)->builtin_double;
3888 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3889
3890 TYPE_VECTOR (t) = 1;
3891 TYPE_NAME (t) = "neon_q";
3892 tdep->neon_quad_type = t;
3893 }
3894
3895 return tdep->neon_quad_type;
3896 }
3897
3898 /* Return the GDB type object for the "standard" data type of data in
3899 register N. */
3900
3901 static struct type *
3902 arm_register_type (struct gdbarch *gdbarch, int regnum)
3903 {
3904 int num_regs = gdbarch_num_regs (gdbarch);
3905
3906 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3907 && regnum >= num_regs && regnum < num_regs + 32)
3908 return builtin_type (gdbarch)->builtin_float;
3909
3910 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3911 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3912 return arm_neon_quad_type (gdbarch);
3913
3914 /* If the target description has register information, we are only
3915 in this function so that we can override the types of
3916 double-precision registers for NEON. */
3917 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3918 {
3919 struct type *t = tdesc_register_type (gdbarch, regnum);
3920
3921 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3922 && TYPE_CODE (t) == TYPE_CODE_FLT
3923 && gdbarch_tdep (gdbarch)->have_neon)
3924 return arm_neon_double_type (gdbarch);
3925 else
3926 return t;
3927 }
3928
3929 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3930 {
3931 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3932 return builtin_type (gdbarch)->builtin_void;
3933
3934 return arm_ext_type (gdbarch);
3935 }
3936 else if (regnum == ARM_SP_REGNUM)
3937 return builtin_type (gdbarch)->builtin_data_ptr;
3938 else if (regnum == ARM_PC_REGNUM)
3939 return builtin_type (gdbarch)->builtin_func_ptr;
3940 else if (regnum >= ARRAY_SIZE (arm_register_names))
3941 /* These registers are only supported on targets which supply
3942 an XML description. */
3943 return builtin_type (gdbarch)->builtin_int0;
3944 else
3945 return builtin_type (gdbarch)->builtin_uint32;
3946 }
3947
3948 /* Map a DWARF register REGNUM onto the appropriate GDB register
3949 number. */
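 /* For example, DWARF register 66 maps to the single-precision VFP
 register s2, and DWARF register 257 maps to the double-precision
 register d1. */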
3950
3951 static int
3952 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3953 {
3954 /* Core integer regs. */
3955 if (reg >= 0 && reg <= 15)
3956 return reg;
3957
3958 /* Legacy FPA encoding. These were once used in a way which
3959 overlapped with VFP register numbering, so their use is
3960 discouraged, but GDB doesn't support the ARM toolchain
3961 which used them for VFP. */
3962 if (reg >= 16 && reg <= 23)
3963 return ARM_F0_REGNUM + reg - 16;
3964
3965 /* New assignments for the FPA registers. */
3966 if (reg >= 96 && reg <= 103)
3967 return ARM_F0_REGNUM + reg - 96;
3968
3969 /* WMMX register assignments. */
3970 if (reg >= 104 && reg <= 111)
3971 return ARM_WCGR0_REGNUM + reg - 104;
3972
3973 if (reg >= 112 && reg <= 127)
3974 return ARM_WR0_REGNUM + reg - 112;
3975
3976 if (reg >= 192 && reg <= 199)
3977 return ARM_WC0_REGNUM + reg - 192;
3978
3979 /* VFP v2 registers. A double precision value is actually
3980 in d1 rather than s2, but the ABI only defines numbering
3981 for the single precision registers. This will "just work"
3982 in GDB for little endian targets (we'll read eight bytes,
3983 starting in s0 and then progressing to s1), but will be
3984 reversed on big endian targets with VFP. This won't
3985 be a problem for the new Neon quad registers; you're supposed
3986 to use DW_OP_piece for those. */
3987 if (reg >= 64 && reg <= 95)
3988 {
3989 char name_buf[4];
3990
3991 sprintf (name_buf, "s%d", reg - 64);
3992 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3993 strlen (name_buf));
3994 }
3995
3996 /* VFP v3 / Neon registers. This range is also used for VFP v2
3997 registers, except that it now describes d0 instead of s0. */
3998 if (reg >= 256 && reg <= 287)
3999 {
4000 char name_buf[4];
4001
4002 sprintf (name_buf, "d%d", reg - 256);
4003 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4004 strlen (name_buf));
4005 }
4006
4007 return -1;
4008 }
4009
4010 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4011 static int
4012 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4013 {
4014 int reg = regnum;
4015 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4016
4017 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4018 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4019
4020 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4021 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4022
4023 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4024 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4025
4026 if (reg < NUM_GREGS)
4027 return SIM_ARM_R0_REGNUM + reg;
4028 reg -= NUM_GREGS;
4029
4030 if (reg < NUM_FREGS)
4031 return SIM_ARM_FP0_REGNUM + reg;
4032 reg -= NUM_FREGS;
4033
4034 if (reg < NUM_SREGS)
4035 return SIM_ARM_FPS_REGNUM + reg;
4036 reg -= NUM_SREGS;
4037
4038 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4039 }
4040
4041 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4042 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4043 It is thought that this is the floating-point register format on
4044 little-endian systems. */
4045
4046 static void
4047 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4048 void *dbl, int endianess)
4049 {
4050 DOUBLEST d;
4051
4052 if (endianess == BFD_ENDIAN_BIG)
4053 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4054 else
4055 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4056 ptr, &d);
4057 floatformat_from_doublest (fmt, &d, dbl);
4058 }
4059
4060 static void
4061 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4062 int endianess)
4063 {
4064 DOUBLEST d;
4065
4066 floatformat_to_doublest (fmt, ptr, &d);
4067 if (endianess == BFD_ENDIAN_BIG)
4068 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4069 else
4070 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4071 &d, dbl);
4072 }
4073
4074 static int
4075 condition_true (unsigned long cond, unsigned long status_reg)
4076 {
4077 if (cond == INST_AL || cond == INST_NV)
4078 return 1;
4079
4080 switch (cond)
4081 {
4082 case INST_EQ:
4083 return ((status_reg & FLAG_Z) != 0);
4084 case INST_NE:
4085 return ((status_reg & FLAG_Z) == 0);
4086 case INST_CS:
4087 return ((status_reg & FLAG_C) != 0);
4088 case INST_CC:
4089 return ((status_reg & FLAG_C) == 0);
4090 case INST_MI:
4091 return ((status_reg & FLAG_N) != 0);
4092 case INST_PL:
4093 return ((status_reg & FLAG_N) == 0);
4094 case INST_VS:
4095 return ((status_reg & FLAG_V) != 0);
4096 case INST_VC:
4097 return ((status_reg & FLAG_V) == 0);
4098 case INST_HI:
4099 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4100 case INST_LS:
4101 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4102 case INST_GE:
4103 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4104 case INST_LT:
4105 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4106 case INST_GT:
4107 return (((status_reg & FLAG_Z) == 0)
4108 && (((status_reg & FLAG_N) == 0)
4109 == ((status_reg & FLAG_V) == 0)));
4110 case INST_LE:
4111 return (((status_reg & FLAG_Z) != 0)
4112 || (((status_reg & FLAG_N) == 0)
4113 != ((status_reg & FLAG_V) == 0)));
4114 }
4115 return 1;
4116 }
4117
4118 static unsigned long
4119 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4120 unsigned long pc_val, unsigned long status_reg)
4121 {
4122 unsigned long res, shift;
4123 int rm = bits (inst, 0, 3);
4124 unsigned long shifttype = bits (inst, 5, 6);
4125
4126 if (bit (inst, 4))
4127 {
4128 int rs = bits (inst, 8, 11);
4129 shift = (rs == 15 ? pc_val + 8
4130 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4131 }
4132 else
4133 shift = bits (inst, 7, 11);
4134
4135 res = (rm == ARM_PC_REGNUM
4136 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4137 : get_frame_register_unsigned (frame, rm));
4138
4139 switch (shifttype)
4140 {
4141 case 0: /* LSL */
4142 res = shift >= 32 ? 0 : res << shift;
4143 break;
4144
4145 case 1: /* LSR */
4146 res = shift >= 32 ? 0 : res >> shift;
4147 break;
4148
4149 case 2: /* ASR */
4150 if (shift >= 32)
4151 shift = 31;
4152 res = ((res & 0x80000000L)
4153 ? ~((~res) >> shift) : res >> shift);
4154 break;
4155
4156 case 3: /* ROR/RRX */
4157 shift &= 31;
4158 if (shift == 0)
4159 res = (res >> 1) | (carry ? 0x80000000L : 0);
4160 else
4161 res = (res >> shift) | (res << (32 - shift));
4162 break;
4163 }
4164
4165 return res & 0xffffffff;
4166 }
4167
4168 /* Return number of 1-bits in VAL. */
4169
4170 static int
4171 bitcount (unsigned long val)
4172 {
4173 int nbits;
4174 for (nbits = 0; val != 0; nbits++)
4175 val &= val - 1; /* Delete rightmost 1-bit in val. */
4176 return nbits;
4177 }
4178
4179 /* Return the size in bytes of the complete Thumb instruction whose
4180 first halfword is INST1. */
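 /* A halfword whose five most significant bits are 0b11101, 0b11110 or
 0b11111 is the first half of a 32-bit encoding; any other halfword
 is a complete 16-bit instruction. */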
4181
4182 static int
4183 thumb_insn_size (unsigned short inst1)
4184 {
4185 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4186 return 4;
4187 else
4188 return 2;
4189 }
4190
4191 static int
4192 thumb_advance_itstate (unsigned int itstate)
4193 {
4194 /* Preserve IT[7:5], the first three bits of the condition. Shift
4195 the upcoming condition flags left by one bit. */
4196 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
4197
4198 /* If we have finished the IT block, clear the state. */
4199 if ((itstate & 0x0f) == 0)
4200 itstate = 0;
4201
4202 return itstate;
4203 }
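 /* For example, "ITTE EQ" sets ITSTATE to 0x06: the first (EQ)
 instruction sees 0x06, and successive calls to thumb_advance_itstate
 return 0x0c (EQ), 0x18 (NE) and finally 0 once the three-instruction
 block is complete. */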
4204
4205 /* Find the next PC after the current instruction executes. In some
4206 cases we can not statically determine the answer (see the IT state
4207 handling in this function); in that case, a breakpoint may be
4208 inserted in addition to the returned PC, which will be used to set
4209 another breakpoint by our caller. */
4210
4211 static CORE_ADDR
4212 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4213 {
4214 struct gdbarch *gdbarch = get_frame_arch (frame);
4215 struct address_space *aspace = get_frame_address_space (frame);
4216 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4217 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4218 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4219 unsigned short inst1;
4220 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4221 unsigned long offset;
4222 ULONGEST status, itstate;
4223
4224 nextpc = MAKE_THUMB_ADDR (nextpc);
4225 pc_val = MAKE_THUMB_ADDR (pc_val);
4226
4227 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4228
4229 /* Thumb-2 conditional execution support. There are eight bits in
4230 the CPSR which describe conditional execution state. Once
4231 reconstructed (they're in a funny order), the low five bits
4232 describe the low bit of the condition for each instruction and
4233 how many instructions remain. The high three bits describe the
4234 base condition. One of the low four bits will be set if an IT
4235 block is active. These bits read as zero on earlier
4236 processors. */
4237 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
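 /* The CPSR holds IT[1:0] in bits 26:25 and IT[7:2] in bits 15:10;
 reassemble them into a single ITSTATE byte. */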
4238 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
4239
4240 /* If-Then handling. On GNU/Linux, where this routine is used, we
4241 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4242 can disable execution of the undefined instruction. So we might
4243 miss the breakpoint if we set it on a skipped conditional
4244 instruction. Because conditional instructions can change the
4245 flags, affecting the execution of further instructions, we may
4246 need to set two breakpoints. */
4247
4248 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4249 {
4250 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4251 {
4252 /* An IT instruction. Because this instruction does not
4253 modify the flags, we can accurately predict the next
4254 executed instruction. */
4255 itstate = inst1 & 0x00ff;
4256 pc += thumb_insn_size (inst1);
4257
4258 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4259 {
4260 inst1 = read_memory_unsigned_integer (pc, 2,
4261 byte_order_for_code);
4262 pc += thumb_insn_size (inst1);
4263 itstate = thumb_advance_itstate (itstate);
4264 }
4265
4266 return MAKE_THUMB_ADDR (pc);
4267 }
4268 else if (itstate != 0)
4269 {
4270 /* We are in a conditional block. Check the condition. */
4271 if (! condition_true (itstate >> 4, status))
4272 {
4273 /* Advance to the next executed instruction. */
4274 pc += thumb_insn_size (inst1);
4275 itstate = thumb_advance_itstate (itstate);
4276
4277 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4278 {
4279 inst1 = read_memory_unsigned_integer (pc, 2,
4280 byte_order_for_code);
4281 pc += thumb_insn_size (inst1);
4282 itstate = thumb_advance_itstate (itstate);
4283 }
4284
4285 return MAKE_THUMB_ADDR (pc);
4286 }
4287 else if ((itstate & 0x0f) == 0x08)
4288 {
4289 /* This is the last instruction of the conditional
4290 block, and it is executed. We can handle it normally
4291 because the following instruction is not conditional,
4292 and we must handle it normally because it is
4293 permitted to branch. Fall through. */
4294 }
4295 else
4296 {
4297 int cond_negated;
4298
4299 /* There are conditional instructions after this one.
4300 If this instruction modifies the flags, then we can
4301 not predict what the next executed instruction will
4302 be. Fortunately, this instruction is architecturally
4303 forbidden to branch; we know it will fall through.
4304 Start by skipping past it. */
4305 pc += thumb_insn_size (inst1);
4306 itstate = thumb_advance_itstate (itstate);
4307
4308 /* Set a breakpoint on the following instruction. */
4309 gdb_assert ((itstate & 0x0f) != 0);
4310 arm_insert_single_step_breakpoint (gdbarch, aspace,
4311 MAKE_THUMB_ADDR (pc));
4312 cond_negated = (itstate >> 4) & 1;
4313
4314 /* Skip all following instructions with the same
4315 condition. If there is a later instruction in the IT
4316 block with the opposite condition, set the other
4317 breakpoint there. If not, then set a breakpoint on
4318 the instruction after the IT block. */
4319 do
4320 {
4321 inst1 = read_memory_unsigned_integer (pc, 2,
4322 byte_order_for_code);
4323 pc += thumb_insn_size (inst1);
4324 itstate = thumb_advance_itstate (itstate);
4325 }
4326 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4327
4328 return MAKE_THUMB_ADDR (pc);
4329 }
4330 }
4331 }
4332 else if (itstate & 0x0f)
4333 {
4334 /* We are in a conditional block. Check the condition. */
4335 int cond = itstate >> 4;
4336
4337 if (! condition_true (cond, status))
4338 {
4339 /* Advance to the next instruction. All the 32-bit
4340 instructions share a common prefix. */
4341 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4342 return MAKE_THUMB_ADDR (pc + 4);
4343 else
4344 return MAKE_THUMB_ADDR (pc + 2);
4345 }
4346
4347 /* Otherwise, handle the instruction normally. */
4348 }
4349
4350 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4351 {
4352 CORE_ADDR sp;
4353
4354 /* Fetch the saved PC from the stack. It's stored above
4355 all of the other registers. */
4356 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
4357 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4358 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4359 }
4360 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4361 {
4362 unsigned long cond = bits (inst1, 8, 11);
4363 if (cond == 0x0f) /* 0x0f = SWI */
4364 {
4365 struct gdbarch_tdep *tdep;
4366 tdep = gdbarch_tdep (gdbarch);
4367
4368 if (tdep->syscall_next_pc != NULL)
4369 nextpc = tdep->syscall_next_pc (frame);
4370
4371 }
4372 else if (cond != 0x0f && condition_true (cond, status))
4373 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4374 }
4375 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4376 {
4377 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4378 }
4379 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
4380 {
4381 unsigned short inst2;
4382 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4383
4384 /* Default to the next instruction. */
4385 nextpc = pc + 4;
4386 nextpc = MAKE_THUMB_ADDR (nextpc);
4387
4388 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4389 {
4390 /* Branches and miscellaneous control instructions. */
4391
4392 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4393 {
4394 /* B, BL, BLX. */
4395 int j1, j2, imm1, imm2;
4396
4397 imm1 = sbits (inst1, 0, 10);
4398 imm2 = bits (inst2, 0, 10);
4399 j1 = bit (inst2, 13);
4400 j2 = bit (inst2, 11);
4401
4402 offset = ((imm1 << 12) + (imm2 << 1));
4403 offset ^= ((!j2) << 22) | ((!j1) << 23);
4404
4405 nextpc = pc_val + offset;
4406 /* For BLX make sure to clear the low bits. */
4407 if (bit (inst2, 12) == 0)
4408 nextpc = nextpc & 0xfffffffc;
4409 }
4410 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4411 {
4412 /* SUBS PC, LR, #imm8. */
4413 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4414 nextpc -= inst2 & 0x00ff;
4415 }
4416 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4417 {
4418 /* Conditional branch. */
4419 if (condition_true (bits (inst1, 6, 9), status))
4420 {
4421 int sign, j1, j2, imm1, imm2;
4422
4423 sign = sbits (inst1, 10, 10);
4424 imm1 = bits (inst1, 0, 5);
4425 imm2 = bits (inst2, 0, 10);
4426 j1 = bit (inst2, 13);
4427 j2 = bit (inst2, 11);
4428
4429 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4430 offset += (imm1 << 12) + (imm2 << 1);
4431
4432 nextpc = pc_val + offset;
4433 }
4434 }
4435 }
4436 else if ((inst1 & 0xfe50) == 0xe810)
4437 {
4438 /* Load multiple or RFE. */
4439 int rn, offset, load_pc = 1;
4440
4441 rn = bits (inst1, 0, 3);
4442 if (bit (inst1, 7) && !bit (inst1, 8))
4443 {
4444 /* LDMIA or POP */
4445 if (!bit (inst2, 15))
4446 load_pc = 0;
4447 offset = bitcount (inst2) * 4 - 4;
4448 }
4449 else if (!bit (inst1, 7) && bit (inst1, 8))
4450 {
4451 /* LDMDB */
4452 if (!bit (inst2, 15))
4453 load_pc = 0;
4454 offset = -4;
4455 }
4456 else if (bit (inst1, 7) && bit (inst1, 8))
4457 {
4458 /* RFEIA */
4459 offset = 0;
4460 }
4461 else if (!bit (inst1, 7) && !bit (inst1, 8))
4462 {
4463 /* RFEDB */
4464 offset = -8;
4465 }
4466 else
4467 load_pc = 0;
4468
4469 if (load_pc)
4470 {
4471 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4472 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4473 }
4474 }
4475 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4476 {
4477 /* MOV PC or MOVS PC. */
4478 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4479 nextpc = MAKE_THUMB_ADDR (nextpc);
4480 }
4481 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4482 {
4483 /* LDR PC. */
4484 CORE_ADDR base;
4485 int rn, load_pc = 1;
4486
4487 rn = bits (inst1, 0, 3);
4488 base = get_frame_register_unsigned (frame, rn);
4489 if (rn == ARM_PC_REGNUM)
4490 {
4491 base = (base + 4) & ~(CORE_ADDR) 0x3;
4492 if (bit (inst1, 7))
4493 base += bits (inst2, 0, 11);
4494 else
4495 base -= bits (inst2, 0, 11);
4496 }
4497 else if (bit (inst1, 7))
4498 base += bits (inst2, 0, 11);
4499 else if (bit (inst2, 11))
4500 {
4501 if (bit (inst2, 10))
4502 {
4503 if (bit (inst2, 9))
4504 base += bits (inst2, 0, 7);
4505 else
4506 base -= bits (inst2, 0, 7);
4507 }
4508 }
4509 else if ((inst2 & 0x0fc0) == 0x0000)
4510 {
4511 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4512 base += get_frame_register_unsigned (frame, rm) << shift;
4513 }
4514 else
4515 /* Reserved. */
4516 load_pc = 0;
4517
4518 if (load_pc)
4519 nextpc = get_frame_memory_unsigned (frame, base, 4);
4520 }
4521 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4522 {
4523 /* TBB. */
4524 CORE_ADDR tbl_reg, table, offset, length;
4525
4526 tbl_reg = bits (inst1, 0, 3);
4527 if (tbl_reg == 0x0f)
4528 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4529 else
4530 table = get_frame_register_unsigned (frame, tbl_reg);
4531
4532 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4533 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4534 nextpc = pc_val + length;
4535 }
4536 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4537 {
4538 /* TBH. */
4539 CORE_ADDR tbl_reg, table, offset, length;
4540
4541 tbl_reg = bits (inst1, 0, 3);
4542 if (tbl_reg == 0x0f)
4543 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4544 else
4545 table = get_frame_register_unsigned (frame, tbl_reg);
4546
4547 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4548 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4549 nextpc = pc_val + length;
4550 }
4551 }
4552 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4553 {
4554 if (bits (inst1, 3, 6) == 0x0f)
4555 nextpc = pc_val;
4556 else
4557 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4558 }
4559 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4560 {
4561 if (bits (inst1, 3, 6) == 0x0f)
4562 nextpc = pc_val;
4563 else
4564 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4565
4566 nextpc = MAKE_THUMB_ADDR (nextpc);
4567 }
4568 else if ((inst1 & 0xf500) == 0xb100)
4569 {
4570 /* CBNZ or CBZ. */
4571 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4572 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4573
4574 if (bit (inst1, 11) && reg != 0)
4575 nextpc = pc_val + imm;
4576 else if (!bit (inst1, 11) && reg == 0)
4577 nextpc = pc_val + imm;
4578 }
4579 return nextpc;
4580 }
4581
4582 /* Get the raw next address. PC is the current program counter, in
4583 FRAME, which is assumed to be executing in ARM mode.
4584
4585 The value returned has the execution state of the next instruction
4586 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4587 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4588 address. */
4589
4590 static CORE_ADDR
4591 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4592 {
4593 struct gdbarch *gdbarch = get_frame_arch (frame);
4594 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4595 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4596 unsigned long pc_val;
4597 unsigned long this_instr;
4598 unsigned long status;
4599 CORE_ADDR nextpc;
4600
4601 pc_val = (unsigned long) pc;
4602 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4603
4604 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4605 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4606
4607 if (bits (this_instr, 28, 31) == INST_NV)
4608 switch (bits (this_instr, 24, 27))
4609 {
4610 case 0xa:
4611 case 0xb:
4612 {
4613 /* Branch with Link and change to Thumb. */
4614 nextpc = BranchDest (pc, this_instr);
4615 nextpc |= bit (this_instr, 24) << 1;
4616 nextpc = MAKE_THUMB_ADDR (nextpc);
4617 break;
4618 }
4619 case 0xc:
4620 case 0xd:
4621 case 0xe:
4622 /* Coprocessor register transfer. */
4623 if (bits (this_instr, 12, 15) == 15)
4624 error (_("Invalid update to pc in instruction"));
4625 break;
4626 }
4627 else if (condition_true (bits (this_instr, 28, 31), status))
4628 {
4629 switch (bits (this_instr, 24, 27))
4630 {
4631 case 0x0:
4632 case 0x1: /* data processing */
4633 case 0x2:
4634 case 0x3:
4635 {
4636 unsigned long operand1, operand2, result = 0;
4637 unsigned long rn;
4638 int c;
4639
4640 if (bits (this_instr, 12, 15) != 15)
4641 break;
4642
4643 if (bits (this_instr, 22, 25) == 0
4644 && bits (this_instr, 4, 7) == 9) /* multiply */
4645 error (_("Invalid update to pc in instruction"));
4646
4647 /* BX <reg>, BLX <reg> */
4648 if (bits (this_instr, 4, 27) == 0x12fff1
4649 || bits (this_instr, 4, 27) == 0x12fff3)
4650 {
4651 rn = bits (this_instr, 0, 3);
4652 nextpc = ((rn == ARM_PC_REGNUM)
4653 ? (pc_val + 8)
4654 : get_frame_register_unsigned (frame, rn));
4655
4656 return nextpc;
4657 }
4658
4659 /* Multiply into PC. */
4660 c = (status & FLAG_C) ? 1 : 0;
4661 rn = bits (this_instr, 16, 19);
4662 operand1 = ((rn == ARM_PC_REGNUM)
4663 ? (pc_val + 8)
4664 : get_frame_register_unsigned (frame, rn));
4665
4666 if (bit (this_instr, 25))
4667 {
4668 unsigned long immval = bits (this_instr, 0, 7);
4669 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4670 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4671 & 0xffffffff;
4672 }
4673 else /* operand 2 is a shifted register. */
4674 operand2 = shifted_reg_val (frame, this_instr, c,
4675 pc_val, status);
4676
4677 switch (bits (this_instr, 21, 24))
4678 {
4679 case 0x0: /*and */
4680 result = operand1 & operand2;
4681 break;
4682
4683 case 0x1: /*eor */
4684 result = operand1 ^ operand2;
4685 break;
4686
4687 case 0x2: /*sub */
4688 result = operand1 - operand2;
4689 break;
4690
4691 case 0x3: /*rsb */
4692 result = operand2 - operand1;
4693 break;
4694
4695 case 0x4: /*add */
4696 result = operand1 + operand2;
4697 break;
4698
4699 case 0x5: /*adc */
4700 result = operand1 + operand2 + c;
4701 break;
4702
4703 case 0x6: /*sbc */
4704 result = operand1 - operand2 + c;
4705 break;
4706
4707 case 0x7: /*rsc */
4708 result = operand2 - operand1 + c;
4709 break;
4710
4711 case 0x8:
4712 case 0x9:
4713 case 0xa:
4714 case 0xb: /* tst, teq, cmp, cmn */
4715 result = (unsigned long) nextpc;
4716 break;
4717
4718 case 0xc: /*orr */
4719 result = operand1 | operand2;
4720 break;
4721
4722 case 0xd: /*mov */
4723 /* Always step into a function. */
4724 result = operand2;
4725 break;
4726
4727 case 0xe: /*bic */
4728 result = operand1 & ~operand2;
4729 break;
4730
4731 case 0xf: /*mvn */
4732 result = ~operand2;
4733 break;
4734 }
4735
4736 /* In 26-bit APCS the bottom two bits of the result are
4737 ignored, and we always end up in ARM state. */
4738 if (!arm_apcs_32)
4739 nextpc = arm_addr_bits_remove (gdbarch, result);
4740 else
4741 nextpc = result;
4742
4743 break;
4744 }
4745
4746 case 0x4:
4747 case 0x5: /* data transfer */
4748 case 0x6:
4749 case 0x7:
4750 if (bit (this_instr, 20))
4751 {
4752 /* load */
4753 if (bits (this_instr, 12, 15) == 15)
4754 {
4755 /* rd == pc */
4756 unsigned long rn;
4757 unsigned long base;
4758
4759 if (bit (this_instr, 22))
4760 error (_("Invalid update to pc in instruction"));
4761
4762 /* Word load into the PC; work out the base address. */
4763 rn = bits (this_instr, 16, 19);
4764 base = ((rn == ARM_PC_REGNUM)
4765 ? (pc_val + 8)
4766 : get_frame_register_unsigned (frame, rn));
4767
4768 if (bit (this_instr, 24))
4769 {
4770 /* pre-indexed */
4771 int c = (status & FLAG_C) ? 1 : 0;
4772 unsigned long offset =
4773 (bit (this_instr, 25)
4774 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4775 : bits (this_instr, 0, 11));
4776
4777 if (bit (this_instr, 23))
4778 base += offset;
4779 else
4780 base -= offset;
4781 }
4782 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
4783 4, byte_order);
4784 }
4785 }
4786 break;
4787
4788 case 0x8:
4789 case 0x9: /* block transfer */
4790 if (bit (this_instr, 20))
4791 {
4792 /* LDM */
4793 if (bit (this_instr, 15))
4794 {
4795 /* loading pc */
4796 int offset = 0;
4797
4798 if (bit (this_instr, 23))
4799 {
4800 /* up */
4801 unsigned long reglist = bits (this_instr, 0, 14);
4802 offset = bitcount (reglist) * 4;
4803 if (bit (this_instr, 24)) /* pre */
4804 offset += 4;
4805 }
4806 else if (bit (this_instr, 24))
4807 offset = -4;
4808
4809 {
4810 unsigned long rn_val =
4811 get_frame_register_unsigned (frame,
4812 bits (this_instr, 16, 19));
4813 nextpc =
4814 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
4815 + offset),
4816 4, byte_order);
4817 }
4818 }
4819 }
4820 break;
4821
4822 case 0xb: /* branch & link */
4823 case 0xa: /* branch */
4824 {
4825 nextpc = BranchDest (pc, this_instr);
4826 break;
4827 }
4828
4829 case 0xc:
4830 case 0xd:
4831 case 0xe: /* coproc ops */
4832 break;
4833 case 0xf: /* SWI */
4834 {
4835 struct gdbarch_tdep *tdep;
4836 tdep = gdbarch_tdep (gdbarch);
4837
4838 if (tdep->syscall_next_pc != NULL)
4839 nextpc = tdep->syscall_next_pc (frame);
4840
4841 }
4842 break;
4843
4844 default:
4845 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4846 return (pc);
4847 }
4848 }
4849
4850 return nextpc;
4851 }
4852
4853 /* Determine next PC after current instruction executes. Will call either
4854 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4855 loop is detected. */
4856
4857 CORE_ADDR
4858 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4859 {
4860 CORE_ADDR nextpc;
4861
4862 if (arm_frame_is_thumb (frame))
4863 {
4864 nextpc = thumb_get_next_pc_raw (frame, pc);
4865 if (nextpc == MAKE_THUMB_ADDR (pc))
4866 error (_("Infinite loop detected"));
4867 }
4868 else
4869 {
4870 nextpc = arm_get_next_pc_raw (frame, pc);
4871 if (nextpc == pc)
4872 error (_("Infinite loop detected"));
4873 }
4874
4875 return nextpc;
4876 }
4877
4878 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4879 of the appropriate mode (as encoded in the PC value), even if this
4880 differs from what would be expected according to the symbol tables. */
4881
4882 void
4883 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4884 struct address_space *aspace,
4885 CORE_ADDR pc)
4886 {
4887 struct cleanup *old_chain
4888 = make_cleanup_restore_integer (&arm_override_mode);
4889
4890 arm_override_mode = IS_THUMB_ADDR (pc);
4891 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4892
4893 insert_single_step_breakpoint (gdbarch, aspace, pc);
4894
4895 do_cleanups (old_chain);
4896 }
4897
4898 /* single_step() is called just before we want to resume the inferior,
4899 if we want to single-step it but there is no hardware or kernel
4900 single-step support. We find the target of the coming instruction
4901 and breakpoint it. */
4902
4903 int
4904 arm_software_single_step (struct frame_info *frame)
4905 {
4906 struct gdbarch *gdbarch = get_frame_arch (frame);
4907 struct address_space *aspace = get_frame_address_space (frame);
4908 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4909
4910 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4911
4912 return 1;
4913 }
4914
4915 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4916 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4917 NULL if an error occurs. BUF is freed. */
4918
4919 static gdb_byte *
4920 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4921 int old_len, int new_len)
4922 {
4923 gdb_byte *new_buf, *middle;
4924 int bytes_to_read = new_len - old_len;
4925
4926 new_buf = xmalloc (new_len);
4927 memcpy (new_buf + bytes_to_read, buf, old_len);
4928 xfree (buf);
4929 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4930 {
4931 xfree (new_buf);
4932 return NULL;
4933 }
4934 return new_buf;
4935 }
4936
4937 /* An IT block is at most the 2-byte IT instruction followed by
4938 four 4-byte instructions. The furthest back we must search to
4939 find an IT block that affects the current instruction is thus
4940 2 + 3 * 4 == 14 bytes. */
4941 #define MAX_IT_BLOCK_PREFIX 14
4942
4943 /* Use a quick scan if there are more than this many bytes of
4944 code. */
4945 #define IT_SCAN_THRESHOLD 32
4946
4947 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4948 A breakpoint in an IT block may not be hit, depending on the
4949 condition flags. */
4950 static CORE_ADDR
4951 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4952 {
4953 gdb_byte *buf;
4954 char map_type;
4955 CORE_ADDR boundary, func_start;
4956 int buf_len, buf2_len;
4957 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4958 int i, any, last_it, last_it_count;
4959
4960 /* If we are using BKPT breakpoints, none of this is necessary. */
4961 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4962 return bpaddr;
4963
4964 /* ARM mode does not have this problem. */
4965 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4966 return bpaddr;
4967
4968 /* We are setting a breakpoint in Thumb code that could potentially
4969 contain an IT block. The first step is to find how much Thumb
4970 code there is; we do not need to read outside of known Thumb
4971 sequences. */
4972 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4973 if (map_type == 0)
4974 /* Thumb-2 code must have mapping symbols to have a chance. */
4975 return bpaddr;
4976
4977 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4978
4979 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4980 && func_start > boundary)
4981 boundary = func_start;
4982
4983 /* Search for a candidate IT instruction. We have to do some fancy
4984 footwork to distinguish a real IT instruction from the second
4985 half of a 32-bit instruction, but there is no need for that if
4986 there's no candidate. */
4987 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4988 if (buf_len == 0)
4989 /* No room for an IT instruction. */
4990 return bpaddr;
4991
4992 buf = xmalloc (buf_len);
4993 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4994 {
 /* The read failed; free the buffer and leave the address alone. */
 xfree (buf);
 return bpaddr;
 }
4995 any = 0;
4996 for (i = 0; i < buf_len; i += 2)
4997 {
4998 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4999 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5000 {
5001 any = 1;
5002 break;
5003 }
5004 }
5005 if (any == 0)
5006 {
5007 xfree (buf);
5008 return bpaddr;
5009 }
5010
5011 /* OK, the code bytes before this instruction contain at least one
5012 halfword which resembles an IT instruction. We know that it's
5013 Thumb code, but there are still two possibilities. Either the
5014 halfword really is an IT instruction, or it is the second half of
5015 a 32-bit Thumb instruction. The only way we can tell is to
5016 scan forwards from a known instruction boundary. */
5017 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5018 {
5019 int definite;
5020
5021 /* There's a lot of code before this instruction. Start with an
5022 optimistic search; it's easy to recognize halfwords that can
5023 not be the start of a 32-bit instruction, and use that to
5024 lock on to the instruction boundaries. */
5025 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5026 if (buf == NULL)
5027 return bpaddr;
5028 buf_len = IT_SCAN_THRESHOLD;
5029
5030 definite = 0;
5031 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5032 {
5033 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5034 if (thumb_insn_size (inst1) == 2)
5035 {
5036 definite = 1;
5037 break;
5038 }
5039 }
5040
5041 /* At this point, if DEFINITE, BUF[I] is the first place we
5042 are sure that we know the instruction boundaries, and it is far
5043 enough from BPADDR that we could not miss an IT instruction
5044 affecting BPADDR. If ! DEFINITE, give up - start from a
5045 known boundary. */
5046 if (! definite)
5047 {
5048 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5049 bpaddr - boundary);
5050 if (buf == NULL)
5051 return bpaddr;
5052 buf_len = bpaddr - boundary;
5053 i = 0;
5054 }
5055 }
5056 else
5057 {
5058 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5059 if (buf == NULL)
5060 return bpaddr;
5061 buf_len = bpaddr - boundary;
5062 i = 0;
5063 }
5064
5065 /* Scan forwards. Find the last IT instruction before BPADDR. */
5066 last_it = -1;
5067 last_it_count = 0;
5068 while (i < buf_len)
5069 {
5070 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5071 last_it_count--;
5072 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5073 {
5074 last_it = i;
5075 if (inst1 & 0x0001)
5076 last_it_count = 4;
5077 else if (inst1 & 0x0002)
5078 last_it_count = 3;
5079 else if (inst1 & 0x0004)
5080 last_it_count = 2;
5081 else
5082 last_it_count = 1;
5083 }
5084 i += thumb_insn_size (inst1);
5085 }
5086
5087 xfree (buf);
5088
5089 if (last_it == -1)
5090 /* There wasn't really an IT instruction after all. */
5091 return bpaddr;
5092
5093 if (last_it_count < 1)
5094 /* It was too far away. */
5095 return bpaddr;
5096
5097 /* This really is a trouble spot. Move the breakpoint to the IT
5098 instruction. */
5099 return bpaddr - buf_len + last_it;
5100 }
5101
5102 /* ARM displaced stepping support.
5103
5104 Generally ARM displaced stepping works as follows:
5105
5106 1. When an instruction is to be single-stepped, it is first decoded by
5107 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5108 Depending on the type of instruction, it is then copied to a scratch
5109 location, possibly in a modified form. The copy_* set of functions
5110 performs such modification, as necessary. A breakpoint is placed after
5111 the modified instruction in the scratch space to return control to GDB.
5112 Note in particular that instructions which modify the PC will no longer
5113 do so after modification.
5114
5115 2. The instruction is single-stepped, by setting the PC to the scratch
5116 location address, and resuming. Control returns to GDB when the
5117 breakpoint is hit.
5118
5119 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5120 function used for the current instruction. This function's job is to
5121 put the CPU/memory state back to what it would have been if the
5122 instruction had been executed unmodified in its original location. */
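 /* For example, a PC-relative preload such as "pld [pc, #8]" would read
 the wrong address if executed verbatim in the scratch space;
 install_preload below therefore rewrites it to use r0, pre-loads r0
 with the value the PC had at the original location, and
 cleanup_preload restores r0 afterwards. */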
5123
5124 /* NOP instruction (mov r0, r0). */
5125 #define ARM_NOP 0xe1a00000
5126
5127 /* Helper for register reads for displaced stepping. In particular, this
5128 returns the PC as it would be seen by the instruction at its original
5129 location. */
5130
5131 ULONGEST
5132 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5133 int regno)
5134 {
5135 ULONGEST ret;
5136 CORE_ADDR from = dsc->insn_addr;
5137
5138 if (regno == ARM_PC_REGNUM)
5139 {
5140 /* Compute pipeline offset:
5141 - When executing an ARM instruction, PC reads as the address of the
5142 current instruction plus 8.
5143 - When executing a Thumb instruction, PC reads as the address of the
5144 current instruction plus 4. */
5145
5146 if (!dsc->is_thumb)
5147 from += 8;
5148 else
5149 from += 4;
5150
5151 if (debug_displaced)
5152 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5153 (unsigned long) from);
5154 return (ULONGEST) from;
5155 }
5156 else
5157 {
5158 regcache_cooked_read_unsigned (regs, regno, &ret);
5159 if (debug_displaced)
5160 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5161 regno, (unsigned long) ret);
5162 return ret;
5163 }
5164 }
5165
5166 static int
5167 displaced_in_arm_mode (struct regcache *regs)
5168 {
5169 ULONGEST ps;
5170 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5171
5172 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5173
5174 return (ps & t_bit) == 0;
5175 }
5176
5177 /* Write to the PC as from a branch instruction. */
5178
5179 static void
5180 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5181 ULONGEST val)
5182 {
5183 if (!dsc->is_thumb)
5184 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5185 architecture versions < 6. */
5186 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5187 val & ~(ULONGEST) 0x3);
5188 else
5189 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5190 val & ~(ULONGEST) 0x1);
5191 }
5192
5193 /* Write to the PC as from a branch-exchange instruction. */
5194
5195 static void
5196 bx_write_pc (struct regcache *regs, ULONGEST val)
5197 {
5198 ULONGEST ps;
5199 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5200
5201 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5202
5203 if ((val & 1) == 1)
5204 {
5205 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5206 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5207 }
5208 else if ((val & 2) == 0)
5209 {
5210 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5211 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5212 }
5213 else
5214 {
5215 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5216 mode, align dest to 4 bytes). */
5217 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5218 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5219 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5220 }
5221 }
5222
5223 /* Write to the PC as if from a load instruction. */
5224
5225 static void
5226 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5227 ULONGEST val)
5228 {
5229 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5230 bx_write_pc (regs, val);
5231 else
5232 branch_write_pc (regs, dsc, val);
5233 }
5234
5235 /* Write to the PC as if from an ALU instruction. */
5236
5237 static void
5238 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5239 ULONGEST val)
5240 {
5241 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5242 bx_write_pc (regs, val);
5243 else
5244 branch_write_pc (regs, dsc, val);
5245 }
5246
5247 /* Helper for writing to registers for displaced stepping. Writing to the PC
5248 has varying effects depending on the instruction which does the write:
5249 this is controlled by the WRITE_PC argument. */
5250
5251 void
5252 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5253 int regno, ULONGEST val, enum pc_write_style write_pc)
5254 {
5255 if (regno == ARM_PC_REGNUM)
5256 {
5257 if (debug_displaced)
5258 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5259 (unsigned long) val);
5260 switch (write_pc)
5261 {
5262 case BRANCH_WRITE_PC:
5263 branch_write_pc (regs, dsc, val);
5264 break;
5265
5266 case BX_WRITE_PC:
5267 bx_write_pc (regs, val);
5268 break;
5269
5270 case LOAD_WRITE_PC:
5271 load_write_pc (regs, dsc, val);
5272 break;
5273
5274 case ALU_WRITE_PC:
5275 alu_write_pc (regs, dsc, val);
5276 break;
5277
5278 case CANNOT_WRITE_PC:
5279 warning (_("Instruction wrote to PC in an unexpected way when "
5280 "single-stepping"));
5281 break;
5282
5283 default:
5284 internal_error (__FILE__, __LINE__,
5285 _("Invalid argument to displaced_write_reg"));
5286 }
5287
5288 dsc->wrote_to_pc = 1;
5289 }
5290 else
5291 {
5292 if (debug_displaced)
5293 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5294 regno, (unsigned long) val);
5295 regcache_cooked_write_unsigned (regs, regno, val);
5296 }
5297 }
5298
5299 /* This function is used to concisely determine if an instruction INSN
5300 references PC. Register fields of interest in INSN should have the
5301 corresponding fields of BITMASK set to 0b1111. The function
5302 returns 1 if any of these fields in INSN reference the PC
5303 (also 0b1111, r15), else it returns 0. */
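 /* For example, arm_copy_preload below passes 0x000f0000ul to test only
 the Rn field (bits 16-19), while arm_copy_preload_reg passes
 0x000f000ful to test both the Rn field and the Rm field (bits 0-3). */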
5304
5305 static int
5306 insn_references_pc (uint32_t insn, uint32_t bitmask)
5307 {
5308 uint32_t lowbit = 1;
5309
5310 while (bitmask != 0)
5311 {
5312 uint32_t mask;
5313
5314 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5315 ;
5316
5317 if (!lowbit)
5318 break;
5319
5320 mask = lowbit * 0xf;
5321
5322 if ((insn & mask) == mask)
5323 return 1;
5324
5325 bitmask &= ~mask;
5326 }
5327
5328 return 0;
5329 }
5330
5331 /* The simplest copy function. Many instructions have the same effect no
5332 matter what address they are executed at: in those cases, use this. */
5333
5334 static int
5335 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5336 const char *iname, struct displaced_step_closure *dsc)
5337 {
5338 if (debug_displaced)
5339 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5340 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5341 iname);
5342
5343 dsc->modinsn[0] = insn;
5344
5345 return 0;
5346 }
5347
5348 /* Preload instructions with immediate offset. */
5349
5350 static void
5351 cleanup_preload (struct gdbarch *gdbarch,
5352 struct regcache *regs, struct displaced_step_closure *dsc)
5353 {
5354 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5355 if (!dsc->u.preload.immed)
5356 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5357 }
5358
5359 static void
5360 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5361 struct displaced_step_closure *dsc, unsigned int rn)
5362 {
5363 ULONGEST rn_val;
5364 /* Preload instructions:
5365
5366 {pli/pld} [rn, #+/-imm]
5367 ->
5368 {pli/pld} [r0, #+/-imm]. */
5369
5370 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5371 rn_val = displaced_read_reg (regs, dsc, rn);
5372 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5373 dsc->u.preload.immed = 1;
5374
5375 dsc->cleanup = &cleanup_preload;
5376 }
5377
5378 static int
5379 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5380 struct displaced_step_closure *dsc)
5381 {
5382 unsigned int rn = bits (insn, 16, 19);
5383
5384 if (!insn_references_pc (insn, 0x000f0000ul))
5385 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5386
5387 if (debug_displaced)
5388 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5389 (unsigned long) insn);
5390
5391 dsc->modinsn[0] = insn & 0xfff0ffff;
5392
5393 install_preload (gdbarch, regs, dsc, rn);
5394
5395 return 0;
5396 }
5397
5398 /* Preload instructions with register offset. */
5399
5400 static void
5401 install_preload_reg (struct gdbarch *gdbarch, struct regcache *regs,
5402 struct displaced_step_closure *dsc, unsigned int rn,
5403 unsigned int rm)
5404 {
5405 ULONGEST rn_val, rm_val;
5406
5407 /* Preload register-offset instructions:
5408
5409 {pli/pld} [rn, rm {, shift}]
5410 ->
5411 {pli/pld} [r0, r1 {, shift}]. */
5412
5413 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5414 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5415 rn_val = displaced_read_reg (regs, dsc, rn);
5416 rm_val = displaced_read_reg (regs, dsc, rm);
5417 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5418 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5419 dsc->u.preload.immed = 0;
5420
5421 dsc->cleanup = &cleanup_preload;
5422 }
5423
5424 static int
5425 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5426 struct regcache *regs,
5427 struct displaced_step_closure *dsc)
5428 {
5429 unsigned int rn = bits (insn, 16, 19);
5430 unsigned int rm = bits (insn, 0, 3);
5431
5432
5433 if (!insn_references_pc (insn, 0x000f000ful))
5434 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5435
5436 if (debug_displaced)
5437 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5438 (unsigned long) insn);
5439
5440 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5441
5442 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5443 return 0;
5444 }
5445
5446 /* Copy/cleanup coprocessor load and store instructions. */
5447
5448 static void
5449 cleanup_copro_load_store (struct gdbarch *gdbarch,
5450 struct regcache *regs,
5451 struct displaced_step_closure *dsc)
5452 {
5453 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5454
5455 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5456
5457 if (dsc->u.ldst.writeback)
5458 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5459 }
5460
5461 static void
5462 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5463 struct displaced_step_closure *dsc,
5464 int writeback, unsigned int rn)
5465 {
5466 ULONGEST rn_val;
5467
5468 /* Coprocessor load/store instructions:
5469
5470 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5471 ->
5472 {stc/stc2} [r0, #+/-imm].
5473
5474 ldc/ldc2 are handled identically. */
5475
5476 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5477 rn_val = displaced_read_reg (regs, dsc, rn);
5478 /* PC should be 4-byte aligned. */
5479 rn_val = rn_val & 0xfffffffc;
5480 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5481
5482 dsc->u.ldst.writeback = writeback;
5483 dsc->u.ldst.rn = rn;
5484
5485 dsc->cleanup = &cleanup_copro_load_store;
5486 }
5487
5488 static int
5489 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5490 struct regcache *regs,
5491 struct displaced_step_closure *dsc)
5492 {
5493 unsigned int rn = bits (insn, 16, 19);
5494
5495 if (!insn_references_pc (insn, 0x000f0000ul))
5496 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5497
5498 if (debug_displaced)
5499 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5500 "load/store insn %.8lx\n", (unsigned long) insn);
5501
5502 dsc->modinsn[0] = insn & 0xfff0ffff;
5503
5504 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5505
5506 return 0;
5507 }
5508
5509 /* Clean up branch instructions (actually perform the branch, by setting
5510 PC). */
5511
5512 static void
5513 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5514 struct displaced_step_closure *dsc)
5515 {
5516 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5517 int branch_taken = condition_true (dsc->u.branch.cond, status);
5518 enum pc_write_style write_pc = dsc->u.branch.exchange
5519 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5520
5521 if (!branch_taken)
5522 return;
5523
5524 if (dsc->u.branch.link)
5525 {
5526 /* The value of LR should be the address of the next insn after the
5527 current one. In order not to confuse the logic handling a later `bx lr'
5528 insn, if the current insn mode is Thumb, bit 0 of LR should be set to 1. */
5529 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5530
5531 if (dsc->is_thumb)
5532 next_insn_addr |= 0x1;
5533
5534 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5535 CANNOT_WRITE_PC);
5536 }
5537
5538 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5539 }
5540
5541 /* Copy B/BL/BLX instructions with immediate destinations. */
5542
5543 static void
5544 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5545 struct displaced_step_closure *dsc,
5546 unsigned int cond, int exchange, int link, long offset)
5547 {
5548 /* Implement "BL<cond> <label>" as:
5549
5550 Preparation: cond <- instruction condition
5551 Insn: mov r0, r0 (nop)
5552 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5553
5554 B<cond> similar, but don't set r14 in cleanup. */
5555
5556 dsc->u.branch.cond = cond;
5557 dsc->u.branch.link = link;
5558 dsc->u.branch.exchange = exchange;
5559
5560 dsc->u.branch.dest = dsc->insn_addr;
5561 if (link && exchange)
5562 /* For BLX, the offset is computed from Align (PC, 4). */
5563 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
5564
5565 if (dsc->is_thumb)
5566 dsc->u.branch.dest += 4 + offset;
5567 else
5568 dsc->u.branch.dest += 8 + offset;
5569
5570 dsc->cleanup = &cleanup_branch;
5571 }

5572 static int
5573 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5574 struct regcache *regs, struct displaced_step_closure *dsc)
5575 {
5576 unsigned int cond = bits (insn, 28, 31);
5577 int exchange = (cond == 0xf);
5578 int link = exchange || bit (insn, 24);
5579 long offset;
5580
5581 if (debug_displaced)
5582 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5583 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5584 (unsigned long) insn);
5585 if (exchange)
5586 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5587 then arrange the switch into Thumb mode. */
5588 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5589 else
5590 offset = bits (insn, 0, 23) << 2;
5591
5592 if (bit (offset, 25))
5593 offset = offset | ~0x3ffffff;
5594
5595 dsc->modinsn[0] = ARM_NOP;
5596
5597 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5598 return 0;
5599 }
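/* An illustrative sketch (kept under "#if 0", so it is not built) of the
   offset decoding above, using the bit/bits helpers defined earlier in
   this file.  The instruction word and address are made-up example
   values: 0xeb000001 is "bl" with imm24 = 1, which branches to the
   address of the BL plus 12.  */
#if 0
static uint32_t
example_decode_bl_dest (void)
{
  uint32_t insn = 0xeb000001;	/* "bl" with imm24 = 1, for illustration.  */
  uint32_t pc = 0x8000;		/* Address of the BL itself.  */
  long offset = bits (insn, 0, 23) << 2;	/* 24-bit immediate, times 4.  */

  if (bit (offset, 25))		/* Sign-extend from bit 25.  */
    offset |= ~0x3ffffff;

  /* Reads of PC in ARM state see the instruction address plus 8, so this
     yields 0x8000 + 8 + 4 = 0x800c.  */
  return pc + 8 + offset;
}
#endif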
5600
5601 /* Copy BX/BLX with register-specified destinations. */
5602
5603 static void
5604 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5605 struct displaced_step_closure *dsc, int link,
5606 unsigned int cond, unsigned int rm)
5607 {
5608 /* Implement "{BX,BLX}<cond> <reg>" as:
5609
5610 Preparation: cond <- instruction condition
5611 Insn: mov r0, r0 (nop)
5612 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5613
5614 Don't set r14 in cleanup for BX. */
5615
5616 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5617
5618 dsc->u.branch.cond = cond;
5619 dsc->u.branch.link = link;
5620
5621 dsc->u.branch.exchange = 1;
5622
5623 dsc->cleanup = &cleanup_branch;
5624 }
5625
5626 static int
5627 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5628 struct regcache *regs, struct displaced_step_closure *dsc)
5629 {
5630 unsigned int cond = bits (insn, 28, 31);
5631 /* BX: x12xxx1x
5632 BLX: x12xxx3x. */
5633 int link = bit (insn, 5);
5634 unsigned int rm = bits (insn, 0, 3);
5635
5636 if (debug_displaced)
5637 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx\n",
5638 (unsigned long) insn);
5639
5640 dsc->modinsn[0] = ARM_NOP;
5641
5642 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5643 return 0;
5644 }
5645
5646 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5647
5648 static void
5649 cleanup_alu_imm (struct gdbarch *gdbarch,
5650 struct regcache *regs, struct displaced_step_closure *dsc)
5651 {
5652 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5653 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5654 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5655 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5656 }
5657
5658 static int
5659 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5660 struct displaced_step_closure *dsc)
5661 {
5662 unsigned int rn = bits (insn, 16, 19);
5663 unsigned int rd = bits (insn, 12, 15);
5664 unsigned int op = bits (insn, 21, 24);
5665 int is_mov = (op == 0xd);
5666 ULONGEST rd_val, rn_val;
5667
5668 if (!insn_references_pc (insn, 0x000ff000ul))
5669 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
5670
5671 if (debug_displaced)
5672 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
5673 "%.8lx\n", is_mov ? "move" : "ALU",
5674 (unsigned long) insn);
5675
5676 /* Instruction is of form:
5677
5678 <op><cond> rd, [rn,] #imm
5679
5680 Rewrite as:
5681
5682 Preparation: tmp1, tmp2 <- r0, r1;
5683 r0, r1 <- rd, rn
5684 Insn: <op><cond> r0, r1, #imm
5685 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
5686 */
5687
5688 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5689 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5690 rn_val = displaced_read_reg (regs, dsc, rn);
5691 rd_val = displaced_read_reg (regs, dsc, rd);
5692 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5693 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5694 dsc->rd = rd;
5695
5696 if (is_mov)
5697 dsc->modinsn[0] = insn & 0xfff00fff;
5698 else
5699 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
5700
5701 dsc->cleanup = &cleanup_alu_imm;
5702
5703 return 0;
5704 }
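/* An illustrative sketch (kept under "#if 0", so it is not built) of the
   modinsn rewrite above for a PC-relative ALU instruction.  The encoding
   is a made-up example: "add r2, pc, #4" is 0xe28f2004; clearing the Rn
   and Rd fields and setting Rn to 1 gives 0xe2810004, i.e.
   "add r0, r1, #4".  The preparation step has already copied the PC value
   into r1, and cleanup_alu_imm copies r0 back into r2 afterwards.  */
#if 0
static uint32_t
example_remap_alu_imm (uint32_t insn)
{
  /* Keep cond, opcode and immediate; retarget Rd to r0 and Rn to r1.  */
  return (insn & 0xfff00fff) | 0x10000;
}
#endif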
5705
5706 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5707
5708 static void
5709 cleanup_alu_reg (struct gdbarch *gdbarch,
5710 struct regcache *regs, struct displaced_step_closure *dsc)
5711 {
5712 ULONGEST rd_val;
5713 int i;
5714
5715 rd_val = displaced_read_reg (regs, dsc, 0);
5716
5717 for (i = 0; i < 3; i++)
5718 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5719
5720 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5721 }
5722
5723 static void
5724 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
5725 struct displaced_step_closure *dsc,
5726 unsigned int rd, unsigned int rn, unsigned int rm)
5727 {
5728 ULONGEST rd_val, rn_val, rm_val;
5729
5730 /* Instruction is of form:
5731
5732 <op><cond> rd, [rn,] rm [, <shift>]
5733
5734 Rewrite as:
5735
5736 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5737 r0, r1, r2 <- rd, rn, rm
5738 Insn: <op><cond> r0, r1, r2 [, <shift>]
5739 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5740 */
5741
5742 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5743 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5744 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5745 rd_val = displaced_read_reg (regs, dsc, rd);
5746 rn_val = displaced_read_reg (regs, dsc, rn);
5747 rm_val = displaced_read_reg (regs, dsc, rm);
5748 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5749 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5750 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5751 dsc->rd = rd;
5752
5753 dsc->cleanup = &cleanup_alu_reg;
5754 }
5755
5756 static int
5757 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5758 struct displaced_step_closure *dsc)
5759 {
5760 unsigned int op = bits (insn, 21, 24);
5761 int is_mov = (op == 0xd);
5762
5763 if (!insn_references_pc (insn, 0x000ff00ful))
5764 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5765
5766 if (debug_displaced)
5767 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
5768 is_mov ? "move" : "ALU", (unsigned long) insn);
5769
5770 if (is_mov)
5771 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5772 else
5773 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5774
5775 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
5776 bits (insn, 0, 3));
5777 return 0;
5778 }
5779
5780 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5781
5782 static void
5783 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5784 struct regcache *regs,
5785 struct displaced_step_closure *dsc)
5786 {
5787 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5788 int i;
5789
5790 for (i = 0; i < 4; i++)
5791 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5792
5793 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5794 }
5795
5796 static void
5797 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
5798 struct displaced_step_closure *dsc,
5799 unsigned int rd, unsigned int rn, unsigned int rm,
5800 unsigned rs)
5801 {
5802 int i;
5803 ULONGEST rd_val, rn_val, rm_val, rs_val;
5804
5805 /* Instruction is of form:
5806
5807 <op><cond> rd, [rn,] rm, <shift> rs
5808
5809 Rewrite as:
5810
5811 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5812 r0, r1, r2, r3 <- rd, rn, rm, rs
5813 Insn: <op><cond> r0, r1, r2, <shift> r3
5814 Cleanup: tmp5 <- r0
5815 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5816 rd <- tmp5
5817 */
5818
5819 for (i = 0; i < 4; i++)
5820 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
5821
5822 rd_val = displaced_read_reg (regs, dsc, rd);
5823 rn_val = displaced_read_reg (regs, dsc, rn);
5824 rm_val = displaced_read_reg (regs, dsc, rm);
5825 rs_val = displaced_read_reg (regs, dsc, rs);
5826 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5827 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5828 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5829 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5830 dsc->rd = rd;
5831 dsc->cleanup = &cleanup_alu_shifted_reg;
5832 }
5833
5834 static int
5835 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5836 struct regcache *regs,
5837 struct displaced_step_closure *dsc)
5838 {
5839 unsigned int op = bits (insn, 21, 24);
5840 int is_mov = (op == 0xd);
5841 unsigned int rd, rn, rm, rs;
5842
5843 if (!insn_references_pc (insn, 0x000fff0ful))
5844 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5845
5846 if (debug_displaced)
5847 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
5848 "%.8lx\n", is_mov ? "move" : "ALU",
5849 (unsigned long) insn);
5850
5851 rn = bits (insn, 16, 19);
5852 rm = bits (insn, 0, 3);
5853 rs = bits (insn, 8, 11);
5854 rd = bits (insn, 12, 15);
5855
5856 if (is_mov)
5857 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5858 else
5859 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5860
5861 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
5862
5863 return 0;
5864 }
5865
5866 /* Clean up load instructions. */
5867
5868 static void
5869 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
5870 struct displaced_step_closure *dsc)
5871 {
5872 ULONGEST rt_val, rt_val2 = 0, rn_val;
5873
5874 rt_val = displaced_read_reg (regs, dsc, 0);
5875 if (dsc->u.ldst.xfersize == 8)
5876 rt_val2 = displaced_read_reg (regs, dsc, 1);
5877 rn_val = displaced_read_reg (regs, dsc, 2);
5878
5879 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5880 if (dsc->u.ldst.xfersize > 4)
5881 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5882 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5883 if (!dsc->u.ldst.immed)
5884 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5885
5886 /* Handle register writeback. */
5887 if (dsc->u.ldst.writeback)
5888 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5889 /* Put result in right place. */
5890 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5891 if (dsc->u.ldst.xfersize == 8)
5892 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5893 }
5894
5895 /* Clean up store instructions. */
5896
5897 static void
5898 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5899 struct displaced_step_closure *dsc)
5900 {
5901 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
5902
5903 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5904 if (dsc->u.ldst.xfersize > 4)
5905 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5906 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5907 if (!dsc->u.ldst.immed)
5908 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5909 if (!dsc->u.ldst.restore_r4)
5910 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5911
5912 /* Writeback. */
5913 if (dsc->u.ldst.writeback)
5914 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5915 }
5916
5917 /* Copy "extra" load/store instructions. These are halfword/doubleword
5918 transfers, which have a different encoding to byte/word transfers. */
5919
5920 static int
5921 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
5922 struct regcache *regs, struct displaced_step_closure *dsc)
5923 {
5924 unsigned int op1 = bits (insn, 20, 24);
5925 unsigned int op2 = bits (insn, 5, 6);
5926 unsigned int rt = bits (insn, 12, 15);
5927 unsigned int rn = bits (insn, 16, 19);
5928 unsigned int rm = bits (insn, 0, 3);
5929 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
5930 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
5931 int immed = (op1 & 0x4) != 0;
5932 int opcode;
5933 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
5934
5935 if (!insn_references_pc (insn, 0x000ff00ful))
5936 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
5937
5938 if (debug_displaced)
5939 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
5940 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
5941 (unsigned long) insn);
5942
5943 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
5944
5945 if (opcode < 0)
5946 internal_error (__FILE__, __LINE__,
5947 _("copy_extra_ld_st: instruction decode error"));
5948
5949 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5950 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5951 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5952 if (!immed)
5953 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
5954
5955 rt_val = displaced_read_reg (regs, dsc, rt);
5956 if (bytesize[opcode] == 8)
5957 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
5958 rn_val = displaced_read_reg (regs, dsc, rn);
5959 if (!immed)
5960 rm_val = displaced_read_reg (regs, dsc, rm);
5961
5962 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
5963 if (bytesize[opcode] == 8)
5964 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
5965 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
5966 if (!immed)
5967 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
5968
5969 dsc->rd = rt;
5970 dsc->u.ldst.xfersize = bytesize[opcode];
5971 dsc->u.ldst.rn = rn;
5972 dsc->u.ldst.immed = immed;
5973 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
5974 dsc->u.ldst.restore_r4 = 0;
5975
5976 if (immed)
5977 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
5978 ->
5979 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
5980 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
5981 else
5982 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
5983 ->
5984 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
5985 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
5986
5987 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
5988
5989 return 0;
5990 }
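/* An illustrative sketch (kept under "#if 0", so it is not built) of the
   table index computed above.  The helper name is made up; op1 and op2
   are the same fields extracted at the top of arm_copy_extra_ld_st.  For
   "ldrh rt, [rn, rm]" we have op2 == 0x1, the load bit (insn bit 20) set
   and the immediate bit (insn bit 22) clear, so the index is
   (0x1 << 2 | 1 | 0) - 4 == 1, selecting load[1] == 1 and
   bytesize[1] == 2: a two-byte load.  */
#if 0
static int
example_extra_ld_st_index (unsigned int op1, unsigned int op2)
{
  return ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
}
#endif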
5991
5992 /* Copy byte/half word/word loads and stores. */
5993
5994 static void
5995 install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5996 struct displaced_step_closure *dsc, int load,
5997 int immed, int writeback, int size, int usermode,
5998 int rt, int rm, int rn)
5999 {
6000 ULONGEST rt_val, rn_val, rm_val = 0;
6001
6002 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6003 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6004 if (!immed)
6005 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6006 if (!load)
6007 dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
6008
6009 rt_val = displaced_read_reg (regs, dsc, rt);
6010 rn_val = displaced_read_reg (regs, dsc, rn);
6011 if (!immed)
6012 rm_val = displaced_read_reg (regs, dsc, rm);
6013
6014 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6015 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6016 if (!immed)
6017 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6018 dsc->rd = rt;
6019 dsc->u.ldst.xfersize = size;
6020 dsc->u.ldst.rn = rn;
6021 dsc->u.ldst.immed = immed;
6022 dsc->u.ldst.writeback = writeback;
6023
6024 /* To write PC we can do:
6025
6026 Before this sequence of instructions:
6027 r0 is the PC value read via displaced_read_reg, so r0 = from + 8;
6028 r2 is the Rn value read via displaced_read_reg.
6029
6030 Insn1: push {pc} Write address of STR instruction + offset on stack
6031 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6032 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6033 = addr(Insn1) + offset - addr(Insn3) - 8
6034 = offset - 16
6035 Insn4: add r4, r4, #8 r4 = offset - 8
6036 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6037 = from + offset
6038 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6039
6040 Otherwise we don't know what value to write for PC, since the offset is
6041 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6042 of this can be found in Section "Saving from r15" in
6043 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6044
6045 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6046 }
6047
6048 static int
6049 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
6050 struct regcache *regs,
6051 struct displaced_step_closure *dsc,
6052 int load, int size, int usermode)
6053 {
6054 int immed = !bit (insn, 25);
6055 int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
6056 unsigned int rt = bits (insn, 12, 15);
6057 unsigned int rn = bits (insn, 16, 19);
6058 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
6059
6060 if (!insn_references_pc (insn, 0x000ff00ful))
6061 return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
6062
6063 if (debug_displaced)
6064 fprintf_unfiltered (gdb_stdlog,
6065 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6066 load ? (size == 1 ? "ldrb" : "ldr")
6067 : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
6068 rt, rn,
6069 (unsigned long) insn);
6070
6071 install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
6072 usermode, rt, rm, rn);
6073
6074 if (load || rt != ARM_PC_REGNUM)
6075 {
6076 dsc->u.ldst.restore_r4 = 0;
6077
6078 if (immed)
6079 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6080 ->
6081 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6082 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6083 else
6084 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6085 ->
6086 {ldr,str}[b]<cond> r0, [r2, r3]. */
6087 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6088 }
6089 else
6090 {
6091 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6092 dsc->u.ldst.restore_r4 = 1;
6093 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
6094 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
6095 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
6096 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
6097 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
6098
6099 /* As above. */
6100 if (immed)
6101 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
6102 else
6103 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
6104
6105 dsc->numinsns = 6;
6106 }
6107
6108 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6109
6110 return 0;
6111 }
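/* An illustrative sketch (kept under "#if 0", so it is not built) of the
   common (non-PC-store) rewrite above.  The encoding is a made-up
   example: "ldr r3, [pc, #8]" is 0xe59f3008; clearing the Rn and Rd
   fields and setting Rn to 2 gives 0xe5920008, i.e. "ldr r0, [r2, #8]".
   install_load_store has already placed the original PC value (from + 8)
   in r2, and cleanup_load copies r0 back into r3 afterwards.  */
#if 0
static uint32_t
example_remap_ldr_imm (uint32_t insn)
{
  /* Keep cond, opcode and offset; retarget Rt to r0 and Rn to r2.  */
  return (insn & 0xfff00fff) | 0x20000;
}
#endif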
6112
6113 /* Cleanup LDM instructions with fully-populated register list. This is an
6114 unfortunate corner case: it's impossible to implement correctly by modifying
6115 the instruction. The issue is as follows: we have an instruction,
6116
6117 ldm rN, {r0-r15}
6118
6119 which we must rewrite to avoid loading PC. A possible solution would be to
6120 do the load in two halves, something like (with suitable cleanup
6121 afterwards):
6122
6123 mov r8, rN
6124 ldm[id][ab] r8!, {r0-r7}
6125 str r7, <temp>
6126 ldm[id][ab] r8, {r7-r14}
6127 <bkpt>
6128
6129 but at present there's no suitable place for <temp>, since the scratch space
6130 is overwritten before the cleanup routine is called. For now, we simply
6131 emulate the instruction. */
6132
6133 static void
6134 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6135 struct displaced_step_closure *dsc)
6136 {
6137 int inc = dsc->u.block.increment;
6138 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6139 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6140 uint32_t regmask = dsc->u.block.regmask;
6141 int regno = inc ? 0 : 15;
6142 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6143 int exception_return = dsc->u.block.load && dsc->u.block.user
6144 && (regmask & 0x8000) != 0;
6145 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6146 int do_transfer = condition_true (dsc->u.block.cond, status);
6147 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6148
6149 if (!do_transfer)
6150 return;
6151
6152 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6153 sensible we can do here. Complain loudly. */
6154 if (exception_return)
6155 error (_("Cannot single-step exception return"));
6156
6157 /* We don't handle any stores here for now. */
6158 gdb_assert (dsc->u.block.load != 0);
6159
6160 if (debug_displaced)
6161 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6162 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6163 dsc->u.block.increment ? "inc" : "dec",
6164 dsc->u.block.before ? "before" : "after");
6165
6166 while (regmask)
6167 {
6168 uint32_t memword;
6169
6170 if (inc)
6171 while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
6172 regno++;
6173 else
6174 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6175 regno--;
6176
6177 xfer_addr += bump_before;
6178
6179 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6180 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6181
6182 xfer_addr += bump_after;
6183
6184 regmask &= ~(1 << regno);
6185 }
6186
6187 if (dsc->u.block.writeback)
6188 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6189 CANNOT_WRITE_PC);
6190 }
6191
6192 /* Clean up an STM which included the PC in the register list. */
6193
6194 static void
6195 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6196 struct displaced_step_closure *dsc)
6197 {
6198 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6199 int store_executed = condition_true (dsc->u.block.cond, status);
6200 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6201 CORE_ADDR stm_insn_addr;
6202 uint32_t pc_val;
6203 long offset;
6204 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6205
6206 /* If condition code fails, there's nothing else to do. */
6207 if (!store_executed)
6208 return;
6209
6210 if (dsc->u.block.increment)
6211 {
6212 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6213
6214 if (dsc->u.block.before)
6215 pc_stored_at += 4;
6216 }
6217 else
6218 {
6219 pc_stored_at = dsc->u.block.xfer_addr;
6220
6221 if (dsc->u.block.before)
6222 pc_stored_at -= 4;
6223 }
6224
6225 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6226 stm_insn_addr = dsc->scratch_base;
6227 offset = pc_val - stm_insn_addr;
6228
6229 if (debug_displaced)
6230 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6231 "STM instruction\n", offset);
6232
6233 /* Rewrite the stored PC to the proper value for the non-displaced original
6234 instruction. */
6235 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6236 dsc->insn_addr + offset);
6237 }
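/* An illustrative sketch (kept under "#if 0", so it is not built) of the
   fixup above.  The addresses are made-up example values: if the
   displaced STM ran at scratch address 0x1000 and stored 0x1008 for PC,
   the detected offset is 8, so the stored word is rewritten to the
   original address plus 8 (0x8008 for an STM originally at 0x8000).  */
#if 0
static unsigned long
example_fixup_stored_pc (unsigned long pc_val, unsigned long scratch_base,
			 unsigned long insn_addr)
{
  long offset = pc_val - scratch_base;	/* e.g. 0x1008 - 0x1000 = 8.  */

  return insn_addr + offset;		/* e.g. 0x8000 + 8 = 0x8008.  */
}
#endif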
6238
6239 /* Clean up an LDM which includes the PC in the register list. We clumped all
6240 the registers in the transferred list into a contiguous range r0...rX (to
6241 avoid loading PC directly and losing control of the debugged program), so we
6242 must undo that here. */
6243
6244 static void
6245 cleanup_block_load_pc (struct gdbarch *gdbarch,
6246 struct regcache *regs,
6247 struct displaced_step_closure *dsc)
6248 {
6249 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6250 int load_executed = condition_true (dsc->u.block.cond, status), i;
6251 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6252 unsigned int regs_loaded = bitcount (mask);
6253 unsigned int num_to_shuffle = regs_loaded, clobbered;
6254
6255 /* The method employed here will fail if the register list is fully populated
6256 (we need to avoid loading PC directly). */
6257 gdb_assert (num_to_shuffle < 16);
6258
6259 if (!load_executed)
6260 return;
6261
6262 clobbered = (1 << num_to_shuffle) - 1;
6263
6264 while (num_to_shuffle > 0)
6265 {
6266 if ((mask & (1 << write_reg)) != 0)
6267 {
6268 unsigned int read_reg = num_to_shuffle - 1;
6269
6270 if (read_reg != write_reg)
6271 {
6272 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
6273 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
6274 if (debug_displaced)
6275 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
6276 "loaded register r%d to r%d\n"), read_reg,
6277 write_reg);
6278 }
6279 else if (debug_displaced)
6280 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
6281 "r%d already in the right place\n"),
6282 write_reg);
6283
6284 clobbered &= ~(1 << write_reg);
6285
6286 num_to_shuffle--;
6287 }
6288
6289 write_reg--;
6290 }
6291
6292 /* Restore any registers we scribbled over. */
6293 for (write_reg = 0; clobbered != 0; write_reg++)
6294 {
6295 if ((clobbered & (1 << write_reg)) != 0)
6296 {
6297 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
6298 CANNOT_WRITE_PC);
6299 if (debug_displaced)
6300 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
6301 "clobbered register r%d\n"), write_reg);
6302 clobbered &= ~(1 << write_reg);
6303 }
6304 }
6305
6306 /* Perform register writeback manually. */
6307 if (dsc->u.block.writeback)
6308 {
6309 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
6310
6311 if (dsc->u.block.increment)
6312 new_rn_val += regs_loaded * 4;
6313 else
6314 new_rn_val -= regs_loaded * 4;
6315
6316 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
6317 CANNOT_WRITE_PC);
6318 }
6319 }
6320
6321 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6322 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6323
6324 static int
6325 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
6326 struct regcache *regs,
6327 struct displaced_step_closure *dsc)
6328 {
6329 int load = bit (insn, 20);
6330 int user = bit (insn, 22);
6331 int increment = bit (insn, 23);
6332 int before = bit (insn, 24);
6333 int writeback = bit (insn, 21);
6334 int rn = bits (insn, 16, 19);
6335
6336 /* Block transfers which don't mention PC can be run directly
6337 out-of-line. */
6338 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6339 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6340
6341 if (rn == ARM_PC_REGNUM)
6342 {
6343 warning (_("displaced: Unpredictable LDM or STM with "
6344 "base register r15"));
6345 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6346 }
6347
6348 if (debug_displaced)
6349 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6350 "%.8lx\n", (unsigned long) insn);
6351
6352 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6353 dsc->u.block.rn = rn;
6354
6355 dsc->u.block.load = load;
6356 dsc->u.block.user = user;
6357 dsc->u.block.increment = increment;
6358 dsc->u.block.before = before;
6359 dsc->u.block.writeback = writeback;
6360 dsc->u.block.cond = bits (insn, 28, 31);
6361
6362 dsc->u.block.regmask = insn & 0xffff;
6363
6364 if (load)
6365 {
6366 if ((insn & 0xffff) == 0xffff)
6367 {
6368 /* LDM with a fully-populated register list. This case is
6369 particularly tricky. Implement for now by fully emulating the
6370 instruction (which might not behave perfectly in all cases, but
6371 these instructions should be rare enough for that not to matter
6372 too much). */
6373 dsc->modinsn[0] = ARM_NOP;
6374
6375 dsc->cleanup = &cleanup_block_load_all;
6376 }
6377 else
6378 {
6379 /* LDM of a list of registers which includes PC. Implement by
6380 rewriting the list of registers to be transferred into a
6381 contiguous chunk r0...rX before doing the transfer, then shuffling
6382 registers into the correct places in the cleanup routine. */
6383 unsigned int regmask = insn & 0xffff;
6384 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
6385 unsigned int to = 0, from = 0, i, new_rn;
6386
6387 for (i = 0; i < num_in_list; i++)
6388 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6389
6390 /* Writeback makes things complicated. We need to avoid clobbering
6391 the base register with one of the registers in our modified
6392 register list, but just using a different register can't work in
6393 all cases, e.g.:
6394
6395 ldm r14!, {r0-r13,pc}
6396
6397 which would need to be rewritten as:
6398
6399 ldm rN!, {r0-r14}
6400
6401 but that can't work, because there's no free register for N.
6402
6403 Solve this by turning off the writeback bit, and emulating
6404 writeback manually in the cleanup routine. */
6405
6406 if (writeback)
6407 insn &= ~(1 << 21);
6408
6409 new_regmask = (1 << num_in_list) - 1;
6410
6411 if (debug_displaced)
6412 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
6413 "{..., pc}: original reg list %.4x, modified "
6414 "list %.4x\n"), rn, writeback ? "!" : "",
6415 (int) insn & 0xffff, new_regmask);
6416
6417 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
6418
6419 dsc->cleanup = &cleanup_block_load_pc;
6420 }
6421 }
6422 else
6423 {
6424 /* STM of a list of registers which includes PC. Run the instruction
6425 as-is, but out of line: this will store the wrong value for the PC,
6426 so we must manually fix up the memory in the cleanup routine.
6427 Doing things this way has the advantage that we can auto-detect
6428 the offset of the PC write (which is architecture-dependent) in
6429 the cleanup routine. */
6430 dsc->modinsn[0] = insn;
6431
6432 dsc->cleanup = &cleanup_block_store_pc;
6433 }
6434
6435 return 0;
6436 }
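/* An illustrative sketch (kept under "#if 0", so it is not built) of the
   register-list compaction above, using the bitcount helper defined
   earlier in this file.  For example, "ldm r0, {r4, r7, pc}" has mask
   0x8090 with three bits set, so the displaced copy loads into
   {r0, r1, r2} (mask 0x0007); cleanup_block_load_pc then moves
   r2 -> pc, r1 -> r7 and r0 -> r4, and restores the clobbered scratch
   registers from dsc->tmp[].  The helper name is made up for
   illustration only.  */
#if 0
static unsigned int
example_compact_ldm_regmask (unsigned int regmask)
{
  unsigned int num_in_list = bitcount (regmask);	/* 3 for 0x8090.  */

  return (1 << num_in_list) - 1;			/* 0x0007.  */
}
#endif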
6437
6438 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6439 for Linux, where some SVC instructions must be treated specially. */
6440
6441 static void
6442 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6443 struct displaced_step_closure *dsc)
6444 {
6445 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6446
6447 if (debug_displaced)
6448 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
6449 "%.8lx\n", (unsigned long) resume_addr);
6450
6451 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6452 }
6453
6454 static int
6456 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6457 struct regcache *regs, struct displaced_step_closure *dsc)
6458 {
6460 if (debug_displaced)
6461 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6462 (unsigned long) insn);
6463
6464 /* Preparation: none.
6465 Insn: unmodified svc.
6466 Cleanup: pc <- insn_addr + 4. */
6467
6468 dsc->modinsn[0] = insn;
6469
6470 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6471 instruction. */
6472 dsc->wrote_to_pc = 1;
6473
6474 /* Allow OS-specific code to override SVC handling. */
6475 if (dsc->u.svc.copy_svc_os)
6476 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6477 else
6478 {
6479 dsc->cleanup = &cleanup_svc;
6480 return 0;
6481 }
6483 }
6484
6485 /* Copy undefined instructions. */
6486
6487 static int
6488 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6489 struct displaced_step_closure *dsc)
6490 {
6491 if (debug_displaced)
6492 fprintf_unfiltered (gdb_stdlog,
6493 "displaced: copying undefined insn %.8lx\n",
6494 (unsigned long) insn);
6495
6496 dsc->modinsn[0] = insn;
6497
6498 return 0;
6499 }
6500
6501 /* Copy unpredictable instructions. */
6502
6503 static int
6504 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6505 struct displaced_step_closure *dsc)
6506 {
6507 if (debug_displaced)
6508 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6509 "%.8lx\n", (unsigned long) insn);
6510
6511 dsc->modinsn[0] = insn;
6512
6513 return 0;
6514 }
6515
6516 /* The decode_* functions are instruction decoding helpers. They mostly follow
6517 the presentation in the ARM ARM. */
6518
6519 static int
6520 arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
6521 struct regcache *regs,
6522 struct displaced_step_closure *dsc)
6523 {
6524 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
6525 unsigned int rn = bits (insn, 16, 19);
6526
6527 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
6528 return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
6529 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
6530 return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
6531 else if ((op1 & 0x60) == 0x20)
6532 return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
6533 else if ((op1 & 0x71) == 0x40)
6534 return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
6535 dsc);
6536 else if ((op1 & 0x77) == 0x41)
6537 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6538 else if ((op1 & 0x77) == 0x45)
6539 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pli. */
6540 else if ((op1 & 0x77) == 0x51)
6541 {
6542 if (rn != 0xf)
6543 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6544 else
6545 return arm_copy_unpred (gdbarch, insn, dsc);
6546 }
6547 else if ((op1 & 0x77) == 0x55)
6548 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
6549 else if (op1 == 0x57)
6550 switch (op2)
6551 {
6552 case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
6553 case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
6554 case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
6555 case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
6556 default: return arm_copy_unpred (gdbarch, insn, dsc);
6557 }
6558 else if ((op1 & 0x63) == 0x43)
6559 return arm_copy_unpred (gdbarch, insn, dsc);
6560 else if ((op2 & 0x1) == 0x0)
6561 switch (op1 & ~0x80)
6562 {
6563 case 0x61:
6564 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
6565 case 0x65:
6566 return arm_copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
6567 case 0x71: case 0x75:
6568 /* pld/pldw reg. */
6569 return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
6570 case 0x63: case 0x67: case 0x73: case 0x77:
6571 return arm_copy_unpred (gdbarch, insn, dsc);
6572 default:
6573 return arm_copy_undef (gdbarch, insn, dsc);
6574 }
6575 else
6576 return arm_copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
6577 }
6578
6579 static int
6580 arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
6581 struct regcache *regs,
6582 struct displaced_step_closure *dsc)
6583 {
6584 if (bit (insn, 27) == 0)
6585 return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
6586 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
6587 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
6588 {
6589 case 0x0: case 0x2:
6590 return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
6591
6592 case 0x1: case 0x3:
6593 return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
6594
6595 case 0x4: case 0x5: case 0x6: case 0x7:
6596 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6597
6598 case 0x8:
6599 switch ((insn & 0xe00000) >> 21)
6600 {
6601 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
6602 /* stc/stc2. */
6603 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6604
6605 case 0x2:
6606 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6607
6608 default:
6609 return arm_copy_undef (gdbarch, insn, dsc);
6610 }
6611
6612 case 0x9:
6613 {
6614 int rn_f = (bits (insn, 16, 19) == 0xf);
6615 switch ((insn & 0xe00000) >> 21)
6616 {
6617 case 0x1: case 0x3:
6618 /* ldc/ldc2 imm (undefined for rn == pc). */
6619 return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
6620 : arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6621
6622 case 0x2:
6623 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6624
6625 case 0x4: case 0x5: case 0x6: case 0x7:
6626 /* ldc/ldc2 lit (undefined for rn != pc). */
6627 return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
6628 : arm_copy_undef (gdbarch, insn, dsc);
6629
6630 default:
6631 return arm_copy_undef (gdbarch, insn, dsc);
6632 }
6633 }
6634
6635 case 0xa:
6636 return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
6637
6638 case 0xb:
6639 if (bits (insn, 16, 19) == 0xf)
6640 /* ldc/ldc2 lit. */
6641 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6642 else
6643 return arm_copy_undef (gdbarch, insn, dsc);
6644
6645 case 0xc:
6646 if (bit (insn, 4))
6647 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6648 else
6649 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6650
6651 case 0xd:
6652 if (bit (insn, 4))
6653 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6654 else
6655 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6656
6657 default:
6658 return arm_copy_undef (gdbarch, insn, dsc);
6659 }
6660 }
6661
6662 /* Decode miscellaneous instructions in dp/misc encoding space. */
6663
6664 static int
6665 arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
6666 struct regcache *regs,
6667 struct displaced_step_closure *dsc)
6668 {
6669 unsigned int op2 = bits (insn, 4, 6);
6670 unsigned int op = bits (insn, 21, 22);
6671 unsigned int op1 = bits (insn, 16, 19);
6672
6673 switch (op2)
6674 {
6675 case 0x0:
6676 return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
6677
6678 case 0x1:
6679 if (op == 0x1) /* bx. */
6680 return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
6681 else if (op == 0x3)
6682 return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
6683 else
6684 return arm_copy_undef (gdbarch, insn, dsc);
6685
6686 case 0x2:
6687 if (op == 0x1)
6688 /* Not really supported. */
6689 return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
6690 else
6691 return arm_copy_undef (gdbarch, insn, dsc);
6692
6693 case 0x3:
6694 if (op == 0x1)
6695 return arm_copy_bx_blx_reg (gdbarch, insn,
6696 regs, dsc); /* blx register. */
6697 else
6698 return arm_copy_undef (gdbarch, insn, dsc);
6699
6700 case 0x5:
6701 return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
6702
6703 case 0x7:
6704 if (op == 0x1)
6705 return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
6706 else if (op == 0x3)
6707 /* Not really supported. */
6708 return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
6709
6710 default:
6711 return arm_copy_undef (gdbarch, insn, dsc);
6712 }
6713 }
6714
6715 static int
6716 arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
6717 struct regcache *regs,
6718 struct displaced_step_closure *dsc)
6719 {
6720 if (bit (insn, 25))
6721 switch (bits (insn, 20, 24))
6722 {
6723 case 0x10:
6724 return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
6725
6726 case 0x14:
6727 return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
6728
6729 case 0x12: case 0x16:
6730 return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
6731
6732 default:
6733 return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
6734 }
6735 else
6736 {
6737 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
6738
6739 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
6740 return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
6741 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
6742 return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
6743 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
6744 return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
6745 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
6746 return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
6747 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
6748 return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
6749 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
6750 return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
6751 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
6752 /* 2nd arg means "unprivileged".  */
6753 return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
6754 dsc);
6755 }
6756
6757 /* Should be unreachable. */
6758 return 1;
6759 }
6760
6761 static int
6762 arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
6763 struct regcache *regs,
6764 struct displaced_step_closure *dsc)
6765 {
6766 int a = bit (insn, 25), b = bit (insn, 4);
6767 uint32_t op1 = bits (insn, 20, 24);
6768 int rn_f = bits (insn, 16, 19) == 0xf;
6769
6770 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
6771 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
6772 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
6773 else if ((!a && (op1 & 0x17) == 0x02)
6774 || (a && (op1 & 0x17) == 0x02 && !b))
6775 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
6776 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
6777 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
6778 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
6779 else if ((!a && (op1 & 0x17) == 0x03)
6780 || (a && (op1 & 0x17) == 0x03 && !b))
6781 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
6782 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
6783 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
6784 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
6785 else if ((!a && (op1 & 0x17) == 0x06)
6786 || (a && (op1 & 0x17) == 0x06 && !b))
6787 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
6788 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
6789 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
6790 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
6791 else if ((!a && (op1 & 0x17) == 0x07)
6792 || (a && (op1 & 0x17) == 0x07 && !b))
6793 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
6794
6795 /* Should be unreachable. */
6796 return 1;
6797 }
6798
6799 static int
6800 arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
6801 struct displaced_step_closure *dsc)
6802 {
6803 switch (bits (insn, 20, 24))
6804 {
6805 case 0x00: case 0x01: case 0x02: case 0x03:
6806 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
6807
6808 case 0x04: case 0x05: case 0x06: case 0x07:
6809 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
6810
6811 case 0x08: case 0x09: case 0x0a: case 0x0b:
6812 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
6813 return arm_copy_unmodified (gdbarch, insn,
6814 "decode/pack/unpack/saturate/reverse", dsc);
6815
6816 case 0x18:
6817 if (bits (insn, 5, 7) == 0) /* op2. */
6818 {
6819 if (bits (insn, 12, 15) == 0xf)
6820 return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
6821 else
6822 return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
6823 }
6824 else
6825 return arm_copy_undef (gdbarch, insn, dsc);
6826
6827 case 0x1a: case 0x1b:
6828 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6829 return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
6830 else
6831 return arm_copy_undef (gdbarch, insn, dsc);
6832
6833 case 0x1c: case 0x1d:
6834 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
6835 {
6836 if (bits (insn, 0, 3) == 0xf)
6837 return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
6838 else
6839 return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
6840 }
6841 else
6842 return arm_copy_undef (gdbarch, insn, dsc);
6843
6844 case 0x1e: case 0x1f:
6845 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
6846 return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
6847 else
6848 return arm_copy_undef (gdbarch, insn, dsc);
6849 }
6850
6851 /* Should be unreachable. */
6852 return 1;
6853 }
6854
6855 static int
6856 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, uint32_t insn,
6857 struct regcache *regs,
6858 struct displaced_step_closure *dsc)
6859 {
6860 if (bit (insn, 25))
6861 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6862 else
6863 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
6864 }
6865
6866 static int
6867 arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
6868 struct regcache *regs,
6869 struct displaced_step_closure *dsc)
6870 {
6871 unsigned int opcode = bits (insn, 20, 24);
6872
6873 switch (opcode)
6874 {
6875 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
6876 return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
6877
6878 case 0x08: case 0x0a: case 0x0c: case 0x0e:
6879 case 0x12: case 0x16:
6880 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
6881
6882 case 0x09: case 0x0b: case 0x0d: case 0x0f:
6883 case 0x13: case 0x17:
6884 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
6885
6886 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
6887 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
6888 /* Note: no writeback for these instructions. Bit 25 will always be
6889 zero though (via caller), so the following works OK. */
6890 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6891 }
6892
6893 /* Should be unreachable. */
6894 return 1;
6895 }
6896
6897 static int
6898 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6899 struct regcache *regs, struct displaced_step_closure *dsc)
6900 {
6901 unsigned int op1 = bits (insn, 20, 25);
6902 int op = bit (insn, 4);
6903 unsigned int coproc = bits (insn, 8, 11);
6904 unsigned int rn = bits (insn, 16, 19);
6905
6906 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6907 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6908 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6909 && (coproc & 0xe) != 0xa)
6910 /* stc/stc2. */
6911 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6912 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6913 && (coproc & 0xe) != 0xa)
6914 /* ldc/ldc2 imm/lit. */
6915 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
6916 else if ((op1 & 0x3e) == 0x00)
6917 return arm_copy_undef (gdbarch, insn, dsc);
6918 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6919 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6920 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6921 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6922 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6923 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6924 else if ((op1 & 0x30) == 0x20 && !op)
6925 {
6926 if ((coproc & 0xe) == 0xa)
6927 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6928 else
6929 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6930 }
6931 else if ((op1 & 0x30) == 0x20 && op)
6932 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6933 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6934 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6935 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6936 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6937 else if ((op1 & 0x30) == 0x30)
6938 return arm_copy_svc (gdbarch, insn, regs, dsc);
6939 else
6940 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6941 }
6942
6943 static void
6944 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6945 CORE_ADDR to, struct regcache *regs,
6946 struct displaced_step_closure *dsc)
6947 {
6948 error (_("Displaced stepping is only supported in ARM mode"));
6949 }
6950
6951 void
6952 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
6953 CORE_ADDR to, struct regcache *regs,
6954 struct displaced_step_closure *dsc)
6955 {
6956 int err = 0;
6957 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6958 uint32_t insn;
6959
6960 /* Most displaced instructions use a 1-instruction scratch space, so set this
6961 here and override below if/when necessary. */
6962 dsc->numinsns = 1;
6963 dsc->insn_addr = from;
6964 dsc->scratch_base = to;
6965 dsc->cleanup = NULL;
6966 dsc->wrote_to_pc = 0;
6967
6968 if (!displaced_in_arm_mode (regs))
6969 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
6970
6971 dsc->is_thumb = 0;
6972 dsc->insn_size = 4;
6973 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6974 if (debug_displaced)
6975 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6976 "at %.8lx\n", (unsigned long) insn,
6977 (unsigned long) from);
6978
6979 if ((insn & 0xf0000000) == 0xf0000000)
6980 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
6981 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
6982 {
6983 case 0x0: case 0x1: case 0x2: case 0x3:
6984 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
6985 break;
6986
6987 case 0x4: case 0x5: case 0x6:
6988 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
6989 break;
6990
6991 case 0x7:
6992 err = arm_decode_media (gdbarch, insn, dsc);
6993 break;
6994
6995 case 0x8: case 0x9: case 0xa: case 0xb:
6996 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
6997 break;
6998
6999 case 0xc: case 0xd: case 0xe: case 0xf:
7000 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
7001 break;
7002 }
7003
7004 if (err)
7005 internal_error (__FILE__, __LINE__,
7006 _("arm_process_displaced_insn: Instruction decode error"));
7007 }
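/* An illustrative sketch (kept under "#if 0", so it is not built) of the
   dispatch key used in the switch above: bits 27:25 of the instruction
   end up in bits 3:1 of the key and bit 4 ends up in bit 0, giving a
   value from 0x0 to 0xf.  For example, an immediate-offset ldr/str
   (bits 27:25 = 0b010) yields 0x4 or 0x5, and b/bl (bits 27:25 = 0b101)
   yields 0xa or 0xb.  The helper name is made up for illustration.  */
#if 0
static unsigned int
example_arm_dispatch_key (uint32_t insn)
{
  return ((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24);
}
#endif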
7008
7009 /* Actually set up the scratch space for a displaced instruction. */
7010
7011 void
7012 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
7013 CORE_ADDR to, struct displaced_step_closure *dsc)
7014 {
7015 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7016 unsigned int i, len, offset;
7017 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7018 int size = dsc->is_thumb ? 2 : 4;
7019 const unsigned char *bkp_insn;
7020
7021 offset = 0;
7022 /* Poke modified instruction(s). */
7023 for (i = 0; i < dsc->numinsns; i++)
7024 {
7025 if (debug_displaced)
7026 {
7027 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
7028 if (size == 4)
7029 fprintf_unfiltered (gdb_stdlog, "%.8lx",
7030 dsc->modinsn[i]);
7031 else if (size == 2)
7032 fprintf_unfiltered (gdb_stdlog, "%.4x",
7033 (unsigned short)dsc->modinsn[i]);
7034
7035 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
7036 (unsigned long) to + offset);
7037
7038 }
7039 write_memory_unsigned_integer (to + offset, size,
7040 byte_order_for_code,
7041 dsc->modinsn[i]);
7042 offset += size;
7043 }
7044
7045 /* Choose the correct breakpoint instruction. */
7046 if (dsc->is_thumb)
7047 {
7048 bkp_insn = tdep->thumb_breakpoint;
7049 len = tdep->thumb_breakpoint_size;
7050 }
7051 else
7052 {
7053 bkp_insn = tdep->arm_breakpoint;
7054 len = tdep->arm_breakpoint_size;
7055 }
7056
7057 /* Put breakpoint afterwards. */
7058 write_memory (to + offset, bkp_insn, len);
7059
7060 if (debug_displaced)
7061 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
7062 paddress (gdbarch, from), paddress (gdbarch, to));
7063 }
7064
7065 /* Entry point for copying an instruction into scratch space for displaced
7066 stepping. */
7067
7068 struct displaced_step_closure *
7069 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
7070 CORE_ADDR from, CORE_ADDR to,
7071 struct regcache *regs)
7072 {
7073 struct displaced_step_closure *dsc
7074 = xmalloc (sizeof (struct displaced_step_closure));
7075 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
7076 arm_displaced_init_closure (gdbarch, from, to, dsc);
7077
7078 return dsc;
7079 }
7080
7081 /* Entry point for cleaning things up after a displaced instruction has been
7082 single-stepped. */
7083
7084 void
7085 arm_displaced_step_fixup (struct gdbarch *gdbarch,
7086 struct displaced_step_closure *dsc,
7087 CORE_ADDR from, CORE_ADDR to,
7088 struct regcache *regs)
7089 {
7090 if (dsc->cleanup)
7091 dsc->cleanup (gdbarch, regs, dsc);
7092
7093 if (!dsc->wrote_to_pc)
7094 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
7095 dsc->insn_addr + dsc->insn_size);
7096
7097 }
7098
7099 #include "bfd-in2.h"
7100 #include "libcoff.h"
7101
7102 static int
7103 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
7104 {
7105 struct gdbarch *gdbarch = info->application_data;
7106
7107 if (arm_pc_is_thumb (gdbarch, memaddr))
7108 {
7109 static asymbol *asym;
7110 static combined_entry_type ce;
7111 static struct coff_symbol_struct csym;
7112 static struct bfd fake_bfd;
7113 static bfd_target fake_target;
7114
7115 if (csym.native == NULL)
7116 {
7117 /* Create a fake symbol vector containing a Thumb symbol.
7118 This is solely so that the code in print_insn_little_arm()
7119 and print_insn_big_arm() in opcodes/arm-dis.c will detect
7120 the presence of a Thumb symbol and switch to decoding
7121 Thumb instructions. */
7122
7123 fake_target.flavour = bfd_target_coff_flavour;
7124 fake_bfd.xvec = &fake_target;
7125 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
7126 csym.native = &ce;
7127 csym.symbol.the_bfd = &fake_bfd;
7128 csym.symbol.name = "fake";
7129 asym = (asymbol *) & csym;
7130 }
7131
7132 memaddr = UNMAKE_THUMB_ADDR (memaddr);
7133 info->symbols = &asym;
7134 }
7135 else
7136 info->symbols = NULL;
7137
7138 if (info->endian == BFD_ENDIAN_BIG)
7139 return print_insn_big_arm (memaddr, info);
7140 else
7141 return print_insn_little_arm (memaddr, info);
7142 }
7143
7144 /* The following define instruction sequences that will cause ARM
7145 CPUs to take an undefined instruction trap.  These are used to
7146 signal a breakpoint to GDB.
7147
7148 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
7149 modes.  A different instruction is required for each mode.  The ARM
7150 CPUs can also be big or little endian.  Thus four different
7151 instructions are needed to support all cases.
7152
7153 Note: ARMv4 defines several new instructions that will take the
7154 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7155 not in fact add the new instructions. The new undefined
7156 instructions in ARMv4 are all instructions that had no defined
7157 behaviour in earlier chips.  There is no guarantee that they will
7158 raise an exception; they may be treated as NOPs instead.  In practice,
7159 it may only be safe to rely on instructions matching:
7160
7161 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7162 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7163 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7164
7165 Even this may only be true if the condition predicate is true.  The
7166 following use a condition predicate of ALWAYS, so it is always TRUE.
7167
7168 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7169 and NetBSD all use a software interrupt rather than an undefined
7170 instruction to force a trap.  This can be handled by the
7171 ABI-specific code during establishment of the gdbarch vector. */
7172
7173 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7174 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7175 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7176 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7177
7178 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7179 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7180 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7181 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7182
7183 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
7184 the program counter value to determine whether a 16-bit or 32-bit
7185 breakpoint should be used. It returns a pointer to a string of
7186 bytes that encode a breakpoint instruction, stores the length of
7187 the string to *lenptr, and adjusts the program counter (if
7188 necessary) to point to the actual memory location where the
7189 breakpoint should be inserted. */
7190
7191 static const unsigned char *
7192 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
7193 {
7194 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7195 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7196
7197 if (arm_pc_is_thumb (gdbarch, *pcptr))
7198 {
7199 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7200
7201 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7202 check whether we are replacing a 32-bit instruction. */
7203 if (tdep->thumb2_breakpoint != NULL)
7204 {
7205 gdb_byte buf[2];
7206 if (target_read_memory (*pcptr, buf, 2) == 0)
7207 {
7208 unsigned short inst1;
7209 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
7210 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
7211 {
7212 *lenptr = tdep->thumb2_breakpoint_size;
7213 return tdep->thumb2_breakpoint;
7214 }
7215 }
7216 }
7217
7218 *lenptr = tdep->thumb_breakpoint_size;
7219 return tdep->thumb_breakpoint;
7220 }
7221 else
7222 {
7223 *lenptr = tdep->arm_breakpoint_size;
7224 return tdep->arm_breakpoint;
7225 }
7226 }
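
/* As a worked example of the 32-bit test above: 0xf000, the first
   halfword of a 32-bit Thumb-2 BL, has bits [15:13] all set and bits
   [12:11] nonzero, so the Thumb-2 breakpoint is chosen; 0xb580
   ("push {r7, lr}") fails the first mask and gets the 16-bit
   breakpoint. */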
7227
7228 static void
7229 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
7230 int *kindptr)
7231 {
7232 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7233
7234 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
7235
7236 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
7237 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
7238 that this is not confused with a 32-bit ARM breakpoint. */
7239 *kindptr = 3;
7240 }
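
/* The "kind" values reported to the remote stub are therefore 2 for a
   16-bit Thumb breakpoint, 3 for a 32-bit Thumb-2 breakpoint and 4
   for a 32-bit ARM breakpoint. */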
7241
7242 /* Extract from an array REGBUF containing the (raw) register state a
7243 function return value of type TYPE, and copy that, in virtual
7244 format, into VALBUF. */
7245
7246 static void
7247 arm_extract_return_value (struct type *type, struct regcache *regs,
7248 gdb_byte *valbuf)
7249 {
7250 struct gdbarch *gdbarch = get_regcache_arch (regs);
7251 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7252
7253 if (TYPE_CODE_FLT == TYPE_CODE (type))
7254 {
7255 switch (gdbarch_tdep (gdbarch)->fp_model)
7256 {
7257 case ARM_FLOAT_FPA:
7258 {
7259 /* The value is in register F0 in internal format. We need to
7260 extract the raw value and then convert it to the desired
7261 internal type. */
7262 bfd_byte tmpbuf[FP_REGISTER_SIZE];
7263
7264 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
7265 convert_from_extended (floatformat_from_type (type), tmpbuf,
7266 valbuf, gdbarch_byte_order (gdbarch));
7267 }
7268 break;
7269
7270 case ARM_FLOAT_SOFT_FPA:
7271 case ARM_FLOAT_SOFT_VFP:
7272 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7273 not using the VFP ABI code. */
7274 case ARM_FLOAT_VFP:
7275 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
7276 if (TYPE_LENGTH (type) > 4)
7277 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
7278 valbuf + INT_REGISTER_SIZE);
7279 break;
7280
7281 default:
7282 internal_error (__FILE__, __LINE__,
7283 _("arm_extract_return_value: "
7284 "Floating point model not supported"));
7285 break;
7286 }
7287 }
7288 else if (TYPE_CODE (type) == TYPE_CODE_INT
7289 || TYPE_CODE (type) == TYPE_CODE_CHAR
7290 || TYPE_CODE (type) == TYPE_CODE_BOOL
7291 || TYPE_CODE (type) == TYPE_CODE_PTR
7292 || TYPE_CODE (type) == TYPE_CODE_REF
7293 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7294 {
7295 /* If the type is a plain integer, then the access is
7296 straight-forward. Otherwise we have to play around a bit
7297 more. */
7298 int len = TYPE_LENGTH (type);
7299 int regno = ARM_A1_REGNUM;
7300 ULONGEST tmp;
7301
7302 while (len > 0)
7303 {
7304 /* By using store_unsigned_integer we avoid having to do
7305 anything special for small big-endian values. */
7306 regcache_cooked_read_unsigned (regs, regno++, &tmp);
7307 store_unsigned_integer (valbuf,
7308 (len > INT_REGISTER_SIZE
7309 ? INT_REGISTER_SIZE : len),
7310 byte_order, tmp);
7311 len -= INT_REGISTER_SIZE;
7312 valbuf += INT_REGISTER_SIZE;
7313 }
7314 }
7315 else
7316 {
7317 /* For a structure or union the behaviour is as if the value had
7318 been stored to word-aligned memory and then loaded into
7319 registers with 32-bit load instruction(s). */
7320 int len = TYPE_LENGTH (type);
7321 int regno = ARM_A1_REGNUM;
7322 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7323
7324 while (len > 0)
7325 {
7326 regcache_cooked_read (regs, regno++, tmpbuf);
7327 memcpy (valbuf, tmpbuf,
7328 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7329 len -= INT_REGISTER_SIZE;
7330 valbuf += INT_REGISTER_SIZE;
7331 }
7332 }
7333 }
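
/* For example, a 64-bit integer result comes back in r0 and r1, in
   that order; store_unsigned_integer writes each word in target byte
   order, so VALBUF ends up holding the value exactly as it would
   appear in target memory. */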
7334
7335
7336 /* Will a function return an aggregate type in memory or in a
7337 register? Return 0 if an aggregate type can be returned in a
7338 register, 1 if it must be returned in memory. */
7339
7340 static int
7341 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7342 {
7343 int nRc;
7344 enum type_code code;
7345
7346 CHECK_TYPEDEF (type);
7347
7348 /* In the ARM ABI, "integer" like aggregate types are returned in
7349 registers. For an aggregate type to be integer like, its size
7350 must be less than or equal to INT_REGISTER_SIZE and the
7351 offset of each addressable subfield must be zero. Note that bit
7352 fields are not addressable, and all addressable subfields of
7353 unions always start at offset zero.
7354
7355 This function is based on the behaviour of GCC 2.95.1.
7356 See: gcc/arm.c: arm_return_in_memory() for details.
7357
7358 Note: All versions of GCC before GCC 2.95.2 do not set up the
7359 parameters correctly for a function returning the following
7360 structure: struct { float f;}; This should be returned in memory,
7361 not a register. Richard Earnshaw sent me a patch, but I do not
7362 know of any way to detect if a function like the above has been
7363 compiled with the correct calling convention. */
7364
7365 /* All aggregate types that won't fit in a register must be returned
7366 in memory. */
7367 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7368 {
7369 return 1;
7370 }
7371
7372 /* The AAPCS says all aggregates not larger than a word are returned
7373 in a register. */
7374 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7375 return 0;
7376
7377 /* The only aggregate types that can be returned in a register are
7378 structs and unions. Arrays must be returned in memory. */
7379 code = TYPE_CODE (type);
7380 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
7381 {
7382 return 1;
7383 }
7384
7385 /* Assume all other aggregate types can be returned in a register.
7386 Run a check for structures, unions and arrays. */
7387 nRc = 0;
7388
7389 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7390 {
7391 int i;
7392 /* Need to check if this struct/union is "integer" like. For
7393 this to be true, its size must be less than or equal to
7394 INT_REGISTER_SIZE and the offset of each addressable
7395 subfield must be zero. Note that bit fields are not
7396 addressable, and unions always start at offset zero. If any
7397 of the subfields is a floating point type, the struct/union
7398 cannot be an integer type. */
7399
7400 /* For each field in the object, check:
7401 1) Is it FP? --> yes, nRc = 1;
7402 2) Is it addressable (bitpos != 0) and
7403 not packed (bitsize == 0)?
7404 --> yes, nRc = 1
7405 */
7406
7407 for (i = 0; i < TYPE_NFIELDS (type); i++)
7408 {
7409 enum type_code field_type_code;
7410 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7411 i)));
7412
7413 /* Is it a floating point type field? */
7414 if (field_type_code == TYPE_CODE_FLT)
7415 {
7416 nRc = 1;
7417 break;
7418 }
7419
7420 /* If bitpos != 0, the field does not start at offset zero, so we have to examine it further. */
7421 if (TYPE_FIELD_BITPOS (type, i) != 0)
7422 {
7423 /* Bitfields are not addressable. If the field bitsize is
7424 zero, then the field is not packed. Hence it cannot be
7425 a bitfield or any other packed type. */
7426 if (TYPE_FIELD_BITSIZE (type, i) == 0)
7427 {
7428 nRc = 1;
7429 break;
7430 }
7431 }
7432 }
7433 }
7434
7435 return nRc;
7436 }
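
/* Two examples of the APCS heuristic above: "struct { char c; }" is
   integer-like (it fits in a word and its only field is at offset
   zero), so it is returned in r0; "struct { float f; }" contains a
   floating-point field, so nRc is set and it is returned in memory. */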
7437
7438 /* Write into appropriate registers a function return value of type
7439 TYPE, given in virtual format. */
7440
7441 static void
7442 arm_store_return_value (struct type *type, struct regcache *regs,
7443 const gdb_byte *valbuf)
7444 {
7445 struct gdbarch *gdbarch = get_regcache_arch (regs);
7446 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7447
7448 if (TYPE_CODE (type) == TYPE_CODE_FLT)
7449 {
7450 char buf[MAX_REGISTER_SIZE];
7451
7452 switch (gdbarch_tdep (gdbarch)->fp_model)
7453 {
7454 case ARM_FLOAT_FPA:
7455
7456 convert_to_extended (floatformat_from_type (type), buf, valbuf,
7457 gdbarch_byte_order (gdbarch));
7458 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
7459 break;
7460
7461 case ARM_FLOAT_SOFT_FPA:
7462 case ARM_FLOAT_SOFT_VFP:
7463 /* ARM_FLOAT_VFP can arise if this is a variadic function so
7464 not using the VFP ABI code. */
7465 case ARM_FLOAT_VFP:
7466 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
7467 if (TYPE_LENGTH (type) > 4)
7468 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
7469 valbuf + INT_REGISTER_SIZE);
7470 break;
7471
7472 default:
7473 internal_error (__FILE__, __LINE__,
7474 _("arm_store_return_value: Floating "
7475 "point model not supported"));
7476 break;
7477 }
7478 }
7479 else if (TYPE_CODE (type) == TYPE_CODE_INT
7480 || TYPE_CODE (type) == TYPE_CODE_CHAR
7481 || TYPE_CODE (type) == TYPE_CODE_BOOL
7482 || TYPE_CODE (type) == TYPE_CODE_PTR
7483 || TYPE_CODE (type) == TYPE_CODE_REF
7484 || TYPE_CODE (type) == TYPE_CODE_ENUM)
7485 {
7486 if (TYPE_LENGTH (type) <= 4)
7487 {
7488 /* Values of one word or less are zero/sign-extended and
7489 returned in r0. */
7490 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7491 LONGEST val = unpack_long (type, valbuf);
7492
7493 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
7494 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
7495 }
7496 else
7497 {
7498 /* Integral values greater than one word are stored in consecutive
7499 registers starting with r0. This will always be a multiple of
7500 the register size. */
7501 int len = TYPE_LENGTH (type);
7502 int regno = ARM_A1_REGNUM;
7503
7504 while (len > 0)
7505 {
7506 regcache_cooked_write (regs, regno++, valbuf);
7507 len -= INT_REGISTER_SIZE;
7508 valbuf += INT_REGISTER_SIZE;
7509 }
7510 }
7511 }
7512 else
7513 {
7514 /* For a structure or union the behaviour is as if the value had
7515 been stored to word-aligned memory and then loaded into
7516 registers with 32-bit load instruction(s). */
7517 int len = TYPE_LENGTH (type);
7518 int regno = ARM_A1_REGNUM;
7519 bfd_byte tmpbuf[INT_REGISTER_SIZE];
7520
7521 while (len > 0)
7522 {
7523 memcpy (tmpbuf, valbuf,
7524 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
7525 regcache_cooked_write (regs, regno++, tmpbuf);
7526 len -= INT_REGISTER_SIZE;
7527 valbuf += INT_REGISTER_SIZE;
7528 }
7529 }
7530 }
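
/* For instance, returning (short) -1 goes through the small-integer
   path above: unpack_long yields -1, which store_signed_integer
   sign-extends to a full word, so r0 is written with 0xffffffff. */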
7531
7532
7533 /* Handle function return values. */
7534
7535 static enum return_value_convention
7536 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
7537 struct type *valtype, struct regcache *regcache,
7538 gdb_byte *readbuf, const gdb_byte *writebuf)
7539 {
7540 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7541 enum arm_vfp_cprc_base_type vfp_base_type;
7542 int vfp_base_count;
7543
7544 if (arm_vfp_abi_for_function (gdbarch, func_type)
7545 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
7546 {
7547 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
7548 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
7549 int i;
7550 for (i = 0; i < vfp_base_count; i++)
7551 {
7552 if (reg_char == 'q')
7553 {
7554 if (writebuf)
7555 arm_neon_quad_write (gdbarch, regcache, i,
7556 writebuf + i * unit_length);
7557
7558 if (readbuf)
7559 arm_neon_quad_read (gdbarch, regcache, i,
7560 readbuf + i * unit_length);
7561 }
7562 else
7563 {
7564 char name_buf[4];
7565 int regnum;
7566
7567 sprintf (name_buf, "%c%d", reg_char, i);
7568 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7569 strlen (name_buf));
7570 if (writebuf)
7571 regcache_cooked_write (regcache, regnum,
7572 writebuf + i * unit_length);
7573 if (readbuf)
7574 regcache_cooked_read (regcache, regnum,
7575 readbuf + i * unit_length);
7576 }
7577 }
7578 return RETURN_VALUE_REGISTER_CONVENTION;
7579 }
7580
7581 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
7582 || TYPE_CODE (valtype) == TYPE_CODE_UNION
7583 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
7584 {
7585 if (tdep->struct_return == pcc_struct_return
7586 || arm_return_in_memory (gdbarch, valtype))
7587 return RETURN_VALUE_STRUCT_CONVENTION;
7588 }
7589
7590 if (writebuf)
7591 arm_store_return_value (valtype, regcache, writebuf);
7592
7593 if (readbuf)
7594 arm_extract_return_value (valtype, regcache, readbuf);
7595
7596 return RETURN_VALUE_REGISTER_CONVENTION;
7597 }
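
/* Example: under the VFP ABI, "struct { float a, b, c, d; }" is a
   homogeneous floating-point aggregate of four single-precision
   members, so the loop above transfers it via s0-s3 rather than the
   core registers r0-r3. */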
7598
7599
7600 static int
7601 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
7602 {
7603 struct gdbarch *gdbarch = get_frame_arch (frame);
7604 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7605 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7606 CORE_ADDR jb_addr;
7607 char buf[INT_REGISTER_SIZE];
7608
7609 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
7610
7611 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
7612 INT_REGISTER_SIZE))
7613 return 0;
7614
7615 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
7616 return 1;
7617 }
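
/* The saved PC is tdep->jb_pc slots of tdep->jb_elt_size bytes into
   the jmp_buf pointed to by r0. Both fields are supplied by the
   OS-ABI code, and this hook is only installed when jb_pc has been
   set (see arm_gdbarch_init below). */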
7618
7619 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
7620 return the target PC. Otherwise return 0. */
7621
7622 CORE_ADDR
7623 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
7624 {
7625 char *name;
7626 int namelen;
7627 CORE_ADDR start_addr;
7628
7629 /* Find the starting address and name of the function containing the PC. */
7630 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
7631 return 0;
7632
7633 /* If PC is in a Thumb call or return stub, return the address of the
7634 target PC, which is in a register. The thunk functions are called
7635 _call_via_xx, where xx is the register name. The possible names
7636 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
7637 functions, named __ARM_call_via_r[0-7]. */
7638 if (strncmp (name, "_call_via_", 10) == 0
7639 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
7640 {
7641 /* Use the name suffix to determine which register contains the
7642 target PC. */
7643 static char *table[15] =
7644 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
7645 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
7646 };
7647 int regno;
7648 int offset = strlen (name) - 2;
7649
7650 for (regno = 0; regno <= 14; regno++)
7651 if (strcmp (&name[offset], table[regno]) == 0)
7652 return get_frame_register_unsigned (frame, regno);
7653 }
7654
7655 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
7656 non-interworking calls to foo. We could decode the stubs
7657 to find the target but it's easier to use the symbol table. */
7658 namelen = strlen (name);
7659 if (name[0] == '_' && name[1] == '_'
7660 && ((namelen > 2 + strlen ("_from_thumb")
7661 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
7662 strlen ("_from_thumb")) == 0)
7663 || (namelen > 2 + strlen ("_from_arm")
7664 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
7665 strlen ("_from_arm")) == 0)))
7666 {
7667 char *target_name;
7668 int target_len = namelen - 2;
7669 struct minimal_symbol *minsym;
7670 struct objfile *objfile;
7671 struct obj_section *sec;
7672
7673 if (name[namelen - 1] == 'b')
7674 target_len -= strlen ("_from_thumb");
7675 else
7676 target_len -= strlen ("_from_arm");
7677
7678 target_name = alloca (target_len + 1);
7679 memcpy (target_name, name + 2, target_len);
7680 target_name[target_len] = '\0';
7681
7682 sec = find_pc_section (pc);
7683 objfile = (sec == NULL) ? NULL : sec->objfile;
7684 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
7685 if (minsym != NULL)
7686 return SYMBOL_VALUE_ADDRESS (minsym);
7687 else
7688 return 0;
7689 }
7690
7691 return 0; /* not a stub */
7692 }
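
/* For example, a PC inside the linker thunk "_call_via_r3" resolves
   to whatever r3 currently holds, while a PC inside a stub named
   "__foo_from_thumb" resolves to the address of the "foo" minimal
   symbol. */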
7693
7694 static void
7695 set_arm_command (char *args, int from_tty)
7696 {
7697 printf_unfiltered (_("\
7698 \"set arm\" must be followed by an apporpriate subcommand.\n"));
7699 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
7700 }
7701
7702 static void
7703 show_arm_command (char *args, int from_tty)
7704 {
7705 cmd_show_list (showarmcmdlist, from_tty, "");
7706 }
7707
7708 static void
7709 arm_update_current_architecture (void)
7710 {
7711 struct gdbarch_info info;
7712
7713 /* If the current architecture is not ARM, we have nothing to do. */
7714 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
7715 return;
7716
7717 /* Update the architecture. */
7718 gdbarch_info_init (&info);
7719
7720 if (!gdbarch_update_p (info))
7721 internal_error (__FILE__, __LINE__, _("could not update architecture"));
7722 }
7723
7724 static void
7725 set_fp_model_sfunc (char *args, int from_tty,
7726 struct cmd_list_element *c)
7727 {
7728 enum arm_float_model fp_model;
7729
7730 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
7731 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
7732 {
7733 arm_fp_model = fp_model;
7734 break;
7735 }
7736
7737 if (fp_model == ARM_FLOAT_LAST)
7738 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
7739 current_fp_model);
7740
7741 arm_update_current_architecture ();
7742 }
7743
7744 static void
7745 show_fp_model (struct ui_file *file, int from_tty,
7746 struct cmd_list_element *c, const char *value)
7747 {
7748 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7749
7750 if (arm_fp_model == ARM_FLOAT_AUTO
7751 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7752 fprintf_filtered (file, _("\
7753 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
7754 fp_model_strings[tdep->fp_model]);
7755 else
7756 fprintf_filtered (file, _("\
7757 The current ARM floating point model is \"%s\".\n"),
7758 fp_model_strings[arm_fp_model]);
7759 }
7760
7761 static void
7762 arm_set_abi (char *args, int from_tty,
7763 struct cmd_list_element *c)
7764 {
7765 enum arm_abi_kind arm_abi;
7766
7767 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
7768 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
7769 {
7770 arm_abi_global = arm_abi;
7771 break;
7772 }
7773
7774 if (arm_abi == ARM_ABI_LAST)
7775 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
7776 arm_abi_string);
7777
7778 arm_update_current_architecture ();
7779 }
7780
7781 static void
7782 arm_show_abi (struct ui_file *file, int from_tty,
7783 struct cmd_list_element *c, const char *value)
7784 {
7785 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7786
7787 if (arm_abi_global == ARM_ABI_AUTO
7788 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
7789 fprintf_filtered (file, _("\
7790 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
7791 arm_abi_strings[tdep->arm_abi]);
7792 else
7793 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
7794 arm_abi_string);
7795 }
7796
7797 static void
7798 arm_show_fallback_mode (struct ui_file *file, int from_tty,
7799 struct cmd_list_element *c, const char *value)
7800 {
7801 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7802
7803 fprintf_filtered (file,
7804 _("The current execution mode assumed "
7805 "(when symbols are unavailable) is \"%s\".\n"),
7806 arm_fallback_mode_string);
7807 }
7808
7809 static void
7810 arm_show_force_mode (struct ui_file *file, int from_tty,
7811 struct cmd_list_element *c, const char *value)
7812 {
7813 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
7814
7815 fprintf_filtered (file,
7816 _("The current execution mode assumed "
7817 "(even when symbols are available) is \"%s\".\n"),
7818 arm_force_mode_string);
7819 }
7820
7821 /* If the user changes the register disassembly style used for info
7822 register and other commands, we have to also switch the style used
7823 in opcodes for disassembly output. This function is run in the "set
7824 arm disassembly" command, and does that. */
7825
7826 static void
7827 set_disassembly_style_sfunc (char *args, int from_tty,
7828 struct cmd_list_element *c)
7829 {
7830 set_disassembly_style ();
7831 }
7832 \f
7833 /* Return the ARM register name corresponding to register I. */
7834 static const char *
7835 arm_register_name (struct gdbarch *gdbarch, int i)
7836 {
7837 const int num_regs = gdbarch_num_regs (gdbarch);
7838
7839 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
7840 && i >= num_regs && i < num_regs + 32)
7841 {
7842 static const char *const vfp_pseudo_names[] = {
7843 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
7844 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
7845 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
7846 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
7847 };
7848
7849 return vfp_pseudo_names[i - num_regs];
7850 }
7851
7852 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
7853 && i >= num_regs + 32 && i < num_regs + 32 + 16)
7854 {
7855 static const char *const neon_pseudo_names[] = {
7856 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
7857 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
7858 };
7859
7860 return neon_pseudo_names[i - num_regs - 32];
7861 }
7862
7863 if (i >= ARRAY_SIZE (arm_register_names))
7864 /* These registers are only supported on targets which supply
7865 an XML description. */
7866 return "";
7867
7868 return arm_register_names[i];
7869 }
7870
7871 static void
7872 set_disassembly_style (void)
7873 {
7874 int current;
7875
7876 /* Find the style that the user wants. */
7877 for (current = 0; current < num_disassembly_options; current++)
7878 if (disassembly_style == valid_disassembly_styles[current])
7879 break;
7880 gdb_assert (current < num_disassembly_options);
7881
7882 /* Synchronize the disassembler. */
7883 set_arm_regname_option (current);
7884 }
7885
7886 /* Test whether the coff symbol specific value corresponds to a Thumb
7887 function. */
7888
7889 static int
7890 coff_sym_is_thumb (int val)
7891 {
7892 return (val == C_THUMBEXT
7893 || val == C_THUMBSTAT
7894 || val == C_THUMBEXTFUNC
7895 || val == C_THUMBSTATFUNC
7896 || val == C_THUMBLABEL);
7897 }
7898
7899 /* arm_coff_make_msymbol_special()
7900 arm_elf_make_msymbol_special()
7901
7902 These functions test whether the COFF or ELF symbol corresponds to
7903 an address in thumb code, and set a "special" bit in a minimal
7904 symbol to indicate that it does. */
7905
7906 static void
7907 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
7908 {
7909 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
7910 == ST_BRANCH_TO_THUMB)
7911 MSYMBOL_SET_SPECIAL (msym);
7912 }
7913
7914 static void
7915 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
7916 {
7917 if (coff_sym_is_thumb (val))
7918 MSYMBOL_SET_SPECIAL (msym);
7919 }
7920
7921 static void
7922 arm_objfile_data_free (struct objfile *objfile, void *arg)
7923 {
7924 struct arm_per_objfile *data = arg;
7925 unsigned int i;
7926
7927 for (i = 0; i < objfile->obfd->section_count; i++)
7928 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
7929 }
7930
7931 static void
7932 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
7933 asymbol *sym)
7934 {
7935 const char *name = bfd_asymbol_name (sym);
7936 struct arm_per_objfile *data;
7937 VEC(arm_mapping_symbol_s) **map_p;
7938 struct arm_mapping_symbol new_map_sym;
7939
7940 gdb_assert (name[0] == '$');
7941 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
7942 return;
7943
7944 data = objfile_data (objfile, arm_objfile_data_key);
7945 if (data == NULL)
7946 {
7947 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
7948 struct arm_per_objfile);
7949 set_objfile_data (objfile, arm_objfile_data_key, data);
7950 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
7951 objfile->obfd->section_count,
7952 VEC(arm_mapping_symbol_s) *);
7953 }
7954 map_p = &data->section_maps[bfd_get_section (sym)->index];
7955
7956 new_map_sym.value = sym->value;
7957 new_map_sym.type = name[1];
7958
7959 /* Assume that most mapping symbols appear in order of increasing
7960 value. If they were randomly distributed, it would be faster to
7961 always push here and then sort at first use. */
7962 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
7963 {
7964 struct arm_mapping_symbol *prev_map_sym;
7965
7966 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
7967 if (prev_map_sym->value >= sym->value)
7968 {
7969 unsigned int idx;
7970 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
7971 arm_compare_mapping_symbols);
7972 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
7973 return;
7974 }
7975 }
7976
7977 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
7978 }
7979
7980 static void
7981 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7982 {
7983 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7984 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7985
7986 /* If necessary, set the T bit. */
7987 if (arm_apcs_32)
7988 {
7989 ULONGEST val, t_bit;
7990 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7991 t_bit = arm_psr_thumb_bit (gdbarch);
7992 if (arm_pc_is_thumb (gdbarch, pc))
7993 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7994 val | t_bit);
7995 else
7996 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7997 val & ~t_bit);
7998 }
7999 }
8000
8001 /* Read the contents of a NEON quad register, by reading from two
8002 double registers. This is used to implement the quad pseudo
8003 registers, and for argument passing in case the quad registers are
8004 missing; vectors are passed in quad registers when using the VFP
8005 ABI, even if a NEON unit is not present. REGNUM is the index of
8006 the quad register, in [0, 15]. */
8007
8008 static enum register_status
8009 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
8010 int regnum, gdb_byte *buf)
8011 {
8012 char name_buf[4];
8013 gdb_byte reg_buf[8];
8014 int offset, double_regnum;
8015 enum register_status status;
8016
8017 sprintf (name_buf, "d%d", regnum << 1);
8018 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8019 strlen (name_buf));
8020
8021 /* d0 is always the least significant half of q0. */
8022 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8023 offset = 8;
8024 else
8025 offset = 0;
8026
8027 status = regcache_raw_read (regcache, double_regnum, reg_buf);
8028 if (status != REG_VALID)
8029 return status;
8030 memcpy (buf + offset, reg_buf, 8);
8031
8032 offset = 8 - offset;
8033 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
8034 if (status != REG_VALID)
8035 return status;
8036 memcpy (buf + offset, reg_buf, 8);
8037
8038 return REG_VALID;
8039 }
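
/* In other words, pseudo register qN is assembled from the raw pair
   d(2N) and d(2N+1), with d(2N) always providing the least
   significant eight bytes; only the buffer offsets change with
   endianness. */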
8040
8041 static enum register_status
8042 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
8043 int regnum, gdb_byte *buf)
8044 {
8045 const int num_regs = gdbarch_num_regs (gdbarch);
8046 char name_buf[4];
8047 gdb_byte reg_buf[8];
8048 int offset, double_regnum;
8049
8050 gdb_assert (regnum >= num_regs);
8051 regnum -= num_regs;
8052
8053 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8054 /* Quad-precision register. */
8055 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
8056 else
8057 {
8058 enum register_status status;
8059
8060 /* Single-precision register. */
8061 gdb_assert (regnum < 32);
8062
8063 /* s0 is always the least significant half of d0. */
8064 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8065 offset = (regnum & 1) ? 0 : 4;
8066 else
8067 offset = (regnum & 1) ? 4 : 0;
8068
8069 sprintf (name_buf, "d%d", regnum >> 1);
8070 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8071 strlen (name_buf));
8072
8073 status = regcache_raw_read (regcache, double_regnum, reg_buf);
8074 if (status == REG_VALID)
8075 memcpy (buf, reg_buf + offset, 4);
8076 return status;
8077 }
8078 }
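
/* Likewise each single-precision pseudo sN is one half of d(N/2): s4
   is always the least significant half of d2 and s5 the most
   significant half, with the byte offsets above flipped on big-endian
   targets to preserve that mapping. */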
8079
8080 /* Store the contents of BUF to a NEON quad register, by writing to
8081 two double registers. This is used to implement the quad pseudo
8082 registers, and for argument passing in case the quad registers are
8083 missing; vectors are passed in quad registers when using the VFP
8084 ABI, even if a NEON unit is not present. REGNUM is the index
8085 of the quad register, in [0, 15]. */
8086
8087 static void
8088 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
8089 int regnum, const gdb_byte *buf)
8090 {
8091 char name_buf[4];
8092 gdb_byte reg_buf[8];
8093 int offset, double_regnum;
8094
8095 sprintf (name_buf, "d%d", regnum << 1);
8096 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8097 strlen (name_buf));
8098
8099 /* d0 is always the least significant half of q0. */
8100 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8101 offset = 8;
8102 else
8103 offset = 0;
8104
8105 regcache_raw_write (regcache, double_regnum, buf + offset);
8106 offset = 8 - offset;
8107 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
8108 }
8109
8110 static void
8111 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
8112 int regnum, const gdb_byte *buf)
8113 {
8114 const int num_regs = gdbarch_num_regs (gdbarch);
8115 char name_buf[4];
8116 gdb_byte reg_buf[8];
8117 int offset, double_regnum;
8118
8119 gdb_assert (regnum >= num_regs);
8120 regnum -= num_regs;
8121
8122 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8123 /* Quad-precision register. */
8124 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
8125 else
8126 {
8127 /* Single-precision register. */
8128 gdb_assert (regnum < 32);
8129
8130 /* s0 is always the least significant half of d0. */
8131 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8132 offset = (regnum & 1) ? 0 : 4;
8133 else
8134 offset = (regnum & 1) ? 4 : 0;
8135
8136 sprintf (name_buf, "d%d", regnum >> 1);
8137 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8138 strlen (name_buf));
8139
8140 regcache_raw_read (regcache, double_regnum, reg_buf);
8141 memcpy (reg_buf + offset, buf, 4);
8142 regcache_raw_write (regcache, double_regnum, reg_buf);
8143 }
8144 }
8145
8146 static struct value *
8147 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8148 {
8149 const int *reg_p = baton;
8150 return value_of_register (*reg_p, frame);
8151 }
8152 \f
8153 static enum gdb_osabi
8154 arm_elf_osabi_sniffer (bfd *abfd)
8155 {
8156 unsigned int elfosabi;
8157 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8158
8159 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8160
8161 if (elfosabi == ELFOSABI_ARM)
8162 /* GNU tools use this value. Check note sections in this case,
8163 as well. */
8164 bfd_map_over_sections (abfd,
8165 generic_elf_osabi_sniff_abi_tag_sections,
8166 &osabi);
8167
8168 /* Anything else will be handled by the generic ELF sniffer. */
8169 return osabi;
8170 }
8171
8172 static int
8173 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8174 struct reggroup *group)
8175 {
8176 /* The FPS register's type is INT, but it belongs to float_reggroup. Besides
8177 this, the FPS register belongs to save_reggroup, restore_reggroup, and
8178 all_reggroup, of course. */
8179 if (regnum == ARM_FPS_REGNUM)
8180 return (group == float_reggroup
8181 || group == save_reggroup
8182 || group == restore_reggroup
8183 || group == all_reggroup);
8184 else
8185 return default_register_reggroup_p (gdbarch, regnum, group);
8186 }
8187
8188 \f
8189 /* Initialize the current architecture based on INFO. If possible,
8190 re-use an architecture from ARCHES, which is a list of
8191 architectures already created during this debugging session.
8192
8193 Called e.g. at program startup, when reading a core file, and when
8194 reading a binary file. */
8195
8196 static struct gdbarch *
8197 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8198 {
8199 struct gdbarch_tdep *tdep;
8200 struct gdbarch *gdbarch;
8201 struct gdbarch_list *best_arch;
8202 enum arm_abi_kind arm_abi = arm_abi_global;
8203 enum arm_float_model fp_model = arm_fp_model;
8204 struct tdesc_arch_data *tdesc_data = NULL;
8205 int i, is_m = 0;
8206 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
8207 int have_neon = 0;
8208 int have_fpa_registers = 1;
8209 const struct target_desc *tdesc = info.target_desc;
8210
8211 /* If we have an object to base this architecture on, try to determine
8212 its ABI. */
8213
8214 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8215 {
8216 int ei_osabi, e_flags;
8217
8218 switch (bfd_get_flavour (info.abfd))
8219 {
8220 case bfd_target_aout_flavour:
8221 /* Assume it's an old APCS-style ABI. */
8222 arm_abi = ARM_ABI_APCS;
8223 break;
8224
8225 case bfd_target_coff_flavour:
8226 /* Assume it's an old APCS-style ABI. */
8227 /* XXX WinCE? */
8228 arm_abi = ARM_ABI_APCS;
8229 break;
8230
8231 case bfd_target_elf_flavour:
8232 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8233 e_flags = elf_elfheader (info.abfd)->e_flags;
8234
8235 if (ei_osabi == ELFOSABI_ARM)
8236 {
8237 /* GNU tools used to use this value, but do not for EABI
8238 objects. There's nowhere to tag an EABI version
8239 anyway, so assume APCS. */
8240 arm_abi = ARM_ABI_APCS;
8241 }
8242 else if (ei_osabi == ELFOSABI_NONE)
8243 {
8244 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8245 int attr_arch, attr_profile;
8246
8247 switch (eabi_ver)
8248 {
8249 case EF_ARM_EABI_UNKNOWN:
8250 /* Assume GNU tools. */
8251 arm_abi = ARM_ABI_APCS;
8252 break;
8253
8254 case EF_ARM_EABI_VER4:
8255 case EF_ARM_EABI_VER5:
8256 arm_abi = ARM_ABI_AAPCS;
8257 /* EABI binaries default to VFP float ordering.
8258 They may also contain build attributes that can
8259 be used to identify if the VFP argument-passing
8260 ABI is in use. */
8261 if (fp_model == ARM_FLOAT_AUTO)
8262 {
8263 #ifdef HAVE_ELF
8264 switch (bfd_elf_get_obj_attr_int (info.abfd,
8265 OBJ_ATTR_PROC,
8266 Tag_ABI_VFP_args))
8267 {
8268 case 0:
8269 /* "The user intended FP parameter/result
8270 passing to conform to AAPCS, base
8271 variant". */
8272 fp_model = ARM_FLOAT_SOFT_VFP;
8273 break;
8274 case 1:
8275 /* "The user intended FP parameter/result
8276 passing to conform to AAPCS, VFP
8277 variant". */
8278 fp_model = ARM_FLOAT_VFP;
8279 break;
8280 case 2:
8281 /* "The user intended FP parameter/result
8282 passing to conform to tool chain-specific
8283 conventions" - we don't know any such
8284 conventions, so leave it as "auto". */
8285 break;
8286 default:
8287 /* Attribute value not mentioned in the
8288 October 2008 ABI, so leave it as
8289 "auto". */
8290 break;
8291 }
8292 #else
8293 fp_model = ARM_FLOAT_SOFT_VFP;
8294 #endif
8295 }
8296 break;
8297
8298 default:
8299 /* Leave it as "auto". */
8300 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8301 break;
8302 }
8303
8304 #ifdef HAVE_ELF
8305 /* Detect M-profile programs. This only works if the
8306 executable file includes build attributes; GCC does
8307 copy them to the executable, but e.g. RealView does
8308 not. */
8309 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8310 Tag_CPU_arch);
8311 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
8312 OBJ_ATTR_PROC,
8313 Tag_CPU_arch_profile);
8314 /* GCC specifies the profile for v6-M; RealView only
8315 specifies the profile for architectures starting with
8316 V7 (as opposed to architectures with a tag
8317 numerically greater than TAG_CPU_ARCH_V7). */
8318 if (!tdesc_has_registers (tdesc)
8319 && (attr_arch == TAG_CPU_ARCH_V6_M
8320 || attr_arch == TAG_CPU_ARCH_V6S_M
8321 || attr_profile == 'M'))
8322 tdesc = tdesc_arm_with_m;
8323 #endif
8324 }
8325
8326 if (fp_model == ARM_FLOAT_AUTO)
8327 {
8328 int e_flags = elf_elfheader (info.abfd)->e_flags;
8329
8330 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
8331 {
8332 case 0:
8333 /* Leave it as "auto". Strictly speaking this case
8334 means FPA, but almost nobody uses that now, and
8335 many toolchains fail to set the appropriate bits
8336 for the floating-point model they use. */
8337 break;
8338 case EF_ARM_SOFT_FLOAT:
8339 fp_model = ARM_FLOAT_SOFT_FPA;
8340 break;
8341 case EF_ARM_VFP_FLOAT:
8342 fp_model = ARM_FLOAT_VFP;
8343 break;
8344 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
8345 fp_model = ARM_FLOAT_SOFT_VFP;
8346 break;
8347 }
8348 }
8349
8350 if (e_flags & EF_ARM_BE8)
8351 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
8352
8353 break;
8354
8355 default:
8356 /* Leave it as "auto". */
8357 break;
8358 }
8359 }
8360
8361 /* Check any target description for validity. */
8362 if (tdesc_has_registers (tdesc))
8363 {
8364 /* For most registers we require GDB's default names; but also allow
8365 the numeric names for sp / lr / pc, as a convenience. */
8366 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
8367 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
8368 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
8369
8370 const struct tdesc_feature *feature;
8371 int valid_p;
8372
8373 feature = tdesc_find_feature (tdesc,
8374 "org.gnu.gdb.arm.core");
8375 if (feature == NULL)
8376 {
8377 feature = tdesc_find_feature (tdesc,
8378 "org.gnu.gdb.arm.m-profile");
8379 if (feature == NULL)
8380 return NULL;
8381 else
8382 is_m = 1;
8383 }
8384
8385 tdesc_data = tdesc_data_alloc ();
8386
8387 valid_p = 1;
8388 for (i = 0; i < ARM_SP_REGNUM; i++)
8389 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8390 arm_register_names[i]);
8391 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8392 ARM_SP_REGNUM,
8393 arm_sp_names);
8394 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8395 ARM_LR_REGNUM,
8396 arm_lr_names);
8397 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
8398 ARM_PC_REGNUM,
8399 arm_pc_names);
8400 if (is_m)
8401 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8402 ARM_PS_REGNUM, "xpsr");
8403 else
8404 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8405 ARM_PS_REGNUM, "cpsr");
8406
8407 if (!valid_p)
8408 {
8409 tdesc_data_cleanup (tdesc_data);
8410 return NULL;
8411 }
8412
8413 feature = tdesc_find_feature (tdesc,
8414 "org.gnu.gdb.arm.fpa");
8415 if (feature != NULL)
8416 {
8417 valid_p = 1;
8418 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
8419 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
8420 arm_register_names[i]);
8421 if (!valid_p)
8422 {
8423 tdesc_data_cleanup (tdesc_data);
8424 return NULL;
8425 }
8426 }
8427 else
8428 have_fpa_registers = 0;
8429
8430 feature = tdesc_find_feature (tdesc,
8431 "org.gnu.gdb.xscale.iwmmxt");
8432 if (feature != NULL)
8433 {
8434 static const char *const iwmmxt_names[] = {
8435 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
8436 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
8437 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
8438 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
8439 };
8440
8441 valid_p = 1;
8442 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
8443 valid_p
8444 &= tdesc_numbered_register (feature, tdesc_data, i,
8445 iwmmxt_names[i - ARM_WR0_REGNUM]);
8446
8447 /* Check for the control registers, but do not fail if they
8448 are missing. */
8449 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
8450 tdesc_numbered_register (feature, tdesc_data, i,
8451 iwmmxt_names[i - ARM_WR0_REGNUM]);
8452
8453 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
8454 valid_p
8455 &= tdesc_numbered_register (feature, tdesc_data, i,
8456 iwmmxt_names[i - ARM_WR0_REGNUM]);
8457
8458 if (!valid_p)
8459 {
8460 tdesc_data_cleanup (tdesc_data);
8461 return NULL;
8462 }
8463 }
8464
8465 /* If we have a VFP unit, check whether the single precision registers
8466 are present. If not, then we will synthesize them as pseudo
8467 registers. */
8468 feature = tdesc_find_feature (tdesc,
8469 "org.gnu.gdb.arm.vfp");
8470 if (feature != NULL)
8471 {
8472 static const char *const vfp_double_names[] = {
8473 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
8474 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
8475 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
8476 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
8477 };
8478
8479 /* Require the double precision registers. There must be either
8480 16 or 32. */
8481 valid_p = 1;
8482 for (i = 0; i < 32; i++)
8483 {
8484 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8485 ARM_D0_REGNUM + i,
8486 vfp_double_names[i]);
8487 if (!valid_p)
8488 break;
8489 }
8490 if (!valid_p && i == 16)
8491 valid_p = 1;
8492
8493 /* Also require FPSCR. */
8494 valid_p &= tdesc_numbered_register (feature, tdesc_data,
8495 ARM_FPSCR_REGNUM, "fpscr");
8496 if (!valid_p)
8497 {
8498 tdesc_data_cleanup (tdesc_data);
8499 return NULL;
8500 }
8501
8502 if (tdesc_unnumbered_register (feature, "s0") == 0)
8503 have_vfp_pseudos = 1;
8504
8505 have_vfp_registers = 1;
8506
8507 /* If we have VFP, also check for NEON. The architecture allows
8508 NEON without VFP (integer vector operations only), but GDB
8509 does not support that. */
8510 feature = tdesc_find_feature (tdesc,
8511 "org.gnu.gdb.arm.neon");
8512 if (feature != NULL)
8513 {
8514 /* NEON requires 32 double-precision registers. */
8515 if (i != 32)
8516 {
8517 tdesc_data_cleanup (tdesc_data);
8518 return NULL;
8519 }
8520
8521 /* If there are quad registers defined by the stub, use
8522 their type; otherwise (normally) provide them with
8523 the default type. */
8524 if (tdesc_unnumbered_register (feature, "q0") == 0)
8525 have_neon_pseudos = 1;
8526
8527 have_neon = 1;
8528 }
8529 }
8530 }
8531
8532 /* If there is already a candidate, use it. */
8533 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
8534 best_arch != NULL;
8535 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
8536 {
8537 if (arm_abi != ARM_ABI_AUTO
8538 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
8539 continue;
8540
8541 if (fp_model != ARM_FLOAT_AUTO
8542 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
8543 continue;
8544
8545 /* There are various other properties in tdep that we do not
8546 need to check here: those derived from a target description,
8547 since gdbarches with a different target description are
8548 automatically disqualified. */
8549
8550 /* Do check is_m, though, since it might come from the binary. */
8551 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
8552 continue;
8553
8554 /* Found a match. */
8555 break;
8556 }
8557
8558 if (best_arch != NULL)
8559 {
8560 if (tdesc_data != NULL)
8561 tdesc_data_cleanup (tdesc_data);
8562 return best_arch->gdbarch;
8563 }
8564
8565 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
8566 gdbarch = gdbarch_alloc (&info, tdep);
8567
8568 /* Record additional information about the architecture we are defining.
8569 These are gdbarch discriminators, like the OSABI. */
8570 tdep->arm_abi = arm_abi;
8571 tdep->fp_model = fp_model;
8572 tdep->is_m = is_m;
8573 tdep->have_fpa_registers = have_fpa_registers;
8574 tdep->have_vfp_registers = have_vfp_registers;
8575 tdep->have_vfp_pseudos = have_vfp_pseudos;
8576 tdep->have_neon_pseudos = have_neon_pseudos;
8577 tdep->have_neon = have_neon;
8578
8579 /* Breakpoints. */
8580 switch (info.byte_order_for_code)
8581 {
8582 case BFD_ENDIAN_BIG:
8583 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
8584 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
8585 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
8586 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
8587
8588 break;
8589
8590 case BFD_ENDIAN_LITTLE:
8591 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
8592 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
8593 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
8594 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
8595
8596 break;
8597
8598 default:
8599 internal_error (__FILE__, __LINE__,
8600 _("arm_gdbarch_init: bad byte order for float format"));
8601 }
8602
8603 /* On ARM targets char defaults to unsigned. */
8604 set_gdbarch_char_signed (gdbarch, 0);
8605
8606 /* Note: for displaced stepping, this includes the breakpoint, and one word
8607 of additional scratch space. This setting isn't used for anything besides
8608 displaced stepping at present. */
8609 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
8610
8611 /* This should be low enough for everything. */
8612 tdep->lowest_pc = 0x20;
8613 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
8614
8615 /* The default, for both APCS and AAPCS, is to return small
8616 structures in registers. */
8617 tdep->struct_return = reg_struct_return;
8618
8619 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
8620 set_gdbarch_frame_align (gdbarch, arm_frame_align);
8621
8622 set_gdbarch_write_pc (gdbarch, arm_write_pc);
8623
8624 /* Frame handling. */
8625 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
8626 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
8627 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
8628
8629 frame_base_set_default (gdbarch, &arm_normal_base);
8630
8631 /* Address manipulation. */
8632 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
8633 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
8634
8635 /* Advance PC across function entry code. */
8636 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
8637
8638 /* Detect whether PC is in function epilogue. */
8639 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
8640
8641 /* Skip trampolines. */
8642 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
8643
8644 /* The stack grows downward. */
8645 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
8646
8647 /* Breakpoint manipulation. */
8648 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
8649 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
8650 arm_remote_breakpoint_from_pc);
8651
8652 /* Information about registers, etc. */
8653 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
8654 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
8655 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
8656 set_gdbarch_register_type (gdbarch, arm_register_type);
8657 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
8658
8659 /* This "info float" is FPA-specific. Use the generic version if we
8660 do not have FPA. */
8661 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
8662 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
8663
8664 /* Internal <-> external register number maps. */
8665 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
8666 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
8667
8668 set_gdbarch_register_name (gdbarch, arm_register_name);
8669
8670 /* Returning results. */
8671 set_gdbarch_return_value (gdbarch, arm_return_value);
8672
8673 /* Disassembly. */
8674 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
8675
8676 /* Minsymbol frobbing. */
8677 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
8678 set_gdbarch_coff_make_msymbol_special (gdbarch,
8679 arm_coff_make_msymbol_special);
8680 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
8681
8682 /* Thumb-2 IT block support. */
8683 set_gdbarch_adjust_breakpoint_address (gdbarch,
8684 arm_adjust_breakpoint_address);
8685
8686 /* Virtual tables. */
8687 set_gdbarch_vbit_in_delta (gdbarch, 1);
8688
8689 /* Hook in the ABI-specific overrides, if they have been registered. */
8690 gdbarch_init_osabi (info, gdbarch);
8691
8692 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
8693
8694 /* Add some default predicates. */
8695 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
8696 dwarf2_append_unwinders (gdbarch);
8697 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
8698 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
8699
8700 /* Now we have tuned the configuration, set a few final things,
8701 based on what the OS ABI has told us. */
8702
8703 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
8704 binaries are always marked. */
8705 if (tdep->arm_abi == ARM_ABI_AUTO)
8706 tdep->arm_abi = ARM_ABI_APCS;
8707
8708 /* Watchpoints are not steppable. */
8709 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
8710
8711 /* We used to default to FPA for generic ARM, but almost nobody
8712 uses that now, and we now provide a way for the user to force
8713 the model. So default to the most useful variant. */
8714 if (tdep->fp_model == ARM_FLOAT_AUTO)
8715 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
8716
8717 if (tdep->jb_pc >= 0)
8718 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
8719
8720 /* Floating point sizes and format. */
8721 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
8722 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
8723 {
8724 set_gdbarch_double_format
8725 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8726 set_gdbarch_long_double_format
8727 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
8728 }
8729 else
8730 {
8731 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
8732 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
8733 }
8734
8735 if (have_vfp_pseudos)
8736 {
8737 /* NOTE: These are the only pseudo registers used by
8738 the ARM target at the moment. If more are added, a
8739 little more care in numbering will be needed. */
8740
8741 int num_pseudos = 32;
8742 if (have_neon_pseudos)
8743 num_pseudos += 16;
8744 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
8745 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
8746 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
8747 }
8748
8749 if (tdesc_data)
8750 {
8751 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
8752
8753 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
8754
8755 /* Override tdesc_register_type to adjust the types of VFP
8756 registers for NEON. */
8757 set_gdbarch_register_type (gdbarch, arm_register_type);
8758 }
8759
8760 /* Add standard register aliases. We add aliases even for those
8761 names which are used by the current architecture - it's simpler,
8762 and does no harm, since nothing ever lists user registers. */
8763 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
8764 user_reg_add (gdbarch, arm_register_aliases[i].name,
8765 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
8766
8767 return gdbarch;
8768 }
8769
8770 static void
8771 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
8772 {
8773 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8774
8775 if (tdep == NULL)
8776 return;
8777
8778 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
8779 (unsigned long) tdep->lowest_pc);
8780 }
8781
8782 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
8783
8784 void
8785 _initialize_arm_tdep (void)
8786 {
8787 struct ui_file *stb;
8788 long length;
8789 struct cmd_list_element *new_set, *new_show;
8790 const char *setname;
8791 const char *setdesc;
8792 const char *const *regnames;
8793 int numregs, i, j;
8794 static char *helptext;
8795 char regdesc[1024], *rdptr = regdesc;
8796 size_t rest = sizeof (regdesc);
8797
8798 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
8799
8800 arm_objfile_data_key
8801 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
8802
8803 /* Add ourselves to objfile event chain. */
8804 observer_attach_new_objfile (arm_exidx_new_objfile);
8805 arm_exidx_data_key
8806 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
8807
8808 /* Register an ELF OS ABI sniffer for ARM binaries. */
8809 gdbarch_register_osabi_sniffer (bfd_arch_arm,
8810 bfd_target_elf_flavour,
8811 arm_elf_osabi_sniffer);
8812
8813 /* Initialize the standard target descriptions. */
8814 initialize_tdesc_arm_with_m ();
8815 initialize_tdesc_arm_with_iwmmxt ();
8816 initialize_tdesc_arm_with_vfpv2 ();
8817 initialize_tdesc_arm_with_vfpv3 ();
8818 initialize_tdesc_arm_with_neon ();
8819
8820 /* Get the number of possible sets of register names defined in opcodes. */
8821 num_disassembly_options = get_arm_regname_num_options ();
8822
8823 /* Add root prefix command for all "set arm"/"show arm" commands. */
8824 add_prefix_cmd ("arm", no_class, set_arm_command,
8825 _("Various ARM-specific commands."),
8826 &setarmcmdlist, "set arm ", 0, &setlist);
8827
8828 add_prefix_cmd ("arm", no_class, show_arm_command,
8829 _("Various ARM-specific commands."),
8830 &showarmcmdlist, "show arm ", 0, &showlist);
8831
8832 /* Sync the opcode insn printer with our register viewer. */
8833 parse_arm_disassembler_option ("reg-names-std");
8834
8835 /* Initialize the array that will be passed to
8836 add_setshow_enum_cmd(). */
8837 valid_disassembly_styles
8838 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
8839 for (i = 0; i < num_disassembly_options; i++)
8840 {
8841 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
8842 valid_disassembly_styles[i] = setname;
8843 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
8844 rdptr += length;
8845 rest -= length;
8846 /* When we find the default names, tell the disassembler to use
8847 them. */
8848 if (!strcmp (setname, "std"))
8849 {
8850 disassembly_style = setname;
8851 set_arm_regname_option (i);
8852 }
8853 }
8854 /* Mark the end of valid options. */
8855 valid_disassembly_styles[num_disassembly_options] = NULL;
8856
8857 /* Create the help text. */
8858 stb = mem_fileopen ();
8859 fprintf_unfiltered (stb, "%s%s%s",
8860 _("The valid values are:\n"),
8861 regdesc,
8862 _("The default is \"std\"."));
8863 helptext = ui_file_xstrdup (stb, NULL);
8864 ui_file_delete (stb);
8865
8866 add_setshow_enum_cmd("disassembler", no_class,
8867 valid_disassembly_styles, &disassembly_style,
8868 _("Set the disassembly style."),
8869 _("Show the disassembly style."),
8870 helptext,
8871 set_disassembly_style_sfunc,
8872 NULL, /* FIXME: i18n: The disassembly style is
8873 \"%s\". */
8874 &setarmcmdlist, &showarmcmdlist);
8875
8876 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
8877 _("Set usage of ARM 32-bit mode."),
8878 _("Show usage of ARM 32-bit mode."),
8879 _("When off, a 26-bit PC will be used."),
8880 NULL,
8881 NULL, /* FIXME: i18n: Usage of ARM 32-bit
8882 mode is %s. */
8883 &setarmcmdlist, &showarmcmdlist);
8884
8885 /* Add a command to allow the user to force the FPU model. */
8886 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
8887 _("Set the floating point type."),
8888 _("Show the floating point type."),
8889 _("auto - Determine the FP typefrom the OS-ABI.\n\
8890 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
8891 fpa - FPA co-processor (GCC compiled).\n\
8892 softvfp - Software FP with pure-endian doubles.\n\
8893 vfp - VFP co-processor."),
8894 set_fp_model_sfunc, show_fp_model,
8895 &setarmcmdlist, &showarmcmdlist);
8896
8897 /* Add a command to allow the user to force the ABI. */
8898 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
8899 _("Set the ABI."),
8900 _("Show the ABI."),
8901 NULL, arm_set_abi, arm_show_abi,
8902 &setarmcmdlist, &showarmcmdlist);
8903
8904 /* Add two commands to allow the user to force the assumed
8905 execution mode. */
8906 add_setshow_enum_cmd ("fallback-mode", class_support,
8907 arm_mode_strings, &arm_fallback_mode_string,
8908 _("Set the mode assumed when symbols are unavailable."),
8909 _("Show the mode assumed when symbols are unavailable."),
8910 NULL, NULL, arm_show_fallback_mode,
8911 &setarmcmdlist, &showarmcmdlist);
8912 add_setshow_enum_cmd ("force-mode", class_support,
8913 arm_mode_strings, &arm_force_mode_string,
8914 _("Set the mode assumed even when symbols are available."),
8915 _("Show the mode assumed even when symbols are available."),
8916 NULL, NULL, arm_show_force_mode,
8917 &setarmcmdlist, &showarmcmdlist);
8918
8919 /* Debugging flag. */
8920 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
8921 _("Set ARM debugging."),
8922 _("Show ARM debugging."),
8923 _("When on, arm-specific debugging is enabled."),
8924 NULL,
8925 NULL, /* FIXME: i18n: "ARM debugging is %s." */
8926 &setdebuglist, &showdebuglist);
8927 }