* arm-tdep.c (arm_analyze_load_stack_chk_guard): Avoid build break
[binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46
47 #include "arm-tdep.h"
48 #include "gdb/sim-arm.h"
49
50 #include "elf-bfd.h"
51 #include "coff/internal.h"
52 #include "elf/arm.h"
53
54 #include "gdb_assert.h"
55 #include "vec.h"
56
57 #include "features/arm-with-m.c"
58
59 static int arm_debug;
60
61 /* Macros for setting and testing a bit in a minimal symbol that marks
62 it as Thumb function. The MSB of the minimal symbol's "info" field
63 is used for this purpose.
64
65 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
66 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
67
68 #define MSYMBOL_SET_SPECIAL(msym) \
69 MSYMBOL_TARGET_FLAG_1 (msym) = 1
70
71 #define MSYMBOL_IS_SPECIAL(msym) \
72 MSYMBOL_TARGET_FLAG_1 (msym)
73
74 /* Per-objfile data used for mapping symbols. */
75 static const struct objfile_data *arm_objfile_data_key;
76
77 struct arm_mapping_symbol
78 {
79 bfd_vma value;
80 char type;
81 };
82 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
83 DEF_VEC_O(arm_mapping_symbol_s);
84
85 struct arm_per_objfile
86 {
87 VEC(arm_mapping_symbol_s) **section_maps;
88 };
89
90 /* The list of available "set arm ..." and "show arm ..." commands. */
91 static struct cmd_list_element *setarmcmdlist = NULL;
92 static struct cmd_list_element *showarmcmdlist = NULL;
93
94 /* The type of floating-point to use. Keep this in sync with enum
95 arm_float_model, and the help string in _initialize_arm_tdep. */
96 static const char *fp_model_strings[] =
97 {
98 "auto",
99 "softfpa",
100 "fpa",
101 "softvfp",
102 "vfp",
103 NULL
104 };
105
106 /* A variable that can be configured by the user. */
107 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
108 static const char *current_fp_model = "auto";
109
110 /* The ABI to use. Keep this in sync with arm_abi_kind. */
111 static const char *arm_abi_strings[] =
112 {
113 "auto",
114 "APCS",
115 "AAPCS",
116 NULL
117 };
118
119 /* A variable that can be configured by the user. */
120 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
121 static const char *arm_abi_string = "auto";
122
123 /* The execution mode to assume. */
124 static const char *arm_mode_strings[] =
125 {
126 "auto",
127 "arm",
128 "thumb",
129 NULL
130 };
131
132 static const char *arm_fallback_mode_string = "auto";
133 static const char *arm_force_mode_string = "auto";
134
135 /* Number of different reg name sets (options). */
136 static int num_disassembly_options;
137
138 /* The standard register names, and all the valid aliases for them. Note
139 that `fp', `sp' and `pc' are not added in this alias list, because they
140 have been added as builtin user registers in
141 std-regs.c:_initialize_frame_reg. */
142 static const struct
143 {
144 const char *name;
145 int regnum;
146 } arm_register_aliases[] = {
147 /* Basic register numbers. */
148 { "r0", 0 },
149 { "r1", 1 },
150 { "r2", 2 },
151 { "r3", 3 },
152 { "r4", 4 },
153 { "r5", 5 },
154 { "r6", 6 },
155 { "r7", 7 },
156 { "r8", 8 },
157 { "r9", 9 },
158 { "r10", 10 },
159 { "r11", 11 },
160 { "r12", 12 },
161 { "r13", 13 },
162 { "r14", 14 },
163 { "r15", 15 },
164 /* Synonyms (argument and variable registers). */
165 { "a1", 0 },
166 { "a2", 1 },
167 { "a3", 2 },
168 { "a4", 3 },
169 { "v1", 4 },
170 { "v2", 5 },
171 { "v3", 6 },
172 { "v4", 7 },
173 { "v5", 8 },
174 { "v6", 9 },
175 { "v7", 10 },
176 { "v8", 11 },
177 /* Other platform-specific names for r9. */
178 { "sb", 9 },
179 { "tr", 9 },
180 /* Special names. */
181 { "ip", 12 },
182 { "lr", 14 },
183 /* Names used by GCC (not listed in the ARM EABI). */
184 { "sl", 10 },
185 /* A special name from the older ATPCS. */
186 { "wr", 7 },
187 };
188
189 static const char *const arm_register_names[] =
190 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
191 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
192 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
193 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
194 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
195 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
196 "fps", "cpsr" }; /* 24 25 */
197
198 /* Valid register name styles. */
199 static const char **valid_disassembly_styles;
200
201 /* Disassembly style to use. Default to "std" register names. */
202 static const char *disassembly_style;
203
204 /* This is used to keep the bfd arch_info in sync with the disassembly
205 style. */
206 static void set_disassembly_style_sfunc(char *, int,
207 struct cmd_list_element *);
208 static void set_disassembly_style (void);
209
210 static void convert_from_extended (const struct floatformat *, const void *,
211 void *, int);
212 static void convert_to_extended (const struct floatformat *, void *,
213 const void *, int);
214
215 static void arm_neon_quad_read (struct gdbarch *gdbarch,
216 struct regcache *regcache,
217 int regnum, gdb_byte *buf);
218 static void arm_neon_quad_write (struct gdbarch *gdbarch,
219 struct regcache *regcache,
220 int regnum, const gdb_byte *buf);
221
222 struct arm_prologue_cache
223 {
224 /* The stack pointer at the time this frame was created; i.e. the
225 caller's stack pointer when this function was called. It is used
226 to identify this frame. */
227 CORE_ADDR prev_sp;
228
229 /* The frame base for this frame is just prev_sp - frame size.
230 FRAMESIZE is the distance from the frame pointer to the
231 initial stack pointer. */
232
233 int framesize;
234
235 /* The register used to hold the frame pointer for this frame. */
236 int framereg;
237
238 /* Saved register offsets. */
239 struct trad_frame_saved_reg *saved_regs;
240 };
241
242 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
243 CORE_ADDR prologue_start,
244 CORE_ADDR prologue_end,
245 struct arm_prologue_cache *cache);
246
247 /* Architecture version for displaced stepping. This effects the behaviour of
248 certain instructions, and really should not be hard-wired. */
249
250 #define DISPLACED_STEPPING_ARCH_VERSION 5
251
252 /* Addresses for calling Thumb functions have the bit 0 set.
253 Here are some macros to test, set, or clear bit 0 of addresses. */
254 #define IS_THUMB_ADDR(addr) ((addr) & 1)
255 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
256 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
257
258 /* Set to true if the 32-bit mode is in use. */
259
260 int arm_apcs_32 = 1;
261
262 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
263
264 static int
265 arm_psr_thumb_bit (struct gdbarch *gdbarch)
266 {
267 if (gdbarch_tdep (gdbarch)->is_m)
268 return XPSR_T;
269 else
270 return CPSR_T;
271 }
272
273 /* Determine if FRAME is executing in Thumb mode. */
274
275 int
276 arm_frame_is_thumb (struct frame_info *frame)
277 {
278 CORE_ADDR cpsr;
279 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
280
281 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
282 directly (from a signal frame or dummy frame) or by interpreting
283 the saved LR (from a prologue or DWARF frame). So consult it and
284 trust the unwinders. */
285 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
286
287 return (cpsr & t_bit) != 0;
288 }
289
290 /* Callback for VEC_lower_bound. */
291
292 static inline int
293 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
294 const struct arm_mapping_symbol *rhs)
295 {
296 return lhs->value < rhs->value;
297 }
298
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.

   NOTE(review): callers in this file test the returned type against
   't' (Thumb); per the ARM ELF convention mapping symbols are $a, $t
   and $d — confirm the stored types match that set.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* The per-section maps store section-relative addresses, so
	 rebase MEMADDR before searching.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  /* One sorted vector of mapping symbols per BFD section.  */
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      /* Fall back to the symbol immediately preceding MEMADDR.
		 If idx == 0, MEMADDR lies before the first mapping
		 symbol and we deliberately return 0 below.  */
	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No section, no per-objfile data, or no covering symbol.  */
  return 0;
}
357
358 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
359 CORE_ADDR pc, int insert_bkpt);
360
/* Determine if the program counter specified in MEMADDR is in a Thumb
   function.  This function should be called for addresses unrelated to
   any executing frame; otherwise, prefer arm_frame_is_thumb.

   The checks below form a priority chain: explicit Thumb bit, user
   override, M-profile, mapping symbols, minimal-symbol "special" bit,
   user fallback, live-target heuristics, and finally "assume ARM".
   The ordering is deliberate — do not reorder.  */

static int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  struct obj_section *sec;
  struct minimal_symbol *sym;
  char type;

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them.  A nonzero type is
     authoritative; 't' marks Thumb code, anything else is not.  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym)
    return (MSYMBOL_IS_SPECIAL (sym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).

     As a further heuristic if we detect that we are doing a single-step we
     see what state executing the current instruction ends up with us being
     in.  */
  if (target_has_registers)
    {
      struct frame_info *current_frame = get_current_frame ();
      CORE_ADDR current_pc = get_frame_pc (current_frame);
      int is_thumb = arm_frame_is_thumb (current_frame);
      CORE_ADDR next_pc;
      if (memaddr == current_pc)
	return is_thumb;
      else
	{
	  /* NOTE(review): this local shadows the GDBARCH parameter;
	     both refer to the same architecture only if the current
	     frame's arch matches the caller's — confirm intended.  */
	  struct gdbarch *gdbarch = get_frame_arch (current_frame);
	  /* Predict the next PC without inserting a breakpoint; its
	     low bit encodes the mode it will execute in.  */
	  next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
	  if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
	    return IS_THUMB_ADDR (next_pc);
	  else
	    return is_thumb;
	}
    }

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
433
434 /* Remove useless bits from addresses in a running program. */
435 static CORE_ADDR
436 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
437 {
438 if (arm_apcs_32)
439 return UNMAKE_THUMB_ADDR (val);
440 else
441 return (val & 0x03fffffc);
442 }
443
444 /* When reading symbols, we need to zap the low bit of the address,
445 which may be set to 1 for Thumb functions. */
446 static CORE_ADDR
447 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
448 {
449 return val & ~1;
450 }
451
452 /* Return 1 if PC is the start of a compiler helper function which
453 can be safely ignored during prologue skipping. */
454 static int
455 skip_prologue_function (CORE_ADDR pc)
456 {
457 struct minimal_symbol *msym;
458 const char *name;
459
460 msym = lookup_minimal_symbol_by_pc (pc);
461 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
462 return 0;
463
464 name = SYMBOL_LINKAGE_NAME (msym);
465 if (name == NULL)
466 return 0;
467
468 /* The GNU linker's Thumb call stub to foo is named
469 __foo_from_thumb. */
470 if (strstr (name, "_from_thumb") != NULL)
471 name += 2;
472
473 /* On soft-float targets, __truncdfsf2 is called to convert promoted
474 arguments to their argument types in non-prototyped
475 functions. */
476 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
477 return 1;
478 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
479 return 1;
480
481 /* Internal functions related to thread-local storage. */
482 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
483 return 1;
484 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
485 return 1;
486
487 return 0;
488 }
489
490 /* Support routines for instruction parsing. */
491 #define submask(x) ((1L << ((x) + 1)) - 1)
492 #define bit(obj,st) (((obj) >> (st)) & 1)
493 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
494 #define sbits(obj,st,fn) \
495 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
496 #define BranchDest(addr,instr) \
497 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
498
499 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
500 the first 16-bit of instruction, and INSN2 is the second 16-bit of
501 instruction. */
502 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
503 ((bits ((insn1), 0, 3) << 12) \
504 | (bits ((insn1), 10, 10) << 11) \
505 | (bits ((insn2), 12, 14) << 8) \
506 | bits ((insn2), 0, 7))
507
508 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
509 the 32-bit instruction. */
510 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
511 ((bits ((insn), 16, 19) << 12) \
512 | bits ((insn), 0, 11))
513
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit modified immediate field i:imm3:imm8; the result
   is the expanded 32-bit constant.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rot = imm >> 7;	/* Top five bits: rotation/selector.  */
  unsigned int low = imm & 0xff;	/* Low byte of the immediate.  */

  if (rot >= 8)
    /* An eight-bit value 1bcdefgh rotated right by ROT bits.  ROT is
       at least 8 here, so the low byte cannot wrap around.  */
    return (0x80 | (imm & 0x7f)) << (32 - rot);

  /* Byte-replication encodings, selected by bits 9:8 of IMM.  */
  switch (rot / 2)
    {
    case 0:			/* 00000000 00000000 00000000 abcdefgh  */
      return low;
    case 1:			/* 00000000 abcdefgh 00000000 abcdefgh  */
      return low | (low << 16);
    case 2:			/* abcdefgh 00000000 abcdefgh 00000000  */
      return (low << 8) | (low << 24);
    default:			/* abcdefgh abcdefgh abcdefgh abcdefgh  */
      return low | (low << 8) | (low << 16) | (low << 24);
    }
}
537
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  Purely a pattern match on the
   encoding; no operand values are interpreted.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
564
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  INST1 is the first
   halfword and INST2 the second, as fetched from memory.  Purely
   a pattern match on the encoding.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  The inst1 test excludes the 0x0380
	     condition field value, which encodes non-branch control
	     instructions in this space.  */
	  return 1;
	}

      /* Other miscellaneous control (MSR, MRS, hints, ...) does not
	 redirect execution.  */
      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  Bits 7 and 8 of INST1 select the
	 addressing variant; only loads that include PC (bit 15 of the
	 register list in INST2) or RFE change control flow.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR PC: a load whose destination register (INST2 bits 12-15)
	 is the PC.  Each test below accepts one addressing form.  */
      if (bits (inst1, 0, 3) == 15)	/* Rn == PC: literal load.  */
	return 1;
      if (bit (inst1, 7))		/* Immediate offset form.  */
	return 1;
      if (bit (inst2, 11))		/* Pre/post-indexed form.  */
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)	/* Register offset form.  */
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  /* Nothing matched: the instruction cannot redirect execution.  */
  return 0;
}
659
660 /* Analyze a Thumb prologue, looking for a recognizable stack frame
661 and frame pointer. Scan until we encounter a store that could
662 clobber the stack frame unexpectedly, or an unknown instruction.
663 Return the last address which is definitely safe to skip for an
664 initial breakpoint. */
665
666 static CORE_ADDR
667 thumb_analyze_prologue (struct gdbarch *gdbarch,
668 CORE_ADDR start, CORE_ADDR limit,
669 struct arm_prologue_cache *cache)
670 {
671 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
672 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
673 int i;
674 pv_t regs[16];
675 struct pv_area *stack;
676 struct cleanup *back_to;
677 CORE_ADDR offset;
678 CORE_ADDR unrecognized_pc = 0;
679
680 for (i = 0; i < 16; i++)
681 regs[i] = pv_register (i, 0);
682 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
683 back_to = make_cleanup_free_pv_area (stack);
684
685 while (start < limit)
686 {
687 unsigned short insn;
688
689 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
690
691 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
692 {
693 int regno;
694 int mask;
695
696 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
697 break;
698
699 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
700 whether to save LR (R14). */
701 mask = (insn & 0xff) | ((insn & 0x100) << 6);
702
703 /* Calculate offsets of saved R0-R7 and LR. */
704 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
705 if (mask & (1 << regno))
706 {
707 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
708 -4);
709 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
710 }
711 }
712 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
713 sub sp, #simm */
714 {
715 offset = (insn & 0x7f) << 2; /* get scaled offset */
716 if (insn & 0x80) /* Check for SUB. */
717 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
718 -offset);
719 else
720 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
721 offset);
722 }
723 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
724 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
725 (insn & 0xff) << 2);
726 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
727 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
728 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
729 bits (insn, 6, 8));
730 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
731 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
732 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
733 bits (insn, 0, 7));
734 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
735 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
736 && pv_is_constant (regs[bits (insn, 3, 5)]))
737 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
738 regs[bits (insn, 6, 8)]);
739 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
740 && pv_is_constant (regs[bits (insn, 3, 6)]))
741 {
742 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
743 int rm = bits (insn, 3, 6);
744 regs[rd] = pv_add (regs[rd], regs[rm]);
745 }
746 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
747 {
748 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
749 int src_reg = (insn & 0x78) >> 3;
750 regs[dst_reg] = regs[src_reg];
751 }
752 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
753 {
754 /* Handle stores to the stack. Normally pushes are used,
755 but with GCC -mtpcs-frame, there may be other stores
756 in the prologue to create the frame. */
757 int regno = (insn >> 8) & 0x7;
758 pv_t addr;
759
760 offset = (insn & 0xff) << 2;
761 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
762
763 if (pv_area_store_would_trash (stack, addr))
764 break;
765
766 pv_area_store (stack, addr, 4, regs[regno]);
767 }
768 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
769 {
770 int rd = bits (insn, 0, 2);
771 int rn = bits (insn, 3, 5);
772 pv_t addr;
773
774 offset = bits (insn, 6, 10) << 2;
775 addr = pv_add_constant (regs[rn], offset);
776
777 if (pv_area_store_would_trash (stack, addr))
778 break;
779
780 pv_area_store (stack, addr, 4, regs[rd]);
781 }
782 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
783 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
784 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
785 /* Ignore stores of argument registers to the stack. */
786 ;
787 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
788 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
789 /* Ignore block loads from the stack, potentially copying
790 parameters from memory. */
791 ;
792 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
793 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
794 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
795 /* Similarly ignore single loads from the stack. */
796 ;
797 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
798 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
799 /* Skip register copies, i.e. saves to another register
800 instead of the stack. */
801 ;
802 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
803 /* Recognize constant loads; even with small stacks these are necessary
804 on Thumb. */
805 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
806 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
807 {
808 /* Constant pool loads, for the same reason. */
809 unsigned int constant;
810 CORE_ADDR loc;
811
812 loc = start + 4 + bits (insn, 0, 7) * 4;
813 constant = read_memory_unsigned_integer (loc, 4, byte_order);
814 regs[bits (insn, 8, 10)] = pv_constant (constant);
815 }
816 else if ((insn & 0xe000) == 0xe000)
817 {
818 unsigned short inst2;
819
820 inst2 = read_memory_unsigned_integer (start + 2, 2,
821 byte_order_for_code);
822
823 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
824 {
825 /* BL, BLX. Allow some special function calls when
826 skipping the prologue; GCC generates these before
827 storing arguments to the stack. */
828 CORE_ADDR nextpc;
829 int j1, j2, imm1, imm2;
830
831 imm1 = sbits (insn, 0, 10);
832 imm2 = bits (inst2, 0, 10);
833 j1 = bit (inst2, 13);
834 j2 = bit (inst2, 11);
835
836 offset = ((imm1 << 12) + (imm2 << 1));
837 offset ^= ((!j2) << 22) | ((!j1) << 23);
838
839 nextpc = start + 4 + offset;
840 /* For BLX make sure to clear the low bits. */
841 if (bit (inst2, 12) == 0)
842 nextpc = nextpc & 0xfffffffc;
843
844 if (!skip_prologue_function (nextpc))
845 break;
846 }
847
848 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
849 { registers } */
850 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
851 {
852 pv_t addr = regs[bits (insn, 0, 3)];
853 int regno;
854
855 if (pv_area_store_would_trash (stack, addr))
856 break;
857
858 /* Calculate offsets of saved registers. */
859 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
860 if (inst2 & (1 << regno))
861 {
862 addr = pv_add_constant (addr, -4);
863 pv_area_store (stack, addr, 4, regs[regno]);
864 }
865
866 if (insn & 0x0020)
867 regs[bits (insn, 0, 3)] = addr;
868 }
869
870 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
871 [Rn, #+/-imm]{!} */
872 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
873 {
874 int regno1 = bits (inst2, 12, 15);
875 int regno2 = bits (inst2, 8, 11);
876 pv_t addr = regs[bits (insn, 0, 3)];
877
878 offset = inst2 & 0xff;
879 if (insn & 0x0080)
880 addr = pv_add_constant (addr, offset);
881 else
882 addr = pv_add_constant (addr, -offset);
883
884 if (pv_area_store_would_trash (stack, addr))
885 break;
886
887 pv_area_store (stack, addr, 4, regs[regno1]);
888 pv_area_store (stack, pv_add_constant (addr, 4),
889 4, regs[regno2]);
890
891 if (insn & 0x0020)
892 regs[bits (insn, 0, 3)] = addr;
893 }
894
895 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
896 && (inst2 & 0x0c00) == 0x0c00
897 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
898 {
899 int regno = bits (inst2, 12, 15);
900 pv_t addr = regs[bits (insn, 0, 3)];
901
902 offset = inst2 & 0xff;
903 if (inst2 & 0x0200)
904 addr = pv_add_constant (addr, offset);
905 else
906 addr = pv_add_constant (addr, -offset);
907
908 if (pv_area_store_would_trash (stack, addr))
909 break;
910
911 pv_area_store (stack, addr, 4, regs[regno]);
912
913 if (inst2 & 0x0100)
914 regs[bits (insn, 0, 3)] = addr;
915 }
916
917 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
918 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
919 {
920 int regno = bits (inst2, 12, 15);
921 pv_t addr;
922
923 offset = inst2 & 0xfff;
924 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
925
926 if (pv_area_store_would_trash (stack, addr))
927 break;
928
929 pv_area_store (stack, addr, 4, regs[regno]);
930 }
931
932 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
933 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
934 /* Ignore stores of argument registers to the stack. */
935 ;
936
937 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
938 && (inst2 & 0x0d00) == 0x0c00
939 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
940 /* Ignore stores of argument registers to the stack. */
941 ;
942
943 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
944 { registers } */
945 && (inst2 & 0x8000) == 0x0000
946 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
947 /* Ignore block loads from the stack, potentially copying
948 parameters from memory. */
949 ;
950
951 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
952 [Rn, #+/-imm] */
953 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
954 /* Similarly ignore dual loads from the stack. */
955 ;
956
957 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
958 && (inst2 & 0x0d00) == 0x0c00
959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
960 /* Similarly ignore single loads from the stack. */
961 ;
962
963 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
964 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
965 /* Similarly ignore single loads from the stack. */
966 ;
967
968 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
969 && (inst2 & 0x8000) == 0x0000)
970 {
971 unsigned int imm = ((bits (insn, 10, 10) << 11)
972 | (bits (inst2, 12, 14) << 8)
973 | bits (inst2, 0, 7));
974
975 regs[bits (inst2, 8, 11)]
976 = pv_add_constant (regs[bits (insn, 0, 3)],
977 thumb_expand_immediate (imm));
978 }
979
980 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
981 && (inst2 & 0x8000) == 0x0000)
982 {
983 unsigned int imm = ((bits (insn, 10, 10) << 11)
984 | (bits (inst2, 12, 14) << 8)
985 | bits (inst2, 0, 7));
986
987 regs[bits (inst2, 8, 11)]
988 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
989 }
990
991 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
992 && (inst2 & 0x8000) == 0x0000)
993 {
994 unsigned int imm = ((bits (insn, 10, 10) << 11)
995 | (bits (inst2, 12, 14) << 8)
996 | bits (inst2, 0, 7));
997
998 regs[bits (inst2, 8, 11)]
999 = pv_add_constant (regs[bits (insn, 0, 3)],
1000 - (CORE_ADDR) thumb_expand_immediate (imm));
1001 }
1002
1003 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1004 && (inst2 & 0x8000) == 0x0000)
1005 {
1006 unsigned int imm = ((bits (insn, 10, 10) << 11)
1007 | (bits (inst2, 12, 14) << 8)
1008 | bits (inst2, 0, 7));
1009
1010 regs[bits (inst2, 8, 11)]
1011 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1012 }
1013
1014 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1015 {
1016 unsigned int imm = ((bits (insn, 10, 10) << 11)
1017 | (bits (inst2, 12, 14) << 8)
1018 | bits (inst2, 0, 7));
1019
1020 regs[bits (inst2, 8, 11)]
1021 = pv_constant (thumb_expand_immediate (imm));
1022 }
1023
1024 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1025 {
1026 unsigned int imm
1027 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1028
1029 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1030 }
1031
1032 else if (insn == 0xea5f /* mov.w Rd,Rm */
1033 && (inst2 & 0xf0f0) == 0)
1034 {
1035 int dst_reg = (inst2 & 0x0f00) >> 8;
1036 int src_reg = inst2 & 0xf;
1037 regs[dst_reg] = regs[src_reg];
1038 }
1039
1040 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1041 {
1042 /* Constant pool loads. */
1043 unsigned int constant;
1044 CORE_ADDR loc;
1045
1046 offset = bits (insn, 0, 11);
1047 if (insn & 0x0080)
1048 loc = start + 4 + offset;
1049 else
1050 loc = start + 4 - offset;
1051
1052 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1053 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1054 }
1055
1056 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1057 {
1058 /* Constant pool loads. */
1059 unsigned int constant;
1060 CORE_ADDR loc;
1061
1062 offset = bits (insn, 0, 7) << 2;
1063 if (insn & 0x0080)
1064 loc = start + 4 + offset;
1065 else
1066 loc = start + 4 - offset;
1067
1068 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1069 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1070
1071 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1072 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1073 }
1074
1075 else if (thumb2_instruction_changes_pc (insn, inst2))
1076 {
1077 /* Don't scan past anything that might change control flow. */
1078 break;
1079 }
1080 else
1081 {
1082 /* The optimizer might shove anything into the prologue,
1083 so we just skip what we don't recognize. */
1084 unrecognized_pc = start;
1085 }
1086
1087 start += 2;
1088 }
1089 else if (thumb_instruction_changes_pc (insn))
1090 {
1091 /* Don't scan past anything that might change control flow. */
1092 break;
1093 }
1094 else
1095 {
1096 /* The optimizer might shove anything into the prologue,
1097 so we just skip what we don't recognize. */
1098 unrecognized_pc = start;
1099 }
1100
1101 start += 2;
1102 }
1103
1104 if (arm_debug)
1105 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1106 paddress (gdbarch, start));
1107
1108 if (unrecognized_pc == 0)
1109 unrecognized_pc = start;
1110
1111 if (cache == NULL)
1112 {
1113 do_cleanups (back_to);
1114 return unrecognized_pc;
1115 }
1116
1117 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1118 {
1119 /* Frame pointer is fp. Frame size is constant. */
1120 cache->framereg = ARM_FP_REGNUM;
1121 cache->framesize = -regs[ARM_FP_REGNUM].k;
1122 }
1123 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1124 {
1125 /* Frame pointer is r7. Frame size is constant. */
1126 cache->framereg = THUMB_FP_REGNUM;
1127 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1128 }
1129 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1130 {
1131 /* Try the stack pointer... this is a bit desperate. */
1132 cache->framereg = ARM_SP_REGNUM;
1133 cache->framesize = -regs[ARM_SP_REGNUM].k;
1134 }
1135 else
1136 {
1137 /* We're just out of luck. We don't know where the frame is. */
1138 cache->framereg = -1;
1139 cache->framesize = 0;
1140 }
1141
1142 for (i = 0; i < 16; i++)
1143 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1144 cache->saved_regs[i].addr = offset;
1145
1146 do_cleanups (back_to);
1147 return unrecognized_pc;
1148 }
1149
1150
1151 /* Try to analyze the instructions starting from PC, which load symbol
1152 __stack_chk_guard. Return the address of instruction after loading this
1153 symbol, set the dest register number to *BASEREG, and set the size of
1154 instructions for loading symbol in OFFSET. Return 0 if instructions are
1155 not recognized. */
1156
1157 static CORE_ADDR
1158 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1159 unsigned int *destreg, int *offset)
1160 {
1161 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1162 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1163 unsigned int low, high, address;
1164
1165 address = 0;
1166 if (is_thumb)
1167 {
1168 unsigned short insn1
1169 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1170
1171 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1172 {
1173 *destreg = bits (insn1, 8, 10);
1174 *offset = 2;
1175 address = bits (insn1, 0, 7);
1176 }
1177 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1178 {
1179 unsigned short insn2
1180 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1181
1182 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1183
1184 insn1
1185 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1186 insn2
1187 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1188
1189 /* movt Rd, #const */
1190 if ((insn1 & 0xfbc0) == 0xf2c0)
1191 {
1192 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1193 *destreg = bits (insn2, 8, 11);
1194 *offset = 8;
1195 address = (high << 16 | low);
1196 }
1197 }
1198 }
1199 else
1200 {
1201 unsigned int insn
1202 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1203
1204 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1205 {
1206 address = bits (insn, 0, 11);
1207 *destreg = bits (insn, 12, 15);
1208 *offset = 4;
1209 }
1210 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1211 {
1212 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1213
1214 insn
1215 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1216
1217 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1218 {
1219 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1220 *destreg = bits (insn, 12, 15);
1221 *offset = 8;
1222 address = (high << 16 | low);
1223 }
1224 }
1225 }
1226
1227 return address;
1228 }
1229
/* Try to skip a sequence of instructions used for stack protector.  If PC
   points to the first instruction of this sequence, return the address of
   first instruction after this sequence, otherwise, return original PC.

   On arm, this sequence of instructions is composed of mainly three steps,
     Step 1: load symbol __stack_chk_guard,
     Step 2: load from address of __stack_chk_guard,
     Step 3: store it to somewhere else.

   Usually, instructions on step 2 and step 3 are the same on various ARM
   architectures.  On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
   on step 3, it is also one instruction 'str Rx, [r7, #immd]'.  However,
   instructions in step 1 vary from different ARM architectures.  On ARMv7,
   they are,

	movw	Rn, #:lower16:__stack_chk_guard
	movt	Rn, #:upper16:__stack_chk_guard

   On ARMv5t, it is,

	ldr	Rn, .Label
	....
	.Label:
	.word	__stack_chk_guard

   Since ldr/str is a very popular instruction, we can't use them as
   'fingerprint' or 'signature' of stack protector sequence.  Here we choose
   sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */

static CORE_ADDR
arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int address, basereg;
  struct minimal_symbol *stack_chk_guard;
  int offset;
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  CORE_ADDR addr;

  /* Try to parse the instructions in Step 1.  */
  addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
					   &basereg, &offset);
  if (!addr)
    return pc;

  stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
  /* If name of symbol doesn't start with '__stack_chk_guard', this
     instruction sequence is not for stack protector.  If symbol is
     removed, we conservatively think this sequence is for stack protector.  */
  if (stack_chk_guard
      && strcmp (SYMBOL_LINKAGE_NAME(stack_chk_guard), "__stack_chk_guard"))
    return pc;

  if (is_thumb)
    {
      unsigned int destreg;
      unsigned short insn
	= read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6800)
	return pc;
      /* The load must read through the register that Step 1 set up.  */
      if (bits (insn, 3, 5) != basereg)
	return pc;
      destreg = bits (insn, 0, 2);

      insn = read_memory_unsigned_integer (pc + offset + 2, 2,
					   byte_order_for_code);
      /* Step 3: str Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6000)
	return pc;
      /* The store must spill the guard value just loaded in Step 2.  */
      if (destreg != bits (insn, 0, 2))
	return pc;
    }
  else
    {
      unsigned int destreg;
      unsigned int insn
	= read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding A1.  */
      if ((insn & 0x0e500000) != 0x04100000)
	return pc;
      /* The load must read through the register that Step 1 set up.  */
      if (bits (insn, 16, 19) != basereg)
	return pc;
      destreg = bits (insn, 12, 15);
      /* Step 3: str Rd, [Rn, #immed], encoding A1.  */
      insn = read_memory_unsigned_integer (pc + offset + 4,
					   4, byte_order_for_code);
      if ((insn & 0x0e500000) != 0x04000000)
	return pc;
      /* The store must spill the guard value just loaded in Step 2.  */
      if (bits (insn, 12, 15) != destreg)
	return pc;
    }
  /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
     on arm.  */
  if (is_thumb)
    return pc + offset + 4;
  else
    return pc + offset + 8;
}
1332
1333 /* Advance the PC across any function entry prologue instructions to
1334 reach some "real" code.
1335
1336 The APCS (ARM Procedure Call Standard) defines the following
1337 prologue:
1338
1339 mov ip, sp
1340 [stmfd sp!, {a1,a2,a3,a4}]
1341 stmfd sp!, {...,fp,ip,lr,pc}
1342 [stfe f7, [sp, #-12]!]
1343 [stfe f6, [sp, #-12]!]
1344 [stfe f5, [sp, #-12]!]
1345 [stfe f4, [sp, #-12]!]
1346 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1347
1348 static CORE_ADDR
1349 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1350 {
1351 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1352 unsigned long inst;
1353 CORE_ADDR skip_pc;
1354 CORE_ADDR func_addr, limit_pc;
1355 struct symtab_and_line sal;
1356
1357 /* See if we can determine the end of the prologue via the symbol table.
1358 If so, then return either PC, or the PC after the prologue, whichever
1359 is greater. */
1360 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1361 {
1362 CORE_ADDR post_prologue_pc
1363 = skip_prologue_using_sal (gdbarch, func_addr);
1364 struct symtab *s = find_pc_symtab (func_addr);
1365
1366 if (post_prologue_pc)
1367 post_prologue_pc
1368 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1369
1370
1371 /* GCC always emits a line note before the prologue and another
1372 one after, even if the two are at the same address or on the
1373 same line. Take advantage of this so that we do not need to
1374 know every instruction that might appear in the prologue. We
1375 will have producer information for most binaries; if it is
1376 missing (e.g. for -gstabs), assuming the GNU tools. */
1377 if (post_prologue_pc
1378 && (s == NULL
1379 || s->producer == NULL
1380 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1381 return post_prologue_pc;
1382
1383 if (post_prologue_pc != 0)
1384 {
1385 CORE_ADDR analyzed_limit;
1386
1387 /* For non-GCC compilers, make sure the entire line is an
1388 acceptable prologue; GDB will round this function's
1389 return value up to the end of the following line so we
1390 can not skip just part of a line (and we do not want to).
1391
1392 RealView does not treat the prologue specially, but does
1393 associate prologue code with the opening brace; so this
1394 lets us skip the first line if we think it is the opening
1395 brace. */
1396 if (arm_pc_is_thumb (gdbarch, func_addr))
1397 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1398 post_prologue_pc, NULL);
1399 else
1400 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1401 post_prologue_pc, NULL);
1402
1403 if (analyzed_limit != post_prologue_pc)
1404 return func_addr;
1405
1406 return post_prologue_pc;
1407 }
1408 }
1409
1410 /* Can't determine prologue from the symbol table, need to examine
1411 instructions. */
1412
1413 /* Find an upper limit on the function prologue using the debug
1414 information. If the debug information could not be used to provide
1415 that bound, then use an arbitrary large number as the upper bound. */
1416 /* Like arm_scan_prologue, stop no later than pc + 64. */
1417 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1418 if (limit_pc == 0)
1419 limit_pc = pc + 64; /* Magic. */
1420
1421
1422 /* Check if this is Thumb code. */
1423 if (arm_pc_is_thumb (gdbarch, pc))
1424 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1425
1426 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1427 {
1428 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1429
1430 /* "mov ip, sp" is no longer a required part of the prologue. */
1431 if (inst == 0xe1a0c00d) /* mov ip, sp */
1432 continue;
1433
1434 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1435 continue;
1436
1437 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1438 continue;
1439
1440 /* Some prologues begin with "str lr, [sp, #-4]!". */
1441 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1442 continue;
1443
1444 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1445 continue;
1446
1447 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1448 continue;
1449
1450 /* Any insns after this point may float into the code, if it makes
1451 for better instruction scheduling, so we skip them only if we
1452 find them, but still consider the function to be frame-ful. */
1453
1454 /* We may have either one sfmfd instruction here, or several stfe
1455 insns, depending on the version of floating point code we
1456 support. */
1457 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1458 continue;
1459
1460 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1461 continue;
1462
1463 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1464 continue;
1465
1466 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1467 continue;
1468
1469 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1470 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1471 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1472 continue;
1473
1474 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1475 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1476 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1477 continue;
1478
1479 /* Un-recognized instruction; stop scanning. */
1480 break;
1481 }
1482
1483 return skip_pc; /* End of prologue. */
1484 }
1485
1486 /* *INDENT-OFF* */
1487 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1488 This function decodes a Thumb function prologue to determine:
1489 1) the size of the stack frame
1490 2) which registers are saved on it
1491 3) the offsets of saved regs
1492 4) the offset from the stack pointer to the frame pointer
1493
1494 A typical Thumb function prologue would create this stack frame
1495 (offsets relative to FP)
1496 old SP -> 24 stack parameters
1497 20 LR
1498 16 R7
1499 R7 -> 0 local variables (16 bytes)
1500 SP -> -12 additional stack space (12 bytes)
1501 The frame size would thus be 36 bytes, and the frame offset would be
1502 12 bytes. The frame register is R7.
1503
1504 The comments for thumb_skip_prolog() describe the algorithm we use
1505 to detect the end of the prolog. */
1506 /* *INDENT-ON* */
1507
1508 static void
1509 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1510 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1511 {
1512 CORE_ADDR prologue_start;
1513 CORE_ADDR prologue_end;
1514 CORE_ADDR current_pc;
1515
1516 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1517 &prologue_end))
1518 {
1519 /* See comment in arm_scan_prologue for an explanation of
1520 this heuristics. */
1521 if (prologue_end > prologue_start + 64)
1522 {
1523 prologue_end = prologue_start + 64;
1524 }
1525 }
1526 else
1527 /* We're in the boondocks: we have no idea where the start of the
1528 function is. */
1529 return;
1530
1531 prologue_end = min (prologue_end, prev_pc);
1532
1533 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1534 }
1535
/* Return 1 if THIS_INSTR might change control flow, 0 otherwise.
   THIS_INSTR is a 32-bit ARM-mode instruction.  Any write to the PC
   (branches, PC-destination data processing, PC loads, LDM with PC)
   counts as a control-flow change.  */

static int
arm_instruction_changes_pc (uint32_t this_instr)
{
  if (bits (this_instr, 28, 31) == INST_NV)
    /* Unconditional instructions.  */
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	/* Branch with Link and change to Thumb.  */
	return 1;
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	return 0;
      default:
	return 0;
      }
  else
    /* Conditional (including AL) instructions: dispatch on the major
       opcode field, bits 25-27.  */
    switch (bits (this_instr, 25, 27))
      {
      case 0x0:
	if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
	  {
	    /* Multiplies and extra load/stores.  */
	    if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
	      /* Neither multiplies nor extension load/stores are allowed
		 to modify PC.  */
	      return 0;

	    /* Otherwise, miscellaneous instructions.  */

	    /* BX <reg>, BXJ <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff2
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      return 1;

	    /* Other miscellaneous instructions are unpredictable if they
	       modify PC.  */
	    return 0;
	  }
	/* Data processing instruction.  Fall through.  */

      case 0x1:
	/* Data processing: changes the PC iff Rd (bits 12-15) is PC.  */
	if (bits (this_instr, 12, 15) == 15)
	  return 1;
	else
	  return 0;

      case 0x2:
      case 0x3:
	/* Media instructions and architecturally undefined instructions.  */
	if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
	  return 0;

	/* Stores.  */
	if (bit (this_instr, 20) == 0)
	  return 0;

	/* Loads.  */
	if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
	  return 1;
	else
	  return 0;

      case 0x4:
	/* Load/store multiple.  Changes the PC iff it is a load (L bit)
	   whose register list includes PC (bit 15).  */
	if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
	  return 1;
	else
	  return 0;

      case 0x5:
	/* Branch and branch with link.  */
	return 1;

      case 0x6:
      case 0x7:
	/* Coprocessor transfers or SWIs can not affect PC.  */
	return 0;

      default:
	internal_error (__FILE__, __LINE__, _("bad value in switch"));
      }
}
1627
/* Analyze an ARM mode prologue starting at PROLOGUE_START and
   continuing no further than PROLOGUE_END.  If CACHE is non-NULL,
   fill it in.  Return the first address not recognized as a prologue
   instruction.

   We recognize all the instructions typically found in ARM prologues,
   plus harmless instructions which can be skipped (either for analysis
   purposes, or a more restrictive set that can be skipped when finding
   the end of the prologue).  */

static CORE_ADDR
arm_analyze_prologue (struct gdbarch *gdbarch,
		      CORE_ADDR prologue_start, CORE_ADDR prologue_end,
		      struct arm_prologue_cache *cache)
{
  /* NOTE(review): BYTE_ORDER appears unused in this body; instruction
     reads below use BYTE_ORDER_FOR_CODE.  */
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int regno;
  CORE_ADDR offset, current_pc;
  pv_t regs[ARM_FPS_REGNUM];
  struct pv_area *stack;
  struct cleanup *back_to;
  int framereg, framesize;
  CORE_ADDR unrecognized_pc = 0;

  /* Search the prologue looking for instructions that set up the
     frame pointer, adjust the stack pointer, and save registers.

     Be careful, however, and if it doesn't look like a prologue,
     don't try to scan it.  If, for instance, a frameless function
     begins with stmfd sp!, then we will tell ourselves there is
     a frame, which will confuse stack traceback, as well as "finish"
     and other operations that rely on a knowledge of the stack
     traceback.  */

  /* Symbolically execute the prologue: every register starts out as
     "its entry value", and stores land in the abstract STACK area.  */
  for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
    regs[regno] = pv_register (regno, 0);
  stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (current_pc = prologue_start;
       current_pc < prologue_end;
       current_pc += 4)
    {
      unsigned int insn
	= read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);

      if (insn == 0xe1a0c00d)		/* mov ip, sp */
	{
	  regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
	  continue;
	}
      else if ((insn & 0xfff00000) == 0xe2800000	/* add Rd, Rn, #n */
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	{
	  /* Decode the rotated 8-bit ARM immediate.  */
	  unsigned imm = insn & 0xff;			/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;		/* rotate amount */
	  int rd = bits (insn, 12, 15);
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
	  continue;
	}
      else if ((insn & 0xfff00000) == 0xe2400000	/* sub Rd, Rn, #n */
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	{
	  unsigned imm = insn & 0xff;			/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;		/* rotate amount */
	  int rd = bits (insn, 12, 15);
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
	  continue;
	}
      else if ((insn & 0xffff0fff) == 0xe52d0004)	/* str Rd,
							   [sp, #-4]! */
	{
	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;
	  regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
	  pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
			 regs[bits (insn, 12, 15)]);
	  continue;
	}
      else if ((insn & 0xffff0000) == 0xe92d0000)
	/* stmfd sp!, {..., fp, ip, lr, pc}
	   or
	   stmfd sp!, {a1, a2, a3, a4}  */
	{
	  int mask = insn & 0xffff;

	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;

	  /* Calculate offsets of saved registers.  Registers are pushed
	     highest-numbered first, at descending addresses.  */
	  for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
	    if (mask & (1 << regno))
	      {
		regs[ARM_SP_REGNUM]
		  = pv_add_constant (regs[ARM_SP_REGNUM], -4);
		pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
	      }
	}
      else if ((insn & 0xffff0000) == 0xe54b0000	/* strb rx,[r11,#-n] */
	       || (insn & 0xffff00f0) == 0xe14b00b0	/* strh rx,[r11,#-n] */
	       || (insn & 0xffffc000) == 0xe50b0000)	/* str rx,[r11,#-n] */
	{
	  /* No need to add this to saved_regs -- it's just an arg reg.  */
	  continue;
	}
      else if ((insn & 0xffff0000) == 0xe5cd0000	/* strb rx,[sp,#n] */
	       || (insn & 0xffff00f0) == 0xe1cd00b0	/* strh rx,[sp,#n] */
	       || (insn & 0xffffc000) == 0xe58d0000)	/* str rx,[sp,#n] */
	{
	  /* No need to add this to saved_regs -- it's just an arg reg.  */
	  continue;
	}
      else if ((insn & 0xfff00000) == 0xe8800000	/* stm Rn,
							   { registers } */
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	{
	  /* No need to add this to saved_regs -- it's just arg regs.  */
	  continue;
	}
      else if ((insn & 0xfffff000) == 0xe24cb000)	/* sub fp, ip #n */
	{
	  unsigned imm = insn & 0xff;			/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;		/* rotate amount */
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
	}
      else if ((insn & 0xfffff000) == 0xe24dd000)	/* sub sp, sp #n */
	{
	  unsigned imm = insn & 0xff;			/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;		/* rotate amount */
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
	}
      else if ((insn & 0xffff7fff) == 0xed6d0103	/* stfe f?,
							   [sp, -#c]! */
	       && gdbarch_tdep (gdbarch)->have_fpa_registers)
	{
	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;

	  regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
	  regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
	  pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
	}
      else if ((insn & 0xffbf0fff) == 0xec2d0200	/* sfmfd f0, 4,
							   [sp!] */
	       && gdbarch_tdep (gdbarch)->have_fpa_registers)
	{
	  int n_saved_fp_regs;
	  unsigned int fp_start_reg, fp_bound_reg;

	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;

	  /* The N0/N1 bits together encode how many FPA registers the
	     sfmfd stores (1-4).  */
	  if ((insn & 0x800) == 0x800)		/* N0 is set */
	    {
	      if ((insn & 0x40000) == 0x40000)	/* N1 is set */
		n_saved_fp_regs = 3;
	      else
		n_saved_fp_regs = 1;
	    }
	  else
	    {
	      if ((insn & 0x40000) == 0x40000)	/* N1 is set */
		n_saved_fp_regs = 2;
	      else
		n_saved_fp_regs = 4;
	    }

	  fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
	  fp_bound_reg = fp_start_reg + n_saved_fp_regs;
	  /* NOTE(review): FP_START_REG is incremented both in the loop
	     header and inside the pv_area_store call, so every other
	     register is skipped -- looks like a long-standing bug in
	     this (obsolete FPA) path; confirm before relying on it.  */
	  for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
	    {
	      regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
	      pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
			     regs[fp_start_reg++]);
	    }
	}
      else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
	{
	  /* Allow some special function calls when skipping the
	     prologue; GCC generates these before storing arguments to
	     the stack.  */
	  CORE_ADDR dest = BranchDest (current_pc, insn);

	  if (skip_prologue_function (dest))
	    continue;
	  else
	    break;
	}
      else if ((insn & 0xf0000000) != 0xe0000000)
	break;			/* Condition not true, exit early.  */
      else if (arm_instruction_changes_pc (insn))
	/* Don't scan past anything that might change control flow.  */
	break;
      else if ((insn & 0xfe500000) == 0xe8100000)	/* ldm */
	{
	  /* Ignore block loads from the stack, potentially copying
	     parameters from memory.  */
	  if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	    continue;
	  else
	    break;
	}
      else if ((insn & 0xfc500000) == 0xe4100000)
	{
	  /* Similarly ignore single loads from the stack.  */
	  if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	    continue;
	  else
	    break;
	}
      else if ((insn & 0xffff0ff0) == 0xe1a00000)
	/* MOV Rd, Rm.  Skip register copies, i.e. saves to another
	   register instead of the stack.  */
	continue;
      else
	{
	  /* The optimizer might shove anything into the prologue,
	     so we just skip what we don't recognize.  */
	  unrecognized_pc = current_pc;
	  continue;
	}
    }

  if (unrecognized_pc == 0)
    unrecognized_pc = current_pc;

  /* The frame size is just the distance from the frame register
     to the original stack pointer.  */
  if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
    {
      /* Frame pointer is fp.  */
      framereg = ARM_FP_REGNUM;
      framesize = -regs[ARM_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
    {
      /* Try the stack pointer... this is a bit desperate.  */
      framereg = ARM_SP_REGNUM;
      framesize = -regs[ARM_SP_REGNUM].k;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      framereg = -1;
      framesize = 0;
    }

  if (cache)
    {
      cache->framereg = framereg;
      cache->framesize = framesize;

      /* Record frame-relative addresses for every register the
	 prologue saved to the stack.  */
      for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
	if (pv_area_find_reg (stack, gdbarch, regno, &offset))
	  cache->saved_regs[regno].addr = offset;
    }

  if (arm_debug)
    fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
			paddress (gdbarch, unrecognized_pc));

  do_cleanups (back_to);
  return unrecognized_pc;
}
1897
1898 static void
1899 arm_scan_prologue (struct frame_info *this_frame,
1900 struct arm_prologue_cache *cache)
1901 {
1902 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1903 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1904 int regno;
1905 CORE_ADDR prologue_start, prologue_end, current_pc;
1906 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1907 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1908 pv_t regs[ARM_FPS_REGNUM];
1909 struct pv_area *stack;
1910 struct cleanup *back_to;
1911 CORE_ADDR offset;
1912
1913 /* Assume there is no frame until proven otherwise. */
1914 cache->framereg = ARM_SP_REGNUM;
1915 cache->framesize = 0;
1916
1917 /* Check for Thumb prologue. */
1918 if (arm_frame_is_thumb (this_frame))
1919 {
1920 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1921 return;
1922 }
1923
1924 /* Find the function prologue. If we can't find the function in
1925 the symbol table, peek in the stack frame to find the PC. */
1926 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1927 &prologue_end))
1928 {
1929 /* One way to find the end of the prologue (which works well
1930 for unoptimized code) is to do the following:
1931
1932 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1933
1934 if (sal.line == 0)
1935 prologue_end = prev_pc;
1936 else if (sal.end < prologue_end)
1937 prologue_end = sal.end;
1938
1939 This mechanism is very accurate so long as the optimizer
1940 doesn't move any instructions from the function body into the
1941 prologue. If this happens, sal.end will be the last
1942 instruction in the first hunk of prologue code just before
1943 the first instruction that the scheduler has moved from
1944 the body to the prologue.
1945
1946 In order to make sure that we scan all of the prologue
1947 instructions, we use a slightly less accurate mechanism which
1948 may scan more than necessary. To help compensate for this
1949 lack of accuracy, the prologue scanning loop below contains
1950 several clauses which'll cause the loop to terminate early if
1951 an implausible prologue instruction is encountered.
1952
1953 The expression
1954
1955 prologue_start + 64
1956
1957 is a suitable endpoint since it accounts for the largest
1958 possible prologue plus up to five instructions inserted by
1959 the scheduler. */
1960
1961 if (prologue_end > prologue_start + 64)
1962 {
1963 prologue_end = prologue_start + 64; /* See above. */
1964 }
1965 }
1966 else
1967 {
1968 /* We have no symbol information. Our only option is to assume this
1969 function has a standard stack frame and the normal frame register.
1970 Then, we can find the value of our frame pointer on entrance to
1971 the callee (or at the present moment if this is the innermost frame).
1972 The value stored there should be the address of the stmfd + 8. */
1973 CORE_ADDR frame_loc;
1974 LONGEST return_value;
1975
1976 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1977 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1978 return;
1979 else
1980 {
1981 prologue_start = gdbarch_addr_bits_remove
1982 (gdbarch, return_value) - 8;
1983 prologue_end = prologue_start + 64; /* See above. */
1984 }
1985 }
1986
1987 if (prev_pc < prologue_end)
1988 prologue_end = prev_pc;
1989
1990 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1991 }
1992
1993 static struct arm_prologue_cache *
1994 arm_make_prologue_cache (struct frame_info *this_frame)
1995 {
1996 int reg;
1997 struct arm_prologue_cache *cache;
1998 CORE_ADDR unwound_fp;
1999
2000 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2001 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2002
2003 arm_scan_prologue (this_frame, cache);
2004
2005 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2006 if (unwound_fp == 0)
2007 return cache;
2008
2009 cache->prev_sp = unwound_fp + cache->framesize;
2010
2011 /* Calculate actual addresses of saved registers using offsets
2012 determined by arm_scan_prologue. */
2013 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2014 if (trad_frame_addr_p (cache->saved_regs, reg))
2015 cache->saved_regs[reg].addr += cache->prev_sp;
2016
2017 return cache;
2018 }
2019
/* Our frame ID for a normal frame is the current function's starting PC
   and the caller's SP when we were called.  Returning without setting
   *THIS_ID leaves the default outer-frame ID, which terminates the
   backtrace.  */

static void
arm_prologue_this_id (struct frame_info *this_frame,
		      void **this_cache,
		      struct frame_id *this_id)
{
  struct arm_prologue_cache *cache;
  struct frame_id id;
  CORE_ADDR pc, func;

  if (*this_cache == NULL)
    *this_cache = arm_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* This is meant to halt the backtrace at "_start".  */
  pc = get_frame_pc (this_frame);
  if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return;

  /* If we've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return;

  func = get_frame_func (this_frame);
  id = frame_id_build (cache->prev_sp, func);
  *this_id = id;
}
2049
/* Unwind register PREV_REGNUM from THIS_FRAME using the prologue
   cache.  PC, SP and CPSR need special reconstruction; all other
   registers are looked up in the saved-register table filled in by
   the prologue scan.  */

static struct value *
arm_prologue_prev_register (struct frame_info *this_frame,
			    void **this_cache,
			    int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct arm_prologue_cache *cache;

  if (*this_cache == NULL)
    *this_cache = arm_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  Also
     strip the saved T bit.  A valid LR may have the low bit set, but
     a valid PC never does.  */
  if (prev_regnum == ARM_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum,
					arm_addr_bits_remove (gdbarch, lr));
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the call.
     The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == ARM_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);

  /* The CPSR may have been changed by the call instruction and by the
     called function.  The only bit we can reconstruct is the T bit,
     by checking the low bit of LR as of the call.  This is a reliable
     indicator of Thumb-ness except for some ARM v4T pre-interworking
     Thumb code, which could get away with a clear low bit as long as
     the called function did not use bx.  Guess that all other
     bits are unchanged; the condition flags are presumably lost,
     but the processor status is likely valid.  */
  if (prev_regnum == ARM_PS_REGNUM)
    {
      CORE_ADDR lr, cpsr;
      ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);

      cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      if (IS_THUMB_ADDR (lr))
	cpsr |= t_bit;
      else
	cpsr &= ~t_bit;
      return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
    }

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
2107
/* Unwinder for normal frames, driven by prologue analysis.  Uses the
   default sniffer, so it applies whenever no earlier unwinder claims
   the frame.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2115
2116 static struct arm_prologue_cache *
2117 arm_make_stub_cache (struct frame_info *this_frame)
2118 {
2119 struct arm_prologue_cache *cache;
2120
2121 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2122 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2123
2124 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2125
2126 return cache;
2127 }
2128
2129 /* Our frame ID for a stub frame is the current SP and LR. */
2130
2131 static void
2132 arm_stub_this_id (struct frame_info *this_frame,
2133 void **this_cache,
2134 struct frame_id *this_id)
2135 {
2136 struct arm_prologue_cache *cache;
2137
2138 if (*this_cache == NULL)
2139 *this_cache = arm_make_stub_cache (this_frame);
2140 cache = *this_cache;
2141
2142 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2143 }
2144
2145 static int
2146 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2147 struct frame_info *this_frame,
2148 void **this_prologue_cache)
2149 {
2150 CORE_ADDR addr_in_block;
2151 char dummy[4];
2152
2153 addr_in_block = get_frame_address_in_block (this_frame);
2154 if (in_plt_section (addr_in_block, NULL)
2155 /* We also use the stub winder if the target memory is unreadable
2156 to avoid having the prologue unwinder trying to read it. */
2157 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2158 return 1;
2159
2160 return 0;
2161 }
2162
/* Unwinder for PLT stubs and unreadable code; registers unwind just
   like in a normal prologue-analyzed frame.  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2170
2171 static CORE_ADDR
2172 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2173 {
2174 struct arm_prologue_cache *cache;
2175
2176 if (*this_cache == NULL)
2177 *this_cache = arm_make_prologue_cache (this_frame);
2178 cache = *this_cache;
2179
2180 return cache->prev_sp - cache->framesize;
2181 }
2182
/* Frame base for normal frames: all three base methods (frame, locals
   and args) resolve to the post-prologue SP.  */

struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
2189
2190 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2191 dummy frame. The frame ID's base needs to match the TOS value
2192 saved by save_dummy_frame_tos() and returned from
2193 arm_push_dummy_call, and the PC needs to match the dummy frame's
2194 breakpoint. */
2195
2196 static struct frame_id
2197 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2198 {
2199 return frame_id_build (get_frame_register_unsigned (this_frame,
2200 ARM_SP_REGNUM),
2201 get_frame_pc (this_frame));
2202 }
2203
2204 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2205 be used to construct the previous frame's ID, after looking up the
2206 containing function). */
2207
2208 static CORE_ADDR
2209 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2210 {
2211 CORE_ADDR pc;
2212 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2213 return arm_addr_bits_remove (gdbarch, pc);
2214 }
2215
2216 static CORE_ADDR
2217 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2218 {
2219 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2220 }
2221
/* DWARF CFI callback used to reconstruct registers that the call-frame
   information does not describe directly: the PC (taken from the LR
   return column with the Thumb bit stripped) and the CPSR (current
   value with the T bit recomputed from LR's low bit).  Any other
   REGNUM is an internal error.  */

static struct value *
arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
			  int regnum)
{
  struct gdbarch * gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr, cpsr;
  ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);

  switch (regnum)
    {
    case ARM_PC_REGNUM:
      /* The PC is normally copied from the return column, which
	 describes saves of LR.  However, that version may have an
	 extra bit set to indicate Thumb state.  The bit is not
	 part of the PC.  */
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum,
					arm_addr_bits_remove (gdbarch, lr));

    case ARM_PS_REGNUM:
      /* Reconstruct the T bit; see arm_prologue_prev_register for details.  */
      cpsr = get_frame_register_unsigned (this_frame, regnum);
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      if (IS_THUMB_ADDR (lr))
	cpsr |= t_bit;
      else
	cpsr &= ~t_bit;
      return frame_unwind_got_constant (this_frame, regnum, cpsr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}
2256
2257 static void
2258 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
2259 struct dwarf2_frame_state_reg *reg,
2260 struct frame_info *this_frame)
2261 {
2262 switch (regnum)
2263 {
2264 case ARM_PC_REGNUM:
2265 case ARM_PS_REGNUM:
2266 reg->how = DWARF2_FRAME_REG_FN;
2267 reg->loc.fn = arm_dwarf2_prev_register;
2268 break;
2269 case ARM_SP_REGNUM:
2270 reg->how = DWARF2_FRAME_REG_CFA;
2271 break;
2272 }
2273 }
2274
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  Thumb
   (16/32-bit encoding) version.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      /* Read one 16-bit halfword at a time; 32-bit Thumb-2 insns are
	 handled by reading a second halfword below.  */
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if ((insn & 0xe000) == 0xe000)  /* 32-bit Thumb-2 instruction */
	{
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* INSN/INSN2 are the two halfwords preceding PC; INSN2 alone is
	 the candidate 16-bit instruction, INSN+INSN2 the candidate
	 32-bit one.  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
2399
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  Dispatches
   to the Thumb variant for Thumb code; this body handles 32-bit ARM
   encodings only.  */

static int
arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return, found_stack_adjust;
  CORE_ADDR func_start, func_end;

  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_in_function_epilogue_p (gdbarch, pc);

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-space) encodings; they are not returns.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  found_stack_adjust = 0;
  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0df0f000) == 0x0080d000)
	/* ADD SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0df0f000) == 0x0040d000)
	/* SUB SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0d000)
	/* MOV SP.  */
	found_stack_adjust = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000)
	/* POP (LDMIA).  */
	found_stack_adjust = 1;
    }

  if (found_stack_adjust)
    return 1;

  return 0;
}
2471
2472
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Number of bytes in DATA.  */
  struct stack_item *prev;	/* Older item beneath this one, or NULL.  */
  void *data;			/* xmalloc'd copy of the argument bytes.  */
};
2482
2483 static struct stack_item *
2484 push_stack_item (struct stack_item *prev, const void *contents, int len)
2485 {
2486 struct stack_item *si;
2487 si = xmalloc (sizeof (struct stack_item));
2488 si->data = xmalloc (len);
2489 si->len = len;
2490 si->prev = prev;
2491 memcpy (si->data, contents, len);
2492 return si;
2493 }
2494
2495 static struct stack_item *
2496 pop_stack_item (struct stack_item *si)
2497 {
2498 struct stack_item *dead = si;
2499 si = si->prev;
2500 xfree (dead->data);
2501 xfree (dead);
2502 return si;
2503 }
2504
2505
/* Return the alignment (in bytes) of the given type.  Scalars align to
   their own size; arrays and complex types to their element type; and
   structs/unions to the maximum alignment of their fields.  */

static int
arm_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      /* TODO: What about vector types?  */
      return arm_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
2552
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,		/* Not yet classified.  */
  VFP_CPRC_SINGLE,		/* 32-bit float (s registers).  */
  VFP_CPRC_DOUBLE,		/* 64-bit float (d registers).  */
  VFP_CPRC_VEC64,		/* 64-bit vector (d registers).  */
  VFP_CPRC_VEC128		/* 128-bit vector (q registers).  */
};
2564
2565 /* The length of one element of base type B. */
2566
2567 static unsigned
2568 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
2569 {
2570 switch (b)
2571 {
2572 case VFP_CPRC_SINGLE:
2573 return 4;
2574 case VFP_CPRC_DOUBLE:
2575 return 8;
2576 case VFP_CPRC_VEC64:
2577 return 8;
2578 case VFP_CPRC_VEC128:
2579 return 16;
2580 default:
2581 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2582 (int) b);
2583 }
2584 }
2585
2586 /* The character ('s', 'd' or 'q') for the type of VFP register used
2587 for passing base type B. */
2588
2589 static int
2590 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
2591 {
2592 switch (b)
2593 {
2594 case VFP_CPRC_SINGLE:
2595 return 's';
2596 case VFP_CPRC_DOUBLE:
2597 return 'd';
2598 case VFP_CPRC_VEC64:
2599 return 'd';
2600 case VFP_CPRC_VEC128:
2601 return 'q';
2602 default:
2603 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2604 (int) b);
2605 }
2606 }
2607
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* Floats classify by size: 4 -> single, 8 -> double; anything
	 else (e.g. long double) is not a CPRC component.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	/* Element count comes from the array's total size, not from
	   COUNT, so multi-dimensional arrays work out naturally.  */
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* Struct members contribute additively.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	/* Reject if padding makes the size differ from the sum of the
	   components.  */
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* Union members overlap, so take the maximum, not the sum.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
2728
2729 /* Determine whether T is a VFP co-processor register candidate (CPRC)
2730 if passed to or returned from a non-variadic function with the VFP
2731 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
2732 *BASE_TYPE to the base type for T and *COUNT to the number of
2733 elements of that base type before returning. */
2734
2735 static int
2736 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
2737 int *count)
2738 {
2739 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
2740 int c = arm_vfp_cprc_sub_candidate (t, &b);
2741 if (c <= 0 || c > 4)
2742 return 0;
2743 *base_type = b;
2744 *count = c;
2745 return 1;
2746 }
2747
2748 /* Return 1 if the VFP ABI should be used for passing arguments to and
2749 returning values from a function of type FUNC_TYPE, 0
2750 otherwise. */
2751
2752 static int
2753 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
2754 {
2755 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2756 /* Variadic functions always use the base ABI. Assume that functions
2757 without debug info are not variadic. */
2758 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
2759 return 0;
2760 /* The VFP ABI is only supported as a variant of AAPCS. */
2761 if (tdep->arm_abi != ARM_ABI_AAPCS)
2762 return 0;
2763 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
2764 }
2765
/* We currently only support passing parameters in integer registers, which
   conforms with GCC's default model, and VFP argument passing following
   the VFP variant of AAPCS.  Several other variants exist and
   we should probably support some of them based on the selected ABI.

   Sets up LR, argument registers (core and, if applicable, VFP), and
   the stack for a call dummy; returns the final SP, which becomes the
   dummy frame's TOS.  */

static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  /* Bitmask of free 32-bit VFP (s) registers; bit N set means sN is
     still available for argument passing.  */
  unsigned vfp_regs_free = (1 << 16) - 1;

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  /* Find SHIFT consecutive-aligned groups of free s registers
	     large enough for all VFP_BASE_COUNT elements.  */
	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}

      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }

	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  /* Drain the FILO: items were pushed in argument order, so writing
     them while decrementing SP lays them out in reverse.  */
  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}
2998
2999
3000 /* Always align the frame to an 8-byte boundary. This is required on
3001 some platforms and harmless on the rest. */
3002
3003 static CORE_ADDR
3004 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3005 {
3006 /* Align the stack to eight bytes. */
3007 return sp & ~ (CORE_ADDR) 7;
3008 }
3009
/* Print to stdout one mnemonic for each FPU exception-flag bit set in
   FLAGS (bits 0-4), followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int bit;

  for (bit = 0; bit < 5; bit++)
    if (flags & (1 << bit))
      fputs (flag_names[bit], stdout);
  putchar ('\n');
}
3025
/* Print interesting information about the floating point processor
   (if present) or emulator.  Decodes the FPS register: bit 31 selects
   hardware vs software FPU, bits 24-30 the FPU type, bits 16-20 the
   exception mask and bits 0-4 the exception flags.

   NOTE(review): output goes to stdout via printf/fputs rather than to
   the FILE argument — confirm whether this should use
   fprintf_filtered (file, ...) instead.  */
static void
arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
		      struct frame_info *frame, const char *args)
{
  unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
  int type;

  type = (status >> 24) & 127;
  if (status & (1 << 31))
    printf (_("Hardware FPU type %d\n"), type);
  else
    printf (_("Software FPU type %d\n"), type);
  /* i18n: [floating point unit] mask */
  fputs (_("mask: "), stdout);
  print_fpu_flags (status >> 16);
  /* i18n: [floating point unit] flags */
  fputs (_("flags: "), stdout);
  print_fpu_flags (status);
}
3047
3048 /* Construct the ARM extended floating point type. */
3049 static struct type *
3050 arm_ext_type (struct gdbarch *gdbarch)
3051 {
3052 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3053
3054 if (!tdep->arm_ext_type)
3055 tdep->arm_ext_type
3056 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3057 floatformats_arm_ext);
3058
3059 return tdep->arm_ext_type;
3060 }
3061
/* Return (building and caching it on first use) a union type used to
   display a 64-bit NEON D register under all its element
   interpretations: u8/u16/u32 vectors, a u64 scalar, an f32 vector and
   an f64 scalar.  */

static struct type *
arm_neon_double_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_double_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", elem);
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", elem);

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_d";
      tdep->neon_double_type = t;
    }

  return tdep->neon_double_type;
}
3093
3094 /* FIXME: The vector types are not correctly ordered on big-endian
3095 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3096 bits of d0 - regardless of what unit size is being held in d0. So
3097 the offset of the first uint8 in d0 is 7, but the offset of the
3098 first float is 4. This code works as-is for little-endian
3099 targets. */
3100
3101 static struct type *
3102 arm_neon_quad_type (struct gdbarch *gdbarch)
3103 {
3104 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3105
3106 if (tdep->neon_quad_type == NULL)
3107 {
3108 struct type *t, *elem;
3109
3110 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3111 TYPE_CODE_UNION);
3112 elem = builtin_type (gdbarch)->builtin_uint8;
3113 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3114 elem = builtin_type (gdbarch)->builtin_uint16;
3115 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3116 elem = builtin_type (gdbarch)->builtin_uint32;
3117 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3118 elem = builtin_type (gdbarch)->builtin_uint64;
3119 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3120 elem = builtin_type (gdbarch)->builtin_float;
3121 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3122 elem = builtin_type (gdbarch)->builtin_double;
3123 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3124
3125 TYPE_VECTOR (t) = 1;
3126 TYPE_NAME (t) = "neon_q";
3127 tdep->neon_quad_type = t;
3128 }
3129
3130 return tdep->neon_quad_type;
3131 }
3132
3133 /* Return the GDB type object for the "standard" data type of data in
3134 register N. */
3135
3136 static struct type *
3137 arm_register_type (struct gdbarch *gdbarch, int regnum)
3138 {
3139 int num_regs = gdbarch_num_regs (gdbarch);
3140
3141 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
3142 && regnum >= num_regs && regnum < num_regs + 32)
3143 return builtin_type (gdbarch)->builtin_float;
3144
3145 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
3146 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
3147 return arm_neon_quad_type (gdbarch);
3148
3149 /* If the target description has register information, we are only
3150 in this function so that we can override the types of
3151 double-precision registers for NEON. */
3152 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
3153 {
3154 struct type *t = tdesc_register_type (gdbarch, regnum);
3155
3156 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
3157 && TYPE_CODE (t) == TYPE_CODE_FLT
3158 && gdbarch_tdep (gdbarch)->have_neon)
3159 return arm_neon_double_type (gdbarch);
3160 else
3161 return t;
3162 }
3163
3164 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
3165 {
3166 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
3167 return builtin_type (gdbarch)->builtin_void;
3168
3169 return arm_ext_type (gdbarch);
3170 }
3171 else if (regnum == ARM_SP_REGNUM)
3172 return builtin_type (gdbarch)->builtin_data_ptr;
3173 else if (regnum == ARM_PC_REGNUM)
3174 return builtin_type (gdbarch)->builtin_func_ptr;
3175 else if (regnum >= ARRAY_SIZE (arm_register_names))
3176 /* These registers are only supported on targets which supply
3177 an XML description. */
3178 return builtin_type (gdbarch)->builtin_int0;
3179 else
3180 return builtin_type (gdbarch)->builtin_uint32;
3181 }
3182
3183 /* Map a DWARF register REGNUM onto the appropriate GDB register
3184 number. */
3185
3186 static int
3187 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3188 {
3189 /* Core integer regs. */
3190 if (reg >= 0 && reg <= 15)
3191 return reg;
3192
3193 /* Legacy FPA encoding. These were once used in a way which
3194 overlapped with VFP register numbering, so their use is
3195 discouraged, but GDB doesn't support the ARM toolchain
3196 which used them for VFP. */
3197 if (reg >= 16 && reg <= 23)
3198 return ARM_F0_REGNUM + reg - 16;
3199
3200 /* New assignments for the FPA registers. */
3201 if (reg >= 96 && reg <= 103)
3202 return ARM_F0_REGNUM + reg - 96;
3203
3204 /* WMMX register assignments. */
3205 if (reg >= 104 && reg <= 111)
3206 return ARM_WCGR0_REGNUM + reg - 104;
3207
3208 if (reg >= 112 && reg <= 127)
3209 return ARM_WR0_REGNUM + reg - 112;
3210
3211 if (reg >= 192 && reg <= 199)
3212 return ARM_WC0_REGNUM + reg - 192;
3213
3214 /* VFP v2 registers. A double precision value is actually
3215 in d1 rather than s2, but the ABI only defines numbering
3216 for the single precision registers. This will "just work"
3217 in GDB for little endian targets (we'll read eight bytes,
3218 starting in s0 and then progressing to s1), but will be
3219 reversed on big endian targets with VFP. This won't
3220 be a problem for the new Neon quad registers; you're supposed
3221 to use DW_OP_piece for those. */
3222 if (reg >= 64 && reg <= 95)
3223 {
3224 char name_buf[4];
3225
3226 sprintf (name_buf, "s%d", reg - 64);
3227 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3228 strlen (name_buf));
3229 }
3230
3231 /* VFP v3 / Neon registers. This range is also used for VFP v2
3232 registers, except that it now describes d0 instead of s0. */
3233 if (reg >= 256 && reg <= 287)
3234 {
3235 char name_buf[4];
3236
3237 sprintf (name_buf, "d%d", reg - 256);
3238 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3239 strlen (name_buf));
3240 }
3241
3242 return -1;
3243 }
3244
3245 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
3246 static int
3247 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
3248 {
3249 int reg = regnum;
3250 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
3251
3252 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
3253 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
3254
3255 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
3256 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
3257
3258 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
3259 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
3260
3261 if (reg < NUM_GREGS)
3262 return SIM_ARM_R0_REGNUM + reg;
3263 reg -= NUM_GREGS;
3264
3265 if (reg < NUM_FREGS)
3266 return SIM_ARM_FP0_REGNUM + reg;
3267 reg -= NUM_FREGS;
3268
3269 if (reg < NUM_SREGS)
3270 return SIM_ARM_FPS_REGNUM + reg;
3271 reg -= NUM_SREGS;
3272
3273 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
3274 }
3275
3276 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
3277 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
3278 It is thought that this is is the floating-point register format on
3279 little-endian systems. */
3280
3281 static void
3282 convert_from_extended (const struct floatformat *fmt, const void *ptr,
3283 void *dbl, int endianess)
3284 {
3285 DOUBLEST d;
3286
3287 if (endianess == BFD_ENDIAN_BIG)
3288 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
3289 else
3290 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
3291 ptr, &d);
3292 floatformat_from_doublest (fmt, &d, dbl);
3293 }
3294
3295 static void
3296 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
3297 int endianess)
3298 {
3299 DOUBLEST d;
3300
3301 floatformat_to_doublest (fmt, ptr, &d);
3302 if (endianess == BFD_ENDIAN_BIG)
3303 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
3304 else
3305 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
3306 &d, dbl);
3307 }
3308
3309 static int
3310 condition_true (unsigned long cond, unsigned long status_reg)
3311 {
3312 if (cond == INST_AL || cond == INST_NV)
3313 return 1;
3314
3315 switch (cond)
3316 {
3317 case INST_EQ:
3318 return ((status_reg & FLAG_Z) != 0);
3319 case INST_NE:
3320 return ((status_reg & FLAG_Z) == 0);
3321 case INST_CS:
3322 return ((status_reg & FLAG_C) != 0);
3323 case INST_CC:
3324 return ((status_reg & FLAG_C) == 0);
3325 case INST_MI:
3326 return ((status_reg & FLAG_N) != 0);
3327 case INST_PL:
3328 return ((status_reg & FLAG_N) == 0);
3329 case INST_VS:
3330 return ((status_reg & FLAG_V) != 0);
3331 case INST_VC:
3332 return ((status_reg & FLAG_V) == 0);
3333 case INST_HI:
3334 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
3335 case INST_LS:
3336 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
3337 case INST_GE:
3338 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
3339 case INST_LT:
3340 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
3341 case INST_GT:
3342 return (((status_reg & FLAG_Z) == 0)
3343 && (((status_reg & FLAG_N) == 0)
3344 == ((status_reg & FLAG_V) == 0)));
3345 case INST_LE:
3346 return (((status_reg & FLAG_Z) != 0)
3347 || (((status_reg & FLAG_N) == 0)
3348 != ((status_reg & FLAG_V) == 0)));
3349 }
3350 return 1;
3351 }
3352
/* Evaluate the shifter operand (operand 2) of the ARM data-processing
   instruction INST, using register values from FRAME.  PC_VAL is the
   value to use for r15 reads (already adjusted for the base prefetch
   offset); CARRY is the current carry flag, used only for RRX.
   STATUS_REG is currently unused.  The result is masked to 32 bits.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: amount is the low byte of Rs.
	 Reading r15 here yields PC + 8 past the adjusted value.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount, bits 7-11.  */
    shift = bits (inst, 7, 11);

  /* When Rm is the PC, the read value depends on whether the shift
     amount came from a register (PC + 12) or an immediate (PC + 8).  */
  res = (rm == 15
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more replicate the sign bit everywhere.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* ROR #0 encodes RRX: rotate right by one through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  return res & 0xffffffff;
}
3402
/* Return number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Kernighan's trick: each iteration clears the lowest set bit.  */
  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
3413
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* A top-five-bit pattern of 0b111xx with xx != 00 marks the first
     halfword of a 32-bit Thumb-2 instruction; anything else is a
     16-bit instruction.  */
  return ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0) ? 4 : 2;
}
3425
/* Advance the Thumb-2 IT-block state ITSTATE by one instruction and
   return the new state (0 once the block is exhausted).  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  unsigned int next;

  /* Keep the base condition in IT[7:5]; shift the remaining per
     instruction condition bits left by one.  */
  next = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  /* A zero low nibble means the IT block has finished.  */
  return (next & 0x0f) == 0 ? 0 : (int) next;
}
3439
/* Find the next PC after the current instruction executes.  In some
   cases we can not statically determine the answer (see the IT state
   handling in this function); in that case, a breakpoint may be
   inserted in addition to the returned PC, which will be used to set
   another breakpoint by our caller.

   FRAME supplies register and memory context and PC addresses the
   Thumb instruction being stepped over.  If INSERT_BKPT is non-zero,
   this function may itself plant a single-step breakpoint on an
   alternate successor inside an IT block.  The returned address has
   the Thumb execution-state bit encoded (see MAKE_THUMB_ADDR).  */

static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;	/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      if (insert_bkpt)
		insert_single_step_breakpoint (gdbarch, aspace, pc);
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	{
	  /* Advance to the next instruction.  All the 32-bit
	     instructions share a common prefix.  */
	  if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
	    return MAKE_THUMB_ADDR (pc + 4);
	  else
	    return MAKE_THUMB_ADDR (pc + 2);
	}

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
         all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* OS-specific syscall stepping; when no hook is installed the
	     default of PC + 2 stands.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* Reassemble the branch offset; J1/J2 are stored
		 inverted relative to the sign bit.  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == 15)
	    {
	      /* PC-relative: align the base and apply the signed
		 12-bit immediate.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
3816
/* Get the raw next address.  PC is the current program counter, in
   FRAME.  INSERT_BKPT should be TRUE if we want a breakpoint set on
   the alternative next instruction if there are two options.

   The value returned has the execution state of the next instruction
   encoded in it.  Use IS_THUMB_ADDR () to see whether the instruction is
   in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
   address.  */

static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  /* Thumb mode is handled by its own decoder.  */
  if (arm_frame_is_thumb (frame))
    return thumb_get_next_pc_raw (frame, pc, insert_bkpt);

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* Condition field 0xf (NV) selects the unconditional instruction
     space rather than "never execute".  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:			/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only interesting when the destination is the PC.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = (rn == 15) ? pc_val + 8
		  : get_frame_register_unsigned (frame, rn);
		return nextpc;
	      }

	    /* Multiply into PC.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    operand1 = (rn == 15) ? pc_val + 8
	      : get_frame_register_unsigned (frame, rn);

	    if (bit (this_instr, 25))
	      {
		/* Immediate operand: 8-bit value rotated right by
		   twice the 4-bit rotate field.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    /* Emulate the data-processing opcode to compute the value
	       written to the PC.  */
	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison opcodes do not write the PC; keep the
		   default successor.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = (rn == 15) ? pc_val + 8
		    : get_frame_register_unsigned (frame, rn);
		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
			(bit (this_instr, 25)
			 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
			 : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
							    4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  {
		    unsigned long rn_val =
		      get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));
		    nextpc =
		      (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
								  + offset),
						       4, byte_order);
		  }
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* OS-specific syscall stepping; when no hook is installed
	       the default of PC + 4 stands.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
4086
4087 CORE_ADDR
4088 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4089 {
4090 struct gdbarch *gdbarch = get_frame_arch (frame);
4091 CORE_ADDR nextpc =
4092 gdbarch_addr_bits_remove (gdbarch,
4093 arm_get_next_pc_raw (frame, pc, TRUE));
4094 if (nextpc == pc)
4095 error (_("Infinite loop detected"));
4096 return nextpc;
4097 }
4098
4099 /* single_step() is called just before we want to resume the inferior,
4100 if we want to single-step it but there is no hardware or kernel
4101 single-step support. We find the target of the coming instruction
4102 and breakpoint it. */
4103
4104 int
4105 arm_software_single_step (struct frame_info *frame)
4106 {
4107 struct gdbarch *gdbarch = get_frame_arch (frame);
4108 struct address_space *aspace = get_frame_address_space (frame);
4109
4110 /* NOTE: This may insert the wrong breakpoint instruction when
4111 single-stepping over a mode-changing instruction, if the
4112 CPSR heuristics are used. */
4113
4114 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
4115 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
4116
4117 return 1;
4118 }
4119
4120 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4121 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4122 NULL if an error occurs. BUF is freed. */
4123
4124 static gdb_byte *
4125 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4126 int old_len, int new_len)
4127 {
4128 gdb_byte *new_buf, *middle;
4129 int bytes_to_read = new_len - old_len;
4130
4131 new_buf = xmalloc (new_len);
4132 memcpy (new_buf + bytes_to_read, buf, old_len);
4133 xfree (buf);
4134 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
4135 {
4136 xfree (new_buf);
4137 return NULL;
4138 }
4139 return new_buf;
4140 }
4141
4142 /* An IT block is at most the 2-byte IT instruction followed by
4143 four 4-byte instructions. The furthest back we must search to
4144 find an IT block that affects the current instruction is thus
4145 2 + 3 * 4 == 14 bytes. */
4146 #define MAX_IT_BLOCK_PREFIX 14
4147
4148 /* Use a quick scan if there are more than this many bytes of
4149 code. */
4150 #define IT_SCAN_THRESHOLD 32
4151
4152 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4153 A breakpoint in an IT block may not be hit, depending on the
4154 condition flags. */
4155 static CORE_ADDR
4156 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4157 {
4158 gdb_byte *buf;
4159 char map_type;
4160 CORE_ADDR boundary, func_start;
4161 int buf_len, buf2_len;
4162 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4163 int i, any, last_it, last_it_count;
4164
4165 /* If we are using BKPT breakpoints, none of this is necessary. */
4166 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4167 return bpaddr;
4168
4169 /* ARM mode does not have this problem. */
4170 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4171 return bpaddr;
4172
4173 /* We are setting a breakpoint in Thumb code that could potentially
4174 contain an IT block. The first step is to find how much Thumb
4175 code there is; we do not need to read outside of known Thumb
4176 sequences. */
4177 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4178 if (map_type == 0)
4179 /* Thumb-2 code must have mapping symbols to have a chance. */
4180 return bpaddr;
4181
4182 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4183
4184 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4185 && func_start > boundary)
4186 boundary = func_start;
4187
4188 /* Search for a candidate IT instruction. We have to do some fancy
4189 footwork to distinguish a real IT instruction from the second
4190 half of a 32-bit instruction, but there is no need for that if
4191 there's no candidate. */
4192 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
4193 if (buf_len == 0)
4194 /* No room for an IT instruction. */
4195 return bpaddr;
4196
4197 buf = xmalloc (buf_len);
4198 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
4199 return bpaddr;
4200 any = 0;
4201 for (i = 0; i < buf_len; i += 2)
4202 {
4203 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4204 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4205 {
4206 any = 1;
4207 break;
4208 }
4209 }
4210 if (any == 0)
4211 {
4212 xfree (buf);
4213 return bpaddr;
4214 }
4215
4216 /* OK, the code bytes before this instruction contain at least one
4217 halfword which resembles an IT instruction. We know that it's
4218 Thumb code, but there are still two possibilities. Either the
4219 halfword really is an IT instruction, or it is the second half of
4220 a 32-bit Thumb instruction. The only way we can tell is to
4221 scan forwards from a known instruction boundary. */
4222 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4223 {
4224 int definite;
4225
4226 /* There's a lot of code before this instruction. Start with an
4227 optimistic search; it's easy to recognize halfwords that can
4228 not be the start of a 32-bit instruction, and use that to
4229 lock on to the instruction boundaries. */
4230 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4231 if (buf == NULL)
4232 return bpaddr;
4233 buf_len = IT_SCAN_THRESHOLD;
4234
4235 definite = 0;
4236 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4237 {
4238 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4239 if (thumb_insn_size (inst1) == 2)
4240 {
4241 definite = 1;
4242 break;
4243 }
4244 }
4245
4246 /* At this point, if DEFINITE, BUF[I] is the first place we
4247 are sure that we know the instruction boundaries, and it is far
4248 enough from BPADDR that we could not miss an IT instruction
4249 affecting BPADDR. If ! DEFINITE, give up - start from a
4250 known boundary. */
4251 if (! definite)
4252 {
4253 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
4254 bpaddr - boundary);
4255 if (buf == NULL)
4256 return bpaddr;
4257 buf_len = bpaddr - boundary;
4258 i = 0;
4259 }
4260 }
4261 else
4262 {
4263 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4264 if (buf == NULL)
4265 return bpaddr;
4266 buf_len = bpaddr - boundary;
4267 i = 0;
4268 }
4269
4270 /* Scan forwards. Find the last IT instruction before BPADDR. */
4271 last_it = -1;
4272 last_it_count = 0;
4273 while (i < buf_len)
4274 {
4275 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4276 last_it_count--;
4277 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4278 {
4279 last_it = i;
4280 if (inst1 & 0x0001)
4281 last_it_count = 4;
4282 else if (inst1 & 0x0002)
4283 last_it_count = 3;
4284 else if (inst1 & 0x0004)
4285 last_it_count = 2;
4286 else
4287 last_it_count = 1;
4288 }
4289 i += thumb_insn_size (inst1);
4290 }
4291
4292 xfree (buf);
4293
4294 if (last_it == -1)
4295 /* There wasn't really an IT instruction after all. */
4296 return bpaddr;
4297
4298 if (last_it_count < 1)
4299 /* It was too far away. */
4300 return bpaddr;
4301
4302 /* This really is a trouble spot. Move the breakpoint to the IT
4303 instruction. */
4304 return bpaddr - buf_len + last_it;
4305 }
4306
4307 /* ARM displaced stepping support.
4308
4309 Generally ARM displaced stepping works as follows:
4310
4311 1. When an instruction is to be single-stepped, it is first decoded by
4312 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
4313 Depending on the type of instruction, it is then copied to a scratch
4314 location, possibly in a modified form. The copy_* set of functions
4315 performs such modification, as necessary. A breakpoint is placed after
4316 the modified instruction in the scratch space to return control to GDB.
4317 Note in particular that instructions which modify the PC will no longer
4318 do so after modification.
4319
4320 2. The instruction is single-stepped, by setting the PC to the scratch
4321 location address, and resuming. Control returns to GDB when the
4322 breakpoint is hit.
4323
4324 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4325 function used for the current instruction. This function's job is to
4326 put the CPU/memory state back to what it would have been if the
4327 instruction had been executed unmodified in its original location. */
4328
4329 /* NOP instruction (mov r0, r0). */
4330 #define ARM_NOP 0xe1a00000
4331
4332 /* Helper for register reads for displaced stepping. In particular, this
4333 returns the PC as it would be seen by the instruction at its original
4334 location. */
4335
4336 ULONGEST
4337 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
4338 {
4339 ULONGEST ret;
4340
4341 if (regno == 15)
4342 {
4343 if (debug_displaced)
4344 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
4345 (unsigned long) from + 8);
4346 return (ULONGEST) from + 8; /* Pipeline offset. */
4347 }
4348 else
4349 {
4350 regcache_cooked_read_unsigned (regs, regno, &ret);
4351 if (debug_displaced)
4352 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
4353 regno, (unsigned long) ret);
4354 return ret;
4355 }
4356 }
4357
4358 static int
4359 displaced_in_arm_mode (struct regcache *regs)
4360 {
4361 ULONGEST ps;
4362 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4363
4364 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4365
4366 return (ps & t_bit) == 0;
4367 }
4368
4369 /* Write to the PC as from a branch instruction. */
4370
4371 static void
4372 branch_write_pc (struct regcache *regs, ULONGEST val)
4373 {
4374 if (displaced_in_arm_mode (regs))
4375 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4376 architecture versions < 6. */
4377 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4378 val & ~(ULONGEST) 0x3);
4379 else
4380 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4381 val & ~(ULONGEST) 0x1);
4382 }
4383
4384 /* Write to the PC as from a branch-exchange instruction. */
4385
4386 static void
4387 bx_write_pc (struct regcache *regs, ULONGEST val)
4388 {
4389 ULONGEST ps;
4390 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4391
4392 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4393
4394 if ((val & 1) == 1)
4395 {
4396 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4397 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4398 }
4399 else if ((val & 2) == 0)
4400 {
4401 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4402 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4403 }
4404 else
4405 {
4406 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4407 mode, align dest to 4 bytes). */
4408 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4409 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4410 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4411 }
4412 }
4413
4414 /* Write to the PC as if from a load instruction. */
4415
4416 static void
4417 load_write_pc (struct regcache *regs, ULONGEST val)
4418 {
4419 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4420 bx_write_pc (regs, val);
4421 else
4422 branch_write_pc (regs, val);
4423 }
4424
4425 /* Write to the PC as if from an ALU instruction. */
4426
4427 static void
4428 alu_write_pc (struct regcache *regs, ULONGEST val)
4429 {
4430 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
4431 bx_write_pc (regs, val);
4432 else
4433 branch_write_pc (regs, val);
4434 }
4435
4436 /* Helper for writing to registers for displaced stepping. Writing to the PC
4437 has a varying effects depending on the instruction which does the write:
4438 this is controlled by the WRITE_PC argument. */
4439
4440 void
4441 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
4442 int regno, ULONGEST val, enum pc_write_style write_pc)
4443 {
4444 if (regno == 15)
4445 {
4446 if (debug_displaced)
4447 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
4448 (unsigned long) val);
4449 switch (write_pc)
4450 {
4451 case BRANCH_WRITE_PC:
4452 branch_write_pc (regs, val);
4453 break;
4454
4455 case BX_WRITE_PC:
4456 bx_write_pc (regs, val);
4457 break;
4458
4459 case LOAD_WRITE_PC:
4460 load_write_pc (regs, val);
4461 break;
4462
4463 case ALU_WRITE_PC:
4464 alu_write_pc (regs, val);
4465 break;
4466
4467 case CANNOT_WRITE_PC:
4468 warning (_("Instruction wrote to PC in an unexpected way when "
4469 "single-stepping"));
4470 break;
4471
4472 default:
4473 internal_error (__FILE__, __LINE__,
4474 _("Invalid argument to displaced_write_reg"));
4475 }
4476
4477 dsc->wrote_to_pc = 1;
4478 }
4479 else
4480 {
4481 if (debug_displaced)
4482 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
4483 regno, (unsigned long) val);
4484 regcache_cooked_write_unsigned (regs, regno, val);
4485 }
4486 }
4487
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function
   returns return 1 if any of these fields in INSN reference the PC
   (also 0b1111, r15), else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  while (bitmask != 0)
    {
      uint32_t field;

      /* Advance LOWBIT to the least significant bit still in BITMASK.  */
      while (lowbit != 0 && (bitmask & lowbit) == 0)
	lowbit <<= 1;

      if (lowbit == 0)
	break;

      /* FIELD covers the four-bit register field starting at LOWBIT.  */
      field = lowbit * 0xf;

      /* All four bits set in that field means the field names r15.  */
      if ((insn & field) == field)
	return 1;

      bitmask &= ~field;
    }

  return 0;
}
4519
4520 /* The simplest copy function. Many instructions have the same effect no
4521 matter what address they are executed at: in those cases, use this. */
4522
4523 static int
4524 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4525 const char *iname, struct displaced_step_closure *dsc)
4526 {
4527 if (debug_displaced)
4528 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
4529 "opcode/class '%s' unmodified\n", (unsigned long) insn,
4530 iname);
4531
4532 dsc->modinsn[0] = insn;
4533
4534 return 0;
4535 }
4536
4537 /* Preload instructions with immediate offset. */
4538
4539 static void
4540 cleanup_preload (struct gdbarch *gdbarch,
4541 struct regcache *regs, struct displaced_step_closure *dsc)
4542 {
4543 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4544 if (!dsc->u.preload.immed)
4545 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4546 }
4547
4548 static int
4549 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4550 struct displaced_step_closure *dsc)
4551 {
4552 unsigned int rn = bits (insn, 16, 19);
4553 ULONGEST rn_val;
4554 CORE_ADDR from = dsc->insn_addr;
4555
4556 if (!insn_references_pc (insn, 0x000f0000ul))
4557 return copy_unmodified (gdbarch, insn, "preload", dsc);
4558
4559 if (debug_displaced)
4560 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4561 (unsigned long) insn);
4562
4563 /* Preload instructions:
4564
4565 {pli/pld} [rn, #+/-imm]
4566 ->
4567 {pli/pld} [r0, #+/-imm]. */
4568
4569 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4570 rn_val = displaced_read_reg (regs, from, rn);
4571 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4572
4573 dsc->u.preload.immed = 1;
4574
4575 dsc->modinsn[0] = insn & 0xfff0ffff;
4576
4577 dsc->cleanup = &cleanup_preload;
4578
4579 return 0;
4580 }
4581
4582 /* Preload instructions with register offset. */
4583
4584 static int
4585 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
4586 struct regcache *regs,
4587 struct displaced_step_closure *dsc)
4588 {
4589 unsigned int rn = bits (insn, 16, 19);
4590 unsigned int rm = bits (insn, 0, 3);
4591 ULONGEST rn_val, rm_val;
4592 CORE_ADDR from = dsc->insn_addr;
4593
4594 if (!insn_references_pc (insn, 0x000f000ful))
4595 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
4596
4597 if (debug_displaced)
4598 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4599 (unsigned long) insn);
4600
4601 /* Preload register-offset instructions:
4602
4603 {pli/pld} [rn, rm {, shift}]
4604 ->
4605 {pli/pld} [r0, r1 {, shift}]. */
4606
4607 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4608 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4609 rn_val = displaced_read_reg (regs, from, rn);
4610 rm_val = displaced_read_reg (regs, from, rm);
4611 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4612 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4613
4614 dsc->u.preload.immed = 0;
4615
4616 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4617
4618 dsc->cleanup = &cleanup_preload;
4619
4620 return 0;
4621 }
4622
4623 /* Copy/cleanup coprocessor load and store instructions. */
4624
4625 static void
4626 cleanup_copro_load_store (struct gdbarch *gdbarch,
4627 struct regcache *regs,
4628 struct displaced_step_closure *dsc)
4629 {
4630 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4631
4632 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4633
4634 if (dsc->u.ldst.writeback)
4635 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4636 }
4637
4638 static int
4639 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4640 struct regcache *regs,
4641 struct displaced_step_closure *dsc)
4642 {
4643 unsigned int rn = bits (insn, 16, 19);
4644 ULONGEST rn_val;
4645 CORE_ADDR from = dsc->insn_addr;
4646
4647 if (!insn_references_pc (insn, 0x000f0000ul))
4648 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4649
4650 if (debug_displaced)
4651 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4652 "load/store insn %.8lx\n", (unsigned long) insn);
4653
4654 /* Coprocessor load/store instructions:
4655
4656 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4657 ->
4658 {stc/stc2} [r0, #+/-imm].
4659
4660 ldc/ldc2 are handled identically. */
4661
4662 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4663 rn_val = displaced_read_reg (regs, from, rn);
4664 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4665
4666 dsc->u.ldst.writeback = bit (insn, 25);
4667 dsc->u.ldst.rn = rn;
4668
4669 dsc->modinsn[0] = insn & 0xfff0ffff;
4670
4671 dsc->cleanup = &cleanup_copro_load_store;
4672
4673 return 0;
4674 }
4675
4676 /* Clean up branch instructions (actually perform the branch, by setting
4677 PC). */
4678
4679 static void
4680 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4681 struct displaced_step_closure *dsc)
4682 {
4683 ULONGEST from = dsc->insn_addr;
4684 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4685 int branch_taken = condition_true (dsc->u.branch.cond, status);
4686 enum pc_write_style write_pc = dsc->u.branch.exchange
4687 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4688
4689 if (!branch_taken)
4690 return;
4691
4692 if (dsc->u.branch.link)
4693 {
4694 ULONGEST pc = displaced_read_reg (regs, from, 15);
4695 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
4696 }
4697
4698 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
4699 }
4700
4701 /* Copy B/BL/BLX instructions with immediate destinations. */
4702
4703 static int
4704 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
4705 struct regcache *regs, struct displaced_step_closure *dsc)
4706 {
4707 unsigned int cond = bits (insn, 28, 31);
4708 int exchange = (cond == 0xf);
4709 int link = exchange || bit (insn, 24);
4710 CORE_ADDR from = dsc->insn_addr;
4711 long offset;
4712
4713 if (debug_displaced)
4714 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
4715 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
4716 (unsigned long) insn);
4717
4718 /* Implement "BL<cond> <label>" as:
4719
4720 Preparation: cond <- instruction condition
4721 Insn: mov r0, r0 (nop)
4722 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4723
4724 B<cond> similar, but don't set r14 in cleanup. */
4725
4726 if (exchange)
4727 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4728 then arrange the switch into Thumb mode. */
4729 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
4730 else
4731 offset = bits (insn, 0, 23) << 2;
4732
4733 if (bit (offset, 25))
4734 offset = offset | ~0x3ffffff;
4735
4736 dsc->u.branch.cond = cond;
4737 dsc->u.branch.link = link;
4738 dsc->u.branch.exchange = exchange;
4739 dsc->u.branch.dest = from + 8 + offset;
4740
4741 dsc->modinsn[0] = ARM_NOP;
4742
4743 dsc->cleanup = &cleanup_branch;
4744
4745 return 0;
4746 }
4747
4748 /* Copy BX/BLX with register-specified destinations. */
4749
4750 static int
4751 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
4752 struct regcache *regs, struct displaced_step_closure *dsc)
4753 {
4754 unsigned int cond = bits (insn, 28, 31);
4755 /* BX: x12xxx1x
4756 BLX: x12xxx3x. */
4757 int link = bit (insn, 5);
4758 unsigned int rm = bits (insn, 0, 3);
4759 CORE_ADDR from = dsc->insn_addr;
4760
4761 if (debug_displaced)
4762 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
4763 "%.8lx\n", (link) ? "blx" : "bx",
4764 (unsigned long) insn);
4765
4766 /* Implement {BX,BLX}<cond> <reg>" as:
4767
4768 Preparation: cond <- instruction condition
4769 Insn: mov r0, r0 (nop)
4770 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
4771
4772 Don't set r14 in cleanup for BX. */
4773
4774 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
4775
4776 dsc->u.branch.cond = cond;
4777 dsc->u.branch.link = link;
4778 dsc->u.branch.exchange = 1;
4779
4780 dsc->modinsn[0] = ARM_NOP;
4781
4782 dsc->cleanup = &cleanup_branch;
4783
4784 return 0;
4785 }
4786
4787 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
4788
4789 static void
4790 cleanup_alu_imm (struct gdbarch *gdbarch,
4791 struct regcache *regs, struct displaced_step_closure *dsc)
4792 {
4793 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4794 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4795 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4796 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4797 }
4798
4799 static int
4800 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4801 struct displaced_step_closure *dsc)
4802 {
4803 unsigned int rn = bits (insn, 16, 19);
4804 unsigned int rd = bits (insn, 12, 15);
4805 unsigned int op = bits (insn, 21, 24);
4806 int is_mov = (op == 0xd);
4807 ULONGEST rd_val, rn_val;
4808 CORE_ADDR from = dsc->insn_addr;
4809
4810 if (!insn_references_pc (insn, 0x000ff000ul))
4811 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
4812
4813 if (debug_displaced)
4814 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
4815 "%.8lx\n", is_mov ? "move" : "ALU",
4816 (unsigned long) insn);
4817
4818 /* Instruction is of form:
4819
4820 <op><cond> rd, [rn,] #imm
4821
4822 Rewrite as:
4823
4824 Preparation: tmp1, tmp2 <- r0, r1;
4825 r0, r1 <- rd, rn
4826 Insn: <op><cond> r0, r1, #imm
4827 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4828 */
4829
4830 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4831 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4832 rn_val = displaced_read_reg (regs, from, rn);
4833 rd_val = displaced_read_reg (regs, from, rd);
4834 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4835 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4836 dsc->rd = rd;
4837
4838 if (is_mov)
4839 dsc->modinsn[0] = insn & 0xfff00fff;
4840 else
4841 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4842
4843 dsc->cleanup = &cleanup_alu_imm;
4844
4845 return 0;
4846 }
4847
4848 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4849
4850 static void
4851 cleanup_alu_reg (struct gdbarch *gdbarch,
4852 struct regcache *regs, struct displaced_step_closure *dsc)
4853 {
4854 ULONGEST rd_val;
4855 int i;
4856
4857 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4858
4859 for (i = 0; i < 3; i++)
4860 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4861
4862 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4863 }
4864
4865 static int
4866 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4867 struct displaced_step_closure *dsc)
4868 {
4869 unsigned int rn = bits (insn, 16, 19);
4870 unsigned int rm = bits (insn, 0, 3);
4871 unsigned int rd = bits (insn, 12, 15);
4872 unsigned int op = bits (insn, 21, 24);
4873 int is_mov = (op == 0xd);
4874 ULONGEST rd_val, rn_val, rm_val;
4875 CORE_ADDR from = dsc->insn_addr;
4876
4877 if (!insn_references_pc (insn, 0x000ff00ful))
4878 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4879
4880 if (debug_displaced)
4881 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4882 is_mov ? "move" : "ALU", (unsigned long) insn);
4883
4884 /* Instruction is of form:
4885
4886 <op><cond> rd, [rn,] rm [, <shift>]
4887
4888 Rewrite as:
4889
4890 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4891 r0, r1, r2 <- rd, rn, rm
4892 Insn: <op><cond> r0, r1, r2 [, <shift>]
4893 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4894 */
4895
4896 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4897 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4898 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4899 rd_val = displaced_read_reg (regs, from, rd);
4900 rn_val = displaced_read_reg (regs, from, rn);
4901 rm_val = displaced_read_reg (regs, from, rm);
4902 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4903 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4904 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4905 dsc->rd = rd;
4906
4907 if (is_mov)
4908 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4909 else
4910 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4911
4912 dsc->cleanup = &cleanup_alu_reg;
4913
4914 return 0;
4915 }
4916
4917 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4918
4919 static void
4920 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
4921 struct regcache *regs,
4922 struct displaced_step_closure *dsc)
4923 {
4924 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4925 int i;
4926
4927 for (i = 0; i < 4; i++)
4928 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4929
4930 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4931 }
4932
4933 static int
4934 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4935 struct regcache *regs,
4936 struct displaced_step_closure *dsc)
4937 {
4938 unsigned int rn = bits (insn, 16, 19);
4939 unsigned int rm = bits (insn, 0, 3);
4940 unsigned int rd = bits (insn, 12, 15);
4941 unsigned int rs = bits (insn, 8, 11);
4942 unsigned int op = bits (insn, 21, 24);
4943 int is_mov = (op == 0xd), i;
4944 ULONGEST rd_val, rn_val, rm_val, rs_val;
4945 CORE_ADDR from = dsc->insn_addr;
4946
4947 if (!insn_references_pc (insn, 0x000fff0ful))
4948 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4949
4950 if (debug_displaced)
4951 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4952 "%.8lx\n", is_mov ? "move" : "ALU",
4953 (unsigned long) insn);
4954
4955 /* Instruction is of form:
4956
4957 <op><cond> rd, [rn,] rm, <shift> rs
4958
4959 Rewrite as:
4960
4961 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4962 r0, r1, r2, r3 <- rd, rn, rm, rs
4963 Insn: <op><cond> r0, r1, r2, <shift> r3
4964 Cleanup: tmp5 <- r0
4965 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4966 rd <- tmp5
4967 */
4968
4969 for (i = 0; i < 4; i++)
4970 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4971
4972 rd_val = displaced_read_reg (regs, from, rd);
4973 rn_val = displaced_read_reg (regs, from, rn);
4974 rm_val = displaced_read_reg (regs, from, rm);
4975 rs_val = displaced_read_reg (regs, from, rs);
4976 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4977 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4978 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4979 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4980 dsc->rd = rd;
4981
4982 if (is_mov)
4983 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4984 else
4985 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4986
4987 dsc->cleanup = &cleanup_alu_shifted_reg;
4988
4989 return 0;
4990 }
4991
4992 /* Clean up load instructions. */
4993
4994 static void
4995 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
4996 struct displaced_step_closure *dsc)
4997 {
4998 ULONGEST rt_val, rt_val2 = 0, rn_val;
4999 CORE_ADDR from = dsc->insn_addr;
5000
5001 rt_val = displaced_read_reg (regs, from, 0);
5002 if (dsc->u.ldst.xfersize == 8)
5003 rt_val2 = displaced_read_reg (regs, from, 1);
5004 rn_val = displaced_read_reg (regs, from, 2);
5005
5006 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5007 if (dsc->u.ldst.xfersize > 4)
5008 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5009 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5010 if (!dsc->u.ldst.immed)
5011 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5012
5013 /* Handle register writeback. */
5014 if (dsc->u.ldst.writeback)
5015 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5016 /* Put result in right place. */
5017 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
5018 if (dsc->u.ldst.xfersize == 8)
5019 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
5020 }
5021
5022 /* Clean up store instructions. */
5023
5024 static void
5025 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5026 struct displaced_step_closure *dsc)
5027 {
5028 CORE_ADDR from = dsc->insn_addr;
5029 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
5030
5031 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5032 if (dsc->u.ldst.xfersize > 4)
5033 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5034 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5035 if (!dsc->u.ldst.immed)
5036 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5037 if (!dsc->u.ldst.restore_r4)
5038 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5039
5040 /* Writeback. */
5041 if (dsc->u.ldst.writeback)
5042 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5043 }
5044
/* Copy "extra" load/store instructions.  These are halfword/doubleword
   transfers, which have a different encoding to byte/word transfers.
   UNPRIVELEGED selects the user-mode ("t"-suffixed) forms; the register
   substitution performed is the same either way.  */

static int
copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
		  struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 24);
  unsigned int op2 = bits (insn, 5, 6);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  /* Indexed by the OPCODE computed below: whether the operation is a
     load, and how many bytes it transfers (8 means a doubleword using
     the register pair rt/rt+1).  */
  char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
  char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
  /* Bit 2 of op1 (insn bit 22) selects immediate vs. register offset.  */
  int immed = (op1 & 0x4) != 0;
  int opcode;
  ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
  CORE_ADDR from = dsc->insn_addr;

  /* Nothing to rewrite unless Rt or Rm is the PC (Rn is covered by the
     same mask via bits 16-19).  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return copy_unmodified (gdbarch, insn, "extra load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
			"insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
			(unsigned long) insn);

  /* Collapse op2 and two bits of op1 into an index into the LOAD /
     BYTESIZE tables.  A negative result means an encoding this decoder
     was not expected to see.  */
  opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;

  if (opcode < 0)
    internal_error (__FILE__, __LINE__,
		    _("copy_extra_ld_st: instruction decode error"));

  /* Save the scratch registers the rewritten instruction will clobber:
     r0/r1 for the transfer register(s), r2 for the base, and r3 for the
     register offset when one is used.  */
  dsc->tmp[0] = displaced_read_reg (regs, from, 0);
  dsc->tmp[1] = displaced_read_reg (regs, from, 1);
  dsc->tmp[2] = displaced_read_reg (regs, from, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, from, 3);

  /* Load the scratch registers with the values the original registers
     would have had at the instruction's original location.  */
  rt_val = displaced_read_reg (regs, from, rt);
  if (bytesize[opcode] == 8)
    rt_val2 = displaced_read_reg (regs, from, rt + 1);
  rn_val = displaced_read_reg (regs, from, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, from, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  if (bytesize[opcode] == 8)
    displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  /* Record what the cleanup function needs to undo the substitution.  */
  dsc->rd = rt;
  dsc->u.ldst.xfersize = bytesize[opcode];
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  /* Post-indexed (P clear) or W set both imply base writeback.  */
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
  else
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;

  dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;

  return 0;
}
5120
/* Copy byte/word loads and stores: ldr/ldrb/str/strb and, when USERMODE
   is set, their unprivileged "t" variants.  LOAD selects load vs. store,
   BYTE selects byte vs. word transfer.  Operands referencing the PC are
   rewritten to use scratch registers r0/r2/r3 (and r4 for the
   store-of-PC sequence); the cleanup routine undoes the substitution.  */

static int
copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			struct regcache *regs,
			struct displaced_step_closure *dsc, int load, int byte,
			int usermode)
{
  /* Bit 25 clear selects the immediate-offset addressing form.  */
  int immed = !bit (insn, 25);
  unsigned int rt = bits (insn, 12, 15);	/* Transfer register.  */
  unsigned int rn = bits (insn, 16, 19);	/* Base register.  */
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */
  ULONGEST rt_val, rn_val, rm_val = 0;
  CORE_ADDR from = dsc->insn_addr;

  /* If none of rt, rn, rm is the PC, the instruction can be executed
     out of line unmodified.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
			load ? (byte ? "ldrb" : "ldr")
			     : (byte ? "strb" : "str"), usermode ? "t" : "",
			(unsigned long) insn);

  /* Save the scratch registers we are about to clobber so the cleanup
     routine can restore them.  r4 is only clobbered by the store-of-PC
     sequence below, hence only saved for stores.  */
  dsc->tmp[0] = displaced_read_reg (regs, from, 0);
  dsc->tmp[2] = displaced_read_reg (regs, from, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, from, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, from, 4);

  /* Read the operand values and install them in the substitute
     registers: r0 <- rt, r2 <- rn, r3 <- rm (register-offset form).  */
  rt_val = displaced_read_reg (regs, from, rt);
  rn_val = displaced_read_reg (regs, from, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, from, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  /* Record what the cleanup routine needs to restore state and emulate
     writeback.  */
  dsc->rd = rt;
  dsc->u.ldst.xfersize = byte ? 1 : 4;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  /* Writeback occurs for post-indexed (P clear) or pre-indexed with W
     set addressing.  */
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;

  /* To write PC we can do:

     scratch+0:  str pc, temp  (*temp = scratch + 8 + offset)
     scratch+4:  ldr r4, temp
     scratch+8:  sub r4, r4, pc  (r4 = scratch + 8 + offset - scratch - 8 - 8)
     scratch+12: add r4, r4, #8  (r4 = offset)
     scratch+16: add r0, r0, r4
     scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
     scratch+24: <temp>

     NOTE(review): the str at scratch+0 targets PC + 20 with the PC
     reading as scratch + 8, so <temp> appears to land at scratch+28
     (the modinsn[7] slot), with the breakpoint at scratch+24 — confirm
     against arm_displaced_init_closure's layout.

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  */

  if (load || rt != 15)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;

      dsc->modinsn[0] = 0xe58ff014;  /* str pc, [pc, #20].  */
      dsc->modinsn[1] = 0xe59f4010;  /* ldr r4, [pc, #16].  */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->modinsn[6] = 0x0;  /* breakpoint location.  */
      dsc->modinsn[7] = 0x0;  /* scratch space.  */

      /* Only the six real instructions are counted; the breakpoint is
	 written right after them by arm_displaced_init_closure.  */
      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
5223
5224 /* Cleanup LDM instructions with fully-populated register list. This is an
5225 unfortunate corner case: it's impossible to implement correctly by modifying
5226 the instruction. The issue is as follows: we have an instruction,
5227
5228 ldm rN, {r0-r15}
5229
5230 which we must rewrite to avoid loading PC. A possible solution would be to
5231 do the load in two halves, something like (with suitable cleanup
5232 afterwards):
5233
5234 mov r8, rN
5235 ldm[id][ab] r8!, {r0-r7}
5236 str r7, <temp>
5237 ldm[id][ab] r8, {r7-r14}
5238 <bkpt>
5239
5240 but at present there's no suitable place for <temp>, since the scratch space
5241 is overwritten before the cleanup routine is called. For now, we simply
5242 emulate the instruction. */
5243
static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  ULONGEST from = dsc->insn_addr;
  int inc = dsc->u.block.increment;
  /* "Before" addressing bumps the transfer address before each word,
     otherwise after it; the direction of the bump follows the increment
     flag.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Registers transfer in ascending memory order: start with the lowest
     register when incrementing, the highest when decrementing.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* A failed condition code makes the instruction a no-op.  */
  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  /* Emulate the load: for each register named in the mask, in transfer
     order, read the next memory word into that register.  */
  while (regmask)
    {
      uint32_t memword;

      /* Advance REGNO to the next register present in the mask.  */
      if (inc)
	while (regno <= 15 && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base-register writeback by hand.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
5303
/* Clean up an STM which included the PC in the register list.  The STM
   was run out of line verbatim, so it stored the *displaced* PC value;
   locate that stored word, deduce the architecture-dependent stored-PC
   offset from it, and rewrite the word to the value the original
   (non-displaced) instruction would have stored.  */

static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  ULONGEST from = dsc->insn_addr;
  uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* PC is the highest-numbered register, so it occupies the highest
     address of the transferred block.  NOTE(review): for the increment
     case this computes xfer_addr + 4 * n (n = total registers in the
     list, PC included), but per the ARM ARM an STMIA stores its last
     register at xfer_addr + 4 * (n - 1) — this looks one word too high;
     verify against a real stmia {..., pc} before relying on it.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	pc_stored_at -= 4;
    }

  /* The stored value is the scratch-copy address plus the CPU's
     stored-PC offset; the same offset applies to the original insn.  */
  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  offset = pc_val - stm_insn_addr;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
			"STM instruction\n", offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
5351
5352 /* Clean up an LDM which includes the PC in the register list. We clumped all
5353 the registers in the transferred list into a contiguous range r0...rX (to
5354 avoid loading PC directly and losing control of the debugged program), so we
5355 must undo that here. */
5356
5357 static void
5358 cleanup_block_load_pc (struct gdbarch *gdbarch,
5359 struct regcache *regs,
5360 struct displaced_step_closure *dsc)
5361 {
5362 ULONGEST from = dsc->insn_addr;
5363 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5364 int load_executed = condition_true (dsc->u.block.cond, status), i;
5365 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
5366 unsigned int regs_loaded = bitcount (mask);
5367 unsigned int num_to_shuffle = regs_loaded, clobbered;
5368
5369 /* The method employed here will fail if the register list is fully populated
5370 (we need to avoid loading PC directly). */
5371 gdb_assert (num_to_shuffle < 16);
5372
5373 if (!load_executed)
5374 return;
5375
5376 clobbered = (1 << num_to_shuffle) - 1;
5377
5378 while (num_to_shuffle > 0)
5379 {
5380 if ((mask & (1 << write_reg)) != 0)
5381 {
5382 unsigned int read_reg = num_to_shuffle - 1;
5383
5384 if (read_reg != write_reg)
5385 {
5386 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
5387 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
5388 if (debug_displaced)
5389 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
5390 "loaded register r%d to r%d\n"), read_reg,
5391 write_reg);
5392 }
5393 else if (debug_displaced)
5394 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
5395 "r%d already in the right place\n"),
5396 write_reg);
5397
5398 clobbered &= ~(1 << write_reg);
5399
5400 num_to_shuffle--;
5401 }
5402
5403 write_reg--;
5404 }
5405
5406 /* Restore any registers we scribbled over. */
5407 for (write_reg = 0; clobbered != 0; write_reg++)
5408 {
5409 if ((clobbered & (1 << write_reg)) != 0)
5410 {
5411 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
5412 CANNOT_WRITE_PC);
5413 if (debug_displaced)
5414 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
5415 "clobbered register r%d\n"), write_reg);
5416 clobbered &= ~(1 << write_reg);
5417 }
5418 }
5419
5420 /* Perform register writeback manually. */
5421 if (dsc->u.block.writeback)
5422 {
5423 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
5424
5425 if (dsc->u.block.increment)
5426 new_rn_val += regs_loaded * 4;
5427 else
5428 new_rn_val -= regs_loaded * 4;
5429
5430 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
5431 CANNOT_WRITE_PC);
5432 }
5433 }
5434
5435 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5436 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5437
5438 static int
5439 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5440 struct displaced_step_closure *dsc)
5441 {
5442 int load = bit (insn, 20);
5443 int user = bit (insn, 22);
5444 int increment = bit (insn, 23);
5445 int before = bit (insn, 24);
5446 int writeback = bit (insn, 21);
5447 int rn = bits (insn, 16, 19);
5448 CORE_ADDR from = dsc->insn_addr;
5449
5450 /* Block transfers which don't mention PC can be run directly
5451 out-of-line. */
5452 if (rn != 15 && (insn & 0x8000) == 0)
5453 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
5454
5455 if (rn == 15)
5456 {
5457 warning (_("displaced: Unpredictable LDM or STM with "
5458 "base register r15"));
5459 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
5460 }
5461
5462 if (debug_displaced)
5463 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
5464 "%.8lx\n", (unsigned long) insn);
5465
5466 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
5467 dsc->u.block.rn = rn;
5468
5469 dsc->u.block.load = load;
5470 dsc->u.block.user = user;
5471 dsc->u.block.increment = increment;
5472 dsc->u.block.before = before;
5473 dsc->u.block.writeback = writeback;
5474 dsc->u.block.cond = bits (insn, 28, 31);
5475
5476 dsc->u.block.regmask = insn & 0xffff;
5477
5478 if (load)
5479 {
5480 if ((insn & 0xffff) == 0xffff)
5481 {
5482 /* LDM with a fully-populated register list. This case is
5483 particularly tricky. Implement for now by fully emulating the
5484 instruction (which might not behave perfectly in all cases, but
5485 these instructions should be rare enough for that not to matter
5486 too much). */
5487 dsc->modinsn[0] = ARM_NOP;
5488
5489 dsc->cleanup = &cleanup_block_load_all;
5490 }
5491 else
5492 {
5493 /* LDM of a list of registers which includes PC. Implement by
5494 rewriting the list of registers to be transferred into a
5495 contiguous chunk r0...rX before doing the transfer, then shuffling
5496 registers into the correct places in the cleanup routine. */
5497 unsigned int regmask = insn & 0xffff;
5498 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
5499 unsigned int to = 0, from = 0, i, new_rn;
5500
5501 for (i = 0; i < num_in_list; i++)
5502 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5503
5504 /* Writeback makes things complicated. We need to avoid clobbering
5505 the base register with one of the registers in our modified
5506 register list, but just using a different register can't work in
5507 all cases, e.g.:
5508
5509 ldm r14!, {r0-r13,pc}
5510
5511 which would need to be rewritten as:
5512
5513 ldm rN!, {r0-r14}
5514
5515 but that can't work, because there's no free register for N.
5516
5517 Solve this by turning off the writeback bit, and emulating
5518 writeback manually in the cleanup routine. */
5519
5520 if (writeback)
5521 insn &= ~(1 << 21);
5522
5523 new_regmask = (1 << num_in_list) - 1;
5524
5525 if (debug_displaced)
5526 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
5527 "{..., pc}: original reg list %.4x, modified "
5528 "list %.4x\n"), rn, writeback ? "!" : "",
5529 (int) insn & 0xffff, new_regmask);
5530
5531 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
5532
5533 dsc->cleanup = &cleanup_block_load_pc;
5534 }
5535 }
5536 else
5537 {
5538 /* STM of a list of registers which includes PC. Run the instruction
5539 as-is, but out of line: this will store the wrong value for the PC,
5540 so we must manually fix up the memory in the cleanup routine.
5541 Doing things this way has the advantage that we can auto-detect
5542 the offset of the PC write (which is architecture-dependent) in
5543 the cleanup routine. */
5544 dsc->modinsn[0] = insn;
5545
5546 dsc->cleanup = &cleanup_block_store_pc;
5547 }
5548
5549 return 0;
5550 }
5551
5552 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
5553 for Linux, where some SVC instructions must be treated specially. */
5554
5555 static void
5556 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
5557 struct displaced_step_closure *dsc)
5558 {
5559 CORE_ADDR from = dsc->insn_addr;
5560 CORE_ADDR resume_addr = from + 4;
5561
5562 if (debug_displaced)
5563 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
5564 "%.8lx\n", (unsigned long) resume_addr);
5565
5566 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
5567 }
5568
5569 static int
5570 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5571 struct regcache *regs, struct displaced_step_closure *dsc)
5572 {
5573 CORE_ADDR from = dsc->insn_addr;
5574
5575 /* Allow OS-specific code to override SVC handling. */
5576 if (dsc->u.svc.copy_svc_os)
5577 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
5578
5579 if (debug_displaced)
5580 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
5581 (unsigned long) insn);
5582
5583 /* Preparation: none.
5584 Insn: unmodified svc.
5585 Cleanup: pc <- insn_addr + 4. */
5586
5587 dsc->modinsn[0] = insn;
5588
5589 dsc->cleanup = &cleanup_svc;
5590 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
5591 instruction. */
5592 dsc->wrote_to_pc = 1;
5593
5594 return 0;
5595 }
5596
5597 /* Copy undefined instructions. */
5598
5599 static int
5600 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
5601 struct displaced_step_closure *dsc)
5602 {
5603 if (debug_displaced)
5604 fprintf_unfiltered (gdb_stdlog,
5605 "displaced: copying undefined insn %.8lx\n",
5606 (unsigned long) insn);
5607
5608 dsc->modinsn[0] = insn;
5609
5610 return 0;
5611 }
5612
5613 /* Copy unpredictable instructions. */
5614
5615 static int
5616 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
5617 struct displaced_step_closure *dsc)
5618 {
5619 if (debug_displaced)
5620 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
5621 "%.8lx\n", (unsigned long) insn);
5622
5623 dsc->modinsn[0] = insn;
5624
5625 return 0;
5626 }
5627
5628 /* The decode_* functions are instruction decoding helpers. They mostly follow
5629 the presentation in the ARM ARM. */
5630
/* Decode the memory-hint / advanced-SIMD / miscellaneous instruction
   space: cps, setend, Neon data processing and element/structure
   loads/stores, preloads (pld/pldw/pli) and the clrex/dsb/dmb/isb
   group.  Follows the corresponding ARM ARM encoding table.  */

static int
decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
  else if ((op1 & 0x77) == 0x41)
    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw with the PC as base register is unpredictable.  */
      if (rn != 0xf)
	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return copy_unpred (gdbarch, insn, dsc);
      default:
	return copy_undef (gdbarch, insn, dsc);
      }
  else
    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
5689
/* Decode instructions in the "unconditional" (condition field 0xf)
   space: the misc/memhint/Neon group, srs/rfe, immediate b/bl/blx, and
   the coprocessor loads/stores and transfers (stc/ldc/mcrr/mrrc/
   mcr/mrc/cdp and their *2 forms).  */

static int
decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* The ldc/ldc2 immediate and literal forms are distinguished by
	   whether the base register is the PC.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? copy_undef (gdbarch, insn, dsc)
			: copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
			: copy_undef (gdbarch, insn, dsc);

	  default:
	    return copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}
5772
5773 /* Decode miscellaneous instructions in dp/misc encoding space. */
5774
5775 static int
5776 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
5777 struct regcache *regs,
5778 struct displaced_step_closure *dsc)
5779 {
5780 unsigned int op2 = bits (insn, 4, 6);
5781 unsigned int op = bits (insn, 21, 22);
5782 unsigned int op1 = bits (insn, 16, 19);
5783
5784 switch (op2)
5785 {
5786 case 0x0:
5787 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
5788
5789 case 0x1:
5790 if (op == 0x1) /* bx. */
5791 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
5792 else if (op == 0x3)
5793 return copy_unmodified (gdbarch, insn, "clz", dsc);
5794 else
5795 return copy_undef (gdbarch, insn, dsc);
5796
5797 case 0x2:
5798 if (op == 0x1)
5799 /* Not really supported. */
5800 return copy_unmodified (gdbarch, insn, "bxj", dsc);
5801 else
5802 return copy_undef (gdbarch, insn, dsc);
5803
5804 case 0x3:
5805 if (op == 0x1)
5806 return copy_bx_blx_reg (gdbarch, insn,
5807 regs, dsc); /* blx register. */
5808 else
5809 return copy_undef (gdbarch, insn, dsc);
5810
5811 case 0x5:
5812 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
5813
5814 case 0x7:
5815 if (op == 0x1)
5816 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
5817 else if (op == 0x3)
5818 /* Not really supported. */
5819 return copy_unmodified (gdbarch, insn, "smc", dsc);
5820
5821 default:
5822 return copy_undef (gdbarch, insn, dsc);
5823 }
5824 }
5825
/* Decode the data-processing / miscellaneous instruction space:
   immediate and register ALU operations, movw/movt/msr-immediate,
   multiplies, synchronization primitives, the miscellaneous group and
   the "extra" (halfword/doubleword) loads and stores.  */

static int
decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* The third argument to copy_extra_ld_st means "unprivileged".  */
	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				 dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
5870
5871 static int
5872 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5873 struct regcache *regs,
5874 struct displaced_step_closure *dsc)
5875 {
5876 int a = bit (insn, 25), b = bit (insn, 4);
5877 uint32_t op1 = bits (insn, 20, 24);
5878 int rn_f = bits (insn, 16, 19) == 0xf;
5879
5880 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5881 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5882 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5883 else if ((!a && (op1 & 0x17) == 0x02)
5884 || (a && (op1 & 0x17) == 0x02 && !b))
5885 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5886 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5887 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5888 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5889 else if ((!a && (op1 & 0x17) == 0x03)
5890 || (a && (op1 & 0x17) == 0x03 && !b))
5891 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5892 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5893 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5894 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5895 else if ((!a && (op1 & 0x17) == 0x06)
5896 || (a && (op1 & 0x17) == 0x06 && !b))
5897 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5898 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5899 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5900 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5901 else if ((!a && (op1 & 0x17) == 0x07)
5902 || (a && (op1 & 0x17) == 0x07 && !b))
5903 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5904
5905 /* Should be unreachable. */
5906 return 1;
5907 }
5908
/* Decode the media instruction space: parallel add/subtract,
   pack/unpack/saturate/reverse, usad8/usada8 and the bitfield
   operations (sbfx/bfc/bfi/ubfx).  Everything here is either run
   unmodified out of line or is an undefined encoding.  */

static int
decode_media (struct gdbarch *gdbarch, uint32_t insn,
	      struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  /* Bits 12-15 all-set selects usad8 over usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  /* Bits 0-3 all-set selects bfc over bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
5964
5965 static int
5966 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5967 struct regcache *regs, struct displaced_step_closure *dsc)
5968 {
5969 if (bit (insn, 25))
5970 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5971 else
5972 return copy_block_xfer (gdbarch, insn, regs, dsc);
5973 }
5974
/* Decode VFP/Neon extension-register load/store instructions
   (vstm/vpush, vldm/vpop, vstr/vldr) and the 64-bit register transfers
   (mrrc/mcrr), dispatching on bits 20-24.  */

static int
decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6005
6006 static int
6007 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
6008 struct regcache *regs, struct displaced_step_closure *dsc)
6009 {
6010 unsigned int op1 = bits (insn, 20, 25);
6011 int op = bit (insn, 4);
6012 unsigned int coproc = bits (insn, 8, 11);
6013 unsigned int rn = bits (insn, 16, 19);
6014
6015 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
6016 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
6017 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
6018 && (coproc & 0xe) != 0xa)
6019 /* stc/stc2. */
6020 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6021 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
6022 && (coproc & 0xe) != 0xa)
6023 /* ldc/ldc2 imm/lit. */
6024 return copy_copro_load_store (gdbarch, insn, regs, dsc);
6025 else if ((op1 & 0x3e) == 0x00)
6026 return copy_undef (gdbarch, insn, dsc);
6027 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
6028 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
6029 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
6030 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
6031 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
6032 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
6033 else if ((op1 & 0x30) == 0x20 && !op)
6034 {
6035 if ((coproc & 0xe) == 0xa)
6036 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
6037 else
6038 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
6039 }
6040 else if ((op1 & 0x30) == 0x20 && op)
6041 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
6042 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
6043 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
6044 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
6045 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
6046 else if ((op1 & 0x30) == 0x30)
6047 return copy_svc (gdbarch, insn, to, regs, dsc);
6048 else
6049 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
6050 }
6051
/* Decode the ARM instruction INSN (located at address FROM) and prepare
   closure DSC for stepping it out of line in the scratch space at TO.
   Dispatches on the major opcode fields to the decode_* helpers above,
   which fill in DSC's modified instruction(s), staged registers and
   cleanup callback.  Errors out if the inferior is not in ARM mode.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  int err = 0;

  if (!displaced_in_arm_mode (regs))
    error (_("Displaced stepping is only supported in ARM mode"));

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  if ((insn & 0xf0000000) == 0xf0000000)
    err = decode_unconditional (gdbarch, insn, regs, dsc);
  /* Dispatch value: insn bits 25-27 land in bits 1-3, insn bit 4 in
     bit 0.  */
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}
6100
6101 /* Actually set up the scratch space for a displaced instruction. */
6102
6103 void
6104 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
6105 CORE_ADDR to, struct displaced_step_closure *dsc)
6106 {
6107 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6108 unsigned int i;
6109 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6110
6111 /* Poke modified instruction(s). */
6112 for (i = 0; i < dsc->numinsns; i++)
6113 {
6114 if (debug_displaced)
6115 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
6116 "%.8lx\n", (unsigned long) dsc->modinsn[i],
6117 (unsigned long) to + i * 4);
6118 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
6119 dsc->modinsn[i]);
6120 }
6121
6122 /* Put breakpoint afterwards. */
6123 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
6124 tdep->arm_breakpoint_size);
6125
6126 if (debug_displaced)
6127 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
6128 paddress (gdbarch, from), paddress (gdbarch, to));
6129 }
6130
6131 /* Entry point for copying an instruction into scratch space for displaced
6132 stepping. */
6133
6134 struct displaced_step_closure *
6135 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
6136 CORE_ADDR from, CORE_ADDR to,
6137 struct regcache *regs)
6138 {
6139 struct displaced_step_closure *dsc
6140 = xmalloc (sizeof (struct displaced_step_closure));
6141 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6142 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
6143
6144 if (debug_displaced)
6145 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
6146 "at %.8lx\n", (unsigned long) insn,
6147 (unsigned long) from);
6148
6149 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
6150 arm_displaced_init_closure (gdbarch, from, to, dsc);
6151
6152 return dsc;
6153 }
6154
6155 /* Entry point for cleaning things up after a displaced instruction has been
6156 single-stepped. */
6157
6158 void
6159 arm_displaced_step_fixup (struct gdbarch *gdbarch,
6160 struct displaced_step_closure *dsc,
6161 CORE_ADDR from, CORE_ADDR to,
6162 struct regcache *regs)
6163 {
6164 if (dsc->cleanup)
6165 dsc->cleanup (gdbarch, regs, dsc);
6166
6167 if (!dsc->wrote_to_pc)
6168 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
6169 }
6170
6171 #include "bfd-in2.h"
6172 #include "libcoff.h"
6173
/* GDB's disassembler callback: disassemble one instruction at MEMADDR
   using INFO, returning the instruction's length in bytes.  When
   MEMADDR is a Thumb address, hand opcodes a fake COFF Thumb symbol so
   it selects the Thumb instruction decoder.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* These are static so the fake symbol is built only once; the
	 csym.native NULL test below detects the first call.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit so we disassemble at the real address.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
6215
6216 /* The following define instruction sequences that will cause ARM
6217 cpu's to take an undefined instruction trap. These are used to
6218 signal a breakpoint to GDB.
6219
6220 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
6221 modes. A different instruction is required for each mode. The ARM
6222 cpu's can also be big or little endian. Thus four different
6223 instructions are needed to support all cases.
6224
6225 Note: ARMv4 defines several new instructions that will take the
6226 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
6227 not in fact add the new instructions. The new undefined
6228 instructions in ARMv4 are all instructions that had no defined
6229 behaviour in earlier chips. There is no guarantee that they will
6230 raise an exception, but may be treated as NOP's. In practice, it
   may only be safe to rely on instructions matching:
6232
6233 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
6234 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
6235 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
6236
   Even this may only be true if the condition predicate is true. The
6238 following use a condition predicate of ALWAYS so it is always TRUE.
6239
6240 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
6241 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap. This can be handled by the
6243 abi-specific code during establishment of the gdbarch vector. */
6244
6245 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
6246 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
6247 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
6248 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
6249
6250 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
6251 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
6252 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
6253 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
6254
6255 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
6256 the program counter value to determine whether a 16-bit or 32-bit
6257 breakpoint should be used. It returns a pointer to a string of
6258 bytes that encode a breakpoint instruction, stores the length of
6259 the string to *lenptr, and adjusts the program counter (if
6260 necessary) to point to the actual memory location where the
6261 breakpoint should be inserted. */
6262
6263 static const unsigned char *
6264 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
6265 {
6266 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6267 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6268
6269 if (arm_pc_is_thumb (gdbarch, *pcptr))
6270 {
6271 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
6272
6273 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
6274 check whether we are replacing a 32-bit instruction. */
6275 if (tdep->thumb2_breakpoint != NULL)
6276 {
6277 gdb_byte buf[2];
6278 if (target_read_memory (*pcptr, buf, 2) == 0)
6279 {
6280 unsigned short inst1;
6281 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
6282 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
6283 {
6284 *lenptr = tdep->thumb2_breakpoint_size;
6285 return tdep->thumb2_breakpoint;
6286 }
6287 }
6288 }
6289
6290 *lenptr = tdep->thumb_breakpoint_size;
6291 return tdep->thumb_breakpoint;
6292 }
6293 else
6294 {
6295 *lenptr = tdep->arm_breakpoint_size;
6296 return tdep->arm_breakpoint;
6297 }
6298 }
6299
6300 static void
6301 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
6302 int *kindptr)
6303 {
6304 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6305
6306 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
6307
6308 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
6309 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
6310 that this is not confused with a 32-bit ARM breakpoint. */
6311 *kindptr = 3;
6312 }
6313
/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  Floating point values are fetched according
   to the configured FP model; integer-like and aggregate values come
   from consecutive core registers starting at r0.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float values are returned in core registers, starting
	     at r0; doubles also use r1.  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  /* The final register may contain fewer than
	     INT_REGISTER_SIZE significant bytes.  */
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
6405
6406
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the ARM ABI, "integer" like aggregate types are returned in
     registers.  For an aggregate type to be integer like, its size
     must be less than or equal to INT_REGISTER_SIZE and the
     offset of each addressable subfield must be zero.  Note that bit
     fields are not addressable, and all addressable subfields of
     unions always start at offset zero.

     This function is based on the behaviour of GCC 2.95.1.
     See: gcc/arm.c: arm_return_in_memory() for details.

     Note: All versions of GCC before GCC 2.95.2 do not set up the
     parameters correctly for a function returning the following
     structure: struct { float f;}; This should be returned in memory,
     not a register.  Richard Earnshaw sent me a patch, but I do not
     know of any way to detect if a function like the above has been
     compiled with the correct calling convention.  */

  /* All aggregate types that won't fit in a register must be returned
     in memory.  */
  if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
    {
      return 1;
    }

  /* The AAPCS says all aggregates not larger than a word are returned
     in a register.  */
  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    return 0;

  /* The remaining checks implement the older APCS rules only.  */

  /* The only aggregate types that can be returned in a register are
     structs and unions.  Arrays must be returned in memory.  */
  code = TYPE_CODE (type);
  if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
    {
      return 1;
    }

  /* Assume all other aggregate types can be returned in a register.
     Run a check for structures, unions and arrays.  */
  nRc = 0;

  if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
    {
      int i;
      /* Need to check if this struct/union is "integer" like.  For
	 this to be true, its size must be less than or equal to
	 INT_REGISTER_SIZE and the offset of each addressable
	 subfield must be zero.  Note that bit fields are not
	 addressable, and unions always start at offset zero.  If any
	 of the subfields is a floating point type, the struct/union
	 cannot be an integer type.  */

      /* For each field in the object, check:
	 1) Is it FP? --> yes, nRc = 1;
	 2) Is it addressable (bitpos != 0) and
	 not packed (bitsize == 0)?
	 --> yes, nRc = 1
       */

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  enum type_code field_type_code;
	  field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
								       i)));

	  /* Is it a floating point type field?  */
	  if (field_type_code == TYPE_CODE_FLT)
	    {
	      nRc = 1;
	      break;
	    }

	  /* If bitpos != 0, then we have to care about it.  */
	  if (TYPE_FIELD_BITPOS (type, i) != 0)
	    {
	      /* Bitfields are not addressable.  If the field bitsize is
		 zero, then the field is not packed.  Hence it cannot be
		 a bitfield or any other packed type.  */
	      if (TYPE_FIELD_BITSIZE (type, i) == 0)
		{
		  nRc = 1;
		  break;
		}
	    }
	}
    }

  return nRc;
}
6508
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  The inverse of
   arm_extract_return_value: floating point values go out according to
   the configured FP model, everything else through consecutive core
   registers starting at r0.  */

static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      char buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:

	  /* FPA returns floats in F0, in its extended internal
	     format.  */
	  convert_to_extended (floatformat_from_type (type), buf, valbuf,
			       gdbarch_byte_order (gdbarch));
	  regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
				   valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  /* Pad the final, partial word with whatever happens to be in
	     TMPBUF; only LEN bytes of it are significant.  */
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
6602
6603
/* Handle function return values.  Implement the gdbarch return_value
   hook: decide how a value of VALTYPE is returned by a function of
   type FUNC_TYPE, and optionally read (READBUF) or write (WRITEBUF)
   the value via REGCACHE.  VFP co-processor register candidates
   (CPRCs) are passed in d/s/q registers when the VFP ABI applies;
   large aggregates use the struct-return convention; everything else
   goes through the core registers.  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
		  struct type *valtype, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      /* The value occupies VFP_BASE_COUNT registers of the kind named
	 by REG_CHAR ('s', 'd' or 'q'), each UNIT_LENGTH bytes.  */
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
	{
	  if (reg_char == 'q')
	    {
	      /* Quad registers are accessed as pairs of doubles.  */
	      if (writebuf)
		arm_neon_quad_write (gdbarch, regcache, i,
				     writebuf + i * unit_length);

	      if (readbuf)
		arm_neon_quad_read (gdbarch, regcache, i,
				    readbuf + i * unit_length);
	    }
	  else
	    {
	      /* NAME_BUF holds at most "d31" plus the terminator.  */
	      char name_buf[4];
	      int regnum;

	      sprintf (name_buf, "%c%d", reg_char, i);
	      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						    strlen (name_buf));
	      if (writebuf)
		regcache_cooked_write (regcache, regnum,
				       writebuf + i * unit_length);
	      if (readbuf)
		regcache_cooked_read (regcache, regnum,
				      readbuf + i * unit_length);
	    }
	}
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
	  || arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }

  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
6669
6670
6671 static int
6672 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
6673 {
6674 struct gdbarch *gdbarch = get_frame_arch (frame);
6675 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6676 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6677 CORE_ADDR jb_addr;
6678 char buf[INT_REGISTER_SIZE];
6679
6680 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
6681
6682 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
6683 INT_REGISTER_SIZE))
6684 return 0;
6685
6686 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
6687 return 1;
6688 }
6689
/* Recognize GCC and GNU ld's trampolines.  If we are in a trampoline,
   return the target PC.  Otherwise return 0.  */

CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    return 0;

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (strncmp (name, "_call_via_", 10) == 0
      || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
    {
      /* Use the name suffix to determine which register contains the
	 target PC.  */
      static char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* All the names in TABLE are exactly two characters long, so the
	 suffix starts two characters from the end.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
	if (strcmp (&name[offset], table[regno]) == 0)
	  return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
	   && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
		       strlen ("_from_thumb")) == 0)
	  || (namelen > 2 + strlen ("_from_arm")
	      && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
			  strlen ("_from_arm")) == 0)))
    {
      char *target_name;
      /* Strip the leading "__"; the suffix is stripped below.  */
      int target_len = namelen - 2;
      struct minimal_symbol *minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Distinguish the two suffixes by their last character.  */
      if (name[namelen - 1] == 'b')
	target_len -= strlen ("_from_thumb");
      else
	target_len -= strlen ("_from_arm");

      /* Extract "foo" from "__foo_from_...".  */
      target_name = alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol in the same objfile as the stub itself.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym != NULL)
	return SYMBOL_VALUE_ADDRESS (minsym);
      else
	return 0;
    }

  return 0;			/* not a stub */
}
6764
6765 static void
6766 set_arm_command (char *args, int from_tty)
6767 {
6768 printf_unfiltered (_("\
6769 \"set arm\" must be followed by an apporpriate subcommand.\n"));
6770 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
6771 }
6772
/* Top-level "show arm" command: display the values of all "show arm"
   subcommands.  */

static void
show_arm_command (char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
6778
6779 static void
6780 arm_update_current_architecture (void)
6781 {
6782 struct gdbarch_info info;
6783
6784 /* If the current architecture is not ARM, we have nothing to do. */
6785 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
6786 return;
6787
6788 /* Update the architecture. */
6789 gdbarch_info_init (&info);
6790
6791 if (!gdbarch_update_p (info))
6792 internal_error (__FILE__, __LINE__, _("could not update architecture"));
6793 }
6794
6795 static void
6796 set_fp_model_sfunc (char *args, int from_tty,
6797 struct cmd_list_element *c)
6798 {
6799 enum arm_float_model fp_model;
6800
6801 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
6802 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
6803 {
6804 arm_fp_model = fp_model;
6805 break;
6806 }
6807
6808 if (fp_model == ARM_FLOAT_LAST)
6809 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
6810 current_fp_model);
6811
6812 arm_update_current_architecture ();
6813 }
6814
/* Handler for "show arm fp-model": when the setting is "auto" and the
   current architecture is ARM, also report the model actually in
   effect.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
6831
6832 static void
6833 arm_set_abi (char *args, int from_tty,
6834 struct cmd_list_element *c)
6835 {
6836 enum arm_abi_kind arm_abi;
6837
6838 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
6839 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
6840 {
6841 arm_abi_global = arm_abi;
6842 break;
6843 }
6844
6845 if (arm_abi == ARM_ABI_LAST)
6846 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6847 arm_abi_string);
6848
6849 arm_update_current_architecture ();
6850 }
6851
/* Handler for "show arm abi": when the setting is "auto" and the
   current architecture is ARM, also report the ABI actually in
   effect.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	      struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
6867
6868 static void
6869 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6870 struct cmd_list_element *c, const char *value)
6871 {
6872 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6873
6874 fprintf_filtered (file,
6875 _("The current execution mode assumed "
6876 "(when symbols are unavailable) is \"%s\".\n"),
6877 arm_fallback_mode_string);
6878 }
6879
6880 static void
6881 arm_show_force_mode (struct ui_file *file, int from_tty,
6882 struct cmd_list_element *c, const char *value)
6883 {
6884 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6885
6886 fprintf_filtered (file,
6887 _("The current execution mode assumed "
6888 "(even when symbols are available) is \"%s\".\n"),
6889 arm_force_mode_string);
6890 }
6891
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
6903 \f
6904 /* Return the ARM register name corresponding to register I. */
6905 static const char *
6906 arm_register_name (struct gdbarch *gdbarch, int i)
6907 {
6908 const int num_regs = gdbarch_num_regs (gdbarch);
6909
6910 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6911 && i >= num_regs && i < num_regs + 32)
6912 {
6913 static const char *const vfp_pseudo_names[] = {
6914 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6915 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6916 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6917 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6918 };
6919
6920 return vfp_pseudo_names[i - num_regs];
6921 }
6922
6923 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6924 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6925 {
6926 static const char *const neon_pseudo_names[] = {
6927 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6928 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6929 };
6930
6931 return neon_pseudo_names[i - num_regs - 32];
6932 }
6933
6934 if (i >= ARRAY_SIZE (arm_register_names))
6935 /* These registers are only supported on targets which supply
6936 an XML description. */
6937 return "";
6938
6939 return arm_register_names[i];
6940 }
6941
6942 static void
6943 set_disassembly_style (void)
6944 {
6945 int current;
6946
6947 /* Find the style that the user wants. */
6948 for (current = 0; current < num_disassembly_options; current++)
6949 if (disassembly_style == valid_disassembly_styles[current])
6950 break;
6951 gdb_assert (current < num_disassembly_options);
6952
6953 /* Synchronize the disassembler. */
6954 set_arm_regname_option (current);
6955 }
6956
6957 /* Test whether the coff symbol specific value corresponds to a Thumb
6958 function. */
6959
6960 static int
6961 coff_sym_is_thumb (int val)
6962 {
6963 return (val == C_THUMBEXT
6964 || val == C_THUMBSTAT
6965 || val == C_THUMBEXTFUNC
6966 || val == C_THUMBSTATFUNC
6967 || val == C_THUMBLABEL);
6968 }
6969
/* arm_coff_make_msymbol_special()
   arm_elf_make_msymbol_special()

   These functions test whether the COFF or ELF symbol corresponds to
   an address in thumb code, and set a "special" bit in a minimal
   symbol to indicate that it does.  */

static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  /* Thumb symbols are of type STT_LOPROC, (synonymous with
     STT_ARM_TFUNC).  */
  if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
      == STT_LOPROC)
    MSYMBOL_SET_SPECIAL (msym);
}
6986
/* Mark MSYM as special (Thumb) if the COFF storage class VAL denotes a
   Thumb symbol.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
6993
/* Objfile-data destructor: release the per-section mapping symbol
   vectors.  The struct arm_per_objfile and its section_maps array are
   obstack-allocated (see arm_record_special_symbol), so only the VECs
   themselves need freeing here.  */

static void
arm_objfile_data_free (struct objfile *objfile, void *arg)
{
  struct arm_per_objfile *data = arg;
  unsigned int i;

  for (i = 0; i < objfile->obfd->section_count; i++)
    VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
}
7003
/* Record the ARM mapping symbol SYM ($a = ARM code, $t = Thumb code,
   $d = data) from OBJFILE in the per-objfile, per-section vector of
   mapping symbols, keeping each vector sorted by symbol value.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  /* Only the $a/$t/$d mapping symbols are of interest; ignore other
     '$'-prefixed special symbols.  */
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily create the per-objfile data the first time a mapping
     symbol is seen for this objfile.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
			     struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
					   objfile->obfd->section_count,
					   VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
	{
	  /* Out-of-order symbol: binary-search for the insertion point
	     that keeps the vector sorted.  */
	  unsigned int idx;
	  idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
				 arm_compare_mapping_symbols);
	  VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
	  return;
	}
    }

  /* Common case: append at the end.  */
  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
7052
7053 static void
7054 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
7055 {
7056 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7057 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
7058
7059 /* If necessary, set the T bit. */
7060 if (arm_apcs_32)
7061 {
7062 ULONGEST val, t_bit;
7063 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
7064 t_bit = arm_psr_thumb_bit (gdbarch);
7065 if (arm_pc_is_thumb (gdbarch, pc))
7066 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7067 val | t_bit);
7068 else
7069 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
7070 val & ~t_bit);
7071 }
7072 }
7073
7074 /* Read the contents of a NEON quad register, by reading from two
7075 double registers. This is used to implement the quad pseudo
7076 registers, and for argument passing in case the quad registers are
7077 missing; vectors are passed in quad registers when using the VFP
7078 ABI, even if a NEON unit is not present. REGNUM is the index of
7079 the quad register, in [0, 15]. */
7080
7081 static void
7082 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
7083 int regnum, gdb_byte *buf)
7084 {
7085 char name_buf[4];
7086 gdb_byte reg_buf[8];
7087 int offset, double_regnum;
7088
7089 sprintf (name_buf, "d%d", regnum << 1);
7090 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7091 strlen (name_buf));
7092
7093 /* d0 is always the least significant half of q0. */
7094 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7095 offset = 8;
7096 else
7097 offset = 0;
7098
7099 regcache_raw_read (regcache, double_regnum, reg_buf);
7100 memcpy (buf + offset, reg_buf, 8);
7101
7102 offset = 8 - offset;
7103 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
7104 memcpy (buf + offset, reg_buf, 8);
7105 }
7106
7107 static void
7108 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
7109 int regnum, gdb_byte *buf)
7110 {
7111 const int num_regs = gdbarch_num_regs (gdbarch);
7112 char name_buf[4];
7113 gdb_byte reg_buf[8];
7114 int offset, double_regnum;
7115
7116 gdb_assert (regnum >= num_regs);
7117 regnum -= num_regs;
7118
7119 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7120 /* Quad-precision register. */
7121 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
7122 else
7123 {
7124 /* Single-precision register. */
7125 gdb_assert (regnum < 32);
7126
7127 /* s0 is always the least significant half of d0. */
7128 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7129 offset = (regnum & 1) ? 0 : 4;
7130 else
7131 offset = (regnum & 1) ? 4 : 0;
7132
7133 sprintf (name_buf, "d%d", regnum >> 1);
7134 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7135 strlen (name_buf));
7136
7137 regcache_raw_read (regcache, double_regnum, reg_buf);
7138 memcpy (buf, reg_buf + offset, 4);
7139 }
7140 }
7141
7142 /* Store the contents of BUF to a NEON quad register, by writing to
7143 two double registers. This is used to implement the quad pseudo
7144 registers, and for argument passing in case the quad registers are
7145 missing; vectors are passed in quad registers when using the VFP
7146 ABI, even if a NEON unit is not present. REGNUM is the index
7147 of the quad register, in [0, 15]. */
7148
7149 static void
7150 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
7151 int regnum, const gdb_byte *buf)
7152 {
7153 char name_buf[4];
7154 gdb_byte reg_buf[8];
7155 int offset, double_regnum;
7156
7157 sprintf (name_buf, "d%d", regnum << 1);
7158 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7159 strlen (name_buf));
7160
7161 /* d0 is always the least significant half of q0. */
7162 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7163 offset = 8;
7164 else
7165 offset = 0;
7166
7167 regcache_raw_write (regcache, double_regnum, buf + offset);
7168 offset = 8 - offset;
7169 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
7170 }
7171
7172 static void
7173 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
7174 int regnum, const gdb_byte *buf)
7175 {
7176 const int num_regs = gdbarch_num_regs (gdbarch);
7177 char name_buf[4];
7178 gdb_byte reg_buf[8];
7179 int offset, double_regnum;
7180
7181 gdb_assert (regnum >= num_regs);
7182 regnum -= num_regs;
7183
7184 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
7185 /* Quad-precision register. */
7186 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
7187 else
7188 {
7189 /* Single-precision register. */
7190 gdb_assert (regnum < 32);
7191
7192 /* s0 is always the least significant half of d0. */
7193 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
7194 offset = (regnum & 1) ? 0 : 4;
7195 else
7196 offset = (regnum & 1) ? 4 : 0;
7197
7198 sprintf (name_buf, "d%d", regnum >> 1);
7199 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
7200 strlen (name_buf));
7201
7202 regcache_raw_read (regcache, double_regnum, reg_buf);
7203 memcpy (reg_buf + offset, buf, 4);
7204 regcache_raw_write (regcache, double_regnum, reg_buf);
7205 }
7206 }
7207
static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  /* BATON points at the raw register number behind this alias.  */
  const int *regnum_ptr = baton;

  return value_of_register (*regnum_ptr, frame);
}
7214 \f
7215 static enum gdb_osabi
7216 arm_elf_osabi_sniffer (bfd *abfd)
7217 {
7218 unsigned int elfosabi;
7219 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
7220
7221 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
7222
7223 if (elfosabi == ELFOSABI_ARM)
7224 /* GNU tools use this value. Check note sections in this case,
7225 as well. */
7226 bfd_map_over_sections (abfd,
7227 generic_elf_osabi_sniff_abi_tag_sections,
7228 &osabi);
7229
7230 /* Anything else will be handled by the generic ELF sniffer. */
7231 return osabi;
7232 }
7233
7234 static int
7235 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
7236 struct reggroup *group)
7237 {
7238 /* FPS register's type is INT, but belongs to float_reggroup. Beside
7239 this, FPS register belongs to save_regroup, restore_reggroup, and
7240 all_reggroup, of course. */
7241 if (regnum == ARM_FPS_REGNUM)
7242 return (group == float_reggroup
7243 || group == save_reggroup
7244 || group == restore_reggroup
7245 || group == all_reggroup);
7246 else
7247 return default_register_reggroup_p (gdbarch, regnum, group);
7248 }
7249
7250 \f
7251 /* Initialize the current architecture based on INFO. If possible,
7252 re-use an architecture from ARCHES, which is a list of
7253 architectures already created during this debugging session.
7254
7255 Called e.g. at program startup, when reading a core file, and when
7256 reading a binary file. */
7257
static struct gdbarch *
arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  enum arm_abi_kind arm_abi = arm_abi_global;
  enum arm_float_model fp_model = arm_fp_model;
  struct tdesc_arch_data *tdesc_data = NULL;
  int i, is_m = 0;
  int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
  int have_neon = 0;
  int have_fpa_registers = 1;
  const struct target_desc *tdesc = info.target_desc;

  /* If we have an object to base this architecture on, try to determine
     its ABI.  */

  if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
    {
      int ei_osabi, e_flags;

      switch (bfd_get_flavour (info.abfd))
	{
	case bfd_target_aout_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_coff_flavour:
	  /* Assume it's an old APCS-style ABI.  */
	  /* XXX WinCE?  */
	  arm_abi = ARM_ABI_APCS;
	  break;

	case bfd_target_elf_flavour:
	  ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
	  e_flags = elf_elfheader (info.abfd)->e_flags;

	  if (ei_osabi == ELFOSABI_ARM)
	    {
	      /* GNU tools used to use this value, but do not for EABI
		 objects.  There's nowhere to tag an EABI version
		 anyway, so assume APCS.  */
	      arm_abi = ARM_ABI_APCS;
	    }
	  else if (ei_osabi == ELFOSABI_NONE)
	    {
	      int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
	      int attr_arch, attr_profile;

	      switch (eabi_ver)
		{
		case EF_ARM_EABI_UNKNOWN:
		  /* Assume GNU tools.  */
		  arm_abi = ARM_ABI_APCS;
		  break;

		case EF_ARM_EABI_VER4:
		case EF_ARM_EABI_VER5:
		  arm_abi = ARM_ABI_AAPCS;
		  /* EABI binaries default to VFP float ordering.
		     They may also contain build attributes that can
		     be used to identify if the VFP argument-passing
		     ABI is in use.  */
		  if (fp_model == ARM_FLOAT_AUTO)
		    {
#ifdef HAVE_ELF
		      switch (bfd_elf_get_obj_attr_int (info.abfd,
							OBJ_ATTR_PROC,
							Tag_ABI_VFP_args))
			{
			case 0:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, base
			     variant".  */
			  fp_model = ARM_FLOAT_SOFT_VFP;
			  break;
			case 1:
			  /* "The user intended FP parameter/result
			     passing to conform to AAPCS, VFP
			     variant".  */
			  fp_model = ARM_FLOAT_VFP;
			  break;
			case 2:
			  /* "The user intended FP parameter/result
			     passing to conform to tool chain-specific
			     conventions" - we don't know any such
			     conventions, so leave it as "auto".  */
			  break;
			default:
			  /* Attribute value not mentioned in the
			     October 2008 ABI, so leave it as
			     "auto".  */
			  break;
			}
#else
		      fp_model = ARM_FLOAT_SOFT_VFP;
#endif
		    }
		  break;

		default:
		  /* Leave it as "auto".  */
		  warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
		  break;
		}

#ifdef HAVE_ELF
	      /* Detect M-profile programs.  This only works if the
		 executable file includes build attributes; GCC does
		 copy them to the executable, but e.g. RealView does
		 not.  */
	      attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
						    Tag_CPU_arch);
	      attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
						       OBJ_ATTR_PROC,
						       Tag_CPU_arch_profile);
	      /* GCC specifies the profile for v6-M; RealView only
		 specifies the profile for architectures starting with
		 V7 (as opposed to architectures with a tag
		 numerically greater than TAG_CPU_ARCH_V7).  */
	      if (!tdesc_has_registers (tdesc)
		  && (attr_arch == TAG_CPU_ARCH_V6_M
		      || attr_arch == TAG_CPU_ARCH_V6S_M
		      || attr_profile == 'M'))
		tdesc = tdesc_arm_with_m;
#endif
	    }

	  if (fp_model == ARM_FLOAT_AUTO)
	    {
	      int e_flags = elf_elfheader (info.abfd)->e_flags;

	      switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
		{
		case 0:
		  /* Leave it as "auto".  Strictly speaking this case
		     means FPA, but almost nobody uses that now, and
		     many toolchains fail to set the appropriate bits
		     for the floating-point model they use.  */
		  break;
		case EF_ARM_SOFT_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_FPA;
		  break;
		case EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_VFP;
		  break;
		case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
		  fp_model = ARM_FLOAT_SOFT_VFP;
		  break;
		}
	    }

	  /* BE8 images are decoded as little-endian code regardless of
	     data endianness.  */
	  if (e_flags & EF_ARM_BE8)
	    info.byte_order_for_code = BFD_ENDIAN_LITTLE;

	  break;

	default:
	  /* Leave it as "auto".  */
	  break;
	}
    }

  /* Check any target description for validity.  */
  if (tdesc_has_registers (tdesc))
    {
      /* For most registers we require GDB's default names; but also allow
	 the numeric names for sp / lr / pc, as a convenience.  */
      static const char *const arm_sp_names[] = { "r13", "sp", NULL };
      static const char *const arm_lr_names[] = { "r14", "lr", NULL };
      static const char *const arm_pc_names[] = { "r15", "pc", NULL };

      const struct tdesc_feature *feature;
      int valid_p;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.core");
      if (feature == NULL)
	{
	  /* No core feature; accept an M-profile description instead.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.m-profile");
	  if (feature == NULL)
	    return NULL;
	  else
	    is_m = 1;
	}

      tdesc_data = tdesc_data_alloc ();

      valid_p = 1;
      for (i = 0; i < ARM_SP_REGNUM; i++)
	valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
					    arm_register_names[i]);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_SP_REGNUM,
						  arm_sp_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_LR_REGNUM,
						  arm_lr_names);
      valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
						  ARM_PC_REGNUM,
						  arm_pc_names);
      if (is_m)
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "xpsr");
      else
	valid_p &= tdesc_numbered_register (feature, tdesc_data,
					    ARM_PS_REGNUM, "cpsr");

      if (!valid_p)
	{
	  tdesc_data_cleanup (tdesc_data);
	  return NULL;
	}

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.fpa");
      if (feature != NULL)
	{
	  valid_p = 1;
	  for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
	    valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
						arm_register_names[i]);
	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}
      else
	have_fpa_registers = 0;

      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.xscale.iwmmxt");
      if (feature != NULL)
	{
	  static const char *const iwmmxt_names[] = {
	    "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
	    "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
	    "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
	    "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
	  };

	  valid_p = 1;
	  for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  /* Check for the control registers, but do not fail if they
	     are missing.  */
	  for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
	    tdesc_numbered_register (feature, tdesc_data, i,
				     iwmmxt_names[i - ARM_WR0_REGNUM]);

	  for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
	    valid_p
	      &= tdesc_numbered_register (feature, tdesc_data, i,
					  iwmmxt_names[i - ARM_WR0_REGNUM]);

	  if (!valid_p)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }
	}

      /* If we have a VFP unit, check whether the single precision registers
	 are present.  If not, then we will synthesize them as pseudo
	 registers.  */
      feature = tdesc_find_feature (tdesc,
				    "org.gnu.gdb.arm.vfp");
      if (feature != NULL)
	{
	  static const char *const vfp_double_names[] = {
	    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
	    "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
	    "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
	    "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
	  };

	  /* Require the double precision registers.  There must be either
	     16 or 32.  */
	  valid_p = 1;
	  for (i = 0; i < 32; i++)
	    {
	      valid_p &= tdesc_numbered_register (feature, tdesc_data,
						  ARM_D0_REGNUM + i,
						  vfp_double_names[i]);
	      if (!valid_p)
		break;
	    }

	  /* After the loop, I is the count of registers found; only a
	     count of exactly 16 is an acceptable partial set.  */
	  if (!valid_p && i != 16)
	    {
	      tdesc_data_cleanup (tdesc_data);
	      return NULL;
	    }

	  if (tdesc_unnumbered_register (feature, "s0") == 0)
	    have_vfp_pseudos = 1;

	  have_vfp_registers = 1;

	  /* If we have VFP, also check for NEON.  The architecture allows
	     NEON without VFP (integer vector operations only), but GDB
	     does not support that.  */
	  feature = tdesc_find_feature (tdesc,
					"org.gnu.gdb.arm.neon");
	  if (feature != NULL)
	    {
	      /* NEON requires 32 double-precision registers.  */
	      if (i != 32)
		{
		  tdesc_data_cleanup (tdesc_data);
		  return NULL;
		}

	      /* If there are quad registers defined by the stub, use
		 their type; otherwise (normally) provide them with
		 the default type.  */
	      if (tdesc_unnumbered_register (feature, "q0") == 0)
		have_neon_pseudos = 1;

	      have_neon = 1;
	    }
	}
    }

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      if (arm_abi != ARM_ABI_AUTO
	  && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
	continue;

      if (fp_model != ARM_FLOAT_AUTO
	  && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
	continue;

      /* There are various other properties in tdep that we do not
	 need to check here: those derived from a target description,
	 since gdbarches with a different target description are
	 automatically disqualified.  */

      /* Do check is_m, though, since it might come from the binary.  */
      if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
	continue;

      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  /* No reusable candidate: create a fresh architecture.  */
  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Record additional information about the architecture we are defining.
     These are gdbarch discriminators, like the OSABI.  */
  tdep->arm_abi = arm_abi;
  tdep->fp_model = fp_model;
  tdep->is_m = is_m;
  tdep->have_fpa_registers = have_fpa_registers;
  tdep->have_vfp_registers = have_vfp_registers;
  tdep->have_vfp_pseudos = have_vfp_pseudos;
  tdep->have_neon_pseudos = have_neon_pseudos;
  tdep->have_neon = have_neon;

  /* Breakpoints.  */
  switch (info.byte_order_for_code)
    {
    case BFD_ENDIAN_BIG:
      tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);

      break;

    case BFD_ENDIAN_LITTLE:
      tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
      tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
      tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
      tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);

      break;

    default:
      internal_error (__FILE__, __LINE__,
		      _("arm_gdbarch_init: bad byte order for float format"));
    }

  /* On ARM targets char defaults to unsigned.  */
  set_gdbarch_char_signed (gdbarch, 0);

  /* Note: for displaced stepping, this includes the breakpoint, and one word
     of additional scratch space.  This setting isn't used for anything beside
     displaced stepping at present.  */
  set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */

  /* The default, for both APCS and AAPCS, is to return small
     structures in registers.  */
  tdep->struct_return = reg_struct_return;

  set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, arm_frame_align);

  set_gdbarch_write_pc (gdbarch, arm_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);

  frame_base_set_default (gdbarch, &arm_normal_base);

  /* Address manipulation.  */
  set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
  set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);

  /* Detect whether PC is in function epilogue.  */
  set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);

  /* Skip trampolines.  */
  set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
  set_gdbarch_remote_breakpoint_from_pc (gdbarch,
					 arm_remote_breakpoint_from_pc);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
  set_gdbarch_register_type (gdbarch, arm_register_type);
  set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);

  /* This "info float" is FPA-specific.  Use the generic version if we
     do not have FPA.  */
  if (gdbarch_tdep (gdbarch)->have_fpa_registers)
    set_gdbarch_print_float_info (gdbarch, arm_print_float_info);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
  set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);

  set_gdbarch_register_name (gdbarch, arm_register_name);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, arm_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);

  /* Minsymbol frobbing.  */
  set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
  set_gdbarch_coff_make_msymbol_special (gdbarch,
					 arm_coff_make_msymbol_special);
  set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);

  /* Thumb-2 IT block support.  */
  set_gdbarch_adjust_breakpoint_address (gdbarch,
					 arm_adjust_breakpoint_address);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* If the ABI is not otherwise marked, assume the old GNU APCS.  EABI
     binaries are always marked.  */
  if (tdep->arm_abi == ARM_ABI_AUTO)
    tdep->arm_abi = ARM_ABI_APCS;

  /* We used to default to FPA for generic ARM, but almost nobody
     uses that now, and we now provide a way for the user to force
     the model.  So default to the most useful variant.  */
  if (tdep->fp_model == ARM_FLOAT_AUTO)
    tdep->fp_model = ARM_FLOAT_SOFT_FPA;

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);

  /* Floating point sizes and format.  */
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
    {
      set_gdbarch_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
      set_gdbarch_long_double_format
	(gdbarch, floatformats_ieee_double_littlebyte_bigword);
    }
  else
    {
      set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
      set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
    }

  if (have_vfp_pseudos)
    {
      /* NOTE: These are the only pseudo registers used by
	 the ARM target at the moment.  If more are added, a
	 little more care in numbering will be needed.  */

      /* 32 single-precision pseudos, plus 16 quad pseudos when NEON
	 is present.  */
      int num_pseudos = 32;
      if (have_neon_pseudos)
	num_pseudos += 16;
      set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
      set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
      set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
    }

  if (tdesc_data)
    {
      set_tdesc_pseudo_register_name (gdbarch, arm_register_name);

      tdesc_use_registers (gdbarch, tdesc, tdesc_data);

      /* Override tdesc_register_type to adjust the types of VFP
	 registers for NEON.  */
      set_gdbarch_register_type (gdbarch, arm_register_type);
    }

  /* Add standard register aliases.  We add aliases even for those
     names which are used by the current architecture - it's simpler,
     and does no harm, since nothing ever lists user registers.  */
  for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
    user_reg_add (gdbarch, arm_register_aliases[i].name,
		  value_of_arm_user_reg, &arm_register_aliases[i].regnum);

  return gdbarch;
}
7822
7823 static void
7824 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
7825 {
7826 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7827
7828 if (tdep == NULL)
7829 return;
7830
7831 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
7832 (unsigned long) tdep->lowest_pc);
7833 }
7834
7835 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
7836
7837 void
7838 _initialize_arm_tdep (void)
7839 {
7840 struct ui_file *stb;
7841 long length;
7842 struct cmd_list_element *new_set, *new_show;
7843 const char *setname;
7844 const char *setdesc;
7845 const char *const *regnames;
7846 int numregs, i, j;
7847 static char *helptext;
7848 char regdesc[1024], *rdptr = regdesc;
7849 size_t rest = sizeof (regdesc);
7850
7851 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
7852
7853 arm_objfile_data_key
7854 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
7855
7856 /* Register an ELF OS ABI sniffer for ARM binaries. */
7857 gdbarch_register_osabi_sniffer (bfd_arch_arm,
7858 bfd_target_elf_flavour,
7859 arm_elf_osabi_sniffer);
7860
7861 /* Initialize the standard target descriptions. */
7862 initialize_tdesc_arm_with_m ();
7863
7864 /* Get the number of possible sets of register names defined in opcodes. */
7865 num_disassembly_options = get_arm_regname_num_options ();
7866
7867 /* Add root prefix command for all "set arm"/"show arm" commands. */
7868 add_prefix_cmd ("arm", no_class, set_arm_command,
7869 _("Various ARM-specific commands."),
7870 &setarmcmdlist, "set arm ", 0, &setlist);
7871
7872 add_prefix_cmd ("arm", no_class, show_arm_command,
7873 _("Various ARM-specific commands."),
7874 &showarmcmdlist, "show arm ", 0, &showlist);
7875
7876 /* Sync the opcode insn printer with our register viewer. */
7877 parse_arm_disassembler_option ("reg-names-std");
7878
7879 /* Initialize the array that will be passed to
7880 add_setshow_enum_cmd(). */
7881 valid_disassembly_styles
7882 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
7883 for (i = 0; i < num_disassembly_options; i++)
7884 {
7885 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
7886 valid_disassembly_styles[i] = setname;
7887 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
7888 rdptr += length;
7889 rest -= length;
7890 /* When we find the default names, tell the disassembler to use
7891 them. */
7892 if (!strcmp (setname, "std"))
7893 {
7894 disassembly_style = setname;
7895 set_arm_regname_option (i);
7896 }
7897 }
7898 /* Mark the end of valid options. */
7899 valid_disassembly_styles[num_disassembly_options] = NULL;
7900
7901 /* Create the help text. */
7902 stb = mem_fileopen ();
7903 fprintf_unfiltered (stb, "%s%s%s",
7904 _("The valid values are:\n"),
7905 regdesc,
7906 _("The default is \"std\"."));
7907 helptext = ui_file_xstrdup (stb, NULL);
7908 ui_file_delete (stb);
7909
7910 add_setshow_enum_cmd("disassembler", no_class,
7911 valid_disassembly_styles, &disassembly_style,
7912 _("Set the disassembly style."),
7913 _("Show the disassembly style."),
7914 helptext,
7915 set_disassembly_style_sfunc,
7916 NULL, /* FIXME: i18n: The disassembly style is
7917 \"%s\". */
7918 &setarmcmdlist, &showarmcmdlist);
7919
7920 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7921 _("Set usage of ARM 32-bit mode."),
7922 _("Show usage of ARM 32-bit mode."),
7923 _("When off, a 26-bit PC will be used."),
7924 NULL,
7925 NULL, /* FIXME: i18n: Usage of ARM 32-bit
7926 mode is %s. */
7927 &setarmcmdlist, &showarmcmdlist);
7928
7929 /* Add a command to allow the user to force the FPU model. */
7930 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7931 _("Set the floating point type."),
7932 _("Show the floating point type."),
7933 _("auto - Determine the FP typefrom the OS-ABI.\n\
7934 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7935 fpa - FPA co-processor (GCC compiled).\n\
7936 softvfp - Software FP with pure-endian doubles.\n\
7937 vfp - VFP co-processor."),
7938 set_fp_model_sfunc, show_fp_model,
7939 &setarmcmdlist, &showarmcmdlist);
7940
7941 /* Add a command to allow the user to force the ABI. */
7942 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7943 _("Set the ABI."),
7944 _("Show the ABI."),
7945 NULL, arm_set_abi, arm_show_abi,
7946 &setarmcmdlist, &showarmcmdlist);
7947
7948 /* Add two commands to allow the user to force the assumed
7949 execution mode. */
7950 add_setshow_enum_cmd ("fallback-mode", class_support,
7951 arm_mode_strings, &arm_fallback_mode_string,
7952 _("Set the mode assumed when symbols are unavailable."),
7953 _("Show the mode assumed when symbols are unavailable."),
7954 NULL, NULL, arm_show_fallback_mode,
7955 &setarmcmdlist, &showarmcmdlist);
7956 add_setshow_enum_cmd ("force-mode", class_support,
7957 arm_mode_strings, &arm_force_mode_string,
7958 _("Set the mode assumed even when symbols are available."),
7959 _("Show the mode assumed even when symbols are available."),
7960 NULL, NULL, arm_show_force_mode,
7961 &setarmcmdlist, &showarmcmdlist);
7962
7963 /* Debugging flag. */
7964 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7965 _("Set ARM debugging."),
7966 _("Show ARM debugging."),
7967 _("When on, arm-specific debugging is enabled."),
7968 NULL,
7969 NULL, /* FIXME: i18n: "ARM debugging is %s. */
7970 &setdebuglist, &showdebuglist);
7971 }