2010-12-23 Yao Qi <yao@codesourcery.com>
[binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000,
4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include <ctype.h> /* XXX for isupper () */
23
24 #include "defs.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "gdb_string.h"
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46
47 #include "arm-tdep.h"
48 #include "gdb/sim-arm.h"
49
50 #include "elf-bfd.h"
51 #include "coff/internal.h"
52 #include "elf/arm.h"
53
54 #include "gdb_assert.h"
55 #include "vec.h"
56
57 #include "features/arm-with-m.c"
58
59 static int arm_debug;
60
61 /* Macros for setting and testing a bit in a minimal symbol that marks
62 it as a Thumb function. The MSB of the minimal symbol's "info" field
63 is used for this purpose.
64
65 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
66 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
67
68 #define MSYMBOL_SET_SPECIAL(msym) \
69 MSYMBOL_TARGET_FLAG_1 (msym) = 1
70
71 #define MSYMBOL_IS_SPECIAL(msym) \
72 MSYMBOL_TARGET_FLAG_1 (msym)
73
74 /* Per-objfile data used for mapping symbols. */
75 static const struct objfile_data *arm_objfile_data_key;
76
77 struct arm_mapping_symbol
78 {
79 bfd_vma value;
80 char type;
81 };
82 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
83 DEF_VEC_O(arm_mapping_symbol_s);
84
85 struct arm_per_objfile
86 {
87 VEC(arm_mapping_symbol_s) **section_maps;
88 };
89
90 /* The list of available "set arm ..." and "show arm ..." commands. */
91 static struct cmd_list_element *setarmcmdlist = NULL;
92 static struct cmd_list_element *showarmcmdlist = NULL;
93
94 /* The type of floating-point to use. Keep this in sync with enum
95 arm_float_model, and the help string in _initialize_arm_tdep. */
96 static const char *fp_model_strings[] =
97 {
98 "auto",
99 "softfpa",
100 "fpa",
101 "softvfp",
102 "vfp",
103 NULL
104 };
105
106 /* A variable that can be configured by the user. */
107 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
108 static const char *current_fp_model = "auto";
109
110 /* The ABI to use. Keep this in sync with arm_abi_kind. */
111 static const char *arm_abi_strings[] =
112 {
113 "auto",
114 "APCS",
115 "AAPCS",
116 NULL
117 };
118
119 /* A variable that can be configured by the user. */
120 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
121 static const char *arm_abi_string = "auto";
122
123 /* The execution mode to assume. */
124 static const char *arm_mode_strings[] =
125 {
126 "auto",
127 "arm",
128 "thumb",
129 NULL
130 };
131
132 static const char *arm_fallback_mode_string = "auto";
133 static const char *arm_force_mode_string = "auto";
134
135 /* Number of different reg name sets (options). */
136 static int num_disassembly_options;
137
138 /* The standard register names, and all the valid aliases for them. */
139 static const struct
140 {
141 const char *name;
142 int regnum;
143 } arm_register_aliases[] = {
144 /* Basic register numbers. */
145 { "r0", 0 },
146 { "r1", 1 },
147 { "r2", 2 },
148 { "r3", 3 },
149 { "r4", 4 },
150 { "r5", 5 },
151 { "r6", 6 },
152 { "r7", 7 },
153 { "r8", 8 },
154 { "r9", 9 },
155 { "r10", 10 },
156 { "r11", 11 },
157 { "r12", 12 },
158 { "r13", 13 },
159 { "r14", 14 },
160 { "r15", 15 },
161 /* Synonyms (argument and variable registers). */
162 { "a1", 0 },
163 { "a2", 1 },
164 { "a3", 2 },
165 { "a4", 3 },
166 { "v1", 4 },
167 { "v2", 5 },
168 { "v3", 6 },
169 { "v4", 7 },
170 { "v5", 8 },
171 { "v6", 9 },
172 { "v7", 10 },
173 { "v8", 11 },
174 /* Other platform-specific names for r9. */
175 { "sb", 9 },
176 { "tr", 9 },
177 /* Special names. */
178 { "ip", 12 },
179 { "sp", 13 },
180 { "lr", 14 },
181 { "pc", 15 },
182 /* Names used by GCC (not listed in the ARM EABI). */
183 { "sl", 10 },
184 { "fp", 11 },
185 /* A special name from the older ATPCS. */
186 { "wr", 7 },
187 };
188
189 static const char *const arm_register_names[] =
190 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
191 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
192 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
193 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
194 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
195 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
196 "fps", "cpsr" }; /* 24 25 */
197
198 /* Valid register name styles. */
199 static const char **valid_disassembly_styles;
200
201 /* Disassembly style to use. Default to "std" register names. */
202 static const char *disassembly_style;
203
204 /* This is used to keep the bfd arch_info in sync with the disassembly
205 style. */
206 static void set_disassembly_style_sfunc(char *, int,
207 struct cmd_list_element *);
208 static void set_disassembly_style (void);
209
210 static void convert_from_extended (const struct floatformat *, const void *,
211 void *, int);
212 static void convert_to_extended (const struct floatformat *, void *,
213 const void *, int);
214
215 static void arm_neon_quad_read (struct gdbarch *gdbarch,
216 struct regcache *regcache,
217 int regnum, gdb_byte *buf);
218 static void arm_neon_quad_write (struct gdbarch *gdbarch,
219 struct regcache *regcache,
220 int regnum, const gdb_byte *buf);
221
222 struct arm_prologue_cache
223 {
224 /* The stack pointer at the time this frame was created; i.e. the
225 caller's stack pointer when this function was called. It is used
226 to identify this frame. */
227 CORE_ADDR prev_sp;
228
229 /* The frame base for this frame is just prev_sp - frame size.
230 FRAMESIZE is the distance from the frame pointer to the
231 initial stack pointer. */
232
233 int framesize;
234
235 /* The register used to hold the frame pointer for this frame. */
236 int framereg;
237
238 /* Saved register offsets. */
239 struct trad_frame_saved_reg *saved_regs;
240 };
241
242 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
243 CORE_ADDR prologue_start,
244 CORE_ADDR prologue_end,
245 struct arm_prologue_cache *cache);
246
247 /* Architecture version for displaced stepping. This affects the behaviour of
248 certain instructions, and really should not be hard-wired. */
249
250 #define DISPLACED_STEPPING_ARCH_VERSION 5
251
252 /* Addresses for calling Thumb functions have bit 0 set.
253 Here are some macros to test, set, or clear bit 0 of addresses. */
254 #define IS_THUMB_ADDR(addr) ((addr) & 1)
255 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
256 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
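
/* As a quick illustration of how these macros compose (a worked
   example, not tied to any particular binary): for a Thumb function
   whose symbol value is 0x80001235 (bit 0 set),

     IS_THUMB_ADDR (0x80001235)     => 1
     UNMAKE_THUMB_ADDR (0x80001235) => 0x80001234  (address of the first insn)
     MAKE_THUMB_ADDR (0x80001234)   => 0x80001235  (value suitable for bx/blx)

   arm_pc_is_thumb below relies on the first property, and
   arm_addr_bits_remove uses the second before an address is used as a
   PC value.  */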
257
258 /* Set to true if the 32-bit mode is in use. */
259
260 int arm_apcs_32 = 1;
261
262 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
263
264 static int
265 arm_psr_thumb_bit (struct gdbarch *gdbarch)
266 {
267 if (gdbarch_tdep (gdbarch)->is_m)
268 return XPSR_T;
269 else
270 return CPSR_T;
271 }
272
273 /* Determine if FRAME is executing in Thumb mode. */
274
275 int
276 arm_frame_is_thumb (struct frame_info *frame)
277 {
278 CORE_ADDR cpsr;
279 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
280
281 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
282 directly (from a signal frame or dummy frame) or by interpreting
283 the saved LR (from a prologue or DWARF frame). So consult it and
284 trust the unwinders. */
285 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
286
287 return (cpsr & t_bit) != 0;
288 }
289
290 /* Callback for VEC_lower_bound. */
291
292 static inline int
293 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
294 const struct arm_mapping_symbol *rhs)
295 {
296 return lhs->value < rhs->value;
297 }
298
299 /* Search for the mapping symbol covering MEMADDR. If one is found,
300 return its type. Otherwise, return 0. If START is non-NULL,
301 set *START to the location of the mapping symbol. */
302
303 static char
304 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
305 {
306 struct obj_section *sec;
307
308 /* If there are mapping symbols, consult them. */
309 sec = find_pc_section (memaddr);
310 if (sec != NULL)
311 {
312 struct arm_per_objfile *data;
313 VEC(arm_mapping_symbol_s) *map;
314 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
315 0 };
316 unsigned int idx;
317
318 data = objfile_data (sec->objfile, arm_objfile_data_key);
319 if (data != NULL)
320 {
321 map = data->section_maps[sec->the_bfd_section->index];
322 if (!VEC_empty (arm_mapping_symbol_s, map))
323 {
324 struct arm_mapping_symbol *map_sym;
325
326 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
327 arm_compare_mapping_symbols);
328
329 /* VEC_lower_bound finds the earliest ordered insertion
330 point. If the following symbol starts at this exact
331 address, we use that; otherwise, the preceding
332 mapping symbol covers this address. */
333 if (idx < VEC_length (arm_mapping_symbol_s, map))
334 {
335 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
336 if (map_sym->value == map_key.value)
337 {
338 if (start)
339 *start = map_sym->value + obj_section_addr (sec);
340 return map_sym->type;
341 }
342 }
343
344 if (idx > 0)
345 {
346 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
347 if (start)
348 *start = map_sym->value + obj_section_addr (sec);
349 return map_sym->type;
350 }
351 }
352 }
353 }
354
355 return 0;
356 }
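
/* A worked example of the lookup rule above, assuming a section whose
   sorted mapping symbols are { 0x10 'a', 0x40 't', 0x80 'd' }
   (section-relative offsets):

     offset 0x40 -> exact match, returns 't', *START = section + 0x40
     offset 0x7c -> falls between entries, so the preceding symbol
                    covers it; returns 't', *START = section + 0x40
     offset 0x90 -> covered by the last symbol, returns 'd'
     offset 0x08 -> before the first symbol, returns 0.  */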
357
358 static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
359 CORE_ADDR pc, int insert_bkpt);
360
361 /* Determine if the program counter specified in MEMADDR is in a Thumb
362 function. This function should be called for addresses unrelated to
363 any executing frame; otherwise, prefer arm_frame_is_thumb. */
364
365 static int
366 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
367 {
368 struct obj_section *sec;
369 struct minimal_symbol *sym;
370 char type;
371
372 /* If bit 0 of the address is set, assume this is a Thumb address. */
373 if (IS_THUMB_ADDR (memaddr))
374 return 1;
375
376 /* If the user wants to override the symbol table, let them. */
377 if (strcmp (arm_force_mode_string, "arm") == 0)
378 return 0;
379 if (strcmp (arm_force_mode_string, "thumb") == 0)
380 return 1;
381
382 /* ARM v6-M and v7-M are always in Thumb mode. */
383 if (gdbarch_tdep (gdbarch)->is_m)
384 return 1;
385
386 /* If there are mapping symbols, consult them. */
387 type = arm_find_mapping_symbol (memaddr, NULL);
388 if (type)
389 return type == 't';
390
391 /* Thumb functions have a "special" bit set in minimal symbols. */
392 sym = lookup_minimal_symbol_by_pc (memaddr);
393 if (sym)
394 return (MSYMBOL_IS_SPECIAL (sym));
395
396 /* If the user wants to override the fallback mode, let them. */
397 if (strcmp (arm_fallback_mode_string, "arm") == 0)
398 return 0;
399 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
400 return 1;
401
402 /* If we couldn't find any symbol, but we're talking to a running
403 target, then trust the current value of $cpsr. This lets
404 "display/i $pc" always show the correct mode (though if there is
405 a symbol table we will not reach here, so it still may not be
406 displayed in the mode in which it will be executed).
407
408 As a further heuristic, if MEMADDR looks like the single-step
409 target of the current instruction, use the execution state that
410 instruction would leave us in. */
411 if (target_has_registers)
412 {
413 struct frame_info *current_frame = get_current_frame ();
414 CORE_ADDR current_pc = get_frame_pc (current_frame);
415 int is_thumb = arm_frame_is_thumb (current_frame);
416 CORE_ADDR next_pc;
417 if (memaddr == current_pc)
418 return is_thumb;
419 else
420 {
421 struct gdbarch *gdbarch = get_frame_arch (current_frame);
422 next_pc = arm_get_next_pc_raw (current_frame, current_pc, FALSE);
423 if (memaddr == gdbarch_addr_bits_remove (gdbarch, next_pc))
424 return IS_THUMB_ADDR (next_pc);
425 else
426 return is_thumb;
427 }
428 }
429
430 /* Otherwise we're out of luck; we assume ARM. */
431 return 0;
432 }
433
434 /* Remove useless bits from addresses in a running program. */
435 static CORE_ADDR
436 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
437 {
438 if (arm_apcs_32)
439 return UNMAKE_THUMB_ADDR (val);
440 else
441 return (val & 0x03fffffc);
442 }
443
444 /* When reading symbols, we need to zap the low bit of the address,
445 which may be set to 1 for Thumb functions. */
446 static CORE_ADDR
447 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
448 {
449 return val & ~1;
450 }
451
452 /* Return 1 if PC is the start of a compiler helper function which
453 can be safely ignored during prologue skipping. */
454 static int
455 skip_prologue_function (CORE_ADDR pc)
456 {
457 struct minimal_symbol *msym;
458 const char *name;
459
460 msym = lookup_minimal_symbol_by_pc (pc);
461 if (msym == NULL || SYMBOL_VALUE_ADDRESS (msym) != pc)
462 return 0;
463
464 name = SYMBOL_LINKAGE_NAME (msym);
465 if (name == NULL)
466 return 0;
467
468 /* The GNU linker's Thumb call stub to foo is named
469 __foo_from_thumb. */
470 if (strstr (name, "_from_thumb") != NULL)
471 name += 2;
472
473 /* On soft-float targets, __truncdfsf2 is called to convert promoted
474 arguments to their argument types in non-prototyped
475 functions. */
476 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
477 return 1;
478 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
479 return 1;
480
481 /* Internal functions related to thread-local storage. */
482 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
483 return 1;
484 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
485 return 1;
486
487 return 0;
488 }
489
490 /* Support routines for instruction parsing. */
491 #define submask(x) ((1L << ((x) + 1)) - 1)
492 #define bit(obj,st) (((obj) >> (st)) & 1)
493 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
494 #define sbits(obj,st,fn) \
495 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
496 #define BranchDest(addr,instr) \
497 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
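
/* A worked example of these helpers: for the ARM instruction
   0xeb000001 (BL with a 24-bit offset field of 1) at address 0x8000,

     bits (0xeb000001, 24, 27)        => 0xb   (the BL opcode field)
     sbits (0xeb000001, 0, 23)        => 1     (sign-extended offset)
     BranchDest (0x8000, 0xeb000001)  => 0x8000 + 8 + (1 << 2) = 0x800c

   and for a backwards branch such as 0xebfffffe, sbits (..., 0, 23)
   yields -2, so BranchDest gives 0x8000 + 8 - 8 = 0x8000, i.e. a
   branch to the BL itself.  */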
498
499 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
500
501 static unsigned int
502 thumb_expand_immediate (unsigned int imm)
503 {
504 unsigned int count = imm >> 7;
505
506 if (count < 8)
507 switch (count / 2)
508 {
509 case 0:
510 return imm & 0xff;
511 case 1:
512 return (imm & 0xff) | ((imm & 0xff) << 16);
513 case 2:
514 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
515 case 3:
516 return (imm & 0xff) | ((imm & 0xff) << 8)
517 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
518 }
519
520 return (0x80 | (imm & 0x7f)) << (32 - count);
521 }
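
/* A minimal self-check sketch for the function above (illustrative
   only, hence kept out of the build): it exercises the four
   replication patterns and one rotated form of ThumbExpandImm, where
   the 12-bit input is laid out as i:imm3:imm8.  */
#if 0
static void
thumb_expand_immediate_example (void)
{
  gdb_assert (thumb_expand_immediate (0x0ab) == 0x000000ab); /* imm8 as-is */
  gdb_assert (thumb_expand_immediate (0x1ab) == 0x00ab00ab); /* bytes 0 and 2 */
  gdb_assert (thumb_expand_immediate (0x2ab) == 0xab00ab00); /* bytes 1 and 3 */
  gdb_assert (thumb_expand_immediate (0x3ab) == 0xabababab); /* all four bytes */
  gdb_assert (thumb_expand_immediate (0x4ff) == 0x7f800000); /* 0xff ror 9 */
}
#endif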
522
523 /* Return 1 if the 16-bit Thumb instruction INST might change
524 control flow, 0 otherwise. */
525
526 static int
527 thumb_instruction_changes_pc (unsigned short inst)
528 {
529 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
530 return 1;
531
532 if ((inst & 0xf000) == 0xd000) /* conditional branch */
533 return 1;
534
535 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
536 return 1;
537
538 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
539 return 1;
540
541 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
542 return 1;
543
544 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
545 return 1;
546
547 return 0;
548 }
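
/* For instance, under the tests above (standard Thumb encodings,
   listed only for illustration):

     0xbd10  pop {r4, pc}   => 1  (pop with PC in the register list)
     0x4770  bx lr          => 1  (register branch)
     0xd0fe  beq <target>   => 1  (conditional branch)
     0xb580  push {r7, lr}  => 0
     0x2a00  cmp r2, #0     => 0  */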
549
550 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
551 might change control flow, 0 otherwise. */
552
553 static int
554 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
555 {
556 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
557 {
558 /* Branches and miscellaneous control instructions. */
559
560 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
561 {
562 /* B, BL, BLX. */
563 return 1;
564 }
565 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
566 {
567 /* SUBS PC, LR, #imm8. */
568 return 1;
569 }
570 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
571 {
572 /* Conditional branch. */
573 return 1;
574 }
575
576 return 0;
577 }
578
579 if ((inst1 & 0xfe50) == 0xe810)
580 {
581 /* Load multiple or RFE. */
582
583 if (bit (inst1, 7) && !bit (inst1, 8))
584 {
585 /* LDMIA or POP */
586 if (bit (inst2, 15))
587 return 1;
588 }
589 else if (!bit (inst1, 7) && bit (inst1, 8))
590 {
591 /* LDMDB */
592 if (bit (inst2, 15))
593 return 1;
594 }
595 else if (bit (inst1, 7) && bit (inst1, 8))
596 {
597 /* RFEIA */
598 return 1;
599 }
600 else if (!bit (inst1, 7) && !bit (inst1, 8))
601 {
602 /* RFEDB */
603 return 1;
604 }
605
606 return 0;
607 }
608
609 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
610 {
611 /* MOV PC or MOVS PC. */
612 return 1;
613 }
614
615 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
616 {
617 /* LDR PC. */
618 if (bits (inst1, 0, 3) == 15)
619 return 1;
620 if (bit (inst1, 7))
621 return 1;
622 if (bit (inst2, 11))
623 return 1;
624 if ((inst2 & 0x0fc0) == 0x0000)
625 return 1;
626
627 return 0;
628 }
629
630 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
631 {
632 /* TBB. */
633 return 1;
634 }
635
636 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
637 {
638 /* TBH. */
639 return 1;
640 }
641
642 return 0;
643 }
644
645 /* Analyze a Thumb prologue, looking for a recognizable stack frame
646 and frame pointer. Scan until we encounter a store that could
647 clobber the stack frame unexpectedly, or an unknown instruction.
648 Return the last address which is definitely safe to skip for an
649 initial breakpoint. */
650
651 static CORE_ADDR
652 thumb_analyze_prologue (struct gdbarch *gdbarch,
653 CORE_ADDR start, CORE_ADDR limit,
654 struct arm_prologue_cache *cache)
655 {
656 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
657 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
658 int i;
659 pv_t regs[16];
660 struct pv_area *stack;
661 struct cleanup *back_to;
662 CORE_ADDR offset;
663 CORE_ADDR unrecognized_pc = 0;
664
665 for (i = 0; i < 16; i++)
666 regs[i] = pv_register (i, 0);
667 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
668 back_to = make_cleanup_free_pv_area (stack);
669
670 while (start < limit)
671 {
672 unsigned short insn;
673
674 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
675
676 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
677 {
678 int regno;
679 int mask;
680
681 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
682 break;
683
684 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
685 whether to save LR (R14). */
686 mask = (insn & 0xff) | ((insn & 0x100) << 6);
687
688 /* Calculate offsets of saved R0-R7 and LR. */
689 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
690 if (mask & (1 << regno))
691 {
692 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
693 -4);
694 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
695 }
696 }
697 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
698 sub sp, #simm */
699 {
700 offset = (insn & 0x7f) << 2; /* get scaled offset */
701 if (insn & 0x80) /* Check for SUB. */
702 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
703 -offset);
704 else
705 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
706 offset);
707 }
708 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
709 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
710 (insn & 0xff) << 2);
711 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
712 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
713 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
714 bits (insn, 6, 8));
715 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
716 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
717 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
718 bits (insn, 0, 7));
719 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
720 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
721 && pv_is_constant (regs[bits (insn, 3, 5)]))
722 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
723 regs[bits (insn, 6, 8)]);
724 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
725 && pv_is_constant (regs[bits (insn, 3, 6)]))
726 {
727 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
728 int rm = bits (insn, 3, 6);
729 regs[rd] = pv_add (regs[rd], regs[rm]);
730 }
731 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
732 {
733 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
734 int src_reg = (insn & 0x78) >> 3;
735 regs[dst_reg] = regs[src_reg];
736 }
737 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
738 {
739 /* Handle stores to the stack. Normally pushes are used,
740 but with GCC -mtpcs-frame, there may be other stores
741 in the prologue to create the frame. */
742 int regno = (insn >> 8) & 0x7;
743 pv_t addr;
744
745 offset = (insn & 0xff) << 2;
746 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
747
748 if (pv_area_store_would_trash (stack, addr))
749 break;
750
751 pv_area_store (stack, addr, 4, regs[regno]);
752 }
753 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
754 {
755 int rd = bits (insn, 0, 2);
756 int rn = bits (insn, 3, 5);
757 pv_t addr;
758
759 offset = bits (insn, 6, 10) << 2;
760 addr = pv_add_constant (regs[rn], offset);
761
762 if (pv_area_store_would_trash (stack, addr))
763 break;
764
765 pv_area_store (stack, addr, 4, regs[rd]);
766 }
767 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
768 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
769 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
770 /* Ignore stores of argument registers to the stack. */
771 ;
772 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
773 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
774 /* Ignore block loads from the stack, potentially copying
775 parameters from memory. */
776 ;
777 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
778 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
779 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
780 /* Similarly ignore single loads from the stack. */
781 ;
782 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
783 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
784 /* Skip register copies, i.e. saves to another register
785 instead of the stack. */
786 ;
787 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
788 /* Recognize constant loads; even with small stacks these are necessary
789 on Thumb. */
790 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
791 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
792 {
793 /* Constant pool loads, for the same reason. */
794 unsigned int constant;
795 CORE_ADDR loc;
796
797 loc = start + 4 + bits (insn, 0, 7) * 4;
798 constant = read_memory_unsigned_integer (loc, 4, byte_order);
799 regs[bits (insn, 8, 10)] = pv_constant (constant);
800 }
801 else if ((insn & 0xe000) == 0xe000)
802 {
803 unsigned short inst2;
804
805 inst2 = read_memory_unsigned_integer (start + 2, 2,
806 byte_order_for_code);
807
808 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
809 {
810 /* BL, BLX. Allow some special function calls when
811 skipping the prologue; GCC generates these before
812 storing arguments to the stack. */
813 CORE_ADDR nextpc;
814 int j1, j2, imm1, imm2;
815
816 imm1 = sbits (insn, 0, 10);
817 imm2 = bits (inst2, 0, 10);
818 j1 = bit (inst2, 13);
819 j2 = bit (inst2, 11);
820
821 offset = ((imm1 << 12) + (imm2 << 1));
822 offset ^= ((!j2) << 22) | ((!j1) << 23);
823
824 nextpc = start + 4 + offset;
825 /* For BLX make sure to clear the low bits. */
826 if (bit (inst2, 12) == 0)
827 nextpc = nextpc & 0xfffffffc;
828
829 if (!skip_prologue_function (nextpc))
830 break;
831 }
832
833 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!}, { registers } */
834 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
835 {
836 pv_t addr = regs[bits (insn, 0, 3)];
837 int regno;
838
839 if (pv_area_store_would_trash (stack, addr))
840 break;
841
842 /* Calculate offsets of saved registers. */
843 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
844 if (inst2 & (1 << regno))
845 {
846 addr = pv_add_constant (addr, -4);
847 pv_area_store (stack, addr, 4, regs[regno]);
848 }
849
850 if (insn & 0x0020)
851 regs[bits (insn, 0, 3)] = addr;
852 }
853
854 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2, [Rn, #+/-imm]{!} */
855 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
856 {
857 int regno1 = bits (inst2, 12, 15);
858 int regno2 = bits (inst2, 8, 11);
859 pv_t addr = regs[bits (insn, 0, 3)];
860
861 offset = inst2 & 0xff;
862 if (insn & 0x0080)
863 addr = pv_add_constant (addr, offset);
864 else
865 addr = pv_add_constant (addr, -offset);
866
867 if (pv_area_store_would_trash (stack, addr))
868 break;
869
870 pv_area_store (stack, addr, 4, regs[regno1]);
871 pv_area_store (stack, pv_add_constant (addr, 4),
872 4, regs[regno2]);
873
874 if (insn & 0x0020)
875 regs[bits (insn, 0, 3)] = addr;
876 }
877
878 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
879 && (inst2 & 0x0c00) == 0x0c00
880 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
881 {
882 int regno = bits (inst2, 12, 15);
883 pv_t addr = regs[bits (insn, 0, 3)];
884
885 offset = inst2 & 0xff;
886 if (inst2 & 0x0200)
887 addr = pv_add_constant (addr, offset);
888 else
889 addr = pv_add_constant (addr, -offset);
890
891 if (pv_area_store_would_trash (stack, addr))
892 break;
893
894 pv_area_store (stack, addr, 4, regs[regno]);
895
896 if (inst2 & 0x0100)
897 regs[bits (insn, 0, 3)] = addr;
898 }
899
900 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
901 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
902 {
903 int regno = bits (inst2, 12, 15);
904 pv_t addr;
905
906 offset = inst2 & 0xfff;
907 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
908
909 if (pv_area_store_would_trash (stack, addr))
910 break;
911
912 pv_area_store (stack, addr, 4, regs[regno]);
913 }
914
915 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
916 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
917 /* Ignore stores of argument registers to the stack. */
918 ;
919
920 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
921 && (inst2 & 0x0d00) == 0x0c00
922 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
923 /* Ignore stores of argument registers to the stack. */
924 ;
925
926 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!], { registers } */
927 && (inst2 & 0x8000) == 0x0000
928 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
929 /* Ignore block loads from the stack, potentially copying
930 parameters from memory. */
931 ;
932
933 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2, [Rn, #+/-imm] */
934 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
935 /* Similarly ignore dual loads from the stack. */
936 ;
937
938 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
939 && (inst2 & 0x0d00) == 0x0c00
940 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
941 /* Similarly ignore single loads from the stack. */
942 ;
943
944 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
945 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
946 /* Similarly ignore single loads from the stack. */
947 ;
948
949 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
950 && (inst2 & 0x8000) == 0x0000)
951 {
952 unsigned int imm = ((bits (insn, 10, 10) << 11)
953 | (bits (inst2, 12, 14) << 8)
954 | bits (inst2, 0, 7));
955
956 regs[bits (inst2, 8, 11)]
957 = pv_add_constant (regs[bits (insn, 0, 3)],
958 thumb_expand_immediate (imm));
959 }
960
961 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
962 && (inst2 & 0x8000) == 0x0000)
963 {
964 unsigned int imm = ((bits (insn, 10, 10) << 11)
965 | (bits (inst2, 12, 14) << 8)
966 | bits (inst2, 0, 7));
967
968 regs[bits (inst2, 8, 11)]
969 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
970 }
971
972 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
973 && (inst2 & 0x8000) == 0x0000)
974 {
975 unsigned int imm = ((bits (insn, 10, 10) << 11)
976 | (bits (inst2, 12, 14) << 8)
977 | bits (inst2, 0, 7));
978
979 regs[bits (inst2, 8, 11)]
980 = pv_add_constant (regs[bits (insn, 0, 3)],
981 - (CORE_ADDR) thumb_expand_immediate (imm));
982 }
983
984 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
985 && (inst2 & 0x8000) == 0x0000)
986 {
987 unsigned int imm = ((bits (insn, 10, 10) << 11)
988 | (bits (inst2, 12, 14) << 8)
989 | bits (inst2, 0, 7));
990
991 regs[bits (inst2, 8, 11)]
992 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
993 }
994
995 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
996 {
997 unsigned int imm = ((bits (insn, 10, 10) << 11)
998 | (bits (inst2, 12, 14) << 8)
999 | bits (inst2, 0, 7));
1000
1001 regs[bits (inst2, 8, 11)]
1002 = pv_constant (thumb_expand_immediate (imm));
1003 }
1004
1005 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1006 {
1007 unsigned int imm = ((bits (insn, 0, 3) << 12)
1008 | (bits (insn, 10, 10) << 11)
1009 | (bits (inst2, 12, 14) << 8)
1010 | bits (inst2, 0, 7));
1011
1012 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1013 }
1014
1015 else if (insn == 0xea5f /* mov.w Rd,Rm */
1016 && (inst2 & 0xf0f0) == 0)
1017 {
1018 int dst_reg = (inst2 & 0x0f00) >> 8;
1019 int src_reg = inst2 & 0xf;
1020 regs[dst_reg] = regs[src_reg];
1021 }
1022
1023 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1024 {
1025 /* Constant pool loads. */
1026 unsigned int constant;
1027 CORE_ADDR loc;
1028
1029 offset = bits (insn, 0, 11);
1030 if (insn & 0x0080)
1031 loc = start + 4 + offset;
1032 else
1033 loc = start + 4 - offset;
1034
1035 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1036 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1037 }
1038
1039 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1040 {
1041 /* Constant pool loads. */
1042 unsigned int constant;
1043 CORE_ADDR loc;
1044
1045 offset = bits (insn, 0, 7) << 2;
1046 if (insn & 0x0080)
1047 loc = start + 4 + offset;
1048 else
1049 loc = start + 4 - offset;
1050
1051 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1052 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1053
1054 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1055 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1056 }
1057
1058 else if (thumb2_instruction_changes_pc (insn, inst2))
1059 {
1060 /* Don't scan past anything that might change control flow. */
1061 break;
1062 }
1063 else
1064 {
1065 /* The optimizer might shove anything into the prologue,
1066 so we just skip what we don't recognize. */
1067 unrecognized_pc = start;
1068 }
1069
1070 start += 2;
1071 }
1072 else if (thumb_instruction_changes_pc (insn))
1073 {
1074 /* Don't scan past anything that might change control flow. */
1075 break;
1076 }
1077 else
1078 {
1079 /* The optimizer might shove anything into the prologue,
1080 so we just skip what we don't recognize. */
1081 unrecognized_pc = start;
1082 }
1083
1084 start += 2;
1085 }
1086
1087 if (arm_debug)
1088 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1089 paddress (gdbarch, start));
1090
1091 if (unrecognized_pc == 0)
1092 unrecognized_pc = start;
1093
1094 if (cache == NULL)
1095 {
1096 do_cleanups (back_to);
1097 return unrecognized_pc;
1098 }
1099
1100 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1101 {
1102 /* Frame pointer is fp. Frame size is constant. */
1103 cache->framereg = ARM_FP_REGNUM;
1104 cache->framesize = -regs[ARM_FP_REGNUM].k;
1105 }
1106 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1107 {
1108 /* Frame pointer is r7. Frame size is constant. */
1109 cache->framereg = THUMB_FP_REGNUM;
1110 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1111 }
1112 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1113 {
1114 /* Try the stack pointer... this is a bit desperate. */
1115 cache->framereg = ARM_SP_REGNUM;
1116 cache->framesize = -regs[ARM_SP_REGNUM].k;
1117 }
1118 else
1119 {
1120 /* We're just out of luck. We don't know where the frame is. */
1121 cache->framereg = -1;
1122 cache->framesize = 0;
1123 }
1124
1125 for (i = 0; i < 16; i++)
1126 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1127 cache->saved_regs[i].addr = offset;
1128
1129 do_cleanups (back_to);
1130 return unrecognized_pc;
1131 }
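
/* As a worked example (a sketch of a common GCC Thumb prologue, not
   taken from any particular binary):

       push  {r7, lr}    ; LR stored at original SP - 4, r7 at SP - 8
       sub   sp, #8      ; SP becomes original SP - 16
       add   r7, sp, #0  ; r7 becomes the frame pointer

   the scan leaves regs[7] equal to the original SP minus 16, so CACHE
   is filled in with framereg = THUMB_FP_REGNUM and framesize = 16,
   and pv_area_find_reg reports LR at offset -4 and r7 at offset -8
   from the original SP.  arm_make_prologue_cache later rebases those
   offsets onto the reconstructed PREV_SP.  */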
1132
1133 /* Advance the PC across any function entry prologue instructions to
1134 reach some "real" code.
1135
1136 The APCS (ARM Procedure Call Standard) defines the following
1137 prologue:
1138
1139 mov ip, sp
1140 [stmfd sp!, {a1,a2,a3,a4}]
1141 stmfd sp!, {...,fp,ip,lr,pc}
1142 [stfe f7, [sp, #-12]!]
1143 [stfe f6, [sp, #-12]!]
1144 [stfe f5, [sp, #-12]!]
1145 [stfe f4, [sp, #-12]!]
1146 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn */
1147
1148 static CORE_ADDR
1149 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1150 {
1151 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1152 unsigned long inst;
1153 CORE_ADDR skip_pc;
1154 CORE_ADDR func_addr, limit_pc;
1155 struct symtab_and_line sal;
1156
1157 /* See if we can determine the end of the prologue via the symbol table.
1158 If so, then return either PC, or the PC after the prologue, whichever
1159 is greater. */
1160 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1161 {
1162 CORE_ADDR post_prologue_pc
1163 = skip_prologue_using_sal (gdbarch, func_addr);
1164 struct symtab *s = find_pc_symtab (func_addr);
1165
1166 /* GCC always emits a line note before the prologue and another
1167 one after, even if the two are at the same address or on the
1168 same line. Take advantage of this so that we do not need to
1169 know every instruction that might appear in the prologue. We
1170 will have producer information for most binaries; if it is
1171 missing (e.g. for -gstabs), assume the GNU tools. */
1172 if (post_prologue_pc
1173 && (s == NULL
1174 || s->producer == NULL
1175 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
1176 return post_prologue_pc;
1177
1178 if (post_prologue_pc != 0)
1179 {
1180 CORE_ADDR analyzed_limit;
1181
1182 /* For non-GCC compilers, make sure the entire line is an
1183 acceptable prologue; GDB will round this function's
1184 return value up to the end of the following line so we
1185 can not skip just part of a line (and we do not want to).
1186
1187 RealView does not treat the prologue specially, but does
1188 associate prologue code with the opening brace; so this
1189 lets us skip the first line if we think it is the opening
1190 brace. */
1191 if (arm_pc_is_thumb (gdbarch, func_addr))
1192 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1193 post_prologue_pc, NULL);
1194 else
1195 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1196 post_prologue_pc, NULL);
1197
1198 if (analyzed_limit != post_prologue_pc)
1199 return func_addr;
1200
1201 return post_prologue_pc;
1202 }
1203 }
1204
1205 /* Can't determine prologue from the symbol table, need to examine
1206 instructions. */
1207
1208 /* Find an upper limit on the function prologue using the debug
1209 information. If the debug information could not be used to provide
1210 that bound, then use an arbitrary large number as the upper bound. */
1211 /* Like arm_scan_prologue, stop no later than pc + 64. */
1212 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1213 if (limit_pc == 0)
1214 limit_pc = pc + 64; /* Magic. */
1215
1216
1217 /* Check if this is Thumb code. */
1218 if (arm_pc_is_thumb (gdbarch, pc))
1219 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1220
1221 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1222 {
1223 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1224
1225 /* "mov ip, sp" is no longer a required part of the prologue. */
1226 if (inst == 0xe1a0c00d) /* mov ip, sp */
1227 continue;
1228
1229 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1230 continue;
1231
1232 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1233 continue;
1234
1235 /* Some prologues begin with "str lr, [sp, #-4]!". */
1236 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1237 continue;
1238
1239 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1240 continue;
1241
1242 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1243 continue;
1244
1245 /* Any insns after this point may float into the code, if it makes
1246 for better instruction scheduling, so we skip them only if we
1247 find them, but still consider the function to be frame-ful. */
1248
1249 /* We may have either one sfmfd instruction here, or several stfe
1250 insns, depending on the version of floating point code we
1251 support. */
1252 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1253 continue;
1254
1255 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1256 continue;
1257
1258 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1259 continue;
1260
1261 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1262 continue;
1263
1264 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1265 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1266 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1267 continue;
1268
1269 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1270 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1271 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1272 continue;
1273
1274 /* Un-recognized instruction; stop scanning. */
1275 break;
1276 }
1277
1278 return skip_pc; /* End of prologue */
1279 }
1280
1281 /* *INDENT-OFF* */
1282 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1283 This function decodes a Thumb function prologue to determine:
1284 1) the size of the stack frame
1285 2) which registers are saved on it
1286 3) the offsets of saved regs
1287 4) the offset from the stack pointer to the frame pointer
1288
1289 A typical Thumb function prologue would create this stack frame
1290 (offsets relative to FP)
1291 old SP -> 24 stack parameters
1292 20 LR
1293 16 R7
1294 R7 -> 0 local variables (16 bytes)
1295 SP -> -12 additional stack space (12 bytes)
1296 The frame size would thus be 36 bytes, and the frame offset would be
1297 12 bytes. The frame register is R7.
1298
1299 The comments for thumb_skip_prolog() describe the algorithm we use
1300 to detect the end of the prolog. */
1301 /* *INDENT-ON* */
1302
1303 static void
1304 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1305 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1306 {
1307 CORE_ADDR prologue_start;
1308 CORE_ADDR prologue_end;
1309 CORE_ADDR current_pc;
1310
1311 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1312 &prologue_end))
1313 {
1314 /* See comment in arm_scan_prologue for an explanation of
1315 this heuristic. */
1316 if (prologue_end > prologue_start + 64)
1317 {
1318 prologue_end = prologue_start + 64;
1319 }
1320 }
1321 else
1322 /* We're in the boondocks: we have no idea where the start of the
1323 function is. */
1324 return;
1325
1326 prologue_end = min (prologue_end, prev_pc);
1327
1328 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1329 }
1330
1331 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1332
1333 static int
1334 arm_instruction_changes_pc (uint32_t this_instr)
1335 {
1336 if (bits (this_instr, 28, 31) == INST_NV)
1337 /* Unconditional instructions. */
1338 switch (bits (this_instr, 24, 27))
1339 {
1340 case 0xa:
1341 case 0xb:
1342 /* Branch with Link and change to Thumb. */
1343 return 1;
1344 case 0xc:
1345 case 0xd:
1346 case 0xe:
1347 /* Coprocessor register transfer. */
1348 if (bits (this_instr, 12, 15) == 15)
1349 error (_("Invalid update to pc in instruction"));
1350 return 0;
1351 default:
1352 return 0;
1353 }
1354 else
1355 switch (bits (this_instr, 25, 27))
1356 {
1357 case 0x0:
1358 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1359 {
1360 /* Multiplies and extra load/stores. */
1361 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1362 /* Neither multiplies nor extension load/stores are allowed
1363 to modify PC. */
1364 return 0;
1365
1366 /* Otherwise, miscellaneous instructions. */
1367
1368 /* BX <reg>, BXJ <reg>, BLX <reg> */
1369 if (bits (this_instr, 4, 27) == 0x12fff1
1370 || bits (this_instr, 4, 27) == 0x12fff2
1371 || bits (this_instr, 4, 27) == 0x12fff3)
1372 return 1;
1373
1374 /* Other miscellaneous instructions are unpredictable if they
1375 modify PC. */
1376 return 0;
1377 }
1378 /* Data processing instruction. Fall through. */
1379
1380 case 0x1:
1381 if (bits (this_instr, 12, 15) == 15)
1382 return 1;
1383 else
1384 return 0;
1385
1386 case 0x2:
1387 case 0x3:
1388 /* Media instructions and architecturally undefined instructions. */
1389 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1390 return 0;
1391
1392 /* Stores. */
1393 if (bit (this_instr, 20) == 0)
1394 return 0;
1395
1396 /* Loads. */
1397 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1398 return 1;
1399 else
1400 return 0;
1401
1402 case 0x4:
1403 /* Load/store multiple. */
1404 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1405 return 1;
1406 else
1407 return 0;
1408
1409 case 0x5:
1410 /* Branch and branch with link. */
1411 return 1;
1412
1413 case 0x6:
1414 case 0x7:
1415 /* Coprocessor transfers or SWIs can not affect PC. */
1416 return 0;
1417
1418 default:
1419 internal_error (__FILE__, __LINE__, "bad value in switch");
1420 }
1421 }
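
/* A few sample classifications under the rules above (standard ARM
   encodings, listed only for illustration):

     0xe12fff1e  bx lr                 => 1  (miscellaneous group, BX)
     0xe59ff004  ldr pc, [pc, #4]      => 1  (load with Rt == PC)
     0xe8bd8010  ldmfd sp!, {r4, pc}   => 1  (load multiple including PC)
     0xe1a0b00d  mov r11, sp           => 0  (data processing, Rd != PC)
     0xe92d4800  stmfd sp!, {r11, lr}  => 0  (store)  */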
1422
1423 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1424 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1425 fill it in. Return the first address not recognized as a prologue
1426 instruction.
1427
1428 We recognize all the instructions typically found in ARM prologues,
1429 plus harmless instructions which can be skipped (either for analysis
1430 purposes, or a more restrictive set that can be skipped when finding
1431 the end of the prologue). */
1432
1433 static CORE_ADDR
1434 arm_analyze_prologue (struct gdbarch *gdbarch,
1435 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1436 struct arm_prologue_cache *cache)
1437 {
1438 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1439 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1440 int regno;
1441 CORE_ADDR offset, current_pc;
1442 pv_t regs[ARM_FPS_REGNUM];
1443 struct pv_area *stack;
1444 struct cleanup *back_to;
1445 int framereg, framesize;
1446 CORE_ADDR unrecognized_pc = 0;
1447
1448 /* Search the prologue looking for instructions that set up the
1449 frame pointer, adjust the stack pointer, and save registers.
1450
1451 Be careful, however, and if it doesn't look like a prologue,
1452 don't try to scan it. If, for instance, a frameless function
1453 begins with stmfd sp!, then we will tell ourselves there is
1454 a frame, which will confuse stack traceback, as well as "finish"
1455 and other operations that rely on a knowledge of the stack
1456 traceback. */
1457
1458 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1459 regs[regno] = pv_register (regno, 0);
1460 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1461 back_to = make_cleanup_free_pv_area (stack);
1462
1463 for (current_pc = prologue_start;
1464 current_pc < prologue_end;
1465 current_pc += 4)
1466 {
1467 unsigned int insn
1468 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1469
1470 if (insn == 0xe1a0c00d) /* mov ip, sp */
1471 {
1472 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1473 continue;
1474 }
1475 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1476 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1477 {
1478 unsigned imm = insn & 0xff; /* immediate value */
1479 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1480 int rd = bits (insn, 12, 15);
1481 imm = (imm >> rot) | (imm << (32 - rot));
1482 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1483 continue;
1484 }
1485 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1486 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1487 {
1488 unsigned imm = insn & 0xff; /* immediate value */
1489 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1490 int rd = bits (insn, 12, 15);
1491 imm = (imm >> rot) | (imm << (32 - rot));
1492 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1493 continue;
1494 }
1495 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd, [sp, #-4]! */
1496 {
1497 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1498 break;
1499 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1500 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1501 regs[bits (insn, 12, 15)]);
1502 continue;
1503 }
1504 else if ((insn & 0xffff0000) == 0xe92d0000)
1505 /* stmfd sp!, {..., fp, ip, lr, pc}
1506 or
1507 stmfd sp!, {a1, a2, a3, a4} */
1508 {
1509 int mask = insn & 0xffff;
1510
1511 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1512 break;
1513
1514 /* Calculate offsets of saved registers. */
1515 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1516 if (mask & (1 << regno))
1517 {
1518 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1519 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1520 }
1521 }
1522 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1523 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1524 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1525 {
1526 /* No need to add this to saved_regs -- it's just an arg reg. */
1527 continue;
1528 }
1529 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1530 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1531 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1532 {
1533 /* No need to add this to saved_regs -- it's just an arg reg. */
1534 continue;
1535 }
1536 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn, { registers } */
1537 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1538 {
1539 /* No need to add this to saved_regs -- it's just arg regs. */
1540 continue;
1541 }
1542 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1543 {
1544 unsigned imm = insn & 0xff; /* immediate value */
1545 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1546 imm = (imm >> rot) | (imm << (32 - rot));
1547 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1548 }
1549 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1550 {
1551 unsigned imm = insn & 0xff; /* immediate value */
1552 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1553 imm = (imm >> rot) | (imm << (32 - rot));
1554 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1555 }
1556 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?, [sp, -#c]! */
1557 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1558 {
1559 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1560 break;
1561
1562 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1563 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1564 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1565 }
1566 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4, [sp!] */
1567 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1568 {
1569 int n_saved_fp_regs;
1570 unsigned int fp_start_reg, fp_bound_reg;
1571
1572 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1573 break;
1574
1575 if ((insn & 0x800) == 0x800) /* N0 is set */
1576 {
1577 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1578 n_saved_fp_regs = 3;
1579 else
1580 n_saved_fp_regs = 1;
1581 }
1582 else
1583 {
1584 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1585 n_saved_fp_regs = 2;
1586 else
1587 n_saved_fp_regs = 4;
1588 }
1589
1590 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1591 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1592 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1593 {
1594 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1595 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1596 regs[fp_start_reg]);
1597 }
1598 }
1599 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1600 {
1601 /* Allow some special function calls when skipping the
1602 prologue; GCC generates these before storing arguments to
1603 the stack. */
1604 CORE_ADDR dest = BranchDest (current_pc, insn);
1605
1606 if (skip_prologue_function (dest))
1607 continue;
1608 else
1609 break;
1610 }
1611 else if ((insn & 0xf0000000) != 0xe0000000)
1612 break; /* Condition not true, exit early */
1613 else if (arm_instruction_changes_pc (insn))
1614 /* Don't scan past anything that might change control flow. */
1615 break;
1616 else if ((insn & 0xfe500000) == 0xe8100000) /* ldm */
1617 {
1618 /* Ignore block loads from the stack, potentially copying
1619 parameters from memory. */
1620 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1621 continue;
1622 else
1623 break;
1624 }
1625 else if ((insn & 0xfc500000) == 0xe4100000)
1626 {
1627 /* Similarly ignore single loads from the stack. */
1628 if (pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1629 continue;
1630 else
1631 break;
1632 }
1633 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1634 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1635 register instead of the stack. */
1636 continue;
1637 else
1638 {
1639 /* The optimizer might shove anything into the prologue,
1640 so we just skip what we don't recognize. */
1641 unrecognized_pc = current_pc;
1642 continue;
1643 }
1644 }
1645
1646 if (unrecognized_pc == 0)
1647 unrecognized_pc = current_pc;
1648
1649 /* The frame size is just the distance from the frame register
1650 to the original stack pointer. */
1651 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1652 {
1653 /* Frame pointer is fp. */
1654 framereg = ARM_FP_REGNUM;
1655 framesize = -regs[ARM_FP_REGNUM].k;
1656 }
1657 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
1658 {
1659 /* Try the stack pointer... this is a bit desperate. */
1660 framereg = ARM_SP_REGNUM;
1661 framesize = -regs[ARM_SP_REGNUM].k;
1662 }
1663 else
1664 {
1665 /* We're just out of luck. We don't know where the frame is. */
1666 framereg = -1;
1667 framesize = 0;
1668 }
1669
1670 if (cache)
1671 {
1672 cache->framereg = framereg;
1673 cache->framesize = framesize;
1674
1675 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1676 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1677 cache->saved_regs[regno].addr = offset;
1678 }
1679
1680 if (arm_debug)
1681 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1682 paddress (gdbarch, unrecognized_pc));
1683
1684 do_cleanups (back_to);
1685 return unrecognized_pc;
1686 }
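
/* As a worked example, the classic APCS sequence shown in the comment
   before arm_skip_prologue above:

       mov   ip, sp
       stmfd sp!, {fp, ip, lr, pc}  ; PC at SP-4, LR at SP-8, IP at SP-12, FP at SP-16
       sub   fp, ip, #4             ; fp = original SP - 4

   leaves regs[ARM_FP_REGNUM] equal to the original SP minus 4, so the
   scan records framereg = ARM_FP_REGNUM and framesize = 4, with FP,
   IP, LR and PC at offsets -16, -12, -8 and -4 from the original SP.
   arm_make_prologue_cache then reconstructs PREV_SP as fp + 4 (the
   caller's SP) and turns those offsets into absolute addresses.  */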
1687
1688 static void
1689 arm_scan_prologue (struct frame_info *this_frame,
1690 struct arm_prologue_cache *cache)
1691 {
1692 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1693 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1694 int regno;
1695 CORE_ADDR prologue_start, prologue_end, current_pc;
1696 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1697 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1698 pv_t regs[ARM_FPS_REGNUM];
1699 struct pv_area *stack;
1700 struct cleanup *back_to;
1701 CORE_ADDR offset;
1702
1703 /* Assume there is no frame until proven otherwise. */
1704 cache->framereg = ARM_SP_REGNUM;
1705 cache->framesize = 0;
1706
1707 /* Check for Thumb prologue. */
1708 if (arm_frame_is_thumb (this_frame))
1709 {
1710 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1711 return;
1712 }
1713
1714 /* Find the function prologue. If we can't find the function in
1715 the symbol table, peek in the stack frame to find the PC. */
1716 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1717 &prologue_end))
1718 {
1719 /* One way to find the end of the prologue (which works well
1720 for unoptimized code) is to do the following:
1721
1722 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1723
1724 if (sal.line == 0)
1725 prologue_end = prev_pc;
1726 else if (sal.end < prologue_end)
1727 prologue_end = sal.end;
1728
1729 This mechanism is very accurate so long as the optimizer
1730 doesn't move any instructions from the function body into the
1731 prologue. If this happens, sal.end will be the last
1732 instruction in the first hunk of prologue code just before
1733 the first instruction that the scheduler has moved from
1734 the body to the prologue.
1735
1736 In order to make sure that we scan all of the prologue
1737 instructions, we use a slightly less accurate mechanism which
1738 may scan more than necessary. To help compensate for this
1739 lack of accuracy, the prologue scanning loop below contains
1740 several clauses which'll cause the loop to terminate early if
1741 an implausible prologue instruction is encountered.
1742
1743 The expression
1744
1745 prologue_start + 64
1746
1747 is a suitable endpoint since it accounts for the largest
1748 possible prologue plus up to five instructions inserted by
1749 the scheduler. */
1750
1751 if (prologue_end > prologue_start + 64)
1752 {
1753 prologue_end = prologue_start + 64; /* See above. */
1754 }
1755 }
1756 else
1757 {
1758 /* We have no symbol information. Our only option is to assume this
1759 function has a standard stack frame and the normal frame register.
1760 Then, we can find the value of our frame pointer on entrance to
1761 the callee (or at the present moment if this is the innermost frame).
1762 The value stored there should be the address of the stmfd + 8. */
1763 CORE_ADDR frame_loc;
1764 LONGEST return_value;
1765
1766 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1767 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1768 return;
1769 else
1770 {
1771 prologue_start = gdbarch_addr_bits_remove
1772 (gdbarch, return_value) - 8;
1773 prologue_end = prologue_start + 64; /* See above. */
1774 }
1775 }
1776
1777 if (prev_pc < prologue_end)
1778 prologue_end = prev_pc;
1779
1780 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1781 }
1782
1783 static struct arm_prologue_cache *
1784 arm_make_prologue_cache (struct frame_info *this_frame)
1785 {
1786 int reg;
1787 struct arm_prologue_cache *cache;
1788 CORE_ADDR unwound_fp;
1789
1790 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1791 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1792
1793 arm_scan_prologue (this_frame, cache);
1794
1795 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1796 if (unwound_fp == 0)
1797 return cache;
1798
1799 cache->prev_sp = unwound_fp + cache->framesize;
1800
1801 /* Calculate actual addresses of saved registers using offsets
1802 determined by arm_scan_prologue. */
1803 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1804 if (trad_frame_addr_p (cache->saved_regs, reg))
1805 cache->saved_regs[reg].addr += cache->prev_sp;
1806
1807 return cache;
1808 }
1809
1810 /* Our frame ID for a normal frame is the current function's starting PC
1811 and the caller's SP when we were called. */
1812
1813 static void
1814 arm_prologue_this_id (struct frame_info *this_frame,
1815 void **this_cache,
1816 struct frame_id *this_id)
1817 {
1818 struct arm_prologue_cache *cache;
1819 struct frame_id id;
1820 CORE_ADDR pc, func;
1821
1822 if (*this_cache == NULL)
1823 *this_cache = arm_make_prologue_cache (this_frame);
1824 cache = *this_cache;
1825
1826 /* This is meant to halt the backtrace at "_start". */
1827 pc = get_frame_pc (this_frame);
1828 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1829 return;
1830
1831 /* If we've hit a wall, stop. */
1832 if (cache->prev_sp == 0)
1833 return;
1834
1835 func = get_frame_func (this_frame);
1836 id = frame_id_build (cache->prev_sp, func);
1837 *this_id = id;
1838 }
1839
1840 static struct value *
1841 arm_prologue_prev_register (struct frame_info *this_frame,
1842 void **this_cache,
1843 int prev_regnum)
1844 {
1845 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1846 struct arm_prologue_cache *cache;
1847
1848 if (*this_cache == NULL)
1849 *this_cache = arm_make_prologue_cache (this_frame);
1850 cache = *this_cache;
1851
1852 /* If we are asked to unwind the PC, then we need to return the LR
1853 instead. The prologue may save PC, but it will point into this
1854 frame's prologue, not the next frame's resume location. Also
1855 strip the saved T bit. A valid LR may have the low bit set, but
1856 a valid PC never does. */
1857 if (prev_regnum == ARM_PC_REGNUM)
1858 {
1859 CORE_ADDR lr;
1860
1861 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1862 return frame_unwind_got_constant (this_frame, prev_regnum,
1863 arm_addr_bits_remove (gdbarch, lr));
1864 }
1865
1866 /* SP is generally not saved to the stack, but this frame is
1867 identified by the next frame's stack pointer at the time of the call.
1868 The value was already reconstructed into PREV_SP. */
1869 if (prev_regnum == ARM_SP_REGNUM)
1870 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1871
1872 /* The CPSR may have been changed by the call instruction and by the
1873 called function. The only bit we can reconstruct is the T bit,
1874 by checking the low bit of LR as of the call. This is a reliable
1875 indicator of Thumb-ness except for some ARM v4T pre-interworking
1876 Thumb code, which could get away with a clear low bit as long as
1877 the called function did not use bx. Guess that all other
1878 bits are unchanged; the condition flags are presumably lost,
1879 but the processor status is likely valid. */
1880 if (prev_regnum == ARM_PS_REGNUM)
1881 {
1882 CORE_ADDR lr, cpsr;
1883 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
1884
1885 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1886 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1887 if (IS_THUMB_ADDR (lr))
1888 cpsr |= t_bit;
1889 else
1890 cpsr &= ~t_bit;
1891 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1892 }
1893
1894 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1895 prev_regnum);
1896 }
1897
1898 struct frame_unwind arm_prologue_unwind = {
1899 NORMAL_FRAME,
1900 arm_prologue_this_id,
1901 arm_prologue_prev_register,
1902 NULL,
1903 default_frame_sniffer
1904 };
1905
1906 static struct arm_prologue_cache *
1907 arm_make_stub_cache (struct frame_info *this_frame)
1908 {
1909 struct arm_prologue_cache *cache;
1910
1911 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1912 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1913
1914 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
1915
1916 return cache;
1917 }
1918
1919 /* Our frame ID for a stub frame is the current SP and PC.  */
1920
1921 static void
1922 arm_stub_this_id (struct frame_info *this_frame,
1923 void **this_cache,
1924 struct frame_id *this_id)
1925 {
1926 struct arm_prologue_cache *cache;
1927
1928 if (*this_cache == NULL)
1929 *this_cache = arm_make_stub_cache (this_frame);
1930 cache = *this_cache;
1931
1932 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
1933 }
1934
1935 static int
1936 arm_stub_unwind_sniffer (const struct frame_unwind *self,
1937 struct frame_info *this_frame,
1938 void **this_prologue_cache)
1939 {
1940 CORE_ADDR addr_in_block;
1941 char dummy[4];
1942
1943 addr_in_block = get_frame_address_in_block (this_frame);
1944 if (in_plt_section (addr_in_block, NULL)
1945       /* We also use the stub unwinder if the target memory is unreadable
1946 	 to avoid having the prologue unwinder try to read it.  */
1947 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1948 return 1;
1949
1950 return 0;
1951 }
1952
1953 struct frame_unwind arm_stub_unwind = {
1954 NORMAL_FRAME,
1955 arm_stub_this_id,
1956 arm_prologue_prev_register,
1957 NULL,
1958 arm_stub_unwind_sniffer
1959 };
1960
1961 static CORE_ADDR
1962 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1963 {
1964 struct arm_prologue_cache *cache;
1965
1966 if (*this_cache == NULL)
1967 *this_cache = arm_make_prologue_cache (this_frame);
1968 cache = *this_cache;
1969
1970 return cache->prev_sp - cache->framesize;
1971 }
1972
1973 struct frame_base arm_normal_base = {
1974 &arm_prologue_unwind,
1975 arm_normal_frame_base,
1976 arm_normal_frame_base,
1977 arm_normal_frame_base
1978 };
1979
1980 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1981 dummy frame. The frame ID's base needs to match the TOS value
1982 saved by save_dummy_frame_tos() and returned from
1983 arm_push_dummy_call, and the PC needs to match the dummy frame's
1984 breakpoint. */
1985
1986 static struct frame_id
1987 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1988 {
1989 return frame_id_build (get_frame_register_unsigned (this_frame, ARM_SP_REGNUM),
1990 get_frame_pc (this_frame));
1991 }
1992
1993 /* Given THIS_FRAME, find the previous frame's resume PC (which will
1994 be used to construct the previous frame's ID, after looking up the
1995 containing function). */
1996
1997 static CORE_ADDR
1998 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1999 {
2000 CORE_ADDR pc;
2001 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2002 return arm_addr_bits_remove (gdbarch, pc);
2003 }
2004
2005 static CORE_ADDR
2006 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2007 {
2008 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2009 }
2010
2011 static struct value *
2012 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2013 int regnum)
2014 {
2015 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2016 CORE_ADDR lr, cpsr;
2017 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2018
2019 switch (regnum)
2020 {
2021 case ARM_PC_REGNUM:
2022 /* The PC is normally copied from the return column, which
2023 describes saves of LR. However, that version may have an
2024 extra bit set to indicate Thumb state. The bit is not
2025 part of the PC. */
2026 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2027 return frame_unwind_got_constant (this_frame, regnum,
2028 arm_addr_bits_remove (gdbarch, lr));
2029
2030 case ARM_PS_REGNUM:
2031 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
2032 cpsr = get_frame_register_unsigned (this_frame, regnum);
2033 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2034 if (IS_THUMB_ADDR (lr))
2035 cpsr |= t_bit;
2036 else
2037 cpsr &= ~t_bit;
2038 return frame_unwind_got_constant (this_frame, regnum, cpsr);
2039
2040 default:
2041 internal_error (__FILE__, __LINE__,
2042 _("Unexpected register %d"), regnum);
2043 }
2044 }
2045
2046 static void
2047 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
2048 struct dwarf2_frame_state_reg *reg,
2049 struct frame_info *this_frame)
2050 {
2051 switch (regnum)
2052 {
2053 case ARM_PC_REGNUM:
2054 case ARM_PS_REGNUM:
2055 reg->how = DWARF2_FRAME_REG_FN;
2056 reg->loc.fn = arm_dwarf2_prev_register;
2057 break;
2058 case ARM_SP_REGNUM:
2059 reg->how = DWARF2_FRAME_REG_CFA;
2060 break;
2061 }
2062 }
2063
2064 /* Return true if we are in the function's epilogue, i.e. after the
2065 instruction that destroyed the function's stack frame. */
2066
2067 static int
2068 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2069 {
2070 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2071 unsigned int insn, insn2;
2072 int found_return = 0, found_stack_adjust = 0;
2073 CORE_ADDR func_start, func_end;
2074 CORE_ADDR scan_pc;
2075 gdb_byte buf[4];
2076
2077 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2078 return 0;
2079
2080 /* The epilogue is a sequence of instructions along the following lines:
2081
2082 - add stack frame size to SP or FP
2083 - [if frame pointer used] restore SP from FP
2084 - restore registers from SP [may include PC]
2085 - a return-type instruction [if PC wasn't already restored]
2086
2087      In a first pass, we scan forward from the current PC and check that
2088      the instructions we find are compatible with this sequence, ending
2089      in a return instruction.
2090
2091 However, this is not sufficient to distinguish indirect function calls
2092 within a function from indirect tail calls in the epilogue in some cases.
2093 Therefore, if we didn't already find any SP-changing instruction during
2094 forward scan, we add a backward scanning heuristic to ensure we actually
2095 are in the epilogue. */
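     /* For example, a typical frame-pointer-less Thumb epilogue might be:

	  add	sp, #8		; discard local variables
	  pop	{r4, r5, pc}	; restore callee-saved registers and return

	while a frame-pointer-based epilogue would first restore SP from r7
	("mov sp, r7") before the pop.  */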
2096
2097 scan_pc = pc;
2098 while (scan_pc < func_end && !found_return)
2099 {
2100 if (target_read_memory (scan_pc, buf, 2))
2101 break;
2102
2103 scan_pc += 2;
2104 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2105
2106 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2107 found_return = 1;
2108 else if (insn == 0x46f7) /* mov pc, lr */
2109 found_return = 1;
2110 else if (insn == 0x46bd) /* mov sp, r7 */
2111 found_stack_adjust = 1;
2112 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2113 found_stack_adjust = 1;
2114 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
2115 {
2116 found_stack_adjust = 1;
2117 if (insn & 0x0100) /* <registers> include PC. */
2118 found_return = 1;
2119 }
2120 else if ((insn & 0xe000) == 0xe000) /* 32-bit Thumb-2 instruction */
2121 {
2122 if (target_read_memory (scan_pc, buf, 2))
2123 break;
2124
2125 scan_pc += 2;
2126 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2127
2128 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2129 {
2130 found_stack_adjust = 1;
2131 if (insn2 & 0x8000) /* <registers> include PC. */
2132 found_return = 1;
2133 }
2134 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2135 && (insn2 & 0x0fff) == 0x0b04)
2136 {
2137 found_stack_adjust = 1;
2138 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
2139 found_return = 1;
2140 }
2141 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2142 && (insn2 & 0x0e00) == 0x0a00)
2143 found_stack_adjust = 1;
2144 else
2145 break;
2146 }
2147 else
2148 break;
2149 }
2150
2151 if (!found_return)
2152 return 0;
2153
2154 /* Since any instruction in the epilogue sequence, with the possible
2155 exception of return itself, updates the stack pointer, we need to
2156 scan backwards for at most one instruction. Try either a 16-bit or
2157 a 32-bit instruction. This is just a heuristic, so we do not worry
2158      too much about false positives.  */
2159
2160 if (!found_stack_adjust)
2161 {
2162 if (pc - 4 < func_start)
2163 return 0;
2164 if (target_read_memory (pc - 4, buf, 4))
2165 return 0;
2166
2167 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
2168 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
2169
2170 if (insn2 == 0x46bd) /* mov sp, r7 */
2171 found_stack_adjust = 1;
2172 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
2173 found_stack_adjust = 1;
2174 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
2175 found_stack_adjust = 1;
2176 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
2177 found_stack_adjust = 1;
2178 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
2179 && (insn2 & 0x0fff) == 0x0b04)
2180 found_stack_adjust = 1;
2181 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
2182 && (insn2 & 0x0e00) == 0x0a00)
2183 found_stack_adjust = 1;
2184 }
2185
2186 return found_stack_adjust;
2187 }
2188
2189 /* Return true if we are in the function's epilogue, i.e. after the
2190 instruction that destroyed the function's stack frame. */
2191
2192 static int
2193 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2194 {
2195 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2196 unsigned int insn;
2197 int found_return, found_stack_adjust;
2198 CORE_ADDR func_start, func_end;
2199
2200 if (arm_pc_is_thumb (gdbarch, pc))
2201 return thumb_in_function_epilogue_p (gdbarch, pc);
2202
2203 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
2204 return 0;
2205
2206 /* We are in the epilogue if the previous instruction was a stack
2207 adjustment and the next instruction is a possible return (bx, mov
2208 pc, or pop). We could have to scan backwards to find the stack
2209 adjustment, or forwards to find the return, but this is a decent
2210 approximation. First scan forwards. */
2211
2212 found_return = 0;
2213 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
2214 if (bits (insn, 28, 31) != INST_NV)
2215 {
2216 if ((insn & 0x0ffffff0) == 0x012fff10)
2217 /* BX. */
2218 found_return = 1;
2219 else if ((insn & 0x0ffffff0) == 0x01a0f000)
2220 /* MOV PC. */
2221 found_return = 1;
2222 else if ((insn & 0x0fff0000) == 0x08bd0000
2223 && (insn & 0x0000c000) != 0)
2224 /* POP (LDMIA), including PC or LR. */
2225 found_return = 1;
2226 }
2227
2228 if (!found_return)
2229 return 0;
2230
2231 /* Scan backwards. This is just a heuristic, so do not worry about
2232 false positives from mode changes. */
2233
2234 if (pc < func_start + 4)
2235 return 0;
2236
2237 found_stack_adjust = 0;
2238 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
2239 if (bits (insn, 28, 31) != INST_NV)
2240 {
2241 if ((insn & 0x0df0f000) == 0x0080d000)
2242 /* ADD SP (register or immediate). */
2243 found_stack_adjust = 1;
2244 else if ((insn & 0x0df0f000) == 0x0040d000)
2245 /* SUB SP (register or immediate). */
2246 found_stack_adjust = 1;
2247 else if ((insn & 0x0ffffff0) == 0x01a0d000)
2248 /* MOV SP. */
2249 found_stack_adjust = 1;
2250 else if ((insn & 0x0fff0000) == 0x08bd0000)
2251 /* POP (LDMIA). */
2252 found_stack_adjust = 1;
2253 }
2254
2255 if (found_stack_adjust)
2256 return 1;
2257
2258 return 0;
2259 }
2260
2261
2262 /* When arguments must be pushed onto the stack, they go on in reverse
2263 order. The code below implements a FILO (stack) to do this. */
2264
2265 struct stack_item
2266 {
2267 int len;
2268 struct stack_item *prev;
2269 void *data;
2270 };
2271
2272 static struct stack_item *
2273 push_stack_item (struct stack_item *prev, const void *contents, int len)
2274 {
2275 struct stack_item *si;
2276 si = xmalloc (sizeof (struct stack_item));
2277 si->data = xmalloc (len);
2278 si->len = len;
2279 si->prev = prev;
2280 memcpy (si->data, contents, len);
2281 return si;
2282 }
2283
2284 static struct stack_item *
2285 pop_stack_item (struct stack_item *si)
2286 {
2287 struct stack_item *dead = si;
2288 si = si->prev;
2289 xfree (dead->data);
2290 xfree (dead);
2291 return si;
2292 }
2293
2294
2295 /* Return the alignment (in bytes) of the given type. */
2296
2297 static int
2298 arm_type_align (struct type *t)
2299 {
2300 int n;
2301 int align;
2302 int falign;
2303
2304 t = check_typedef (t);
2305 switch (TYPE_CODE (t))
2306 {
2307 default:
2308 /* Should never happen. */
2309 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
2310 return 4;
2311
2312 case TYPE_CODE_PTR:
2313 case TYPE_CODE_ENUM:
2314 case TYPE_CODE_INT:
2315 case TYPE_CODE_FLT:
2316 case TYPE_CODE_SET:
2317 case TYPE_CODE_RANGE:
2318 case TYPE_CODE_BITSTRING:
2319 case TYPE_CODE_REF:
2320 case TYPE_CODE_CHAR:
2321 case TYPE_CODE_BOOL:
2322 return TYPE_LENGTH (t);
2323
2324 case TYPE_CODE_ARRAY:
2325 case TYPE_CODE_COMPLEX:
2326 /* TODO: What about vector types? */
2327 return arm_type_align (TYPE_TARGET_TYPE (t));
2328
2329 case TYPE_CODE_STRUCT:
2330 case TYPE_CODE_UNION:
2331 align = 1;
2332 for (n = 0; n < TYPE_NFIELDS (t); n++)
2333 {
2334 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
2335 if (falign > align)
2336 align = falign;
2337 }
2338 return align;
2339 }
2340 }
2341
2342 /* Possible base types for a candidate for passing and returning in
2343 VFP registers. */
2344
2345 enum arm_vfp_cprc_base_type
2346 {
2347 VFP_CPRC_UNKNOWN,
2348 VFP_CPRC_SINGLE,
2349 VFP_CPRC_DOUBLE,
2350 VFP_CPRC_VEC64,
2351 VFP_CPRC_VEC128
2352 };
2353
2354 /* The length of one element of base type B. */
2355
2356 static unsigned
2357 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
2358 {
2359 switch (b)
2360 {
2361 case VFP_CPRC_SINGLE:
2362 return 4;
2363 case VFP_CPRC_DOUBLE:
2364 return 8;
2365 case VFP_CPRC_VEC64:
2366 return 8;
2367 case VFP_CPRC_VEC128:
2368 return 16;
2369 default:
2370 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2371 (int) b);
2372 }
2373 }
2374
2375 /* The character ('s', 'd' or 'q') for the type of VFP register used
2376 for passing base type B. */
2377
2378 static int
2379 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
2380 {
2381 switch (b)
2382 {
2383 case VFP_CPRC_SINGLE:
2384 return 's';
2385 case VFP_CPRC_DOUBLE:
2386 return 'd';
2387 case VFP_CPRC_VEC64:
2388 return 'd';
2389 case VFP_CPRC_VEC128:
2390 return 'q';
2391 default:
2392 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
2393 (int) b);
2394 }
2395 }
2396
2397 /* Determine whether T may be part of a candidate for passing and
2398 returning in VFP registers, ignoring the limit on the total number
2399 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
2400 classification of the first valid component found; if it is not
2401 VFP_CPRC_UNKNOWN, all components must have the same classification
2402 as *BASE_TYPE. If it is found that T contains a type not permitted
2403 for passing and returning in VFP registers, a type differently
2404 classified from *BASE_TYPE, or two types differently classified
2405 from each other, return -1, otherwise return the total number of
2406 base-type elements found (possibly 0 in an empty structure or
2407 array). Vectors and complex types are not currently supported,
2408 matching the generic AAPCS support. */
2409
2410 static int
2411 arm_vfp_cprc_sub_candidate (struct type *t,
2412 enum arm_vfp_cprc_base_type *base_type)
2413 {
2414 t = check_typedef (t);
2415 switch (TYPE_CODE (t))
2416 {
2417 case TYPE_CODE_FLT:
2418 switch (TYPE_LENGTH (t))
2419 {
2420 case 4:
2421 if (*base_type == VFP_CPRC_UNKNOWN)
2422 *base_type = VFP_CPRC_SINGLE;
2423 else if (*base_type != VFP_CPRC_SINGLE)
2424 return -1;
2425 return 1;
2426
2427 case 8:
2428 if (*base_type == VFP_CPRC_UNKNOWN)
2429 *base_type = VFP_CPRC_DOUBLE;
2430 else if (*base_type != VFP_CPRC_DOUBLE)
2431 return -1;
2432 return 1;
2433
2434 default:
2435 return -1;
2436 }
2437 break;
2438
2439 case TYPE_CODE_ARRAY:
2440 {
2441 int count;
2442 unsigned unitlen;
2443 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
2444 if (count == -1)
2445 return -1;
2446 if (TYPE_LENGTH (t) == 0)
2447 {
2448 gdb_assert (count == 0);
2449 return 0;
2450 }
2451 else if (count == 0)
2452 return -1;
2453 unitlen = arm_vfp_cprc_unit_length (*base_type);
2454 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
2455 return TYPE_LENGTH (t) / unitlen;
2456 }
2457 break;
2458
2459 case TYPE_CODE_STRUCT:
2460 {
2461 int count = 0;
2462 unsigned unitlen;
2463 int i;
2464 for (i = 0; i < TYPE_NFIELDS (t); i++)
2465 {
2466 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2467 base_type);
2468 if (sub_count == -1)
2469 return -1;
2470 count += sub_count;
2471 }
2472 if (TYPE_LENGTH (t) == 0)
2473 {
2474 gdb_assert (count == 0);
2475 return 0;
2476 }
2477 else if (count == 0)
2478 return -1;
2479 unitlen = arm_vfp_cprc_unit_length (*base_type);
2480 if (TYPE_LENGTH (t) != unitlen * count)
2481 return -1;
2482 return count;
2483 }
2484
2485 case TYPE_CODE_UNION:
2486 {
2487 int count = 0;
2488 unsigned unitlen;
2489 int i;
2490 for (i = 0; i < TYPE_NFIELDS (t); i++)
2491 {
2492 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
2493 base_type);
2494 if (sub_count == -1)
2495 return -1;
2496 count = (count > sub_count ? count : sub_count);
2497 }
2498 if (TYPE_LENGTH (t) == 0)
2499 {
2500 gdb_assert (count == 0);
2501 return 0;
2502 }
2503 else if (count == 0)
2504 return -1;
2505 unitlen = arm_vfp_cprc_unit_length (*base_type);
2506 if (TYPE_LENGTH (t) != unitlen * count)
2507 return -1;
2508 return count;
2509 }
2510
2511 default:
2512 break;
2513 }
2514
2515 return -1;
2516 }
2517
2518 /* Determine whether T is a VFP co-processor register candidate (CPRC)
2519 if passed to or returned from a non-variadic function with the VFP
2520 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
2521 *BASE_TYPE to the base type for T and *COUNT to the number of
2522 elements of that base type before returning. */
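/* For example, a structure containing two doubles is a CPRC with base
   type VFP_CPRC_DOUBLE and a count of 2, while a structure mixing a
   float with a double, or an array of more than four doubles, is not
   a CPRC and is passed under the base AAPCS rules instead.  */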
2523
2524 static int
2525 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
2526 int *count)
2527 {
2528 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
2529 int c = arm_vfp_cprc_sub_candidate (t, &b);
2530 if (c <= 0 || c > 4)
2531 return 0;
2532 *base_type = b;
2533 *count = c;
2534 return 1;
2535 }
2536
2537 /* Return 1 if the VFP ABI should be used for passing arguments to and
2538 returning values from a function of type FUNC_TYPE, 0
2539 otherwise. */
2540
2541 static int
2542 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
2543 {
2544 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2545 /* Variadic functions always use the base ABI. Assume that functions
2546 without debug info are not variadic. */
2547 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
2548 return 0;
2549 /* The VFP ABI is only supported as a variant of AAPCS. */
2550 if (tdep->arm_abi != ARM_ABI_AAPCS)
2551 return 0;
2552 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
2553 }
2554
2555 /* We currently support passing parameters in integer registers, which
2556    conforms to GCC's default model, and in VFP registers following the
2557    VFP variant of the AAPCS.  Several other variants exist and we
2558    should probably support some of them based on the selected ABI.  */
2559
2560 static CORE_ADDR
2561 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
2562 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
2563 struct value **args, CORE_ADDR sp, int struct_return,
2564 CORE_ADDR struct_addr)
2565 {
2566 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2567 int argnum;
2568 int argreg;
2569 int nstack;
2570 struct stack_item *si = NULL;
2571 int use_vfp_abi;
2572 struct type *ftype;
2573 unsigned vfp_regs_free = (1 << 16) - 1;
2574
2575 /* Determine the type of this function and whether the VFP ABI
2576 applies. */
2577 ftype = check_typedef (value_type (function));
2578 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
2579 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
2580 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
2581
2582 /* Set the return address. For the ARM, the return breakpoint is
2583 always at BP_ADDR. */
2584 if (arm_pc_is_thumb (gdbarch, bp_addr))
2585 bp_addr |= 1;
2586 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
2587
2588 /* Walk through the list of args and determine how large a temporary
2589 stack is required. Need to take care here as structs may be
2590      passed on the stack, and we have to push them.  */
2591 nstack = 0;
2592
2593 argreg = ARM_A1_REGNUM;
2594 nstack = 0;
2595
2596 /* The struct_return pointer occupies the first parameter
2597 passing register. */
2598 if (struct_return)
2599 {
2600 if (arm_debug)
2601 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
2602 gdbarch_register_name (gdbarch, argreg),
2603 paddress (gdbarch, struct_addr));
2604 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
2605 argreg++;
2606 }
2607
2608 for (argnum = 0; argnum < nargs; argnum++)
2609 {
2610 int len;
2611 struct type *arg_type;
2612 struct type *target_type;
2613 enum type_code typecode;
2614 const bfd_byte *val;
2615 int align;
2616 enum arm_vfp_cprc_base_type vfp_base_type;
2617 int vfp_base_count;
2618 int may_use_core_reg = 1;
2619
2620 arg_type = check_typedef (value_type (args[argnum]));
2621 len = TYPE_LENGTH (arg_type);
2622 target_type = TYPE_TARGET_TYPE (arg_type);
2623 typecode = TYPE_CODE (arg_type);
2624 val = value_contents (args[argnum]);
2625
2626 align = arm_type_align (arg_type);
2627 /* Round alignment up to a whole number of words. */
2628 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
2629 /* Different ABIs have different maximum alignments. */
2630 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
2631 {
2632 /* The APCS ABI only requires word alignment. */
2633 align = INT_REGISTER_SIZE;
2634 }
2635 else
2636 {
2637 /* The AAPCS requires at most doubleword alignment. */
2638 if (align > INT_REGISTER_SIZE * 2)
2639 align = INT_REGISTER_SIZE * 2;
2640 }
2641
2642 if (use_vfp_abi
2643 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
2644 &vfp_base_count))
2645 {
2646 int regno;
2647 int unit_length;
2648 int shift;
2649 unsigned mask;
2650
2651 /* Because this is a CPRC it cannot go in a core register or
2652 cause a core register to be skipped for alignment.
2653 Either it goes in VFP registers and the rest of this loop
2654 iteration is skipped for this argument, or it goes on the
2655 stack (and the stack alignment code is correct for this
2656 case). */
2657 may_use_core_reg = 0;
2658
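	  /* Illustrative example: for a CPRC of two doubles, UNIT_LENGTH
	     is 8, so SHIFT is 2 and MASK is 0xf, and the loop below looks
	     for two consecutive free D registers (four consecutive
	     single-precision slots) in VFP_REGS_FREE, stepping one D
	     register at a time.  */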
2659 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
2660 shift = unit_length / 4;
2661 mask = (1 << (shift * vfp_base_count)) - 1;
2662 for (regno = 0; regno < 16; regno += shift)
2663 if (((vfp_regs_free >> regno) & mask) == mask)
2664 break;
2665
2666 if (regno < 16)
2667 {
2668 int reg_char;
2669 int reg_scaled;
2670 int i;
2671
2672 vfp_regs_free &= ~(mask << regno);
2673 reg_scaled = regno / shift;
2674 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
2675 for (i = 0; i < vfp_base_count; i++)
2676 {
2677 char name_buf[4];
2678 int regnum;
2679 if (reg_char == 'q')
2680 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
2681 val + i * unit_length);
2682 else
2683 {
2684 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
2685 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
2686 strlen (name_buf));
2687 regcache_cooked_write (regcache, regnum,
2688 val + i * unit_length);
2689 }
2690 }
2691 continue;
2692 }
2693 else
2694 {
2695 /* This CPRC could not go in VFP registers, so all VFP
2696 registers are now marked as used. */
2697 vfp_regs_free = 0;
2698 }
2699 }
2700
2701       /* Push stack padding for doubleword alignment.  */
2702 if (nstack & (align - 1))
2703 {
2704 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2705 nstack += INT_REGISTER_SIZE;
2706 }
2707
2708 /* Doubleword aligned quantities must go in even register pairs. */
2709 if (may_use_core_reg
2710 && argreg <= ARM_LAST_ARG_REGNUM
2711 && align > INT_REGISTER_SIZE
2712 && argreg & 1)
2713 argreg++;
2714
2715 /* If the argument is a pointer to a function, and it is a
2716 Thumb function, create a LOCAL copy of the value and set
2717 the THUMB bit in it. */
2718 if (TYPE_CODE_PTR == typecode
2719 && target_type != NULL
2720 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
2721 {
2722 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
2723 if (arm_pc_is_thumb (gdbarch, regval))
2724 {
2725 bfd_byte *copy = alloca (len);
2726 store_unsigned_integer (copy, len, byte_order,
2727 MAKE_THUMB_ADDR (regval));
2728 val = copy;
2729 }
2730 }
2731
2732 /* Copy the argument to general registers or the stack in
2733 register-sized pieces. Large arguments are split between
2734 registers and stack. */
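      /* For example, with r2 as the next free argument register, a
	 12-byte argument is passed as two words in r2 and r3 plus one
	 word pushed onto the stack.  */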
2735 while (len > 0)
2736 {
2737 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
2738
2739 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
2740 {
2741 /* The argument is being passed in a general purpose
2742 register. */
2743 CORE_ADDR regval
2744 = extract_unsigned_integer (val, partial_len, byte_order);
2745 if (byte_order == BFD_ENDIAN_BIG)
2746 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
2747 if (arm_debug)
2748 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
2749 argnum,
2750 gdbarch_register_name
2751 (gdbarch, argreg),
2752 phex (regval, INT_REGISTER_SIZE));
2753 regcache_cooked_write_unsigned (regcache, argreg, regval);
2754 argreg++;
2755 }
2756 else
2757 {
2758 /* Push the arguments onto the stack. */
2759 if (arm_debug)
2760 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
2761 argnum, nstack);
2762 si = push_stack_item (si, val, INT_REGISTER_SIZE);
2763 nstack += INT_REGISTER_SIZE;
2764 }
2765
2766 len -= partial_len;
2767 val += partial_len;
2768 }
2769 }
2770   /* If we have an odd number of words to push, then decrement the stack
2771      pointer by one word now, so that the first stack argument will be dword aligned.  */
2772 if (nstack & 4)
2773 sp -= 4;
2774
2775 while (si)
2776 {
2777 sp -= si->len;
2778 write_memory (sp, si->data, si->len);
2779 si = pop_stack_item (si);
2780 }
2781
2782   /* Finally, update the SP register.  */
2783 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
2784
2785 return sp;
2786 }
2787
2788
2789 /* Always align the frame to an 8-byte boundary. This is required on
2790 some platforms and harmless on the rest. */
2791
2792 static CORE_ADDR
2793 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2794 {
2795 /* Align the stack to eight bytes. */
2796 return sp & ~ (CORE_ADDR) 7;
2797 }
2798
2799 static void
2800 print_fpu_flags (int flags)
2801 {
2802 if (flags & (1 << 0))
2803 fputs ("IVO ", stdout);
2804 if (flags & (1 << 1))
2805 fputs ("DVZ ", stdout);
2806 if (flags & (1 << 2))
2807 fputs ("OFL ", stdout);
2808 if (flags & (1 << 3))
2809 fputs ("UFL ", stdout);
2810 if (flags & (1 << 4))
2811 fputs ("INX ", stdout);
2812 putchar ('\n');
2813 }
2814
2815 /* Print interesting information about the floating point processor
2816 (if present) or emulator. */
2817 static void
2818 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
2819 struct frame_info *frame, const char *args)
2820 {
2821 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
2822 int type;
2823
2824 type = (status >> 24) & 127;
2825 if (status & (1 << 31))
2826 printf (_("Hardware FPU type %d\n"), type);
2827 else
2828 printf (_("Software FPU type %d\n"), type);
2829 /* i18n: [floating point unit] mask */
2830 fputs (_("mask: "), stdout);
2831 print_fpu_flags (status >> 16);
2832 /* i18n: [floating point unit] flags */
2833 fputs (_("flags: "), stdout);
2834 print_fpu_flags (status);
2835 }
2836
2837 /* Construct the ARM extended floating point type. */
2838 static struct type *
2839 arm_ext_type (struct gdbarch *gdbarch)
2840 {
2841 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2842
2843 if (!tdep->arm_ext_type)
2844 tdep->arm_ext_type
2845 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
2846 floatformats_arm_ext);
2847
2848 return tdep->arm_ext_type;
2849 }
2850
2851 static struct type *
2852 arm_neon_double_type (struct gdbarch *gdbarch)
2853 {
2854 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2855
2856 if (tdep->neon_double_type == NULL)
2857 {
2858 struct type *t, *elem;
2859
2860 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
2861 TYPE_CODE_UNION);
2862 elem = builtin_type (gdbarch)->builtin_uint8;
2863 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
2864 elem = builtin_type (gdbarch)->builtin_uint16;
2865 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
2866 elem = builtin_type (gdbarch)->builtin_uint32;
2867 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
2868 elem = builtin_type (gdbarch)->builtin_uint64;
2869 append_composite_type_field (t, "u64", elem);
2870 elem = builtin_type (gdbarch)->builtin_float;
2871 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
2872 elem = builtin_type (gdbarch)->builtin_double;
2873 append_composite_type_field (t, "f64", elem);
2874
2875 TYPE_VECTOR (t) = 1;
2876 TYPE_NAME (t) = "neon_d";
2877 tdep->neon_double_type = t;
2878 }
2879
2880 return tdep->neon_double_type;
2881 }
2882
2883 /* FIXME: The vector types are not correctly ordered on big-endian
2884 targets. Just as s0 is the low bits of d0, d0[0] is also the low
2885 bits of d0 - regardless of what unit size is being held in d0. So
2886 the offset of the first uint8 in d0 is 7, but the offset of the
2887 first float is 4. This code works as-is for little-endian
2888 targets. */
2889
2890 static struct type *
2891 arm_neon_quad_type (struct gdbarch *gdbarch)
2892 {
2893 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2894
2895 if (tdep->neon_quad_type == NULL)
2896 {
2897 struct type *t, *elem;
2898
2899 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
2900 TYPE_CODE_UNION);
2901 elem = builtin_type (gdbarch)->builtin_uint8;
2902 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
2903 elem = builtin_type (gdbarch)->builtin_uint16;
2904 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
2905 elem = builtin_type (gdbarch)->builtin_uint32;
2906 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
2907 elem = builtin_type (gdbarch)->builtin_uint64;
2908 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
2909 elem = builtin_type (gdbarch)->builtin_float;
2910 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
2911 elem = builtin_type (gdbarch)->builtin_double;
2912 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
2913
2914 TYPE_VECTOR (t) = 1;
2915 TYPE_NAME (t) = "neon_q";
2916 tdep->neon_quad_type = t;
2917 }
2918
2919 return tdep->neon_quad_type;
2920 }
2921
2922 /* Return the GDB type object for the "standard" data type of data in
2923 register N. */
2924
2925 static struct type *
2926 arm_register_type (struct gdbarch *gdbarch, int regnum)
2927 {
2928 int num_regs = gdbarch_num_regs (gdbarch);
2929
2930 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
2931 && regnum >= num_regs && regnum < num_regs + 32)
2932 return builtin_type (gdbarch)->builtin_float;
2933
2934 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
2935 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
2936 return arm_neon_quad_type (gdbarch);
2937
2938 /* If the target description has register information, we are only
2939 in this function so that we can override the types of
2940 double-precision registers for NEON. */
2941 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
2942 {
2943 struct type *t = tdesc_register_type (gdbarch, regnum);
2944
2945 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
2946 && TYPE_CODE (t) == TYPE_CODE_FLT
2947 && gdbarch_tdep (gdbarch)->have_neon)
2948 return arm_neon_double_type (gdbarch);
2949 else
2950 return t;
2951 }
2952
2953 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
2954 {
2955 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
2956 return builtin_type (gdbarch)->builtin_void;
2957
2958 return arm_ext_type (gdbarch);
2959 }
2960 else if (regnum == ARM_SP_REGNUM)
2961 return builtin_type (gdbarch)->builtin_data_ptr;
2962 else if (regnum == ARM_PC_REGNUM)
2963 return builtin_type (gdbarch)->builtin_func_ptr;
2964 else if (regnum >= ARRAY_SIZE (arm_register_names))
2965 /* These registers are only supported on targets which supply
2966 an XML description. */
2967 return builtin_type (gdbarch)->builtin_int0;
2968 else
2969 return builtin_type (gdbarch)->builtin_uint32;
2970 }
2971
2972 /* Map the DWARF register numbered REG onto the appropriate GDB register
2973 number. */
2974
2975 static int
2976 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2977 {
2978 /* Core integer regs. */
2979 if (reg >= 0 && reg <= 15)
2980 return reg;
2981
2982 /* Legacy FPA encoding. These were once used in a way which
2983 overlapped with VFP register numbering, so their use is
2984 discouraged, but GDB doesn't support the ARM toolchain
2985 which used them for VFP. */
2986 if (reg >= 16 && reg <= 23)
2987 return ARM_F0_REGNUM + reg - 16;
2988
2989 /* New assignments for the FPA registers. */
2990 if (reg >= 96 && reg <= 103)
2991 return ARM_F0_REGNUM + reg - 96;
2992
2993 /* WMMX register assignments. */
2994 if (reg >= 104 && reg <= 111)
2995 return ARM_WCGR0_REGNUM + reg - 104;
2996
2997 if (reg >= 112 && reg <= 127)
2998 return ARM_WR0_REGNUM + reg - 112;
2999
3000 if (reg >= 192 && reg <= 199)
3001 return ARM_WC0_REGNUM + reg - 192;
3002
3003 /* VFP v2 registers. A double precision value is actually
3004 in d1 rather than s2, but the ABI only defines numbering
3005 for the single precision registers. This will "just work"
3006 in GDB for little endian targets (we'll read eight bytes,
3007 starting in s0 and then progressing to s1), but will be
3008 reversed on big endian targets with VFP. This won't
3009 be a problem for the new Neon quad registers; you're supposed
3010 to use DW_OP_piece for those. */
3011 if (reg >= 64 && reg <= 95)
3012 {
3013 char name_buf[4];
3014
3015 sprintf (name_buf, "s%d", reg - 64);
3016 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3017 strlen (name_buf));
3018 }
3019
3020 /* VFP v3 / Neon registers. This range is also used for VFP v2
3021 registers, except that it now describes d0 instead of s0. */
3022 if (reg >= 256 && reg <= 287)
3023 {
3024 char name_buf[4];
3025
3026 sprintf (name_buf, "d%d", reg - 256);
3027 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3028 strlen (name_buf));
3029 }
3030
3031 return -1;
3032 }
3033
3034 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
3035 static int
3036 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
3037 {
3038 int reg = regnum;
3039 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
3040
3041 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
3042 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
3043
3044 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
3045 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
3046
3047 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
3048 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
3049
3050 if (reg < NUM_GREGS)
3051 return SIM_ARM_R0_REGNUM + reg;
3052 reg -= NUM_GREGS;
3053
3054 if (reg < NUM_FREGS)
3055 return SIM_ARM_FP0_REGNUM + reg;
3056 reg -= NUM_FREGS;
3057
3058 if (reg < NUM_SREGS)
3059 return SIM_ARM_FPS_REGNUM + reg;
3060 reg -= NUM_SREGS;
3061
3062 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
3063 }
3064
3065 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
3066 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
3067    It is thought that this is the floating-point register format on
3068 little-endian systems. */
3069
3070 static void
3071 convert_from_extended (const struct floatformat *fmt, const void *ptr,
3072 void *dbl, int endianess)
3073 {
3074 DOUBLEST d;
3075
3076 if (endianess == BFD_ENDIAN_BIG)
3077 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
3078 else
3079 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
3080 ptr, &d);
3081 floatformat_from_doublest (fmt, &d, dbl);
3082 }
3083
3084 static void
3085 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
3086 int endianess)
3087 {
3088 DOUBLEST d;
3089
3090 floatformat_to_doublest (fmt, ptr, &d);
3091 if (endianess == BFD_ENDIAN_BIG)
3092 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
3093 else
3094 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
3095 &d, dbl);
3096 }
3097
3098 static int
3099 condition_true (unsigned long cond, unsigned long status_reg)
3100 {
3101 if (cond == INST_AL || cond == INST_NV)
3102 return 1;
3103
3104 switch (cond)
3105 {
3106 case INST_EQ:
3107 return ((status_reg & FLAG_Z) != 0);
3108 case INST_NE:
3109 return ((status_reg & FLAG_Z) == 0);
3110 case INST_CS:
3111 return ((status_reg & FLAG_C) != 0);
3112 case INST_CC:
3113 return ((status_reg & FLAG_C) == 0);
3114 case INST_MI:
3115 return ((status_reg & FLAG_N) != 0);
3116 case INST_PL:
3117 return ((status_reg & FLAG_N) == 0);
3118 case INST_VS:
3119 return ((status_reg & FLAG_V) != 0);
3120 case INST_VC:
3121 return ((status_reg & FLAG_V) == 0);
3122 case INST_HI:
3123 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
3124 case INST_LS:
3125 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
3126 case INST_GE:
3127 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
3128 case INST_LT:
3129 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
3130 case INST_GT:
3131 return (((status_reg & FLAG_Z) == 0)
3132 && (((status_reg & FLAG_N) == 0)
3133 == ((status_reg & FLAG_V) == 0)));
3134 case INST_LE:
3135 return (((status_reg & FLAG_Z) != 0)
3136 || (((status_reg & FLAG_N) == 0)
3137 != ((status_reg & FLAG_V) == 0)));
3138 }
3139 return 1;
3140 }
3141
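/* Decode the shifter operand of the ARM data-processing instruction
   INST as seen by an instruction executing with the program counter
   equal to PC_VAL and with the carry flag equal to CARRY, reading any
   register operands from FRAME.  Return the resulting 32-bit value.  */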
3142 static unsigned long
3143 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
3144 unsigned long pc_val, unsigned long status_reg)
3145 {
3146 unsigned long res, shift;
3147 int rm = bits (inst, 0, 3);
3148 unsigned long shifttype = bits (inst, 5, 6);
3149
3150 if (bit (inst, 4))
3151 {
3152 int rs = bits (inst, 8, 11);
3153 shift = (rs == 15 ? pc_val + 8
3154 : get_frame_register_unsigned (frame, rs)) & 0xFF;
3155 }
3156 else
3157 shift = bits (inst, 7, 11);
3158
3159 res = (rm == 15
3160 ? (pc_val + (bit (inst, 4) ? 12 : 8))
3161 : get_frame_register_unsigned (frame, rm));
3162
3163 switch (shifttype)
3164 {
3165 case 0: /* LSL */
3166 res = shift >= 32 ? 0 : res << shift;
3167 break;
3168
3169 case 1: /* LSR */
3170 res = shift >= 32 ? 0 : res >> shift;
3171 break;
3172
3173 case 2: /* ASR */
3174 if (shift >= 32)
3175 shift = 31;
3176 res = ((res & 0x80000000L)
3177 ? ~((~res) >> shift) : res >> shift);
3178 break;
3179
3180 case 3: /* ROR/RRX */
3181 shift &= 31;
3182 if (shift == 0)
3183 res = (res >> 1) | (carry ? 0x80000000L : 0);
3184 else
3185 res = (res >> shift) | (res << (32 - shift));
3186 break;
3187 }
3188
3189 return res & 0xffffffff;
3190 }
3191
3192 /* Return number of 1-bits in VAL. */
3193
3194 static int
3195 bitcount (unsigned long val)
3196 {
3197 int nbits;
3198 for (nbits = 0; val != 0; nbits++)
3199 val &= val - 1; /* delete rightmost 1-bit in val */
3200 return nbits;
3201 }
3202
3203 /* Return the size in bytes of the complete Thumb instruction whose
3204 first halfword is INST1. */
3205
3206 static int
3207 thumb_insn_size (unsigned short inst1)
3208 {
3209 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3210 return 4;
3211 else
3212 return 2;
3213 }
3214
3215 static int
3216 thumb_advance_itstate (unsigned int itstate)
3217 {
3218 /* Preserve IT[7:5], the first three bits of the condition. Shift
3219 the upcoming condition flags left by one bit. */
3220 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
3221
3222 /* If we have finished the IT block, clear the state. */
3223 if ((itstate & 0x0f) == 0)
3224 itstate = 0;
3225
3226 return itstate;
3227 }
3228
3229 /* Find the next PC after the current instruction executes. In some
3230 cases we can not statically determine the answer (see the IT state
3231 handling in this function); in that case, a breakpoint may be
3232 inserted in addition to the returned PC, which will be used to set
3233 another breakpoint by our caller. */
3234
3235 static CORE_ADDR
3236 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3237 {
3238 struct gdbarch *gdbarch = get_frame_arch (frame);
3239 struct address_space *aspace = get_frame_address_space (frame);
3240 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3241 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3242 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
3243 unsigned short inst1;
3244 CORE_ADDR nextpc = pc + 2; /* default is next instruction */
3245 unsigned long offset;
3246 ULONGEST status, itstate;
3247
3248 nextpc = MAKE_THUMB_ADDR (nextpc);
3249 pc_val = MAKE_THUMB_ADDR (pc_val);
3250
3251 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3252
3253 /* Thumb-2 conditional execution support. There are eight bits in
3254 the CPSR which describe conditional execution state. Once
3255 reconstructed (they're in a funny order), the low five bits
3256 describe the low bit of the condition for each instruction and
3257 how many instructions remain. The high three bits describe the
3258 base condition. One of the low four bits will be set if an IT
3259 block is active. These bits read as zero on earlier
3260 processors. */
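  /* Architecturally, IT[1:0] live in CPSR bits 26:25 and IT[7:2] in
     CPSR bits 15:10; the shifts and masks below reassemble them into
     a single eight-bit ITSTATE value.  */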
3261 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3262 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
3263
3264 /* If-Then handling. On GNU/Linux, where this routine is used, we
3265 use an undefined instruction as a breakpoint. Unlike BKPT, IT
3266 can disable execution of the undefined instruction. So we might
3267 miss the breakpoint if we set it on a skipped conditional
3268 instruction. Because conditional instructions can change the
3269 flags, affecting the execution of further instructions, we may
3270 need to set two breakpoints. */
3271
3272 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
3273 {
3274 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3275 {
3276 /* An IT instruction. Because this instruction does not
3277 modify the flags, we can accurately predict the next
3278 executed instruction. */
3279 itstate = inst1 & 0x00ff;
3280 pc += thumb_insn_size (inst1);
3281
3282 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3283 {
3284 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3285 pc += thumb_insn_size (inst1);
3286 itstate = thumb_advance_itstate (itstate);
3287 }
3288
3289 return MAKE_THUMB_ADDR (pc);
3290 }
3291 else if (itstate != 0)
3292 {
3293 /* We are in a conditional block. Check the condition. */
3294 if (! condition_true (itstate >> 4, status))
3295 {
3296 /* Advance to the next executed instruction. */
3297 pc += thumb_insn_size (inst1);
3298 itstate = thumb_advance_itstate (itstate);
3299
3300 while (itstate != 0 && ! condition_true (itstate >> 4, status))
3301 {
3302 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3303 pc += thumb_insn_size (inst1);
3304 itstate = thumb_advance_itstate (itstate);
3305 }
3306
3307 return MAKE_THUMB_ADDR (pc);
3308 }
3309 else if ((itstate & 0x0f) == 0x08)
3310 {
3311 /* This is the last instruction of the conditional
3312 block, and it is executed. We can handle it normally
3313 because the following instruction is not conditional,
3314 and we must handle it normally because it is
3315 permitted to branch. Fall through. */
3316 }
3317 else
3318 {
3319 int cond_negated;
3320
3321 /* There are conditional instructions after this one.
3322 If this instruction modifies the flags, then we can
3323 not predict what the next executed instruction will
3324 be. Fortunately, this instruction is architecturally
3325 forbidden to branch; we know it will fall through.
3326 Start by skipping past it. */
3327 pc += thumb_insn_size (inst1);
3328 itstate = thumb_advance_itstate (itstate);
3329
3330 /* Set a breakpoint on the following instruction. */
3331 gdb_assert ((itstate & 0x0f) != 0);
3332 if (insert_bkpt)
3333 insert_single_step_breakpoint (gdbarch, aspace, pc);
3334 cond_negated = (itstate >> 4) & 1;
3335
3336 /* Skip all following instructions with the same
3337 condition. If there is a later instruction in the IT
3338 block with the opposite condition, set the other
3339 breakpoint there. If not, then set a breakpoint on
3340 the instruction after the IT block. */
3341 do
3342 {
3343 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
3344 pc += thumb_insn_size (inst1);
3345 itstate = thumb_advance_itstate (itstate);
3346 }
3347 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
3348
3349 return MAKE_THUMB_ADDR (pc);
3350 }
3351 }
3352 }
3353 else if (itstate & 0x0f)
3354 {
3355 /* We are in a conditional block. Check the condition. */
3356 int cond = itstate >> 4;
3357
3358 if (! condition_true (cond, status))
3359 {
3360 /* Advance to the next instruction. All the 32-bit
3361 instructions share a common prefix. */
3362 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
3363 return MAKE_THUMB_ADDR (pc + 4);
3364 else
3365 return MAKE_THUMB_ADDR (pc + 2);
3366 }
3367
3368 /* Otherwise, handle the instruction normally. */
3369 }
3370
3371 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
3372 {
3373 CORE_ADDR sp;
3374
3375 /* Fetch the saved PC from the stack. It's stored above
3376 all of the other registers. */
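      /* For example, for "pop {r4, r7, pc}" two low registers precede
	 the saved PC on the stack, so the new PC is fetched from
	 SP + 8.  */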
3377 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
3378 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
3379 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
3380 }
3381 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
3382 {
3383 unsigned long cond = bits (inst1, 8, 11);
3384 if (cond == 0x0f) /* 0x0f = SWI */
3385 {
3386 struct gdbarch_tdep *tdep;
3387 tdep = gdbarch_tdep (gdbarch);
3388
3389 if (tdep->syscall_next_pc != NULL)
3390 nextpc = tdep->syscall_next_pc (frame);
3391
3392 }
3393 else if (cond != 0x0f && condition_true (cond, status))
3394 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
3395 }
3396 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
3397 {
3398 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
3399 }
3400 else if ((inst1 & 0xe000) == 0xe000) /* 32-bit instruction */
3401 {
3402 unsigned short inst2;
3403 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
3404
3405 /* Default to the next instruction. */
3406 nextpc = pc + 4;
3407 nextpc = MAKE_THUMB_ADDR (nextpc);
3408
3409 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
3410 {
3411 /* Branches and miscellaneous control instructions. */
3412
3413 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
3414 {
3415 /* B, BL, BLX. */
3416 int j1, j2, imm1, imm2;
3417
3418 imm1 = sbits (inst1, 0, 10);
3419 imm2 = bits (inst2, 0, 10);
3420 j1 = bit (inst2, 13);
3421 j2 = bit (inst2, 11);
3422
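	      /* The Thumb-2 branch encoding stores I1 = NOT(J1 EOR S) and
		 I2 = NOT(J2 EOR S).  Since IMM1 was sign-extended, bits 23
		 and 22 of the partial offset below already hold S; XORing
		 in the inverted J bits reconstructs I1 and I2 in place.  */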
3423 offset = ((imm1 << 12) + (imm2 << 1));
3424 offset ^= ((!j2) << 22) | ((!j1) << 23);
3425
3426 nextpc = pc_val + offset;
3427 /* For BLX make sure to clear the low bits. */
3428 if (bit (inst2, 12) == 0)
3429 nextpc = nextpc & 0xfffffffc;
3430 }
3431 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
3432 {
3433 /* SUBS PC, LR, #imm8. */
3434 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
3435 nextpc -= inst2 & 0x00ff;
3436 }
3437 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
3438 {
3439 /* Conditional branch. */
3440 if (condition_true (bits (inst1, 6, 9), status))
3441 {
3442 int sign, j1, j2, imm1, imm2;
3443
3444 sign = sbits (inst1, 10, 10);
3445 imm1 = bits (inst1, 0, 5);
3446 imm2 = bits (inst2, 0, 10);
3447 j1 = bit (inst2, 13);
3448 j2 = bit (inst2, 11);
3449
3450 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
3451 offset += (imm1 << 12) + (imm2 << 1);
3452
3453 nextpc = pc_val + offset;
3454 }
3455 }
3456 }
3457 else if ((inst1 & 0xfe50) == 0xe810)
3458 {
3459 /* Load multiple or RFE. */
3460 int rn, offset, load_pc = 1;
3461
3462 rn = bits (inst1, 0, 3);
3463 if (bit (inst1, 7) && !bit (inst1, 8))
3464 {
3465 /* LDMIA or POP */
3466 if (!bit (inst2, 15))
3467 load_pc = 0;
3468 offset = bitcount (inst2) * 4 - 4;
3469 }
3470 else if (!bit (inst1, 7) && bit (inst1, 8))
3471 {
3472 /* LDMDB */
3473 if (!bit (inst2, 15))
3474 load_pc = 0;
3475 offset = -4;
3476 }
3477 else if (bit (inst1, 7) && bit (inst1, 8))
3478 {
3479 /* RFEIA */
3480 offset = 0;
3481 }
3482 else if (!bit (inst1, 7) && !bit (inst1, 8))
3483 {
3484 /* RFEDB */
3485 offset = -8;
3486 }
3487 else
3488 load_pc = 0;
3489
3490 if (load_pc)
3491 {
3492 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
3493 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
3494 }
3495 }
3496 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
3497 {
3498 /* MOV PC or MOVS PC. */
3499 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3500 nextpc = MAKE_THUMB_ADDR (nextpc);
3501 }
3502 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
3503 {
3504 /* LDR PC. */
3505 CORE_ADDR base;
3506 int rn, load_pc = 1;
3507
3508 rn = bits (inst1, 0, 3);
3509 base = get_frame_register_unsigned (frame, rn);
3510 if (rn == 15)
3511 {
3512 base = (base + 4) & ~(CORE_ADDR) 0x3;
3513 if (bit (inst1, 7))
3514 base += bits (inst2, 0, 11);
3515 else
3516 base -= bits (inst2, 0, 11);
3517 }
3518 else if (bit (inst1, 7))
3519 base += bits (inst2, 0, 11);
3520 else if (bit (inst2, 11))
3521 {
3522 if (bit (inst2, 10))
3523 {
3524 if (bit (inst2, 9))
3525 base += bits (inst2, 0, 7);
3526 else
3527 base -= bits (inst2, 0, 7);
3528 }
3529 }
3530 else if ((inst2 & 0x0fc0) == 0x0000)
3531 {
3532 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
3533 base += get_frame_register_unsigned (frame, rm) << shift;
3534 }
3535 else
3536 /* Reserved. */
3537 load_pc = 0;
3538
3539 if (load_pc)
3540 nextpc = get_frame_memory_unsigned (frame, base, 4);
3541 }
3542 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
3543 {
3544 /* TBB. */
3545 CORE_ADDR tbl_reg, table, offset, length;
3546
3547 tbl_reg = bits (inst1, 0, 3);
3548 if (tbl_reg == 0x0f)
3549 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3550 else
3551 table = get_frame_register_unsigned (frame, tbl_reg);
3552
3553 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3554 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
3555 nextpc = pc_val + length;
3556 }
3557 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
3558 {
3559 /* TBH. */
3560 CORE_ADDR tbl_reg, table, offset, length;
3561
3562 tbl_reg = bits (inst1, 0, 3);
3563 if (tbl_reg == 0x0f)
3564 table = pc + 4; /* Regcache copy of PC isn't right yet. */
3565 else
3566 table = get_frame_register_unsigned (frame, tbl_reg);
3567
3568 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
3569 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
3570 nextpc = pc_val + length;
3571 }
3572 }
3573 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
3574 {
3575 if (bits (inst1, 3, 6) == 0x0f)
3576 nextpc = pc_val;
3577 else
3578 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3579 }
3580 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
3581 {
3582 if (bits (inst1, 3, 6) == 0x0f)
3583 nextpc = pc_val;
3584 else
3585 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
3586
3587 nextpc = MAKE_THUMB_ADDR (nextpc);
3588 }
3589 else if ((inst1 & 0xf500) == 0xb100)
3590 {
3591 /* CBNZ or CBZ. */
3592 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
3593 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
3594
3595 if (bit (inst1, 11) && reg != 0)
3596 nextpc = pc_val + imm;
3597 else if (!bit (inst1, 11) && reg == 0)
3598 nextpc = pc_val + imm;
3599 }
3600 return nextpc;
3601 }
3602
3603 /* Get the raw next address. PC is the current program counter, in
3604    FRAME.  INSERT_BKPT should be TRUE if we want a breakpoint set on
3605    the alternative next instruction when there are two possibilities.
3606
3607 The value returned has the execution state of the next instruction
3608 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
3609 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
3610 address.
3611 */
3612 static CORE_ADDR
3613 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc, int insert_bkpt)
3614 {
3615 struct gdbarch *gdbarch = get_frame_arch (frame);
3616 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3617 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3618 unsigned long pc_val;
3619 unsigned long this_instr;
3620 unsigned long status;
3621 CORE_ADDR nextpc;
3622
3623 if (arm_frame_is_thumb (frame))
3624 return thumb_get_next_pc_raw (frame, pc, insert_bkpt);
3625
3626 pc_val = (unsigned long) pc;
3627 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3628
3629 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
3630 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
3631
3632 if (bits (this_instr, 28, 31) == INST_NV)
3633 switch (bits (this_instr, 24, 27))
3634 {
3635 case 0xa:
3636 case 0xb:
3637 {
3638 /* Branch with Link and change to Thumb. */
3639 nextpc = BranchDest (pc, this_instr);
3640 nextpc |= bit (this_instr, 24) << 1;
3641 nextpc = MAKE_THUMB_ADDR (nextpc);
3642 break;
3643 }
3644 case 0xc:
3645 case 0xd:
3646 case 0xe:
3647 /* Coprocessor register transfer. */
3648 if (bits (this_instr, 12, 15) == 15)
3649 error (_("Invalid update to pc in instruction"));
3650 break;
3651 }
3652 else if (condition_true (bits (this_instr, 28, 31), status))
3653 {
3654 switch (bits (this_instr, 24, 27))
3655 {
3656 case 0x0:
3657 case 0x1: /* data processing */
3658 case 0x2:
3659 case 0x3:
3660 {
3661 unsigned long operand1, operand2, result = 0;
3662 unsigned long rn;
3663 int c;
3664
3665 if (bits (this_instr, 12, 15) != 15)
3666 break;
3667
3668 if (bits (this_instr, 22, 25) == 0
3669 && bits (this_instr, 4, 7) == 9) /* multiply */
3670 error (_("Invalid update to pc in instruction"));
3671
3672 /* BX <reg>, BLX <reg> */
3673 if (bits (this_instr, 4, 27) == 0x12fff1
3674 || bits (this_instr, 4, 27) == 0x12fff3)
3675 {
3676 rn = bits (this_instr, 0, 3);
3677 nextpc = (rn == 15) ? pc_val + 8
3678 : get_frame_register_unsigned (frame, rn);
3679 return nextpc;
3680 }
3681
3682 /* Data-processing instruction with PC as the destination. */
3683 c = (status & FLAG_C) ? 1 : 0;
3684 rn = bits (this_instr, 16, 19);
3685 operand1 = (rn == 15) ? pc_val + 8
3686 : get_frame_register_unsigned (frame, rn);
3687
3688 if (bit (this_instr, 25))
3689 {
3690 unsigned long immval = bits (this_instr, 0, 7);
3691 unsigned long rotate = 2 * bits (this_instr, 8, 11);
3692 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
3693 & 0xffffffff;
3694 }
3695 else /* operand 2 is a shifted register */
3696 operand2 = shifted_reg_val (frame, this_instr, c, pc_val, status);
3697
3698 switch (bits (this_instr, 21, 24))
3699 {
3700 case 0x0: /*and */
3701 result = operand1 & operand2;
3702 break;
3703
3704 case 0x1: /*eor */
3705 result = operand1 ^ operand2;
3706 break;
3707
3708 case 0x2: /*sub */
3709 result = operand1 - operand2;
3710 break;
3711
3712 case 0x3: /*rsb */
3713 result = operand2 - operand1;
3714 break;
3715
3716 case 0x4: /*add */
3717 result = operand1 + operand2;
3718 break;
3719
3720 case 0x5: /*adc */
3721 result = operand1 + operand2 + c;
3722 break;
3723
3724 case 0x6: /*sbc */
3725 result = operand1 - operand2 + c;
3726 break;
3727
3728 case 0x7: /*rsc */
3729 result = operand2 - operand1 + c;
3730 break;
3731
3732 case 0x8:
3733 case 0x9:
3734 case 0xa:
3735 case 0xb: /* tst, teq, cmp, cmn */
3736 result = (unsigned long) nextpc;
3737 break;
3738
3739 case 0xc: /*orr */
3740 result = operand1 | operand2;
3741 break;
3742
3743 case 0xd: /*mov */
3744 /* Always step into a function. */
3745 result = operand2;
3746 break;
3747
3748 case 0xe: /*bic */
3749 result = operand1 & ~operand2;
3750 break;
3751
3752 case 0xf: /*mvn */
3753 result = ~operand2;
3754 break;
3755 }
3756
3757 /* In 26-bit APCS the bottom two bits of the result are
3758 ignored, and we always end up in ARM state. */
3759 if (!arm_apcs_32)
3760 nextpc = arm_addr_bits_remove (gdbarch, result);
3761 else
3762 nextpc = result;
3763
3764 break;
3765 }
3766
3767 case 0x4:
3768 case 0x5: /* data transfer */
3769 case 0x6:
3770 case 0x7:
3771 if (bit (this_instr, 20))
3772 {
3773 /* load */
3774 if (bits (this_instr, 12, 15) == 15)
3775 {
3776 /* rd == pc */
3777 unsigned long rn;
3778 unsigned long base;
3779
3780 if (bit (this_instr, 22))
3781 error (_("Invalid update to pc in instruction"));
3782
3783 /* Word load into PC (a byte load to PC was rejected above). */
3784 rn = bits (this_instr, 16, 19);
3785 base = (rn == 15) ? pc_val + 8
3786 : get_frame_register_unsigned (frame, rn);
3787 if (bit (this_instr, 24))
3788 {
3789 /* pre-indexed */
3790 int c = (status & FLAG_C) ? 1 : 0;
3791 unsigned long offset =
3792 (bit (this_instr, 25)
3793 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
3794 : bits (this_instr, 0, 11));
3795
3796 if (bit (this_instr, 23))
3797 base += offset;
3798 else
3799 base -= offset;
3800 }
3801 nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
3802 4, byte_order);
3803 }
3804 }
3805 break;
3806
3807 case 0x8:
3808 case 0x9: /* block transfer */
3809 if (bit (this_instr, 20))
3810 {
3811 /* LDM */
3812 if (bit (this_instr, 15))
3813 {
3814 /* loading pc */
3815 int offset = 0;
3816
3817 if (bit (this_instr, 23))
3818 {
3819 /* up */
3820 unsigned long reglist = bits (this_instr, 0, 14);
3821 offset = bitcount (reglist) * 4;
3822 if (bit (this_instr, 24)) /* pre */
3823 offset += 4;
3824 }
3825 else if (bit (this_instr, 24))
3826 offset = -4;
3827
3828 {
3829 unsigned long rn_val =
3830 get_frame_register_unsigned (frame,
3831 bits (this_instr, 16, 19));
3832 nextpc =
3833 (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
3834 + offset),
3835 4, byte_order);
3836 }
3837 }
3838 }
3839 break;
3840
3841 case 0xb: /* branch & link */
3842 case 0xa: /* branch */
3843 {
3844 nextpc = BranchDest (pc, this_instr);
3845 break;
3846 }
3847
3848 case 0xc:
3849 case 0xd:
3850 case 0xe: /* coproc ops */
3851 break;
3852 case 0xf: /* SWI */
3853 {
3854 struct gdbarch_tdep *tdep;
3855 tdep = gdbarch_tdep (gdbarch);
3856
3857 if (tdep->syscall_next_pc != NULL)
3858 nextpc = tdep->syscall_next_pc (frame);
3859
3860 }
3861 break;
3862
3863 default:
3864 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
3865 return (pc);
3866 }
3867 }
3868
3869 return nextpc;
3870 }
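
/* A worked example of the block-transfer (LDM) case above (the operands
   are only an illustration): for "ldmib r0, {r1, r2, pc}", bit 23 (up)
   and bit 24 (pre-increment) are set and the register list below the PC
   is {r1, r2}, so offset = 2 * 4 + 4 = 12 and the next PC is read from
   the word at r0 + 12, exactly the word that LDMIB itself would load
   into the PC.  */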
3871
3872 CORE_ADDR
3873 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
3874 {
3875 struct gdbarch *gdbarch = get_frame_arch (frame);
3876 CORE_ADDR nextpc =
3877 gdbarch_addr_bits_remove (gdbarch,
3878 arm_get_next_pc_raw (frame, pc, TRUE));
3879 if (nextpc == pc)
3880 error (_("Infinite loop detected"));
3881 return nextpc;
3882 }
3883
3884 /* arm_software_single_step() is called just before we want to resume the
3885 inferior, if we want to single-step it but there is no hardware or kernel
3886 single-step support. We find the target of the coming instruction
3887 and breakpoint it. */
3888
3889 int
3890 arm_software_single_step (struct frame_info *frame)
3891 {
3892 struct gdbarch *gdbarch = get_frame_arch (frame);
3893 struct address_space *aspace = get_frame_address_space (frame);
3894
3895 /* NOTE: This may insert the wrong breakpoint instruction when
3896 single-stepping over a mode-changing instruction, if the
3897 CPSR heuristics are used. */
3898
3899 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
3900 insert_single_step_breakpoint (gdbarch, aspace, next_pc);
3901
3902 return 1;
3903 }
3904
3905 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
3906 the buffer to be NEW_LEN bytes ending at ENDADDR. Return the new
3907 buffer, or NULL if an error occurs. In either case BUF is freed. */
3908
3909 static gdb_byte *
3910 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
3911 int old_len, int new_len)
3912 {
3913 gdb_byte *new_buf, *middle;
3914 int bytes_to_read = new_len - old_len;
3915
3916 new_buf = xmalloc (new_len);
3917 memcpy (new_buf + bytes_to_read, buf, old_len);
3918 xfree (buf);
3919 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
3920 {
3921 xfree (new_buf);
3922 return NULL;
3923 }
3924 return new_buf;
3925 }
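
/* For instance (the sizes here are only an example): if BUF holds the
   4 bytes ending at ENDADDR and NEW_LEN is 10, the routine above
   allocates a 10-byte buffer, copies the old 4 bytes to its tail, and
   reads the 6 bytes starting at ENDADDR - 10 into its head, so the
   result again ends at ENDADDR.  */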
3926
3927 /* An IT block is at most the 2-byte IT instruction followed by
3928 four 4-byte instructions. The furthest back we must search to
3929 find an IT block that affects the current instruction is thus
3930 2 + 3 * 4 == 14 bytes. */
3931 #define MAX_IT_BLOCK_PREFIX 14
3932
3933 /* Use a quick scan if there are more than this many bytes of
3934 code. */
3935 #define IT_SCAN_THRESHOLD 32
3936
3937 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
3938 A breakpoint in an IT block may not be hit, depending on the
3939 condition flags. */
3940 static CORE_ADDR
3941 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
3942 {
3943 gdb_byte *buf;
3944 char map_type;
3945 CORE_ADDR boundary, func_start;
3946 int buf_len, buf2_len;
3947 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
3948 int i, any, last_it, last_it_count;
3949
3950 /* If we are using BKPT breakpoints, none of this is necessary. */
3951 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
3952 return bpaddr;
3953
3954 /* ARM mode does not have this problem. */
3955 if (!arm_pc_is_thumb (gdbarch, bpaddr))
3956 return bpaddr;
3957
3958 /* We are setting a breakpoint in Thumb code that could potentially
3959 contain an IT block. The first step is to find how much Thumb
3960 code there is; we do not need to read outside of known Thumb
3961 sequences. */
3962 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
3963 if (map_type == 0)
3964 /* Thumb-2 code must have mapping symbols to have a chance. */
3965 return bpaddr;
3966
3967 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
3968
3969 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
3970 && func_start > boundary)
3971 boundary = func_start;
3972
3973 /* Search for a candidate IT instruction. We have to do some fancy
3974 footwork to distinguish a real IT instruction from the second
3975 half of a 32-bit instruction, but there is no need for that if
3976 there's no candidate. */
3977 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
3978 if (buf_len == 0)
3979 /* No room for an IT instruction. */
3980 return bpaddr;
3981
3982 buf = xmalloc (buf_len);
3983 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
3984 return bpaddr;
3985 any = 0;
3986 for (i = 0; i < buf_len; i += 2)
3987 {
3988 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
3989 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
3990 {
3991 any = 1;
3992 break;
3993 }
3994 }
3995 if (any == 0)
3996 {
3997 xfree (buf);
3998 return bpaddr;
3999 }
4000
4001 /* OK, the code bytes before this instruction contain at least one
4002 halfword which resembles an IT instruction. We know that it's
4003 Thumb code, but there are still two possibilities. Either the
4004 halfword really is an IT instruction, or it is the second half of
4005 a 32-bit Thumb instruction. The only way we can tell is to
4006 scan forwards from a known instruction boundary. */
4007 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4008 {
4009 int definite;
4010
4011 /* There's a lot of code before this instruction. Start with an
4012 optimistic search; it's easy to recognize halfwords that can
4013 not be the start of a 32-bit instruction, and use that to
4014 lock on to the instruction boundaries. */
4015 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4016 if (buf == NULL)
4017 return bpaddr;
4018 buf_len = IT_SCAN_THRESHOLD;
4019
4020 definite = 0;
4021 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4022 {
4023 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4024 if (thumb_insn_size (inst1) == 2)
4025 {
4026 definite = 1;
4027 break;
4028 }
4029 }
4030
4031 /* At this point, if DEFINITE, BUF[I] is the first place we
4032 are sure that we know the instruction boundaries, and it is far
4033 enough from BPADDR that we could not miss an IT instruction
4034 affecting BPADDR. If ! DEFINITE, give up - start from a
4035 known boundary. */
4036 if (! definite)
4037 {
4038 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4039 if (buf == NULL)
4040 return bpaddr;
4041 buf_len = bpaddr - boundary;
4042 i = 0;
4043 }
4044 }
4045 else
4046 {
4047 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4048 if (buf == NULL)
4049 return bpaddr;
4050 buf_len = bpaddr - boundary;
4051 i = 0;
4052 }
4053
4054 /* Scan forwards. Find the last IT instruction before BPADDR. */
4055 last_it = -1;
4056 last_it_count = 0;
4057 while (i < buf_len)
4058 {
4059 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4060 last_it_count--;
4061 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4062 {
4063 last_it = i;
4064 if (inst1 & 0x0001)
4065 last_it_count = 4;
4066 else if (inst1 & 0x0002)
4067 last_it_count = 3;
4068 else if (inst1 & 0x0004)
4069 last_it_count = 2;
4070 else
4071 last_it_count = 1;
4072 }
4073 i += thumb_insn_size (inst1);
4074 }
4075
4076 xfree (buf);
4077
4078 if (last_it == -1)
4079 /* There wasn't really an IT instruction after all. */
4080 return bpaddr;
4081
4082 if (last_it_count < 1)
4083 /* It was too far away. */
4084 return bpaddr;
4085
4086 /* This really is a trouble spot. Move the breakpoint to the IT
4087 instruction. */
4088 return bpaddr - buf_len + last_it;
4089 }
4090
4091 /* ARM displaced stepping support.
4092
4093 Generally ARM displaced stepping works as follows:
4094
4095 1. When an instruction is to be single-stepped, it is first decoded by
4096 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
4097 Depending on the type of instruction, it is then copied to a scratch
4098 location, possibly in a modified form. The copy_* set of functions
4099 performs such modification, as necessary. A breakpoint is placed after
4100 the modified instruction in the scratch space to return control to GDB.
4101 Note in particular that instructions which modify the PC will no longer
4102 do so after modification.
4103
4104 2. The instruction is single-stepped, by setting the PC to the scratch
4105 location address, and resuming. Control returns to GDB when the
4106 breakpoint is hit.
4107
4108 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4109 function used for the current instruction. This function's job is to
4110 put the CPU/memory state back to what it would have been if the
4111 instruction had been executed unmodified in its original location. */
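
/* As a concrete sketch of the flow (the instruction and addresses are
   hypothetical): to step "blne label" originally at address FROM,
   copy_b_bl_blx (below) records the condition, the link flag and the
   branch destination in the closure and replaces the copied instruction
   with a NOP; the NOP is single-stepped at the scratch location; and
   cleanup_branch then tests the NE condition against the CPSR and, if
   it holds, writes FROM + 4 to r14 and the recorded destination to the
   PC, just as the original BLNE would have done in its original
   location.  */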
4112
4113 /* NOP instruction (mov r0, r0). */
4114 #define ARM_NOP 0xe1a00000
4115
4116 /* Helper for register reads for displaced stepping. In particular, this
4117 returns the PC as it would be seen by the instruction at its original
4118 location. */
4119
4120 ULONGEST
4121 displaced_read_reg (struct regcache *regs, CORE_ADDR from, int regno)
4122 {
4123 ULONGEST ret;
4124
4125 if (regno == 15)
4126 {
4127 if (debug_displaced)
4128 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
4129 (unsigned long) from + 8);
4130 return (ULONGEST) from + 8; /* Pipeline offset. */
4131 }
4132 else
4133 {
4134 regcache_cooked_read_unsigned (regs, regno, &ret);
4135 if (debug_displaced)
4136 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
4137 regno, (unsigned long) ret);
4138 return ret;
4139 }
4140 }
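
/* For example (the address is hypothetical): if the instruction being
   stepped was originally at 0x8000, displaced_read_reg (regs, 0x8000, 15)
   returns 0x8008, the PC value the instruction itself would observe,
   while any other register number is simply read from the regcache.  */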
4141
4142 static int
4143 displaced_in_arm_mode (struct regcache *regs)
4144 {
4145 ULONGEST ps;
4146 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4147
4148 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4149
4150 return (ps & t_bit) == 0;
4151 }
4152
4153 /* Write to the PC as from a branch instruction. */
4154
4155 static void
4156 branch_write_pc (struct regcache *regs, ULONGEST val)
4157 {
4158 if (displaced_in_arm_mode (regs))
4159 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4160 architecture versions < 6. */
4161 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x3);
4162 else
4163 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & ~(ULONGEST) 0x1);
4164 }
4165
4166 /* Write to the PC as from a branch-exchange instruction. */
4167
4168 static void
4169 bx_write_pc (struct regcache *regs, ULONGEST val)
4170 {
4171 ULONGEST ps;
4172 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
4173
4174 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4175
4176 if ((val & 1) == 1)
4177 {
4178 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4179 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4180 }
4181 else if ((val & 2) == 0)
4182 {
4183 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4184 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4185 }
4186 else
4187 {
4188 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4189 mode, align dest to 4 bytes). */
4190 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4191 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4192 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4193 }
4194 }
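
/* For example (the values are hypothetical): bx_write_pc (regs, 0x8001)
   sets the Thumb bit and writes 0x8000 to the PC; bx_write_pc (regs,
   0x8004) clears the Thumb bit and writes 0x8004; and bx_write_pc
   (regs, 0x8002) hits the unpredictable case, so it warns and falls
   back to ARM mode at 0x8000.  */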
4195
4196 /* Write to the PC as if from a load instruction. */
4197
4198 static void
4199 load_write_pc (struct regcache *regs, ULONGEST val)
4200 {
4201 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4202 bx_write_pc (regs, val);
4203 else
4204 branch_write_pc (regs, val);
4205 }
4206
4207 /* Write to the PC as if from an ALU instruction. */
4208
4209 static void
4210 alu_write_pc (struct regcache *regs, ULONGEST val)
4211 {
4212 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && displaced_in_arm_mode (regs))
4213 bx_write_pc (regs, val);
4214 else
4215 branch_write_pc (regs, val);
4216 }
4217
4218 /* Helper for writing to registers for displaced stepping. Writing to the PC
4219 has varying effects depending on the instruction which does the write:
4220 this is controlled by the WRITE_PC argument. */
4221
4222 void
4223 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
4224 int regno, ULONGEST val, enum pc_write_style write_pc)
4225 {
4226 if (regno == 15)
4227 {
4228 if (debug_displaced)
4229 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
4230 (unsigned long) val);
4231 switch (write_pc)
4232 {
4233 case BRANCH_WRITE_PC:
4234 branch_write_pc (regs, val);
4235 break;
4236
4237 case BX_WRITE_PC:
4238 bx_write_pc (regs, val);
4239 break;
4240
4241 case LOAD_WRITE_PC:
4242 load_write_pc (regs, val);
4243 break;
4244
4245 case ALU_WRITE_PC:
4246 alu_write_pc (regs, val);
4247 break;
4248
4249 case CANNOT_WRITE_PC:
4250 warning (_("Instruction wrote to PC in an unexpected way when "
4251 "single-stepping"));
4252 break;
4253
4254 default:
4255 internal_error (__FILE__, __LINE__,
4256 _("Invalid argument to displaced_write_reg"));
4257 }
4258
4259 dsc->wrote_to_pc = 1;
4260 }
4261 else
4262 {
4263 if (debug_displaced)
4264 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
4265 regno, (unsigned long) val);
4266 regcache_cooked_write_unsigned (regs, regno, val);
4267 }
4268 }
4269
4270 /* This function is used to concisely determine if an instruction INSN
4271 references PC. Register fields of interest in INSN should have the
4272 corresponding fields of BITMASK set to 0b1111. The function returns 1
4273 if any of these fields in INSN reference the PC (also 0b1111, r15), else it
4274 returns 0. */
4275
4276 static int
4277 insn_references_pc (uint32_t insn, uint32_t bitmask)
4278 {
4279 uint32_t lowbit = 1;
4280
4281 while (bitmask != 0)
4282 {
4283 uint32_t mask;
4284
4285 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
4286 ;
4287
4288 if (!lowbit)
4289 break;
4290
4291 mask = lowbit * 0xf;
4292
4293 if ((insn & mask) == mask)
4294 return 1;
4295
4296 bitmask &= ~mask;
4297 }
4298
4299 return 0;
4300 }
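
/* For example (the instructions are only an illustration): for a
   data-processing instruction the Rn and Rd fields live in bits 16-19
   and 12-15, so the appropriate BITMASK is 0x000ff000.  With
   "add pc, r1, #4" (0xe281f004) the Rd field is 0xf and the function
   returns 1; with "add r2, r1, #4" (0xe2812004) neither field is 0xf
   and it returns 0.  */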
4301
4302 /* The simplest copy function. Many instructions have the same effect no
4303 matter what address they are executed at: in those cases, use this. */
4304
4305 static int
4306 copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4307 const char *iname, struct displaced_step_closure *dsc)
4308 {
4309 if (debug_displaced)
4310 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
4311 "opcode/class '%s' unmodified\n", (unsigned long) insn,
4312 iname);
4313
4314 dsc->modinsn[0] = insn;
4315
4316 return 0;
4317 }
4318
4319 /* Preload instructions with immediate offset. */
4320
4321 static void
4322 cleanup_preload (struct gdbarch *gdbarch,
4323 struct regcache *regs, struct displaced_step_closure *dsc)
4324 {
4325 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4326 if (!dsc->u.preload.immed)
4327 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4328 }
4329
4330 static int
4331 copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4332 struct displaced_step_closure *dsc)
4333 {
4334 unsigned int rn = bits (insn, 16, 19);
4335 ULONGEST rn_val;
4336 CORE_ADDR from = dsc->insn_addr;
4337
4338 if (!insn_references_pc (insn, 0x000f0000ul))
4339 return copy_unmodified (gdbarch, insn, "preload", dsc);
4340
4341 if (debug_displaced)
4342 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4343 (unsigned long) insn);
4344
4345 /* Preload instructions:
4346
4347 {pli/pld} [rn, #+/-imm]
4348 ->
4349 {pli/pld} [r0, #+/-imm]. */
4350
4351 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4352 rn_val = displaced_read_reg (regs, from, rn);
4353 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4354
4355 dsc->u.preload.immed = 1;
4356
4357 dsc->modinsn[0] = insn & 0xfff0ffff;
4358
4359 dsc->cleanup = &cleanup_preload;
4360
4361 return 0;
4362 }
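
/* For instance (a hypothetical example): "pld [pc, #16]" is 0xf5dff010;
   masking with 0xfff0ffff gives 0xf5d0f010, i.e. "pld [r0, #16]".
   Since the preparation step copied the PC value the original
   instruction would have seen (insn_addr + 8) into r0, the modified
   instruction preloads the same address as the original.  */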
4363
4364 /* Preload instructions with register offset. */
4365
4366 static int
4367 copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4368 struct displaced_step_closure *dsc)
4369 {
4370 unsigned int rn = bits (insn, 16, 19);
4371 unsigned int rm = bits (insn, 0, 3);
4372 ULONGEST rn_val, rm_val;
4373 CORE_ADDR from = dsc->insn_addr;
4374
4375 if (!insn_references_pc (insn, 0x000f000ful))
4376 return copy_unmodified (gdbarch, insn, "preload reg", dsc);
4377
4378 if (debug_displaced)
4379 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4380 (unsigned long) insn);
4381
4382 /* Preload register-offset instructions:
4383
4384 {pli/pld} [rn, rm {, shift}]
4385 ->
4386 {pli/pld} [r0, r1 {, shift}]. */
4387
4388 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4389 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4390 rn_val = displaced_read_reg (regs, from, rn);
4391 rm_val = displaced_read_reg (regs, from, rm);
4392 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4393 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4394
4395 dsc->u.preload.immed = 0;
4396
4397 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4398
4399 dsc->cleanup = &cleanup_preload;
4400
4401 return 0;
4402 }
4403
4404 /* Copy/cleanup coprocessor load and store instructions. */
4405
4406 static void
4407 cleanup_copro_load_store (struct gdbarch *gdbarch,
4408 struct regcache *regs,
4409 struct displaced_step_closure *dsc)
4410 {
4411 ULONGEST rn_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4412
4413 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4414
4415 if (dsc->u.ldst.writeback)
4416 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4417 }
4418
4419 static int
4420 copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4421 struct regcache *regs,
4422 struct displaced_step_closure *dsc)
4423 {
4424 unsigned int rn = bits (insn, 16, 19);
4425 ULONGEST rn_val;
4426 CORE_ADDR from = dsc->insn_addr;
4427
4428 if (!insn_references_pc (insn, 0x000f0000ul))
4429 return copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4430
4431 if (debug_displaced)
4432 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4433 "load/store insn %.8lx\n", (unsigned long) insn);
4434
4435 /* Coprocessor load/store instructions:
4436
4437 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4438 ->
4439 {stc/stc2} [r0, #+/-imm].
4440
4441 ldc/ldc2 are handled identically. */
4442
4443 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4444 rn_val = displaced_read_reg (regs, from, rn);
4445 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4446
4447 dsc->u.ldst.writeback = bit (insn, 25);
4448 dsc->u.ldst.rn = rn;
4449
4450 dsc->modinsn[0] = insn & 0xfff0ffff;
4451
4452 dsc->cleanup = &cleanup_copro_load_store;
4453
4454 return 0;
4455 }
4456
4457 /* Clean up branch instructions (actually perform the branch, by setting
4458 PC). */
4459
4460 static void
4461 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4462 struct displaced_step_closure *dsc)
4463 {
4464 ULONGEST from = dsc->insn_addr;
4465 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
4466 int branch_taken = condition_true (dsc->u.branch.cond, status);
4467 enum pc_write_style write_pc = dsc->u.branch.exchange
4468 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4469
4470 if (!branch_taken)
4471 return;
4472
4473 if (dsc->u.branch.link)
4474 {
4475 ULONGEST pc = displaced_read_reg (regs, from, 15);
4476 displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
4477 }
4478
4479 displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
4480 }
4481
4482 /* Copy B/BL/BLX instructions with immediate destinations. */
4483
4484 static int
4485 copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
4486 struct regcache *regs, struct displaced_step_closure *dsc)
4487 {
4488 unsigned int cond = bits (insn, 28, 31);
4489 int exchange = (cond == 0xf);
4490 int link = exchange || bit (insn, 24);
4491 CORE_ADDR from = dsc->insn_addr;
4492 long offset;
4493
4494 if (debug_displaced)
4495 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
4496 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
4497 (unsigned long) insn);
4498
4499 /* Implement "BL<cond> <label>" as:
4500
4501 Preparation: cond <- instruction condition
4502 Insn: mov r0, r0 (nop)
4503 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4504
4505 B<cond> similar, but don't set r14 in cleanup. */
4506
4507 if (exchange)
4508 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4509 then arrange the switch into Thumb mode. */
4510 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
4511 else
4512 offset = bits (insn, 0, 23) << 2;
4513
4514 if (bit (offset, 25))
4515 offset = offset | ~0x3ffffff;
4516
4517 dsc->u.branch.cond = cond;
4518 dsc->u.branch.link = link;
4519 dsc->u.branch.exchange = exchange;
4520 dsc->u.branch.dest = from + 8 + offset;
4521
4522 dsc->modinsn[0] = ARM_NOP;
4523
4524 dsc->cleanup = &cleanup_branch;
4525
4526 return 0;
4527 }
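
/* A worked example (the encodings are only an illustration): for
   0xeb000001, i.e. "bl" with imm24 = 1, the condition is not 0xf so
   EXCHANGE is 0, LINK is 1 and OFFSET is 1 << 2 = 4, giving a
   destination of FROM + 12.  For 0xebfffffe, imm24 = 0xfffffe shifts to
   0x3fffff8, bit 25 is set, so sign extension yields -8 and the
   destination is FROM, a branch-and-link to itself.  */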
4528
4529 /* Copy BX/BLX with register-specified destinations. */
4530
4531 static int
4532 copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
4533 struct regcache *regs, struct displaced_step_closure *dsc)
4534 {
4535 unsigned int cond = bits (insn, 28, 31);
4536 /* BX: x12xxx1x
4537 BLX: x12xxx3x. */
4538 int link = bit (insn, 5);
4539 unsigned int rm = bits (insn, 0, 3);
4540 CORE_ADDR from = dsc->insn_addr;
4541
4542 if (debug_displaced)
4543 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
4544 "%.8lx\n", (link) ? "blx" : "bx", (unsigned long) insn);
4545
4546 /* Implement "{BX,BLX}<cond> <reg>" as:
4547
4548 Preparation: cond <- instruction condition
4549 Insn: mov r0, r0 (nop)
4550 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
4551
4552 Don't set r14 in cleanup for BX. */
4553
4554 dsc->u.branch.dest = displaced_read_reg (regs, from, rm);
4555
4556 dsc->u.branch.cond = cond;
4557 dsc->u.branch.link = link;
4558 dsc->u.branch.exchange = 1;
4559
4560 dsc->modinsn[0] = ARM_NOP;
4561
4562 dsc->cleanup = &cleanup_branch;
4563
4564 return 0;
4565 }
4566
4567 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
4568
4569 static void
4570 cleanup_alu_imm (struct gdbarch *gdbarch,
4571 struct regcache *regs, struct displaced_step_closure *dsc)
4572 {
4573 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4574 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4575 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4576 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4577 }
4578
4579 static int
4580 copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4581 struct displaced_step_closure *dsc)
4582 {
4583 unsigned int rn = bits (insn, 16, 19);
4584 unsigned int rd = bits (insn, 12, 15);
4585 unsigned int op = bits (insn, 21, 24);
4586 int is_mov = (op == 0xd);
4587 ULONGEST rd_val, rn_val;
4588 CORE_ADDR from = dsc->insn_addr;
4589
4590 if (!insn_references_pc (insn, 0x000ff000ul))
4591 return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
4592
4593 if (debug_displaced)
4594 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
4595 "%.8lx\n", is_mov ? "move" : "ALU",
4596 (unsigned long) insn);
4597
4598 /* Instruction is of form:
4599
4600 <op><cond> rd, [rn,] #imm
4601
4602 Rewrite as:
4603
4604 Preparation: tmp1, tmp2 <- r0, r1;
4605 r0, r1 <- rd, rn
4606 Insn: <op><cond> r0, r1, #imm
4607 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
4608 */
4609
4610 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4611 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4612 rn_val = displaced_read_reg (regs, from, rn);
4613 rd_val = displaced_read_reg (regs, from, rd);
4614 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4615 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4616 dsc->rd = rd;
4617
4618 if (is_mov)
4619 dsc->modinsn[0] = insn & 0xfff00fff;
4620 else
4621 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
4622
4623 dsc->cleanup = &cleanup_alu_imm;
4624
4625 return 0;
4626 }
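
/* A worked example (the instruction is hypothetical): "add pc, r1, #4"
   is 0xe281f004.  The rewrite (insn & 0xfff00fff) | 0x10000 yields
   0xe2810004, i.e. "add r0, r1, #4".  Preparation copies the original
   operands so that r1 holds rn's value; the copy then computes r1 + 4
   into r0, and the cleanup writes r0 to the PC with ALU_WRITE_PC, the
   same result as executing the original instruction in place.  */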
4627
4628 /* Copy/cleanup arithmetic/logic insns with register RHS. */
4629
4630 static void
4631 cleanup_alu_reg (struct gdbarch *gdbarch,
4632 struct regcache *regs, struct displaced_step_closure *dsc)
4633 {
4634 ULONGEST rd_val;
4635 int i;
4636
4637 rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4638
4639 for (i = 0; i < 3; i++)
4640 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4641
4642 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4643 }
4644
4645 static int
4646 copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4647 struct displaced_step_closure *dsc)
4648 {
4649 unsigned int rn = bits (insn, 16, 19);
4650 unsigned int rm = bits (insn, 0, 3);
4651 unsigned int rd = bits (insn, 12, 15);
4652 unsigned int op = bits (insn, 21, 24);
4653 int is_mov = (op == 0xd);
4654 ULONGEST rd_val, rn_val, rm_val;
4655 CORE_ADDR from = dsc->insn_addr;
4656
4657 if (!insn_references_pc (insn, 0x000ff00ful))
4658 return copy_unmodified (gdbarch, insn, "ALU reg", dsc);
4659
4660 if (debug_displaced)
4661 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
4662 is_mov ? "move" : "ALU", (unsigned long) insn);
4663
4664 /* Instruction is of form:
4665
4666 <op><cond> rd, [rn,] rm [, <shift>]
4667
4668 Rewrite as:
4669
4670 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
4671 r0, r1, r2 <- rd, rn, rm
4672 Insn: <op><cond> r0, r1, r2 [, <shift>]
4673 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
4674 */
4675
4676 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4677 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4678 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4679 rd_val = displaced_read_reg (regs, from, rd);
4680 rn_val = displaced_read_reg (regs, from, rn);
4681 rm_val = displaced_read_reg (regs, from, rm);
4682 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4683 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4684 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4685 dsc->rd = rd;
4686
4687 if (is_mov)
4688 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
4689 else
4690 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
4691
4692 dsc->cleanup = &cleanup_alu_reg;
4693
4694 return 0;
4695 }
4696
4697 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
4698
4699 static void
4700 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
4701 struct regcache *regs,
4702 struct displaced_step_closure *dsc)
4703 {
4704 ULONGEST rd_val = displaced_read_reg (regs, dsc->insn_addr, 0);
4705 int i;
4706
4707 for (i = 0; i < 4; i++)
4708 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
4709
4710 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
4711 }
4712
4713 static int
4714 copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
4715 struct regcache *regs, struct displaced_step_closure *dsc)
4716 {
4717 unsigned int rn = bits (insn, 16, 19);
4718 unsigned int rm = bits (insn, 0, 3);
4719 unsigned int rd = bits (insn, 12, 15);
4720 unsigned int rs = bits (insn, 8, 11);
4721 unsigned int op = bits (insn, 21, 24);
4722 int is_mov = (op == 0xd), i;
4723 ULONGEST rd_val, rn_val, rm_val, rs_val;
4724 CORE_ADDR from = dsc->insn_addr;
4725
4726 if (!insn_references_pc (insn, 0x000fff0ful))
4727 return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
4728
4729 if (debug_displaced)
4730 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
4731 "%.8lx\n", is_mov ? "move" : "ALU",
4732 (unsigned long) insn);
4733
4734 /* Instruction is of form:
4735
4736 <op><cond> rd, [rn,] rm, <shift> rs
4737
4738 Rewrite as:
4739
4740 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
4741 r0, r1, r2, r3 <- rd, rn, rm, rs
4742 Insn: <op><cond> r0, r1, r2, <shift> r3
4743 Cleanup: tmp5 <- r0
4744 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
4745 rd <- tmp5
4746 */
4747
4748 for (i = 0; i < 4; i++)
4749 dsc->tmp[i] = displaced_read_reg (regs, from, i);
4750
4751 rd_val = displaced_read_reg (regs, from, rd);
4752 rn_val = displaced_read_reg (regs, from, rn);
4753 rm_val = displaced_read_reg (regs, from, rm);
4754 rs_val = displaced_read_reg (regs, from, rs);
4755 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
4756 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
4757 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
4758 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
4759 dsc->rd = rd;
4760
4761 if (is_mov)
4762 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
4763 else
4764 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
4765
4766 dsc->cleanup = &cleanup_alu_shifted_reg;
4767
4768 return 0;
4769 }
4770
4771 /* Clean up load instructions. */
4772
4773 static void
4774 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
4775 struct displaced_step_closure *dsc)
4776 {
4777 ULONGEST rt_val, rt_val2 = 0, rn_val;
4778 CORE_ADDR from = dsc->insn_addr;
4779
4780 rt_val = displaced_read_reg (regs, from, 0);
4781 if (dsc->u.ldst.xfersize == 8)
4782 rt_val2 = displaced_read_reg (regs, from, 1);
4783 rn_val = displaced_read_reg (regs, from, 2);
4784
4785 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4786 if (dsc->u.ldst.xfersize > 4)
4787 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4788 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4789 if (!dsc->u.ldst.immed)
4790 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4791
4792 /* Handle register writeback. */
4793 if (dsc->u.ldst.writeback)
4794 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4795 /* Put result in right place. */
4796 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
4797 if (dsc->u.ldst.xfersize == 8)
4798 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
4799 }
4800
4801 /* Clean up store instructions. */
4802
4803 static void
4804 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
4805 struct displaced_step_closure *dsc)
4806 {
4807 CORE_ADDR from = dsc->insn_addr;
4808 ULONGEST rn_val = displaced_read_reg (regs, from, 2);
4809
4810 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4811 if (dsc->u.ldst.xfersize > 4)
4812 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4813 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
4814 if (!dsc->u.ldst.immed)
4815 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
4816 if (!dsc->u.ldst.restore_r4)
4817 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
4818
4819 /* Writeback. */
4820 if (dsc->u.ldst.writeback)
4821 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
4822 }
4823
4824 /* Copy "extra" load/store instructions. These are halfword/doubleword
4825 transfers, which have a different encoding to byte/word transfers. */
4826
4827 static int
4828 copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
4829 struct regcache *regs, struct displaced_step_closure *dsc)
4830 {
4831 unsigned int op1 = bits (insn, 20, 24);
4832 unsigned int op2 = bits (insn, 5, 6);
4833 unsigned int rt = bits (insn, 12, 15);
4834 unsigned int rn = bits (insn, 16, 19);
4835 unsigned int rm = bits (insn, 0, 3);
4836 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
4837 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
4838 int immed = (op1 & 0x4) != 0;
4839 int opcode;
4840 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
4841 CORE_ADDR from = dsc->insn_addr;
4842
4843 if (!insn_references_pc (insn, 0x000ff00ful))
4844 return copy_unmodified (gdbarch, insn, "extra load/store", dsc);
4845
4846 if (debug_displaced)
4847 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
4848 "insn %.8lx\n", unprivileged ? "unprivileged " : "",
4849 (unsigned long) insn);
4850
4851 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
4852
4853 if (opcode < 0)
4854 internal_error (__FILE__, __LINE__,
4855 _("copy_extra_ld_st: instruction decode error"));
4856
4857 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4858 dsc->tmp[1] = displaced_read_reg (regs, from, 1);
4859 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4860 if (!immed)
4861 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4862
4863 rt_val = displaced_read_reg (regs, from, rt);
4864 if (bytesize[opcode] == 8)
4865 rt_val2 = displaced_read_reg (regs, from, rt + 1);
4866 rn_val = displaced_read_reg (regs, from, rn);
4867 if (!immed)
4868 rm_val = displaced_read_reg (regs, from, rm);
4869
4870 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4871 if (bytesize[opcode] == 8)
4872 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
4873 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4874 if (!immed)
4875 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4876
4877 dsc->rd = rt;
4878 dsc->u.ldst.xfersize = bytesize[opcode];
4879 dsc->u.ldst.rn = rn;
4880 dsc->u.ldst.immed = immed;
4881 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4882 dsc->u.ldst.restore_r4 = 0;
4883
4884 if (immed)
4885 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
4886 ->
4887 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
4888 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4889 else
4890 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
4891 ->
4892 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
4893 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4894
4895 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
4896
4897 return 0;
4898 }
4899
4900 /* Copy byte/word loads and stores. */
4901
4902 static int
4903 copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
4904 struct regcache *regs,
4905 struct displaced_step_closure *dsc, int load, int byte,
4906 int usermode)
4907 {
4908 int immed = !bit (insn, 25);
4909 unsigned int rt = bits (insn, 12, 15);
4910 unsigned int rn = bits (insn, 16, 19);
4911 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
4912 ULONGEST rt_val, rn_val, rm_val = 0;
4913 CORE_ADDR from = dsc->insn_addr;
4914
4915 if (!insn_references_pc (insn, 0x000ff00ful))
4916 return copy_unmodified (gdbarch, insn, "load/store", dsc);
4917
4918 if (debug_displaced)
4919 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n",
4920 load ? (byte ? "ldrb" : "ldr")
4921 : (byte ? "strb" : "str"), usermode ? "t" : "",
4922 (unsigned long) insn);
4923
4924 dsc->tmp[0] = displaced_read_reg (regs, from, 0);
4925 dsc->tmp[2] = displaced_read_reg (regs, from, 2);
4926 if (!immed)
4927 dsc->tmp[3] = displaced_read_reg (regs, from, 3);
4928 if (!load)
4929 dsc->tmp[4] = displaced_read_reg (regs, from, 4);
4930
4931 rt_val = displaced_read_reg (regs, from, rt);
4932 rn_val = displaced_read_reg (regs, from, rn);
4933 if (!immed)
4934 rm_val = displaced_read_reg (regs, from, rm);
4935
4936 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
4937 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
4938 if (!immed)
4939 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
4940
4941 dsc->rd = rt;
4942 dsc->u.ldst.xfersize = byte ? 1 : 4;
4943 dsc->u.ldst.rn = rn;
4944 dsc->u.ldst.immed = immed;
4945 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
4946
4947 /* To write PC we can do:
4948
4949 scratch+0: str pc, temp (*temp = scratch + 8 + offset)
4950 scratch+4: ldr r4, temp
4951 scratch+8: sub r4, r4, pc (r4 = scratch + 8 + offset - scratch - 8 - 8)
4952 scratch+12: add r4, r4, #8 (r4 = offset)
4953 scratch+16: add r0, r0, r4
4954 scratch+20: str r0, [r2, #imm] (or str r0, [r2, r3])
4955 scratch+24: (breakpoint); scratch+28: <temp>
4956
4957 Otherwise we don't know what value to write for PC, since the offset is
4958 architecture-dependent (sometimes PC+8, sometimes PC+12). */
4959
4960 if (load || rt != 15)
4961 {
4962 dsc->u.ldst.restore_r4 = 0;
4963
4964 if (immed)
4965 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
4966 ->
4967 {ldr,str}[b]<cond> r0, [r2, #imm]. */
4968 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
4969 else
4970 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
4971 ->
4972 {ldr,str}[b]<cond> r0, [r2, r3]. */
4973 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
4974 }
4975 else
4976 {
4977 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
4978 dsc->u.ldst.restore_r4 = 1;
4979
4980 dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
4981 dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
4982 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
4983 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
4984 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
4985
4986 /* As above. */
4987 if (immed)
4988 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
4989 else
4990 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
4991
4992 dsc->modinsn[6] = 0x0; /* breakpoint location. */
4993 dsc->modinsn[7] = 0x0; /* scratch space. */
4994
4995 dsc->numinsns = 6;
4996 }
4997
4998 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
4999
5000 return 0;
5001 }
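
/* A worked example of the str-PC sequence above, assuming the CPU
   stores PC + 12 for "str pc" (one of the two permitted values): the
   word written at scratch+0 is scratch + 12; r4 is loaded with it and
   "sub r4, r4, pc" at scratch+8 reads PC as scratch + 16, so r4 becomes
   -4; "add r4, r4, #8" makes it 4; and "add r0, r0, r4" turns r0, which
   preparation set to the original PC-as-data value (insn_addr + 8),
   into insn_addr + 12, exactly what the original "str pc, ..." would
   have stored.  */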
5002
5003 /* Cleanup LDM instructions with fully-populated register list. This is an
5004 unfortunate corner case: it's impossible to implement correctly by modifying
5005 the instruction. The issue is as follows: we have an instruction,
5006
5007 ldm rN, {r0-r15}
5008
5009 which we must rewrite to avoid loading PC. A possible solution would be to
5010 do the load in two halves, something like (with suitable cleanup
5011 afterwards):
5012
5013 mov r8, rN
5014 ldm[id][ab] r8!, {r0-r7}
5015 str r7, <temp>
5016 ldm[id][ab] r8, {r7-r14}
5017 <bkpt>
5018
5019 but at present there's no suitable place for <temp>, since the scratch space
5020 is overwritten before the cleanup routine is called. For now, we simply
5021 emulate the instruction. */
5022
5023 static void
5024 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
5025 struct displaced_step_closure *dsc)
5026 {
5027 ULONGEST from = dsc->insn_addr;
5028 int inc = dsc->u.block.increment;
5029 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
5030 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
5031 uint32_t regmask = dsc->u.block.regmask;
5032 int regno = inc ? 0 : 15;
5033 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
5034 int exception_return = dsc->u.block.load && dsc->u.block.user
5035 && (regmask & 0x8000) != 0;
5036 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5037 int do_transfer = condition_true (dsc->u.block.cond, status);
5038 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5039
5040 if (!do_transfer)
5041 return;
5042
5043 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
5044 sensible we can do here. Complain loudly. */
5045 if (exception_return)
5046 error (_("Cannot single-step exception return"));
5047
5048 /* We don't handle any stores here for now. */
5049 gdb_assert (dsc->u.block.load != 0);
5050
5051 if (debug_displaced)
5052 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
5053 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
5054 dsc->u.block.increment ? "inc" : "dec",
5055 dsc->u.block.before ? "before" : "after");
5056
5057 while (regmask)
5058 {
5059 uint32_t memword;
5060
5061 if (inc)
5062 while (regno <= 15 && (regmask & (1 << regno)) == 0)
5063 regno++;
5064 else
5065 while (regno >= 0 && (regmask & (1 << regno)) == 0)
5066 regno--;
5067
5068 xfer_addr += bump_before;
5069
5070 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
5071 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
5072
5073 xfer_addr += bump_after;
5074
5075 regmask &= ~(1 << regno);
5076 }
5077
5078 if (dsc->u.block.writeback)
5079 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
5080 CANNOT_WRITE_PC);
5081 }
5082
5083 /* Clean up an STM which included the PC in the register list. */
5084
5085 static void
5086 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
5087 struct displaced_step_closure *dsc)
5088 {
5089 ULONGEST from = dsc->insn_addr;
5090 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5091 int store_executed = condition_true (dsc->u.block.cond, status);
5092 CORE_ADDR pc_stored_at, stm_insn_addr;
5093 int transferred_regs = bitcount (dsc->u.block.regmask);
5094 uint32_t pc_val;
5095 long offset;
5096 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5097
5098 /* If condition code fails, there's nothing else to do. */
5099 if (!store_executed)
5100 return;
5101
5102 if (dsc->u.block.increment)
5103 {
5104 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
5105
5106 if (dsc->u.block.before)
5107 pc_stored_at += 4;
5108 }
5109 else
5110 {
5111 pc_stored_at = dsc->u.block.xfer_addr;
5112
5113 if (dsc->u.block.before)
5114 pc_stored_at -= 4;
5115 }
5116
5117 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
5118 stm_insn_addr = dsc->scratch_base;
5119 offset = pc_val - stm_insn_addr;
5120
5121 if (debug_displaced)
5122 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
5123 "STM instruction\n", offset);
5124
5125 /* Rewrite the stored PC to the proper value for the non-displaced original
5126 instruction. */
5127 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
5128 dsc->insn_addr + offset);
5129 }
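
/* For instance (addresses are hypothetical): for "stmdb sp!, {r0, r1, pc}"
   the PC slot is the highest address written, sp - 4, which is what the
   decrement/before case above computes.  If the copy executed in the
   scratch area stored, say, scratch_base + 8 there, the detected offset
   is 8 and the word is rewritten to insn_addr + 8, the value the
   instruction would have stored at its original location.  */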
5130
5131 /* Clean up an LDM which includes the PC in the register list. We clumped all
5132 the registers in the transferred list into a contiguous range r0...rX (to
5133 avoid loading PC directly and losing control of the debugged program), so we
5134 must undo that here. */
5135
5136 static void
5137 cleanup_block_load_pc (struct gdbarch *gdbarch,
5138 struct regcache *regs,
5139 struct displaced_step_closure *dsc)
5140 {
5141 ULONGEST from = dsc->insn_addr;
5142 uint32_t status = displaced_read_reg (regs, from, ARM_PS_REGNUM);
5143 int load_executed = condition_true (dsc->u.block.cond, status), i;
5144 unsigned int mask = dsc->u.block.regmask, write_reg = 15;
5145 unsigned int regs_loaded = bitcount (mask);
5146 unsigned int num_to_shuffle = regs_loaded, clobbered;
5147
5148 /* The method employed here will fail if the register list is fully populated
5149 (we need to avoid loading PC directly). */
5150 gdb_assert (num_to_shuffle < 16);
5151
5152 if (!load_executed)
5153 return;
5154
5155 clobbered = (1 << num_to_shuffle) - 1;
5156
5157 while (num_to_shuffle > 0)
5158 {
5159 if ((mask & (1 << write_reg)) != 0)
5160 {
5161 unsigned int read_reg = num_to_shuffle - 1;
5162
5163 if (read_reg != write_reg)
5164 {
5165 ULONGEST rval = displaced_read_reg (regs, from, read_reg);
5166 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
5167 if (debug_displaced)
5168 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
5169 "loaded register r%d to r%d\n"), read_reg,
5170 write_reg);
5171 }
5172 else if (debug_displaced)
5173 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
5174 "r%d already in the right place\n"),
5175 write_reg);
5176
5177 clobbered &= ~(1 << write_reg);
5178
5179 num_to_shuffle--;
5180 }
5181
5182 write_reg--;
5183 }
5184
5185 /* Restore any registers we scribbled over. */
5186 for (write_reg = 0; clobbered != 0; write_reg++)
5187 {
5188 if ((clobbered & (1 << write_reg)) != 0)
5189 {
5190 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
5191 CANNOT_WRITE_PC);
5192 if (debug_displaced)
5193 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
5194 "clobbered register r%d\n"), write_reg);
5195 clobbered &= ~(1 << write_reg);
5196 }
5197 }
5198
5199 /* Perform register writeback manually. */
5200 if (dsc->u.block.writeback)
5201 {
5202 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
5203
5204 if (dsc->u.block.increment)
5205 new_rn_val += regs_loaded * 4;
5206 else
5207 new_rn_val -= regs_loaded * 4;
5208
5209 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
5210 CANNOT_WRITE_PC);
5211 }
5212 }
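
/* A worked example (the instruction is hypothetical): for
   "ldm r0, {r1, r2, pc}", copy_block_xfer below rewrites the register
   list to {r0, r1, r2}, so after the copy executes r0, r1 and r2 hold
   the words destined for r1, r2 and the PC.  The loop above then moves
   r2 into the PC, r1 into r2 and r0 into r1, and finally restores the
   clobbered r0 from the value saved at copy time.  */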
5213
5214 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5215 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5216
5217 static int
5218 copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5219 struct displaced_step_closure *dsc)
5220 {
5221 int load = bit (insn, 20);
5222 int user = bit (insn, 22);
5223 int increment = bit (insn, 23);
5224 int before = bit (insn, 24);
5225 int writeback = bit (insn, 21);
5226 int rn = bits (insn, 16, 19);
5227 CORE_ADDR from = dsc->insn_addr;
5228
5229 /* Block transfers which don't mention PC can be run directly out-of-line. */
5230 if (rn != 15 && (insn & 0x8000) == 0)
5231 return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
5232
5233 if (rn == 15)
5234 {
5235 warning (_("displaced: Unpredictable LDM or STM with base register r15"));
5236 return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
5237 }
5238
5239 if (debug_displaced)
5240 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
5241 "%.8lx\n", (unsigned long) insn);
5242
5243 dsc->u.block.xfer_addr = displaced_read_reg (regs, from, rn);
5244 dsc->u.block.rn = rn;
5245
5246 dsc->u.block.load = load;
5247 dsc->u.block.user = user;
5248 dsc->u.block.increment = increment;
5249 dsc->u.block.before = before;
5250 dsc->u.block.writeback = writeback;
5251 dsc->u.block.cond = bits (insn, 28, 31);
5252
5253 dsc->u.block.regmask = insn & 0xffff;
5254
5255 if (load)
5256 {
5257 if ((insn & 0xffff) == 0xffff)
5258 {
5259 /* LDM with a fully-populated register list. This case is
5260 particularly tricky. Implement for now by fully emulating the
5261 instruction (which might not behave perfectly in all cases, but
5262 these instructions should be rare enough for that not to matter
5263 too much). */
5264 dsc->modinsn[0] = ARM_NOP;
5265
5266 dsc->cleanup = &cleanup_block_load_all;
5267 }
5268 else
5269 {
5270 /* LDM of a list of registers which includes PC. Implement by
5271 rewriting the list of registers to be transferred into a
5272 contiguous chunk r0...rX before doing the transfer, then shuffling
5273 registers into the correct places in the cleanup routine. */
5274 unsigned int regmask = insn & 0xffff;
5275 unsigned int num_in_list = bitcount (regmask), new_regmask;
5276 unsigned int i;
5277
5278 for (i = 0; i < num_in_list; i++)
5279 dsc->tmp[i] = displaced_read_reg (regs, from, i);
5280
5281 /* Writeback makes things complicated. We need to avoid clobbering
5282 the base register with one of the registers in our modified
5283 register list, but just using a different register can't work in
5284 all cases, e.g.:
5285
5286 ldm r14!, {r0-r13,pc}
5287
5288 which would need to be rewritten as:
5289
5290 ldm rN!, {r0-r14}
5291
5292 but that can't work, because there's no free register for N.
5293
5294 Solve this by turning off the writeback bit, and emulating
5295 writeback manually in the cleanup routine. */
5296
5297 if (writeback)
5298 insn &= ~(1 << 21);
5299
5300 new_regmask = (1 << num_in_list) - 1;
5301
5302 if (debug_displaced)
5303 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
5304 "{..., pc}: original reg list %.4x, modified "
5305 "list %.4x\n"), rn, writeback ? "!" : "",
5306 (int) insn & 0xffff, new_regmask);
5307
5308 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
5309
5310 dsc->cleanup = &cleanup_block_load_pc;
5311 }
5312 }
5313 else
5314 {
5315 /* STM of a list of registers which includes PC. Run the instruction
5316 as-is, but out of line: this will store the wrong value for the PC,
5317 so we must manually fix up the memory in the cleanup routine.
5318 Doing things this way has the advantage that we can auto-detect
5319 the offset of the PC write (which is architecture-dependent) in
5320 the cleanup routine. */
5321 dsc->modinsn[0] = insn;
5322
5323 dsc->cleanup = &cleanup_block_store_pc;
5324 }
5325
5326 return 0;
5327 }
5328
5329 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
5330 for Linux, where some SVC instructions must be treated specially. */
5331
5332 static void
5333 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
5334 struct displaced_step_closure *dsc)
5335 {
5336 CORE_ADDR from = dsc->insn_addr;
5337 CORE_ADDR resume_addr = from + 4;
5338
5339 if (debug_displaced)
5340 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
5341 "%.8lx\n", (unsigned long) resume_addr);
5342
5343 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
5344 }
5345
5346 static int
5347 copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5348 struct regcache *regs, struct displaced_step_closure *dsc)
5349 {
5350 CORE_ADDR from = dsc->insn_addr;
5351
5352 /* Allow OS-specific code to override SVC handling. */
5353 if (dsc->u.svc.copy_svc_os)
5354 return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);
5355
5356 if (debug_displaced)
5357 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
5358 (unsigned long) insn);
5359
5360 /* Preparation: none.
5361 Insn: unmodified svc.
5362 Cleanup: pc <- insn_addr + 4. */
5363
5364 dsc->modinsn[0] = insn;
5365
5366 dsc->cleanup = &cleanup_svc;
5367 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
5368 instruction. */
5369 dsc->wrote_to_pc = 1;
5370
5371 return 0;
5372 }
5373
5374 /* Copy undefined instructions. */
5375
5376 static int
5377 copy_undef (struct gdbarch *gdbarch, uint32_t insn,
5378 struct displaced_step_closure *dsc)
5379 {
5380 if (debug_displaced)
5381 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
5382 (unsigned long) insn);
5383
5384 dsc->modinsn[0] = insn;
5385
5386 return 0;
5387 }
5388
5389 /* Copy unpredictable instructions. */
5390
5391 static int
5392 copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
5393 struct displaced_step_closure *dsc)
5394 {
5395 if (debug_displaced)
5396 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
5397 "%.8lx\n", (unsigned long) insn);
5398
5399 dsc->modinsn[0] = insn;
5400
5401 return 0;
5402 }
5403
5404 /* The decode_* functions are instruction decoding helpers. They mostly follow
5405 the presentation in the ARM ARM. */
5406
5407 static int
5408 decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
5409 struct regcache *regs,
5410 struct displaced_step_closure *dsc)
5411 {
5412 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
5413 unsigned int rn = bits (insn, 16, 19);
5414
5415 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
5416 return copy_unmodified (gdbarch, insn, "cps", dsc);
5417 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
5418 return copy_unmodified (gdbarch, insn, "setend", dsc);
5419 else if ((op1 & 0x60) == 0x20)
5420 return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
5421 else if ((op1 & 0x71) == 0x40)
5422 return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
5423 else if ((op1 & 0x77) == 0x41)
5424 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5425 else if ((op1 & 0x77) == 0x45)
5426 return copy_preload (gdbarch, insn, regs, dsc); /* pli. */
5427 else if ((op1 & 0x77) == 0x51)
5428 {
5429 if (rn != 0xf)
5430 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5431 else
5432 return copy_unpred (gdbarch, insn, dsc);
5433 }
5434 else if ((op1 & 0x77) == 0x55)
5435 return copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
5436 else if (op1 == 0x57)
5437 switch (op2)
5438 {
5439 case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
5440 case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
5441 case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
5442 case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
5443 default: return copy_unpred (gdbarch, insn, dsc);
5444 }
5445 else if ((op1 & 0x63) == 0x43)
5446 return copy_unpred (gdbarch, insn, dsc);
5447 else if ((op2 & 0x1) == 0x0)
5448 switch (op1 & ~0x80)
5449 {
5450 case 0x61:
5451 return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
5452 case 0x65:
5453 return copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
5454 case 0x71: case 0x75:
5455 /* pld/pldw reg. */
5456 return copy_preload_reg (gdbarch, insn, regs, dsc);
5457 case 0x63: case 0x67: case 0x73: case 0x77:
5458 return copy_unpred (gdbarch, insn, dsc);
5459 default:
5460 return copy_undef (gdbarch, insn, dsc);
5461 }
5462 else
5463 return copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
5464 }
5465
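/* Decode instructions whose condition field is 0b1111 (the unconditional
   encoding space). */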
5466 static int
5467 decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
5468 struct regcache *regs, struct displaced_step_closure *dsc)
5469 {
5470 if (bit (insn, 27) == 0)
5471 return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
5472 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
5473 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
5474 {
5475 case 0x0: case 0x2:
5476 return copy_unmodified (gdbarch, insn, "srs", dsc);
5477
5478 case 0x1: case 0x3:
5479 return copy_unmodified (gdbarch, insn, "rfe", dsc);
5480
5481 case 0x4: case 0x5: case 0x6: case 0x7:
5482 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5483
5484 case 0x8:
5485 switch ((insn & 0xe00000) >> 21)
5486 {
5487 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
5488 /* stc/stc2. */
5489 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5490
5491 case 0x2:
5492 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5493
5494 default:
5495 return copy_undef (gdbarch, insn, dsc);
5496 }
5497
5498 case 0x9:
5499 {
5500 int rn_f = (bits (insn, 16, 19) == 0xf);
5501 switch ((insn & 0xe00000) >> 21)
5502 {
5503 case 0x1: case 0x3:
5504 /* ldc/ldc2 imm (undefined for rn == pc). */
5505 return rn_f ? copy_undef (gdbarch, insn, dsc)
5506 : copy_copro_load_store (gdbarch, insn, regs, dsc);
5507
5508 case 0x2:
5509 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5510
5511 case 0x4: case 0x5: case 0x6: case 0x7:
5512 /* ldc/ldc2 lit (undefined for rn != pc). */
5513 return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
5514 : copy_undef (gdbarch, insn, dsc);
5515
5516 default:
5517 return copy_undef (gdbarch, insn, dsc);
5518 }
5519 }
5520
5521 case 0xa:
5522 return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
5523
5524 case 0xb:
5525 if (bits (insn, 16, 19) == 0xf)
5526 /* ldc/ldc2 lit. */
5527 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5528 else
5529 return copy_undef (gdbarch, insn, dsc);
5530
5531 case 0xc:
5532 if (bit (insn, 4))
5533 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5534 else
5535 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5536
5537 case 0xd:
5538 if (bit (insn, 4))
5539 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5540 else
5541 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5542
5543 default:
5544 return copy_undef (gdbarch, insn, dsc);
5545 }
5546 }
5547
5548 /* Decode miscellaneous instructions in dp/misc encoding space. */
5549
5550 static int
5551 decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
5552 struct regcache *regs, struct displaced_step_closure *dsc)
5553 {
5554 unsigned int op2 = bits (insn, 4, 6);
5555 unsigned int op = bits (insn, 21, 22);
5556 unsigned int op1 = bits (insn, 16, 19);
5557
5558 switch (op2)
5559 {
5560 case 0x0:
5561 return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
5562
5563 case 0x1:
5564 if (op == 0x1) /* bx. */
5565 return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
5566 else if (op == 0x3)
5567 return copy_unmodified (gdbarch, insn, "clz", dsc);
5568 else
5569 return copy_undef (gdbarch, insn, dsc);
5570
5571 case 0x2:
5572 if (op == 0x1)
5573 /* Not really supported. */
5574 return copy_unmodified (gdbarch, insn, "bxj", dsc);
5575 else
5576 return copy_undef (gdbarch, insn, dsc);
5577
5578 case 0x3:
5579 if (op == 0x1)
5580 return copy_bx_blx_reg (gdbarch, insn, regs, dsc); /* blx register. */
5581 else
5582 return copy_undef (gdbarch, insn, dsc);
5583
5584 case 0x5:
5585 return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
5586
5587 case 0x7:
5588 if (op == 0x1)
5589 return copy_unmodified (gdbarch, insn, "bkpt", dsc);
5590 else if (op == 0x3)
5591 /* Not really supported. */
5592 return copy_unmodified (gdbarch, insn, "smc", dsc);
5593
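/* Any other value of OP falls through to the undefined case. */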
5594 default:
5595 return copy_undef (gdbarch, insn, dsc);
5596 }
5597 }
5598
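/* Decode data-processing (immediate and register forms) and miscellaneous
   instructions. */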
5599 static int
5600 decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5601 struct displaced_step_closure *dsc)
5602 {
5603 if (bit (insn, 25))
5604 switch (bits (insn, 20, 24))
5605 {
5606 case 0x10:
5607 return copy_unmodified (gdbarch, insn, "movw", dsc);
5608
5609 case 0x14:
5610 return copy_unmodified (gdbarch, insn, "movt", dsc);
5611
5612 case 0x12: case 0x16:
5613 return copy_unmodified (gdbarch, insn, "msr imm", dsc);
5614
5615 default:
5616 return copy_alu_imm (gdbarch, insn, regs, dsc);
5617 }
5618 else
5619 {
5620 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
5621
5622 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
5623 return copy_alu_reg (gdbarch, insn, regs, dsc);
5624 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
5625 return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
5626 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
5627 return decode_miscellaneous (gdbarch, insn, regs, dsc);
5628 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
5629 return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
5630 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
5631 return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
5632 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
5633 return copy_unmodified (gdbarch, insn, "synch", dsc);
5634 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
5635 /* 2nd arg means "unprivileged". */
5636 return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
5637 dsc);
5638 }
5639
5640 /* Should be unreachable. */
5641 return 1;
5642 }
5643
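/* Decode load/store word and unsigned byte instructions. */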
5644 static int
5645 decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
5646 struct regcache *regs,
5647 struct displaced_step_closure *dsc)
5648 {
5649 int a = bit (insn, 25), b = bit (insn, 4);
5650 uint32_t op1 = bits (insn, 20, 24);
5651 int rn_f = bits (insn, 16, 19) == 0xf;
5652
5653 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
5654 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
5655 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
5656 else if ((!a && (op1 & 0x17) == 0x02)
5657 || (a && (op1 & 0x17) == 0x02 && !b))
5658 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
5659 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
5660 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
5661 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
5662 else if ((!a && (op1 & 0x17) == 0x03)
5663 || (a && (op1 & 0x17) == 0x03 && !b))
5664 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
5665 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
5666 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
5667 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
5668 else if ((!a && (op1 & 0x17) == 0x06)
5669 || (a && (op1 & 0x17) == 0x06 && !b))
5670 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
5671 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
5672 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
5673 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
5674 else if ((!a && (op1 & 0x17) == 0x07)
5675 || (a && (op1 & 0x17) == 0x07 && !b))
5676 return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
5677
5678 /* Should be unreachable. */
5679 return 1;
5680 }
5681
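/* Decode media instructions: parallel add/subtract, pack/unpack, saturate,
   reverse, bit-field and sum-of-absolute-differences operations. */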
5682 static int
5683 decode_media (struct gdbarch *gdbarch, uint32_t insn,
5684 struct displaced_step_closure *dsc)
5685 {
5686 switch (bits (insn, 20, 24))
5687 {
5688 case 0x00: case 0x01: case 0x02: case 0x03:
5689 return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
5690
5691 case 0x04: case 0x05: case 0x06: case 0x07:
5692 return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
5693
5694 case 0x08: case 0x09: case 0x0a: case 0x0b:
5695 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
5696 return copy_unmodified (gdbarch, insn,
5697 "decode/pack/unpack/saturate/reverse", dsc);
5698
5699 case 0x18:
5700 if (bits (insn, 5, 7) == 0) /* op2. */
5701 {
5702 if (bits (insn, 12, 15) == 0xf)
5703 return copy_unmodified (gdbarch, insn, "usad8", dsc);
5704 else
5705 return copy_unmodified (gdbarch, insn, "usada8", dsc);
5706 }
5707 else
5708 return copy_undef (gdbarch, insn, dsc);
5709
5710 case 0x1a: case 0x1b:
5711 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5712 return copy_unmodified (gdbarch, insn, "sbfx", dsc);
5713 else
5714 return copy_undef (gdbarch, insn, dsc);
5715
5716 case 0x1c: case 0x1d:
5717 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
5718 {
5719 if (bits (insn, 0, 3) == 0xf)
5720 return copy_unmodified (gdbarch, insn, "bfc", dsc);
5721 else
5722 return copy_unmodified (gdbarch, insn, "bfi", dsc);
5723 }
5724 else
5725 return copy_undef (gdbarch, insn, dsc);
5726
5727 case 0x1e: case 0x1f:
5728 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
5729 return copy_unmodified (gdbarch, insn, "ubfx", dsc);
5730 else
5731 return copy_undef (gdbarch, insn, dsc);
5732 }
5733
5734 /* Should be unreachable. */
5735 return 1;
5736 }
5737
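/* Decode branch, branch-with-link and block data transfer (LDM/STM)
   instructions. */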
5738 static int
5739 decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
5740 struct regcache *regs, struct displaced_step_closure *dsc)
5741 {
5742 if (bit (insn, 25))
5743 return copy_b_bl_blx (gdbarch, insn, regs, dsc);
5744 else
5745 return copy_block_xfer (gdbarch, insn, regs, dsc);
5746 }
5747
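/* Decode VFP/Neon extension register load/store instructions (vldr, vstr,
   vldm, vstm, vpush, vpop) and 64-bit register transfers. */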
5748 static int
5749 decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
5750 struct regcache *regs, struct displaced_step_closure *dsc)
5751 {
5752 unsigned int opcode = bits (insn, 20, 24);
5753
5754 switch (opcode)
5755 {
5756 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
5757 return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
5758
5759 case 0x08: case 0x0a: case 0x0c: case 0x0e:
5760 case 0x12: case 0x16:
5761 return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
5762
5763 case 0x09: case 0x0b: case 0x0d: case 0x0f:
5764 case 0x13: case 0x17:
5765 return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
5766
5767 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
5768 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
5769 /* Note: no writeback for these instructions. Bit 25 will always be
5770 zero though (via caller), so the following works OK. */
5771 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5772 }
5773
5774 /* Should be unreachable. */
5775 return 1;
5776 }
5777
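/* Decode supervisor call (SVC) and coprocessor instructions, including
   VFP/Neon transfers and data processing. */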
5778 static int
5779 decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
5780 struct regcache *regs, struct displaced_step_closure *dsc)
5781 {
5782 unsigned int op1 = bits (insn, 20, 25);
5783 int op = bit (insn, 4);
5784 unsigned int coproc = bits (insn, 8, 11);
5785 unsigned int rn = bits (insn, 16, 19);
5786
5787 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
5788 return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
5789 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
5790 && (coproc & 0xe) != 0xa)
5791 /* stc/stc2. */
5792 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5793 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
5794 && (coproc & 0xe) != 0xa)
5795 /* ldc/ldc2 imm/lit. */
5796 return copy_copro_load_store (gdbarch, insn, regs, dsc);
5797 else if ((op1 & 0x3e) == 0x00)
5798 return copy_undef (gdbarch, insn, dsc);
5799 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
5800 return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
5801 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
5802 return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
5803 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
5804 return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
5805 else if ((op1 & 0x30) == 0x20 && !op)
5806 {
5807 if ((coproc & 0xe) == 0xa)
5808 return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
5809 else
5810 return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
5811 }
5812 else if ((op1 & 0x30) == 0x20 && op)
5813 return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
5814 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
5815 return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
5816 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
5817 return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
5818 else if ((op1 & 0x30) == 0x30)
5819 return copy_svc (gdbarch, insn, to, regs, dsc);
5820 else
5821 return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
5822 }
5823
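/* Decode the ARM instruction INSN, which is about to be displaced-stepped
   from FROM to the scratch space at TO, and fill in DSC with the modified
   instruction(s) and any cleanup routine required. */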
5824 void
5825 arm_process_displaced_insn (struct gdbarch *gdbarch, uint32_t insn,
5826 CORE_ADDR from, CORE_ADDR to, struct regcache *regs,
5827 struct displaced_step_closure *dsc)
5828 {
5829 int err = 0;
5830
5831 if (!displaced_in_arm_mode (regs))
5832 error (_("Displaced stepping is only supported in ARM mode"));
5833
5834 /* Most displaced instructions use a 1-instruction scratch space, so set this
5835 here and override below if/when necessary. */
5836 dsc->numinsns = 1;
5837 dsc->insn_addr = from;
5838 dsc->scratch_base = to;
5839 dsc->cleanup = NULL;
5840 dsc->wrote_to_pc = 0;
5841
5842 if ((insn & 0xf0000000) == 0xf0000000)
5843 err = decode_unconditional (gdbarch, insn, regs, dsc);
5844 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
5845 {
5846 case 0x0: case 0x1: case 0x2: case 0x3:
5847 err = decode_dp_misc (gdbarch, insn, regs, dsc);
5848 break;
5849
5850 case 0x4: case 0x5: case 0x6:
5851 err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
5852 break;
5853
5854 case 0x7:
5855 err = decode_media (gdbarch, insn, dsc);
5856 break;
5857
5858 case 0x8: case 0x9: case 0xa: case 0xb:
5859 err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
5860 break;
5861
5862 case 0xc: case 0xd: case 0xe: case 0xf:
5863 err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
5864 break;
5865 }
5866
5867 if (err)
5868 internal_error (__FILE__, __LINE__,
5869 _("arm_process_displaced_insn: Instruction decode error"));
5870 }
5871
5872 /* Actually set up the scratch space for a displaced instruction. */
5873
5874 void
5875 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
5876 CORE_ADDR to, struct displaced_step_closure *dsc)
5877 {
5878 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
5879 unsigned int i;
5880 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5881
5882 /* Poke modified instruction(s). */
5883 for (i = 0; i < dsc->numinsns; i++)
5884 {
5885 if (debug_displaced)
5886 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
5887 "%.8lx\n", (unsigned long) dsc->modinsn[i],
5888 (unsigned long) to + i * 4);
5889 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
5890 dsc->modinsn[i]);
5891 }
5892
5893 /* Put breakpoint afterwards. */
5894 write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
5895 tdep->arm_breakpoint_size);
5896
5897 if (debug_displaced)
5898 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
5899 paddress (gdbarch, from), paddress (gdbarch, to));
5900 }
5901
5902 /* Entry point for copying an instruction into scratch space for displaced
5903 stepping. */
5904
5905 struct displaced_step_closure *
5906 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
5907 CORE_ADDR from, CORE_ADDR to,
5908 struct regcache *regs)
5909 {
5910 struct displaced_step_closure *dsc
5911 = xmalloc (sizeof (struct displaced_step_closure));
5912 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5913 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
5914
5915 if (debug_displaced)
5916 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
5917 "at %.8lx\n", (unsigned long) insn,
5918 (unsigned long) from);
5919
5920 arm_process_displaced_insn (gdbarch, insn, from, to, regs, dsc);
5921 arm_displaced_init_closure (gdbarch, from, to, dsc);
5922
5923 return dsc;
5924 }
5925
5926 /* Entry point for cleaning things up after a displaced instruction has been
5927 single-stepped. */
5928
5929 void
5930 arm_displaced_step_fixup (struct gdbarch *gdbarch,
5931 struct displaced_step_closure *dsc,
5932 CORE_ADDR from, CORE_ADDR to,
5933 struct regcache *regs)
5934 {
5935 if (dsc->cleanup)
5936 dsc->cleanup (gdbarch, regs, dsc);
5937
5938 if (!dsc->wrote_to_pc)
5939 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
5940 }
5941
5942 #include "bfd-in2.h"
5943 #include "libcoff.h"
5944
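/* Disassemble the instruction at MEMADDR. If the address is a Thumb
   address, a fake Thumb symbol is supplied so that the opcodes disassembler
   decodes Thumb; otherwise ARM decoding is used. */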
5945 static int
5946 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
5947 {
5948 struct gdbarch *gdbarch = info->application_data;
5949
5950 if (arm_pc_is_thumb (gdbarch, memaddr))
5951 {
5952 static asymbol *asym;
5953 static combined_entry_type ce;
5954 static struct coff_symbol_struct csym;
5955 static struct bfd fake_bfd;
5956 static bfd_target fake_target;
5957
5958 if (csym.native == NULL)
5959 {
5960 /* Create a fake symbol vector containing a Thumb symbol.
5961 This is solely so that the code in print_insn_little_arm()
5962 and print_insn_big_arm() in opcodes/arm-dis.c will detect
5963 the presence of a Thumb symbol and switch to decoding
5964 Thumb instructions. */
5965
5966 fake_target.flavour = bfd_target_coff_flavour;
5967 fake_bfd.xvec = &fake_target;
5968 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
5969 csym.native = &ce;
5970 csym.symbol.the_bfd = &fake_bfd;
5971 csym.symbol.name = "fake";
5972 asym = (asymbol *) & csym;
5973 }
5974
5975 memaddr = UNMAKE_THUMB_ADDR (memaddr);
5976 info->symbols = &asym;
5977 }
5978 else
5979 info->symbols = NULL;
5980
5981 if (info->endian == BFD_ENDIAN_BIG)
5982 return print_insn_big_arm (memaddr, info);
5983 else
5984 return print_insn_little_arm (memaddr, info);
5985 }
5986
5987 /* The following macros define instruction sequences that will cause ARM
5988 CPUs to take an undefined instruction trap. These are used to
5989 signal a breakpoint to GDB.
5990
5991 The newer ARMv4T CPUs are capable of operating in ARM or Thumb
5992 modes. A different instruction is required for each mode. The ARM
5993 CPUs can also be big or little endian. Thus four different
5994 instructions are needed to support all cases.
5995
5996 Note: ARMv4 defines several new instructions that will take the
5997 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
5998 not in fact add the new instructions. The new undefined
5999 instructions in ARMv4 are all instructions that had no defined
6000 behaviour in earlier chips. There is no guarantee that they will
6001 raise an exception; they may simply be treated as NOPs. In practice,
6002 it may only be safe to rely on instructions matching:
6003
6004 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
6005 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
6006 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
6007
6008 Even this may only be true if the condition predicate is true. The
6009 following use a condition predicate of ALWAYS, so it is always TRUE.
6010
6011 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
6012 and NetBSD all use a software interrupt rather than an undefined
6013 instruction to force a trap. This can be handled by the
6014 ABI-specific code during establishment of the gdbarch vector. */
6015
6016 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
6017 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
6018 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
6019 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
6020
6021 static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
6022 static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
6023 static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
6024 static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
6025
6026 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
6027 the program counter value to determine whether a 16-bit or 32-bit
6028 breakpoint should be used. It returns a pointer to a string of
6029 bytes that encode a breakpoint instruction, stores the length of
6030 the string to *lenptr, and adjusts the program counter (if
6031 necessary) to point to the actual memory location where the
6032 breakpoint should be inserted. */
6033
6034 static const unsigned char *
6035 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
6036 {
6037 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6038 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
6039
6040 if (arm_pc_is_thumb (gdbarch, *pcptr))
6041 {
6042 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
6043
6044 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
6045 check whether we are replacing a 32-bit instruction. */
6046 if (tdep->thumb2_breakpoint != NULL)
6047 {
6048 gdb_byte buf[2];
6049 if (target_read_memory (*pcptr, buf, 2) == 0)
6050 {
6051 unsigned short inst1;
6052 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
6053 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
6054 {
6055 *lenptr = tdep->thumb2_breakpoint_size;
6056 return tdep->thumb2_breakpoint;
6057 }
6058 }
6059 }
6060
6061 *lenptr = tdep->thumb_breakpoint_size;
6062 return tdep->thumb_breakpoint;
6063 }
6064 else
6065 {
6066 *lenptr = tdep->arm_breakpoint_size;
6067 return tdep->arm_breakpoint;
6068 }
6069 }
6070
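/* Determine the breakpoint kind to report to a remote stub for the
   location *PCPTR. */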
6071 static void
6072 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
6073 int *kindptr)
6074 {
6075 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6076
6077 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
6078
6079 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
6080 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
6081 that this is not confused with a 32-bit ARM breakpoint. */
6082 *kindptr = 3;
6083 }
6084
6085 /* Extract from REGS, a regcache holding the (raw) register state, a
6086 function return value of type TYPE, and copy that, in virtual
6087 format, into VALBUF. */
6088
6089 static void
6090 arm_extract_return_value (struct type *type, struct regcache *regs,
6091 gdb_byte *valbuf)
6092 {
6093 struct gdbarch *gdbarch = get_regcache_arch (regs);
6094 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6095
6096 if (TYPE_CODE_FLT == TYPE_CODE (type))
6097 {
6098 switch (gdbarch_tdep (gdbarch)->fp_model)
6099 {
6100 case ARM_FLOAT_FPA:
6101 {
6102 /* The value is in register F0 in internal format. We need to
6103 extract the raw value and then convert it to the desired
6104 internal type. */
6105 bfd_byte tmpbuf[FP_REGISTER_SIZE];
6106
6107 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
6108 convert_from_extended (floatformat_from_type (type), tmpbuf,
6109 valbuf, gdbarch_byte_order (gdbarch));
6110 }
6111 break;
6112
6113 case ARM_FLOAT_SOFT_FPA:
6114 case ARM_FLOAT_SOFT_VFP:
6115 /* ARM_FLOAT_VFP can arise if this is a variadic function, which
6116 therefore does not use the VFP ABI code. */
6117 case ARM_FLOAT_VFP:
6118 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
6119 if (TYPE_LENGTH (type) > 4)
6120 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
6121 valbuf + INT_REGISTER_SIZE);
6122 break;
6123
6124 default:
6125 internal_error
6126 (__FILE__, __LINE__,
6127 _("arm_extract_return_value: Floating point model not supported"));
6128 break;
6129 }
6130 }
6131 else if (TYPE_CODE (type) == TYPE_CODE_INT
6132 || TYPE_CODE (type) == TYPE_CODE_CHAR
6133 || TYPE_CODE (type) == TYPE_CODE_BOOL
6134 || TYPE_CODE (type) == TYPE_CODE_PTR
6135 || TYPE_CODE (type) == TYPE_CODE_REF
6136 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6137 {
6138 /* If the type is a plain integer, then the access is
6139 straightforward. Otherwise we have to play around a bit more. */
6140 int len = TYPE_LENGTH (type);
6141 int regno = ARM_A1_REGNUM;
6142 ULONGEST tmp;
6143
6144 while (len > 0)
6145 {
6146 /* By using store_unsigned_integer we avoid having to do
6147 anything special for small big-endian values. */
6148 regcache_cooked_read_unsigned (regs, regno++, &tmp);
6149 store_unsigned_integer (valbuf,
6150 (len > INT_REGISTER_SIZE
6151 ? INT_REGISTER_SIZE : len),
6152 byte_order, tmp);
6153 len -= INT_REGISTER_SIZE;
6154 valbuf += INT_REGISTER_SIZE;
6155 }
6156 }
6157 else
6158 {
6159 /* For a structure or union the behaviour is as if the value had
6160 been stored to word-aligned memory and then loaded into
6161 registers with 32-bit load instruction(s). */
6162 int len = TYPE_LENGTH (type);
6163 int regno = ARM_A1_REGNUM;
6164 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6165
6166 while (len > 0)
6167 {
6168 regcache_cooked_read (regs, regno++, tmpbuf);
6169 memcpy (valbuf, tmpbuf,
6170 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6171 len -= INT_REGISTER_SIZE;
6172 valbuf += INT_REGISTER_SIZE;
6173 }
6174 }
6175 }
6176
6177
6178 /* Will a function return an aggregate type in memory or in a
6179 register? Return 0 if an aggregate type can be returned in a
6180 register, 1 if it must be returned in memory. */
6181
6182 static int
6183 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
6184 {
6185 int nRc;
6186 enum type_code code;
6187
6188 CHECK_TYPEDEF (type);
6189
6190 /* In the ARM ABI, "integer" like aggregate types are returned in
6191 registers. For an aggregate type to be integer like, its size
6192 must be less than or equal to INT_REGISTER_SIZE and the
6193 offset of each addressable subfield must be zero. Note that bit
6194 fields are not addressable, and all addressable subfields of
6195 unions always start at offset zero.
6196
6197 This function is based on the behaviour of GCC 2.95.1.
6198 See: gcc/arm.c: arm_return_in_memory() for details.
6199
6200 Note: Versions of GCC before GCC 2.95.2 do not set up the
6201 parameters correctly for a function returning the following
6202 structure: struct { float f; }. This should be returned in memory,
6203 not a register. Richard Earnshaw sent me a patch, but I do not
6204 know of any way to detect if a function like the above has been
6205 compiled with the correct calling convention. */
6206
6207 /* All aggregate types that won't fit in a register must be returned
6208 in memory. */
6209 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
6210 {
6211 return 1;
6212 }
6213
6214 /* The AAPCS says all aggregates not larger than a word are returned
6215 in a register. */
6216 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
6217 return 0;
6218
6219 /* The only aggregate types that can be returned in a register are
6220 structs and unions. Arrays must be returned in memory. */
6221 code = TYPE_CODE (type);
6222 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
6223 {
6224 return 1;
6225 }
6226
6227 /* Assume all other aggregate types can be returned in a register.
6228 Run a check for structures, unions and arrays. */
6229 nRc = 0;
6230
6231 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
6232 {
6233 int i;
6234 /* Need to check if this struct/union is "integer" like. For
6235 this to be true, its size must be less than or equal to
6236 INT_REGISTER_SIZE and the offset of each addressable
6237 subfield must be zero. Note that bit fields are not
6238 addressable, and unions always start at offset zero. If any
6239 of the subfields is a floating point type, the struct/union
6240 cannot be an integer type. */
6241
6242 /* For each field in the object, check:
6243 1) Is it FP? --> yes, nRc = 1;
6244 2) Is it addressable (bitpos != 0) and
6245 not packed (bitsize == 0)?
6246 --> yes, nRc = 1
6247 */
6248
6249 for (i = 0; i < TYPE_NFIELDS (type); i++)
6250 {
6251 enum type_code field_type_code;
6252 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, i)));
6253
6254 /* Is it a floating point type field? */
6255 if (field_type_code == TYPE_CODE_FLT)
6256 {
6257 nRc = 1;
6258 break;
6259 }
6260
6261 /* If bitpos != 0, then we have to care about it. */
6262 if (TYPE_FIELD_BITPOS (type, i) != 0)
6263 {
6264 /* Bitfields are not addressable. If the field bitsize is
6265 zero, then the field is not packed. Hence it cannot be
6266 a bitfield or any other packed type. */
6267 if (TYPE_FIELD_BITSIZE (type, i) == 0)
6268 {
6269 nRc = 1;
6270 break;
6271 }
6272 }
6273 }
6274 }
6275
6276 return nRc;
6277 }
6278
6279 /* Write into appropriate registers a function return value of type
6280 TYPE, given in virtual format. */
6281
6282 static void
6283 arm_store_return_value (struct type *type, struct regcache *regs,
6284 const gdb_byte *valbuf)
6285 {
6286 struct gdbarch *gdbarch = get_regcache_arch (regs);
6287 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6288
6289 if (TYPE_CODE (type) == TYPE_CODE_FLT)
6290 {
6291 char buf[MAX_REGISTER_SIZE];
6292
6293 switch (gdbarch_tdep (gdbarch)->fp_model)
6294 {
6295 case ARM_FLOAT_FPA:
6296
6297 convert_to_extended (floatformat_from_type (type), buf, valbuf,
6298 gdbarch_byte_order (gdbarch));
6299 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
6300 break;
6301
6302 case ARM_FLOAT_SOFT_FPA:
6303 case ARM_FLOAT_SOFT_VFP:
6304 /* ARM_FLOAT_VFP can arise if this is a variadic function, which
6305 therefore does not use the VFP ABI code. */
6306 case ARM_FLOAT_VFP:
6307 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
6308 if (TYPE_LENGTH (type) > 4)
6309 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
6310 valbuf + INT_REGISTER_SIZE);
6311 break;
6312
6313 default:
6314 internal_error
6315 (__FILE__, __LINE__,
6316 _("arm_store_return_value: Floating point model not supported"));
6317 break;
6318 }
6319 }
6320 else if (TYPE_CODE (type) == TYPE_CODE_INT
6321 || TYPE_CODE (type) == TYPE_CODE_CHAR
6322 || TYPE_CODE (type) == TYPE_CODE_BOOL
6323 || TYPE_CODE (type) == TYPE_CODE_PTR
6324 || TYPE_CODE (type) == TYPE_CODE_REF
6325 || TYPE_CODE (type) == TYPE_CODE_ENUM)
6326 {
6327 if (TYPE_LENGTH (type) <= 4)
6328 {
6329 /* Values of one word or less are zero/sign-extended and
6330 returned in r0. */
6331 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6332 LONGEST val = unpack_long (type, valbuf);
6333
6334 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
6335 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
6336 }
6337 else
6338 {
6339 /* Integral values greater than one word are stored in consecutive
6340 registers starting with r0. This will always be a multiple of
6341 the register size. */
6342 int len = TYPE_LENGTH (type);
6343 int regno = ARM_A1_REGNUM;
6344
6345 while (len > 0)
6346 {
6347 regcache_cooked_write (regs, regno++, valbuf);
6348 len -= INT_REGISTER_SIZE;
6349 valbuf += INT_REGISTER_SIZE;
6350 }
6351 }
6352 }
6353 else
6354 {
6355 /* For a structure or union the behaviour is as if the value had
6356 been stored to word-aligned memory and then loaded into
6357 registers with 32-bit load instruction(s). */
6358 int len = TYPE_LENGTH (type);
6359 int regno = ARM_A1_REGNUM;
6360 bfd_byte tmpbuf[INT_REGISTER_SIZE];
6361
6362 while (len > 0)
6363 {
6364 memcpy (tmpbuf, valbuf,
6365 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
6366 regcache_cooked_write (regs, regno++, tmpbuf);
6367 len -= INT_REGISTER_SIZE;
6368 valbuf += INT_REGISTER_SIZE;
6369 }
6370 }
6371 }
6372
6373
6374 /* Handle function return values. */
6375
6376 static enum return_value_convention
6377 arm_return_value (struct gdbarch *gdbarch, struct type *func_type,
6378 struct type *valtype, struct regcache *regcache,
6379 gdb_byte *readbuf, const gdb_byte *writebuf)
6380 {
6381 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6382 enum arm_vfp_cprc_base_type vfp_base_type;
6383 int vfp_base_count;
6384
6385 if (arm_vfp_abi_for_function (gdbarch, func_type)
6386 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
6387 {
6388 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
6389 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
6390 int i;
6391 for (i = 0; i < vfp_base_count; i++)
6392 {
6393 if (reg_char == 'q')
6394 {
6395 if (writebuf)
6396 arm_neon_quad_write (gdbarch, regcache, i,
6397 writebuf + i * unit_length);
6398
6399 if (readbuf)
6400 arm_neon_quad_read (gdbarch, regcache, i,
6401 readbuf + i * unit_length);
6402 }
6403 else
6404 {
6405 char name_buf[4];
6406 int regnum;
6407
6408 sprintf (name_buf, "%c%d", reg_char, i);
6409 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6410 strlen (name_buf));
6411 if (writebuf)
6412 regcache_cooked_write (regcache, regnum,
6413 writebuf + i * unit_length);
6414 if (readbuf)
6415 regcache_cooked_read (regcache, regnum,
6416 readbuf + i * unit_length);
6417 }
6418 }
6419 return RETURN_VALUE_REGISTER_CONVENTION;
6420 }
6421
6422 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
6423 || TYPE_CODE (valtype) == TYPE_CODE_UNION
6424 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
6425 {
6426 if (tdep->struct_return == pcc_struct_return
6427 || arm_return_in_memory (gdbarch, valtype))
6428 return RETURN_VALUE_STRUCT_CONVENTION;
6429 }
6430
6431 if (writebuf)
6432 arm_store_return_value (valtype, regcache, writebuf);
6433
6434 if (readbuf)
6435 arm_extract_return_value (valtype, regcache, readbuf);
6436
6437 return RETURN_VALUE_REGISTER_CONVENTION;
6438 }
6439
6440
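/* Extract the longjmp resume PC from the jmp_buf whose address is in r0,
   using the jb_pc and jb_elt_size values recorded in the gdbarch tdep.
   Return non-zero on success. */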
6441 static int
6442 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
6443 {
6444 struct gdbarch *gdbarch = get_frame_arch (frame);
6445 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
6446 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6447 CORE_ADDR jb_addr;
6448 char buf[INT_REGISTER_SIZE];
6449
6450 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
6451
6452 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
6453 INT_REGISTER_SIZE))
6454 return 0;
6455
6456 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
6457 return 1;
6458 }
6459
6460 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
6461 return the target PC. Otherwise return 0. */
6462
6463 CORE_ADDR
6464 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
6465 {
6466 char *name;
6467 int namelen;
6468 CORE_ADDR start_addr;
6469
6470 /* Find the starting address and name of the function containing the PC. */
6471 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
6472 return 0;
6473
6474 /* If PC is in a Thumb call or return stub, return the address of the
6475 target PC, which is in a register. The thunk functions are called
6476 _call_via_xx, where xx is the register name. The possible names
6477 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
6478 functions, named __ARM_call_via_r[0-7]. */
6479 if (strncmp (name, "_call_via_", 10) == 0
6480 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
6481 {
6482 /* Use the name suffix to determine which register contains the
6483 target PC. */
6484 static char *table[15] =
6485 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
6486 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
6487 };
6488 int regno;
6489 int offset = strlen (name) - 2;
6490
6491 for (regno = 0; regno <= 14; regno++)
6492 if (strcmp (&name[offset], table[regno]) == 0)
6493 return get_frame_register_unsigned (frame, regno);
6494 }
6495
6496 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
6497 non-interworking calls to foo. We could decode the stubs
6498 to find the target but it's easier to use the symbol table. */
6499 namelen = strlen (name);
6500 if (name[0] == '_' && name[1] == '_'
6501 && ((namelen > 2 + strlen ("_from_thumb")
6502 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
6503 strlen ("_from_thumb")) == 0)
6504 || (namelen > 2 + strlen ("_from_arm")
6505 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
6506 strlen ("_from_arm")) == 0)))
6507 {
6508 char *target_name;
6509 int target_len = namelen - 2;
6510 struct minimal_symbol *minsym;
6511 struct objfile *objfile;
6512 struct obj_section *sec;
6513
6514 if (name[namelen - 1] == 'b')
6515 target_len -= strlen ("_from_thumb");
6516 else
6517 target_len -= strlen ("_from_arm");
6518
6519 target_name = alloca (target_len + 1);
6520 memcpy (target_name, name + 2, target_len);
6521 target_name[target_len] = '\0';
6522
6523 sec = find_pc_section (pc);
6524 objfile = (sec == NULL) ? NULL : sec->objfile;
6525 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
6526 if (minsym != NULL)
6527 return SYMBOL_VALUE_ADDRESS (minsym);
6528 else
6529 return 0;
6530 }
6531
6532 return 0; /* Not a stub. */
6533 }
6534
6535 static void
6536 set_arm_command (char *args, int from_tty)
6537 {
6538 printf_unfiltered (_("\
6539 \"set arm\" must be followed by an appropriate subcommand.\n"));
6540 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
6541 }
6542
6543 static void
6544 show_arm_command (char *args, int from_tty)
6545 {
6546 cmd_show_list (showarmcmdlist, from_tty, "");
6547 }
6548
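/* Re-select the target architecture after a "set arm ..." parameter has
   changed, so that the new setting takes effect. */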
6549 static void
6550 arm_update_current_architecture (void)
6551 {
6552 struct gdbarch_info info;
6553
6554 /* If the current architecture is not ARM, we have nothing to do. */
6555 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
6556 return;
6557
6558 /* Update the architecture. */
6559 gdbarch_info_init (&info);
6560
6561 if (!gdbarch_update_p (info))
6562 internal_error (__FILE__, __LINE__, "could not update architecture");
6563 }
6564
6565 static void
6566 set_fp_model_sfunc (char *args, int from_tty,
6567 struct cmd_list_element *c)
6568 {
6569 enum arm_float_model fp_model;
6570
6571 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
6572 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
6573 {
6574 arm_fp_model = fp_model;
6575 break;
6576 }
6577
6578 if (fp_model == ARM_FLOAT_LAST)
6579 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
6580 current_fp_model);
6581
6582 arm_update_current_architecture ();
6583 }
6584
6585 static void
6586 show_fp_model (struct ui_file *file, int from_tty,
6587 struct cmd_list_element *c, const char *value)
6588 {
6589 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6590
6591 if (arm_fp_model == ARM_FLOAT_AUTO
6592 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6593 fprintf_filtered (file, _("\
6594 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
6595 fp_model_strings[tdep->fp_model]);
6596 else
6597 fprintf_filtered (file, _("\
6598 The current ARM floating point model is \"%s\".\n"),
6599 fp_model_strings[arm_fp_model]);
6600 }
6601
6602 static void
6603 arm_set_abi (char *args, int from_tty,
6604 struct cmd_list_element *c)
6605 {
6606 enum arm_abi_kind arm_abi;
6607
6608 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
6609 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
6610 {
6611 arm_abi_global = arm_abi;
6612 break;
6613 }
6614
6615 if (arm_abi == ARM_ABI_LAST)
6616 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
6617 arm_abi_string);
6618
6619 arm_update_current_architecture ();
6620 }
6621
6622 static void
6623 arm_show_abi (struct ui_file *file, int from_tty,
6624 struct cmd_list_element *c, const char *value)
6625 {
6626 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6627
6628 if (arm_abi_global == ARM_ABI_AUTO
6629 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
6630 fprintf_filtered (file, _("\
6631 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
6632 arm_abi_strings[tdep->arm_abi]);
6633 else
6634 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
6635 arm_abi_string);
6636 }
6637
6638 static void
6639 arm_show_fallback_mode (struct ui_file *file, int from_tty,
6640 struct cmd_list_element *c, const char *value)
6641 {
6642 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6643
6644 fprintf_filtered (file, _("\
6645 The current execution mode assumed (when symbols are unavailable) is \"%s\".\n"),
6646 arm_fallback_mode_string);
6647 }
6648
6649 static void
6650 arm_show_force_mode (struct ui_file *file, int from_tty,
6651 struct cmd_list_element *c, const char *value)
6652 {
6653 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
6654
6655 fprintf_filtered (file, _("\
6656 The current execution mode assumed (even when symbols are available) is \"%s\".\n"),
6657 arm_force_mode_string);
6658 }
6659
6660 /* If the user changes the register disassembly style used for the "info
6661 registers" and other commands, we also have to switch the style used
6662 in opcodes for disassembly output. This function is run by the "set
6663 arm disassembly" command, and does that. */
6664
6665 static void
6666 set_disassembly_style_sfunc (char *args, int from_tty,
6667 struct cmd_list_element *c)
6668 {
6669 set_disassembly_style ();
6670 }
6671 \f
6672 /* Return the ARM register name corresponding to register I. */
6673 static const char *
6674 arm_register_name (struct gdbarch *gdbarch, int i)
6675 {
6676 const int num_regs = gdbarch_num_regs (gdbarch);
6677
6678 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
6679 && i >= num_regs && i < num_regs + 32)
6680 {
6681 static const char *const vfp_pseudo_names[] = {
6682 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
6683 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
6684 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
6685 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
6686 };
6687
6688 return vfp_pseudo_names[i - num_regs];
6689 }
6690
6691 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
6692 && i >= num_regs + 32 && i < num_regs + 32 + 16)
6693 {
6694 static const char *const neon_pseudo_names[] = {
6695 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
6696 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
6697 };
6698
6699 return neon_pseudo_names[i - num_regs - 32];
6700 }
6701
6702 if (i >= ARRAY_SIZE (arm_register_names))
6703 /* These registers are only supported on targets which supply
6704 an XML description. */
6705 return "";
6706
6707 return arm_register_names[i];
6708 }
6709
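/* Propagate the user-selected register naming style to the opcodes
   disassembler. */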
6710 static void
6711 set_disassembly_style (void)
6712 {
6713 int current;
6714
6715 /* Find the style that the user wants. */
6716 for (current = 0; current < num_disassembly_options; current++)
6717 if (disassembly_style == valid_disassembly_styles[current])
6718 break;
6719 gdb_assert (current < num_disassembly_options);
6720
6721 /* Synchronize the disassembler. */
6722 set_arm_regname_option (current);
6723 }
6724
6725 /* Test whether the coff symbol specific value corresponds to a Thumb
6726 function. */
6727
6728 static int
6729 coff_sym_is_thumb (int val)
6730 {
6731 return (val == C_THUMBEXT
6732 || val == C_THUMBSTAT
6733 || val == C_THUMBEXTFUNC
6734 || val == C_THUMBSTATFUNC
6735 || val == C_THUMBLABEL);
6736 }
6737
6738 /* arm_coff_make_msymbol_special()
6739 arm_elf_make_msymbol_special()
6740
6741 These functions test whether the COFF or ELF symbol corresponds to
6742 an address in thumb code, and set a "special" bit in a minimal
6743 symbol to indicate that it does. */
6744
6745 static void
6746 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
6747 {
6748 /* Thumb symbols are of type STT_LOPROC, (synonymous with
6749 STT_ARM_TFUNC). */
6750 if (ELF_ST_TYPE (((elf_symbol_type *)sym)->internal_elf_sym.st_info)
6751 == STT_LOPROC)
6752 MSYMBOL_SET_SPECIAL (msym);
6753 }
6754
6755 static void
6756 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
6757 {
6758 if (coff_sym_is_thumb (val))
6759 MSYMBOL_SET_SPECIAL (msym);
6760 }
6761
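/* Free the per-objfile mapping symbol vectors when OBJFILE is destroyed. */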
6762 static void
6763 arm_objfile_data_free (struct objfile *objfile, void *arg)
6764 {
6765 struct arm_per_objfile *data = arg;
6766 unsigned int i;
6767
6768 for (i = 0; i < objfile->obfd->section_count; i++)
6769 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
6770 }
6771
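/* Record the mapping symbol SYM ($a, $t or $d) in the per-objfile,
   per-section map, keeping each map sorted by address. */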
6772 static void
6773 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
6774 asymbol *sym)
6775 {
6776 const char *name = bfd_asymbol_name (sym);
6777 struct arm_per_objfile *data;
6778 VEC(arm_mapping_symbol_s) **map_p;
6779 struct arm_mapping_symbol new_map_sym;
6780
6781 gdb_assert (name[0] == '$');
6782 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
6783 return;
6784
6785 data = objfile_data (objfile, arm_objfile_data_key);
6786 if (data == NULL)
6787 {
6788 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
6789 struct arm_per_objfile);
6790 set_objfile_data (objfile, arm_objfile_data_key, data);
6791 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
6792 objfile->obfd->section_count,
6793 VEC(arm_mapping_symbol_s) *);
6794 }
6795 map_p = &data->section_maps[bfd_get_section (sym)->index];
6796
6797 new_map_sym.value = sym->value;
6798 new_map_sym.type = name[1];
6799
6800 /* Assume that most mapping symbols appear in order of increasing
6801 value. If they were randomly distributed, it would be faster to
6802 always push here and then sort at first use. */
6803 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
6804 {
6805 struct arm_mapping_symbol *prev_map_sym;
6806
6807 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
6808 if (prev_map_sym->value >= sym->value)
6809 {
6810 unsigned int idx;
6811 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
6812 arm_compare_mapping_symbols);
6813 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
6814 return;
6815 }
6816 }
6817
6818 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
6819 }
6820
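/* Write PC to REGCACHE, and keep the CPSR Thumb (T) bit consistent with
   whether PC is a Thumb address. */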
6821 static void
6822 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
6823 {
6824 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6825 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
6826
6827 /* If necessary, set the T bit. */
6828 if (arm_apcs_32)
6829 {
6830 ULONGEST val, t_bit;
6831 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
6832 t_bit = arm_psr_thumb_bit (gdbarch);
6833 if (arm_pc_is_thumb (gdbarch, pc))
6834 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6835 val | t_bit);
6836 else
6837 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
6838 val & ~t_bit);
6839 }
6840 }
6841
6842 /* Read the contents of a NEON quad register, by reading from two
6843 double registers. This is used to implement the quad pseudo
6844 registers, and for argument passing in case the quad registers are
6845 missing; vectors are passed in quad registers when using the VFP
6846 ABI, even if a NEON unit is not present. REGNUM is the index of
6847 the quad register, in [0, 15]. */
6848
6849 static void
6850 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
6851 int regnum, gdb_byte *buf)
6852 {
6853 char name_buf[4];
6854 gdb_byte reg_buf[8];
6855 int offset, double_regnum;
6856
6857 sprintf (name_buf, "d%d", regnum << 1);
6858 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6859 strlen (name_buf));
6860
6861 /* d0 is always the least significant half of q0. */
6862 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6863 offset = 8;
6864 else
6865 offset = 0;
6866
6867 regcache_raw_read (regcache, double_regnum, reg_buf);
6868 memcpy (buf + offset, reg_buf, 8);
6869
6870 offset = 8 - offset;
6871 regcache_raw_read (regcache, double_regnum + 1, reg_buf);
6872 memcpy (buf + offset, reg_buf, 8);
6873 }
6874
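/* Read the VFP single-precision (s0-s31) or Neon quad (q0-q15) pseudo
   register REGNUM from the underlying double register(s). */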
6875 static void
6876 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
6877 int regnum, gdb_byte *buf)
6878 {
6879 const int num_regs = gdbarch_num_regs (gdbarch);
6880 char name_buf[4];
6881 gdb_byte reg_buf[8];
6882 int offset, double_regnum;
6883
6884 gdb_assert (regnum >= num_regs);
6885 regnum -= num_regs;
6886
6887 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6888 /* Quad-precision register. */
6889 arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
6890 else
6891 {
6892 /* Single-precision register. */
6893 gdb_assert (regnum < 32);
6894
6895 /* s0 is always the least significant half of d0. */
6896 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6897 offset = (regnum & 1) ? 0 : 4;
6898 else
6899 offset = (regnum & 1) ? 4 : 0;
6900
6901 sprintf (name_buf, "d%d", regnum >> 1);
6902 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6903 strlen (name_buf));
6904
6905 regcache_raw_read (regcache, double_regnum, reg_buf);
6906 memcpy (buf, reg_buf + offset, 4);
6907 }
6908 }
6909
6910 /* Store the contents of BUF to a NEON quad register, by writing to
6911 two double registers. This is used to implement the quad pseudo
6912 registers, and for argument passing in case the quad registers are
6913 missing; vectors are passed in quad registers when using the VFP
6914 ABI, even if a NEON unit is not present. REGNUM is the index
6915 of the quad register, in [0, 15]. */
6916
6917 static void
6918 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
6919 int regnum, const gdb_byte *buf)
6920 {
6921 char name_buf[4];
6922 gdb_byte reg_buf[8];
6923 int offset, double_regnum;
6924
6925 sprintf (name_buf, "d%d", regnum << 1);
6926 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6927 strlen (name_buf));
6928
6929 /* d0 is always the least significant half of q0. */
6930 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6931 offset = 8;
6932 else
6933 offset = 0;
6934
6935 regcache_raw_write (regcache, double_regnum, buf + offset);
6936 offset = 8 - offset;
6937 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
6938 }
6939
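/* Write BUF to the VFP single-precision or Neon quad pseudo register
   REGNUM via the underlying double register(s). */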
6940 static void
6941 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
6942 int regnum, const gdb_byte *buf)
6943 {
6944 const int num_regs = gdbarch_num_regs (gdbarch);
6945 char name_buf[4];
6946 gdb_byte reg_buf[8];
6947 int offset, double_regnum;
6948
6949 gdb_assert (regnum >= num_regs);
6950 regnum -= num_regs;
6951
6952 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
6953 /* Quad-precision register. */
6954 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
6955 else
6956 {
6957 /* Single-precision register. */
6958 gdb_assert (regnum < 32);
6959
6960 /* s0 is always the least significant half of d0. */
6961 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
6962 offset = (regnum & 1) ? 0 : 4;
6963 else
6964 offset = (regnum & 1) ? 4 : 0;
6965
6966 sprintf (name_buf, "d%d", regnum >> 1);
6967 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
6968 strlen (name_buf));
6969
6970 regcache_raw_read (regcache, double_regnum, reg_buf);
6971 memcpy (reg_buf + offset, buf, 4);
6972 regcache_raw_write (regcache, double_regnum, reg_buf);
6973 }
6974 }
6975
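/* Baton-based callback: return the value, in FRAME, of the register whose
   number BATON points to. */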
6976 static struct value *
6977 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
6978 {
6979 const int *reg_p = baton;
6980 return value_of_register (*reg_p, frame);
6981 }
6982 \f
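/* Sniff the OS ABI from an ARM ELF binary. For ELFOSABI_ARM objects, also
   check the ABI tag note sections. */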
6983 static enum gdb_osabi
6984 arm_elf_osabi_sniffer (bfd *abfd)
6985 {
6986 unsigned int elfosabi;
6987 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
6988
6989 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
6990
6991 if (elfosabi == ELFOSABI_ARM)
6992 /* GNU tools use this value. Check note sections in this case,
6993 as well. */
6994 bfd_map_over_sections (abfd,
6995 generic_elf_osabi_sniff_abi_tag_sections,
6996 &osabi);
6997
6998 /* Anything else will be handled by the generic ELF sniffer. */
6999 return osabi;
7000 }
7001
7002 static int
7003 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
7004 struct reggroup *group)
7005 {
7006 /* FPS register's type is INT, but belongs to float_group. */
7007 if (regnum == ARM_FPS_REGNUM)
7008 return (group == float_reggroup);
7009 else
7010 return default_register_reggroup_p (gdbarch, regnum, group);
7011 }
7012
7013 \f
7014 /* Initialize the current architecture based on INFO. If possible,
7015 re-use an architecture from ARCHES, which is a list of
7016 architectures already created during this debugging session.
7017
7018 Called e.g. at program startup, when reading a core file, and when
7019 reading a binary file. */
7020
7021 static struct gdbarch *
7022 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
7023 {
7024 struct gdbarch_tdep *tdep;
7025 struct gdbarch *gdbarch;
7026 struct gdbarch_list *best_arch;
7027 enum arm_abi_kind arm_abi = arm_abi_global;
7028 enum arm_float_model fp_model = arm_fp_model;
7029 struct tdesc_arch_data *tdesc_data = NULL;
7030 int i, is_m = 0;
7031 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
7032 int have_neon = 0;
7033 int have_fpa_registers = 1;
7034 const struct target_desc *tdesc = info.target_desc;
7035
7036 /* If we have an object to base this architecture on, try to determine
7037 its ABI. */
7038
7039 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
7040 {
7041 int ei_osabi, e_flags;
7042
7043 switch (bfd_get_flavour (info.abfd))
7044 {
7045 case bfd_target_aout_flavour:
7046 /* Assume it's an old APCS-style ABI. */
7047 arm_abi = ARM_ABI_APCS;
7048 break;
7049
7050 case bfd_target_coff_flavour:
7051 /* Assume it's an old APCS-style ABI. */
7052 /* XXX WinCE? */
7053 arm_abi = ARM_ABI_APCS;
7054 break;
7055
7056 case bfd_target_elf_flavour:
7057 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
7058 e_flags = elf_elfheader (info.abfd)->e_flags;
7059
7060 if (ei_osabi == ELFOSABI_ARM)
7061 {
7062 /* GNU tools used to use this value, but do not for EABI
7063 objects. There's nowhere to tag an EABI version
7064 anyway, so assume APCS. */
7065 arm_abi = ARM_ABI_APCS;
7066 }
7067 else if (ei_osabi == ELFOSABI_NONE)
7068 {
7069 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
7070 int attr_arch, attr_profile;
7071
7072 switch (eabi_ver)
7073 {
7074 case EF_ARM_EABI_UNKNOWN:
7075 /* Assume GNU tools. */
7076 arm_abi = ARM_ABI_APCS;
7077 break;
7078
7079 case EF_ARM_EABI_VER4:
7080 case EF_ARM_EABI_VER5:
7081 arm_abi = ARM_ABI_AAPCS;
7082 /* EABI binaries default to VFP float ordering.
7083 They may also contain build attributes that can
7084 be used to identify if the VFP argument-passing
7085 ABI is in use. */
7086 if (fp_model == ARM_FLOAT_AUTO)
7087 {
7088 #ifdef HAVE_ELF
7089 switch (bfd_elf_get_obj_attr_int (info.abfd,
7090 OBJ_ATTR_PROC,
7091 Tag_ABI_VFP_args))
7092 {
7093 case 0:
7094 /* "The user intended FP parameter/result
7095 passing to conform to AAPCS, base
7096 variant". */
7097 fp_model = ARM_FLOAT_SOFT_VFP;
7098 break;
7099 case 1:
7100 /* "The user intended FP parameter/result
7101 passing to conform to AAPCS, VFP
7102 variant". */
7103 fp_model = ARM_FLOAT_VFP;
7104 break;
7105 case 2:
7106 /* "The user intended FP parameter/result
7107 passing to conform to tool chain-specific
7108 conventions" - we don't know any such
7109 conventions, so leave it as "auto". */
7110 break;
7111 default:
7112 /* Attribute value not mentioned in the
7113 October 2008 ABI, so leave it as
7114 "auto". */
7115 break;
7116 }
7117 #else
7118 fp_model = ARM_FLOAT_SOFT_VFP;
7119 #endif
7120 }
7121 break;
7122
7123 default:
7124 /* Leave it as "auto". */
7125 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
7126 break;
7127 }
7128
7129 #ifdef HAVE_ELF
7130 /* Detect M-profile programs. This only works if the
7131 executable file includes build attributes; GCC does
7132 copy them to the executable, but e.g. RealView does
7133 not. */
7134 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7135 Tag_CPU_arch);
7136 attr_profile = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
7137 Tag_CPU_arch_profile);
7138 /* GCC specifies the profile for v6-M; RealView only
7139 specifies the profile for architectures starting with
7140 V7 (as opposed to architectures with a tag
7141 numerically greater than TAG_CPU_ARCH_V7). */
7142 if (!tdesc_has_registers (tdesc)
7143 && (attr_arch == TAG_CPU_ARCH_V6_M
7144 || attr_arch == TAG_CPU_ARCH_V6S_M
7145 || attr_profile == 'M'))
7146 tdesc = tdesc_arm_with_m;
7147 #endif
7148 }
7149
7150 if (fp_model == ARM_FLOAT_AUTO)
7151 {
7152 int e_flags = elf_elfheader (info.abfd)->e_flags;
7153
7154 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
7155 {
7156 case 0:
7157 /* Leave it as "auto". Strictly speaking this case
7158 means FPA, but almost nobody uses that now, and
7159 many toolchains fail to set the appropriate bits
7160 for the floating-point model they use. */
7161 break;
7162 case EF_ARM_SOFT_FLOAT:
7163 fp_model = ARM_FLOAT_SOFT_FPA;
7164 break;
7165 case EF_ARM_VFP_FLOAT:
7166 fp_model = ARM_FLOAT_VFP;
7167 break;
7168 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
7169 fp_model = ARM_FLOAT_SOFT_VFP;
7170 break;
7171 }
7172 }
7173
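/* BE8 images keep instructions little-endian even though data is
   big-endian, so adjust the byte order used to read code.  */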
7174 if (e_flags & EF_ARM_BE8)
7175 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
7176
7177 break;
7178
7179 default:
7180 /* Leave it as "auto". */
7181 break;
7182 }
7183 }
7184
7185 /* Check any target description for validity. */
7186 if (tdesc_has_registers (tdesc))
7187 {
7188 /* For most registers we require GDB's default names; but also allow
7189 the numeric names for sp / lr / pc, as a convenience. */
7190 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
7191 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
7192 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
7193
7194 const struct tdesc_feature *feature;
7195 int valid_p;
7196
7197 feature = tdesc_find_feature (tdesc,
7198 "org.gnu.gdb.arm.core");
7199 if (feature == NULL)
7200 {
7201 feature = tdesc_find_feature (tdesc,
7202 "org.gnu.gdb.arm.m-profile");
7203 if (feature == NULL)
7204 return NULL;
7205 else
7206 is_m = 1;
7207 }
7208
7209 tdesc_data = tdesc_data_alloc ();
7210
7211 valid_p = 1;
7212 for (i = 0; i < ARM_SP_REGNUM; i++)
7213 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7214 arm_register_names[i]);
7215 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7216 ARM_SP_REGNUM,
7217 arm_sp_names);
7218 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7219 ARM_LR_REGNUM,
7220 arm_lr_names);
7221 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
7222 ARM_PC_REGNUM,
7223 arm_pc_names);
7224 if (is_m)
7225 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7226 ARM_PS_REGNUM, "xpsr");
7227 else
7228 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7229 ARM_PS_REGNUM, "cpsr");
7230
7231 if (!valid_p)
7232 {
7233 tdesc_data_cleanup (tdesc_data);
7234 return NULL;
7235 }
7236
7237 feature = tdesc_find_feature (tdesc,
7238 "org.gnu.gdb.arm.fpa");
7239 if (feature != NULL)
7240 {
7241 valid_p = 1;
7242 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
7243 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
7244 arm_register_names[i]);
7245 if (!valid_p)
7246 {
7247 tdesc_data_cleanup (tdesc_data);
7248 return NULL;
7249 }
7250 }
7251 else
7252 have_fpa_registers = 0;
7253
7254 feature = tdesc_find_feature (tdesc,
7255 "org.gnu.gdb.xscale.iwmmxt");
7256 if (feature != NULL)
7257 {
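/* Empty strings mark control register slots that are reserved and
   have no architectural name.  */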
7258 static const char *const iwmmxt_names[] = {
7259 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
7260 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
7261 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
7262 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
7263 };
7264
7265 valid_p = 1;
7266 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
7267 valid_p
7268 &= tdesc_numbered_register (feature, tdesc_data, i,
7269 iwmmxt_names[i - ARM_WR0_REGNUM]);
7270
7271 /* Check for the control registers, but do not fail if they
7272 are missing. */
7273 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
7274 tdesc_numbered_register (feature, tdesc_data, i,
7275 iwmmxt_names[i - ARM_WR0_REGNUM]);
7276
7277 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
7278 valid_p
7279 &= tdesc_numbered_register (feature, tdesc_data, i,
7280 iwmmxt_names[i - ARM_WR0_REGNUM]);
7281
7282 if (!valid_p)
7283 {
7284 tdesc_data_cleanup (tdesc_data);
7285 return NULL;
7286 }
7287 }
7288
7289 /* If we have a VFP unit, check whether the single precision registers
7290 are present. If not, then we will synthesize them as pseudo
7291 registers. */
7292 feature = tdesc_find_feature (tdesc,
7293 "org.gnu.gdb.arm.vfp");
7294 if (feature != NULL)
7295 {
7296 static const char *const vfp_double_names[] = {
7297 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
7298 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
7299 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
7300 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
7301 };
7302
7303 /* Require the double precision registers. There must be either
7304 16 or 32. */
7305 valid_p = 1;
7306 for (i = 0; i < 32; i++)
7307 {
7308 valid_p &= tdesc_numbered_register (feature, tdesc_data,
7309 ARM_D0_REGNUM + i,
7310 vfp_double_names[i]);
7311 if (!valid_p)
7312 break;
7313 }
7314
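/* Accept either the full set of 32 double registers or exactly the
   first 16 (smaller VFP units); reject any other partial set.  */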
7315 if (!valid_p && i != 16)
7316 {
7317 tdesc_data_cleanup (tdesc_data);
7318 return NULL;
7319 }
7320
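/* The single-precision registers alias halves of the double registers,
   so if the description omits them GDB synthesizes s0..s31 as pseudo
   registers.  */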
7321 if (tdesc_unnumbered_register (feature, "s0") == 0)
7322 have_vfp_pseudos = 1;
7323
7324 have_vfp_registers = 1;
7325
7326 /* If we have VFP, also check for NEON. The architecture allows
7327 NEON without VFP (integer vector operations only), but GDB
7328 does not support that. */
7329 feature = tdesc_find_feature (tdesc,
7330 "org.gnu.gdb.arm.neon");
7331 if (feature != NULL)
7332 {
7333 /* NEON requires 32 double-precision registers. */
7334 if (i != 32)
7335 {
7336 tdesc_data_cleanup (tdesc_data);
7337 return NULL;
7338 }
7339
7340 /* If there are quad registers defined by the stub, use
7341 their type; otherwise (normally) provide them with
7342 the default type. */
7343 if (tdesc_unnumbered_register (feature, "q0") == 0)
7344 have_neon_pseudos = 1;
7345
7346 have_neon = 1;
7347 }
7348 }
7349 }
7350
7351 /* If there is already a candidate, use it. */
7352 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
7353 best_arch != NULL;
7354 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
7355 {
7356 if (arm_abi != ARM_ABI_AUTO
7357 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
7358 continue;
7359
7360 if (fp_model != ARM_FLOAT_AUTO
7361 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
7362 continue;
7363
7364 /* There are various other properties in tdep that we do not
7365 need to check here: those derived from a target description,
7366 since gdbarches with a different target description are
7367 automatically disqualified. */
7368
7369 /* Do check is_m, though, since it might come from the binary. */
7370 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
7371 continue;
7372
7373 /* Found a match. */
7374 break;
7375 }
7376
7377 if (best_arch != NULL)
7378 {
7379 if (tdesc_data != NULL)
7380 tdesc_data_cleanup (tdesc_data);
7381 return best_arch->gdbarch;
7382 }
7383
7384 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
7385 gdbarch = gdbarch_alloc (&info, tdep);
7386
7387 /* Record additional information about the architecture we are defining.
7388 These are gdbarch discriminators, like the OSABI. */
7389 tdep->arm_abi = arm_abi;
7390 tdep->fp_model = fp_model;
7391 tdep->is_m = is_m;
7392 tdep->have_fpa_registers = have_fpa_registers;
7393 tdep->have_vfp_registers = have_vfp_registers;
7394 tdep->have_vfp_pseudos = have_vfp_pseudos;
7395 tdep->have_neon_pseudos = have_neon_pseudos;
7396 tdep->have_neon = have_neon;
7397
7398 /* Breakpoints. */
7399 switch (info.byte_order_for_code)
7400 {
7401 case BFD_ENDIAN_BIG:
7402 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
7403 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
7404 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
7405 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
7406
7407 break;
7408
7409 case BFD_ENDIAN_LITTLE:
7410 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
7411 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
7412 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
7413 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
7414
7415 break;
7416
7417 default:
7418 internal_error (__FILE__, __LINE__,
7419 _("arm_gdbarch_init: bad byte order for float format"));
7420 }
7421
7422 /* On ARM targets char defaults to unsigned. */
7423 set_gdbarch_char_signed (gdbarch, 0);
7424
7425 /* Note: for displaced stepping, this includes the breakpoint, and one word
7426 of additional scratch space. This setting isn't used for anything besides
7427 displaced stepping at present. */
7428 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
7429
7430 /* This should be low enough for everything. */
7431 tdep->lowest_pc = 0x20;
7432 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
7433
7434 /* The default, for both APCS and AAPCS, is to return small
7435 structures in registers. */
7436 tdep->struct_return = reg_struct_return;
7437
7438 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
7439 set_gdbarch_frame_align (gdbarch, arm_frame_align);
7440
7441 set_gdbarch_write_pc (gdbarch, arm_write_pc);
7442
7443 /* Frame handling. */
7444 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
7445 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
7446 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
7447
7448 frame_base_set_default (gdbarch, &arm_normal_base);
7449
7450 /* Address manipulation. */
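/* ARM code addresses may carry the Thumb bit in bit 0; these hooks
   strip such bits so that addresses compare and print consistently.  */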
7451 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
7452 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
7453
7454 /* Advance PC across function entry code. */
7455 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
7456
7457 /* Detect whether PC is in function epilogue. */
7458 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
7459
7460 /* Skip trampolines. */
7461 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
7462
7463 /* The stack grows downward. */
7464 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
7465
7466 /* Breakpoint manipulation. */
7467 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
7468 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
7469 arm_remote_breakpoint_from_pc);
7470
7471 /* Information about registers, etc. */
7472 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
7473 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
7474 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
7475 set_gdbarch_register_type (gdbarch, arm_register_type);
7476 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
7477
7478 /* This "info float" is FPA-specific. Use the generic version if we
7479 do not have FPA. */
7480 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
7481 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
7482
7483 /* Internal <-> external register number maps. */
7484 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
7485 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
7486
7487 set_gdbarch_register_name (gdbarch, arm_register_name);
7488
7489 /* Returning results. */
7490 set_gdbarch_return_value (gdbarch, arm_return_value);
7491
7492 /* Disassembly. */
7493 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
7494
7495 /* Minsymbol frobbing. */
7496 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
7497 set_gdbarch_coff_make_msymbol_special (gdbarch,
7498 arm_coff_make_msymbol_special);
7499 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
7500
7501 /* Thumb-2 IT block support. */
7502 set_gdbarch_adjust_breakpoint_address (gdbarch,
7503 arm_adjust_breakpoint_address);
7504
7505 /* Virtual tables. */
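/* In the ARM C++ ABI the virtual-function flag of a pointer to member
   function lives in the low bit of the delta, because the low bit of
   the function address is used to mark Thumb code.  */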
7506 set_gdbarch_vbit_in_delta (gdbarch, 1);
7507
7508 /* Hook in the ABI-specific overrides, if they have been registered. */
7509 gdbarch_init_osabi (info, gdbarch);
7510
7511 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
7512
7513 /* Add some default predicates. */
7514 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
7515 dwarf2_append_unwinders (gdbarch);
7516 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
7517
7518 /* Now we have tuned the configuration, set a few final things,
7519 based on what the OS ABI has told us. */
7520
7521 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
7522 binaries are always marked. */
7523 if (tdep->arm_abi == ARM_ABI_AUTO)
7524 tdep->arm_abi = ARM_ABI_APCS;
7525
7526 /* We used to default to FPA for generic ARM, but almost nobody
7527 uses that now, and we now provide a way for the user to force
7528 the model. So default to the most useful variant. */
7529 if (tdep->fp_model == ARM_FLOAT_AUTO)
7530 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
7531
7532 if (tdep->jb_pc >= 0)
7533 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
7534
7535 /* Floating point sizes and format. */
7536 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
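/* FPA-style doubles on little-endian targets are mixed-endian: each
   32-bit word is little-endian, but the most significant word is
   stored first.  */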
7537 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
7538 {
7539 set_gdbarch_double_format
7540 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7541 set_gdbarch_long_double_format
7542 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
7543 }
7544 else
7545 {
7546 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
7547 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
7548 }
7549
7550 if (have_vfp_pseudos)
7551 {
7552 /* NOTE: These are the only pseudo registers used by
7553 the ARM target at the moment. If more are added, a
7554 little more care in numbering will be needed. */
7555
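/* 32 pseudo registers cover s0..s31; NEON adds q0..q15 on top.  */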
7556 int num_pseudos = 32;
7557 if (have_neon_pseudos)
7558 num_pseudos += 16;
7559 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
7560 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
7561 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
7562 }
7563
7564 if (tdesc_data)
7565 {
7566 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
7567
7568 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
7569
7570 /* Override tdesc_register_type to adjust the types of VFP
7571 registers for NEON. */
7572 set_gdbarch_register_type (gdbarch, arm_register_type);
7573 }
7574
7575 /* Add standard register aliases. We add aliases even for those
7576 names which are used by the current architecture - it's simpler,
7577 and does no harm, since nothing ever lists user registers. */
7578 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
7579 user_reg_add (gdbarch, arm_register_aliases[i].name,
7580 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
7581
7582 return gdbarch;
7583 }
7584
7585 static void
7586 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
7587 {
7588 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7589
7590 if (tdep == NULL)
7591 return;
7592
7593 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
7594 (unsigned long) tdep->lowest_pc);
7595 }
7596
7597 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
7598
7599 void
7600 _initialize_arm_tdep (void)
7601 {
7602 struct ui_file *stb;
7603 long length;
7604 struct cmd_list_element *new_set, *new_show;
7605 const char *setname;
7606 const char *setdesc;
7607 const char *const *regnames;
7608 int numregs, i, j;
7609 static char *helptext;
7610 char regdesc[1024], *rdptr = regdesc;
7611 size_t rest = sizeof (regdesc);
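/* regdesc accumulates one "name - description" line per register-name
   set; it becomes part of the help text for "set arm disassembler".  */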
7612
7613 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
7614
7615 arm_objfile_data_key
7616 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
7617
7618 /* Register an ELF OS ABI sniffer for ARM binaries. */
7619 gdbarch_register_osabi_sniffer (bfd_arch_arm,
7620 bfd_target_elf_flavour,
7621 arm_elf_osabi_sniffer);
7622
7623 /* Initialize the standard target descriptions. */
7624 initialize_tdesc_arm_with_m ();
7625
7626 /* Get the number of possible sets of register names defined in opcodes. */
7627 num_disassembly_options = get_arm_regname_num_options ();
7628
7629 /* Add root prefix command for all "set arm"/"show arm" commands. */
7630 add_prefix_cmd ("arm", no_class, set_arm_command,
7631 _("Various ARM-specific commands."),
7632 &setarmcmdlist, "set arm ", 0, &setlist);
7633
7634 add_prefix_cmd ("arm", no_class, show_arm_command,
7635 _("Various ARM-specific commands."),
7636 &showarmcmdlist, "show arm ", 0, &showlist);
7637
7638 /* Sync the opcode insn printer with our register viewer. */
7639 parse_arm_disassembler_option ("reg-names-std");
7640
7641 /* Initialize the array that will be passed to
7642 add_setshow_enum_cmd(). */
7643 valid_disassembly_styles
7644 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
7645 for (i = 0; i < num_disassembly_options; i++)
7646 {
7647 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
7648 valid_disassembly_styles[i] = setname;
7649 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
7650 rdptr += length;
7651 rest -= length;
7652 /* When we find the default names, tell the disassembler to use
7653 them. */
7654 if (!strcmp (setname, "std"))
7655 {
7656 disassembly_style = setname;
7657 set_arm_regname_option (i);
7658 }
7659 }
7660 /* Mark the end of valid options. */
7661 valid_disassembly_styles[num_disassembly_options] = NULL;
7662
7663 /* Create the help text. */
7664 stb = mem_fileopen ();
7665 fprintf_unfiltered (stb, "%s%s%s",
7666 _("The valid values are:\n"),
7667 regdesc,
7668 _("The default is \"std\"."));
7669 helptext = ui_file_xstrdup (stb, NULL);
7670 ui_file_delete (stb);
7671
7672 add_setshow_enum_cmd ("disassembler", no_class,
7673 valid_disassembly_styles, &disassembly_style,
7674 _("Set the disassembly style."),
7675 _("Show the disassembly style."),
7676 helptext,
7677 set_disassembly_style_sfunc,
7678 NULL, /* FIXME: i18n: The disassembly style is \"%s\". */
7679 &setarmcmdlist, &showarmcmdlist);
7680
7681 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
7682 _("Set usage of ARM 32-bit mode."),
7683 _("Show usage of ARM 32-bit mode."),
7684 _("When off, a 26-bit PC will be used."),
7685 NULL,
7686 NULL, /* FIXME: i18n: Usage of ARM 32-bit mode is %s. */
7687 &setarmcmdlist, &showarmcmdlist);
7688
7689 /* Add a command to allow the user to force the FPU model. */
7690 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
7691 _("Set the floating point type."),
7692 _("Show the floating point type."),
7693 _("auto - Determine the FP typefrom the OS-ABI.\n\
7694 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
7695 fpa - FPA co-processor (GCC compiled).\n\
7696 softvfp - Software FP with pure-endian doubles.\n\
7697 vfp - VFP co-processor."),
7698 set_fp_model_sfunc, show_fp_model,
7699 &setarmcmdlist, &showarmcmdlist);
7700
7701 /* Add a command to allow the user to force the ABI. */
7702 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
7703 _("Set the ABI."),
7704 _("Show the ABI."),
7705 NULL, arm_set_abi, arm_show_abi,
7706 &setarmcmdlist, &showarmcmdlist);
7707
7708 /* Add two commands to allow the user to force the assumed
7709 execution mode. */
7710 add_setshow_enum_cmd ("fallback-mode", class_support,
7711 arm_mode_strings, &arm_fallback_mode_string,
7712 _("Set the mode assumed when symbols are unavailable."),
7713 _("Show the mode assumed when symbols are unavailable."),
7714 NULL, NULL, arm_show_fallback_mode,
7715 &setarmcmdlist, &showarmcmdlist);
7716 add_setshow_enum_cmd ("force-mode", class_support,
7717 arm_mode_strings, &arm_force_mode_string,
7718 _("Set the mode assumed even when symbols are available."),
7719 _("Show the mode assumed even when symbols are available."),
7720 NULL, NULL, arm_show_force_mode,
7721 &setarmcmdlist, &showarmcmdlist);
7722
7723 /* Debugging flag. */
7724 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
7725 _("Set ARM debugging."),
7726 _("Show ARM debugging."),
7727 _("When on, arm-specific debugging is enabled."),
7728 NULL,
7729 NULL, /* FIXME: i18n: "ARM debugging is %s." */
7730 &setdebuglist, &showdebuglist);
7731 }