1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
 56    None of this hackery is needed with a modern kernel/libc
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72 static gdb::optional<gdb::byte_vector> ktab_buf;
73
74 #endif
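/* A minimal illustrative sketch of the redirection described in the comment
   above: accesses falling inside the KERNEL_START..KERNEL_START+ktab_size
   window are served from the in-memory copy of the kernel's unwind table.
   The helper name and the byte-wise copy are hypothetical; the actual
   redirection is performed by ia64_access_mem.  */
#ifdef HAVE_LIBUNWIND_IA64_H
static int
example_redirect_ktab_read (uint64_t addr, gdb_byte *buf, size_t len)
{
  if (ktab_size != 0
      && addr >= KERNEL_START
      && addr + len <= KERNEL_START + ktab_size)
    {
      /* Serve the access from the projected kernel unwind table.  */
      memcpy (buf, (const gdb_byte *) ktab + (addr - KERNEL_START), len);
      return 0;
    }
  return -1;	/* Not an access to the projected kernel unwind table.  */
}
#endif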
75
76 /* An enumeration of the different IA-64 instruction types. */
77
78 enum ia64_instruction_type
79 {
80 A, /* Integer ALU ; I-unit or M-unit */
81 I, /* Non-ALU integer; I-unit */
82 M, /* Memory ; M-unit */
83 F, /* Floating-point ; F-unit */
84 B, /* Branch ; B-unit */
85 L, /* Extended (L+X) ; I-unit */
86 X, /* Extended (L+X) ; I-unit */
87 undefined /* undefined or reserved */
88 };
89
90 /* We represent IA-64 PC addresses as the value of the instruction
91 pointer or'd with some bit combination in the low nibble which
92 represents the slot number in the bundle addressed by the
93 instruction pointer. The problem is that the Linux kernel
94 multiplies its slot numbers (for exceptions) by one while the
95 disassembler multiplies its slot numbers by 6. In addition, I've
96 heard it said that the simulator uses 1 as the multiplier.
97
98 I've fixed the disassembler so that the bytes_per_line field will
99 be the slot multiplier. If bytes_per_line comes in as zero, it
100 is set to six (which is how it was set up initially). -- objdump
101 displays pretty disassembly dumps with this value. For our purposes,
102 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
103 never want to also display the raw bytes the way objdump does. */
104
105 #define SLOT_MULTIPLIER 1
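/* A small sketch of how this file treats a "PC" value under the
   SLOT_MULTIPLIER convention above: the low nibble carries the slot
   number, and masking it off yields the 16-byte aligned bundle address.
   The helper name is hypothetical and purely for illustration.  */

static void
example_split_pc (CORE_ADDR pc, CORE_ADDR *bundle_addr, int *slotnum)
{
  *slotnum = (int) (pc & 0x0f) / SLOT_MULTIPLIER;	/* 0, 1 or 2.  */
  *bundle_addr = pc & ~(CORE_ADDR) 0x0f;		/* Bundle start.  */
}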
106
107 /* Length in bytes of an instruction bundle. */
108
109 #define BUNDLE_LEN 16
110
111 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
112
113 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
114 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
115 #endif
116
117 static gdbarch_init_ftype ia64_gdbarch_init;
118
119 static gdbarch_register_name_ftype ia64_register_name;
120 static gdbarch_register_type_ftype ia64_register_type;
121 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
122 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
123 static struct type *is_float_or_hfa_type (struct type *t);
124 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
125 CORE_ADDR faddr);
126
127 #define NUM_IA64_RAW_REGS 462
128
 129 /* Size in bytes big enough to hold an FP register.  */
130 #define IA64_FP_REGISTER_SIZE 16
131
132 static int sp_regnum = IA64_GR12_REGNUM;
133
134 /* NOTE: we treat the register stack registers r32-r127 as
135 pseudo-registers because they may not be accessible via the ptrace
136 register get/set interfaces. */
137
138 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
139 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
140 V127_REGNUM = V32_REGNUM + 95,
141 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
142 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
143
 144 /* Array of register names; there should be ia64_num_regs strings in
145 the initializer. */
146
147 static const char * const ia64_register_names[] =
148 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
149 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
150 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
151 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164
165 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
166 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
167 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
168 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
169 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
170 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
171 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
172 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
173 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
174 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
175 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
176 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
177 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
178 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
179 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
180 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
181
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190
191 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
192
193 "vfp", "vrap",
194
195 "pr", "ip", "psr", "cfm",
196
197 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
198 "", "", "", "", "", "", "", "",
199 "rsc", "bsp", "bspstore", "rnat",
200 "", "fcr", "", "",
201 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
202 "ccv", "", "", "", "unat", "", "", "",
203 "fpsr", "", "", "", "itc",
204 "", "", "", "", "", "", "", "", "", "",
205 "", "", "", "", "", "", "", "", "",
206 "pfs", "lc", "ec",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "",
214 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
215 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
216 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
217 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
218 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
219 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
220 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
221 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
222 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
223 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
224 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
225 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
226 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
227 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
228 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
229 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
230
231 "bof",
232
233 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
234 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
235 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
236 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
237 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
238 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
239 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
240 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
241 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
242 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
243 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
244 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
245
246 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
247 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
248 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
249 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
250 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
251 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
252 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
253 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
254 };
255
256 struct ia64_frame_cache
257 {
258 CORE_ADDR base; /* frame pointer base for frame */
259 CORE_ADDR pc; /* function start pc for frame */
260 CORE_ADDR saved_sp; /* stack pointer for frame */
261 CORE_ADDR bsp; /* points at r32 for the current frame */
262 CORE_ADDR cfm; /* cfm value for current frame */
263 CORE_ADDR prev_cfm; /* cfm value for previous frame */
264 int frameless;
265 int sof; /* Size of frame (decoded from cfm value). */
266 int sol; /* Size of locals (decoded from cfm value). */
267 int sor; /* Number of rotating registers (decoded from
268 cfm value). */
269 CORE_ADDR after_prologue;
270 /* Address of first instruction after the last
271 prologue instruction; Note that there may
272 be instructions from the function's body
273 intermingled with the prologue. */
274 int mem_stack_frame_size;
275 /* Size of the memory stack frame (may be zero),
276 or -1 if it has not been determined yet. */
 277   int fp_reg;		/* Register number (if any) used as a frame pointer
278 for this frame. 0 if no register is being used
279 as the frame pointer. */
280
281 /* Saved registers. */
282 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
283
284 };
285
286 static int
287 floatformat_valid (const struct floatformat *fmt, const void *from)
288 {
289 return 1;
290 }
291
292 static const struct floatformat floatformat_ia64_ext_little =
293 {
294 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
295 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
296 };
297
298 static const struct floatformat floatformat_ia64_ext_big =
299 {
300 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
301 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
302 };
303
304 static const struct floatformat *floatformats_ia64_ext[2] =
305 {
306 &floatformat_ia64_ext_big,
307 &floatformat_ia64_ext_little
308 };
309
310 static struct type *
311 ia64_ext_type (struct gdbarch *gdbarch)
312 {
313 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
314
315 if (!tdep->ia64_ext_type)
316 tdep->ia64_ext_type
317 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
318 floatformats_ia64_ext);
319
320 return tdep->ia64_ext_type;
321 }
322
323 static int
324 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
325 const struct reggroup *group)
326 {
327 int vector_p;
328 int float_p;
329 int raw_p;
330 if (group == all_reggroup)
331 return 1;
332 vector_p = register_type (gdbarch, regnum)->is_vector ();
333 float_p = register_type (gdbarch, regnum)->code () == TYPE_CODE_FLT;
334 raw_p = regnum < NUM_IA64_RAW_REGS;
335 if (group == float_reggroup)
336 return float_p;
337 if (group == vector_reggroup)
338 return vector_p;
339 if (group == general_reggroup)
340 return (!vector_p && !float_p);
341 if (group == save_reggroup || group == restore_reggroup)
342 return raw_p;
343 return 0;
344 }
345
346 static const char *
347 ia64_register_name (struct gdbarch *gdbarch, int reg)
348 {
349 return ia64_register_names[reg];
350 }
351
352 struct type *
353 ia64_register_type (struct gdbarch *arch, int reg)
354 {
355 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
356 return ia64_ext_type (arch);
357 else
358 return builtin_type (arch)->builtin_long;
359 }
360
361 static int
362 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
363 {
364 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
365 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
366 return reg;
367 }
368
369
370 /* Extract ``len'' bits from an instruction bundle starting at
371 bit ``from''. */
372
373 static long long
374 extract_bit_field (const gdb_byte *bundle, int from, int len)
375 {
376 long long result = 0LL;
377 int to = from + len;
378 int from_byte = from / 8;
379 int to_byte = to / 8;
380 unsigned char *b = (unsigned char *) bundle;
381 unsigned char c;
382 int lshift;
383 int i;
384
385 c = b[from_byte];
386 if (from_byte == to_byte)
387 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
388 result = c >> (from % 8);
389 lshift = 8 - (from % 8);
390
391 for (i = from_byte+1; i < to_byte; i++)
392 {
393 result |= ((long long) b[i]) << lshift;
394 lshift += 8;
395 }
396
397 if (from_byte < to_byte && (to % 8 != 0))
398 {
399 c = b[to_byte];
400 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
401 result |= ((long long) c) << lshift;
402 }
403
404 return result;
405 }
406
407 /* Replace the specified bits in an instruction bundle. */
408
409 static void
410 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
411 {
412 int to = from + len;
413 int from_byte = from / 8;
414 int to_byte = to / 8;
415 unsigned char *b = (unsigned char *) bundle;
416 unsigned char c;
417
418 if (from_byte == to_byte)
419 {
420 unsigned char left, right;
421 c = b[from_byte];
422 left = (c >> (to % 8)) << (to % 8);
423 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
424 c = (unsigned char) (val & 0xff);
425 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
426 c |= right | left;
427 b[from_byte] = c;
428 }
429 else
430 {
431 int i;
432 c = b[from_byte];
433 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
434 c = c | (val << (from % 8));
435 b[from_byte] = c;
436 val >>= 8 - from % 8;
437
438 for (i = from_byte+1; i < to_byte; i++)
439 {
440 c = val & 0xff;
441 val >>= 8;
442 b[i] = c;
443 }
444
445 if (to % 8 != 0)
446 {
447 unsigned char cv = (unsigned char) val;
448 c = b[to_byte];
449 c = c >> (to % 8) << (to % 8);
450 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
451 b[to_byte] = c;
452 }
453 }
454 }
455
456 /* Return the contents of slot N (for N = 0, 1, or 2) in
 457    an instruction bundle.  */
458
459 static long long
460 slotN_contents (gdb_byte *bundle, int slotnum)
461 {
462 return extract_bit_field (bundle, 5+41*slotnum, 41);
463 }
464
465 /* Store an instruction in an instruction bundle. */
466
467 static void
468 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
469 {
470 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
471 }
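/* An illustrative round trip through the helpers above (the function name
   is hypothetical): a bundle holds a 5-bit template in bits 0..4 and three
   41-bit slots at bit offsets 5, 46 and 87, which is exactly what the
   "5 + 41 * slotnum" arithmetic encodes.  */

static void
example_bundle_roundtrip (gdb_byte *bundle)
{
  long long templ = extract_bit_field (bundle, 0, 5);	/* Template field.  */
  long long slot1 = slotN_contents (bundle, 1);		/* Bits 46..86.  */

  /* Writing the same value back leaves the template and the other two
     slots untouched.  */
  replace_slotN_contents (bundle, slot1, 1);
  (void) templ;
}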
472
473 static const enum ia64_instruction_type template_encoding_table[32][3] =
474 {
475 { M, I, I }, /* 00 */
476 { M, I, I }, /* 01 */
477 { M, I, I }, /* 02 */
478 { M, I, I }, /* 03 */
479 { M, L, X }, /* 04 */
480 { M, L, X }, /* 05 */
481 { undefined, undefined, undefined }, /* 06 */
482 { undefined, undefined, undefined }, /* 07 */
483 { M, M, I }, /* 08 */
484 { M, M, I }, /* 09 */
485 { M, M, I }, /* 0A */
486 { M, M, I }, /* 0B */
487 { M, F, I }, /* 0C */
488 { M, F, I }, /* 0D */
489 { M, M, F }, /* 0E */
490 { M, M, F }, /* 0F */
491 { M, I, B }, /* 10 */
492 { M, I, B }, /* 11 */
493 { M, B, B }, /* 12 */
494 { M, B, B }, /* 13 */
495 { undefined, undefined, undefined }, /* 14 */
496 { undefined, undefined, undefined }, /* 15 */
497 { B, B, B }, /* 16 */
498 { B, B, B }, /* 17 */
499 { M, M, B }, /* 18 */
500 { M, M, B }, /* 19 */
501 { undefined, undefined, undefined }, /* 1A */
502 { undefined, undefined, undefined }, /* 1B */
503 { M, F, B }, /* 1C */
504 { M, F, B }, /* 1D */
505 { undefined, undefined, undefined }, /* 1E */
506 { undefined, undefined, undefined }, /* 1F */
507 };
508
509 /* Fetch and (partially) decode an instruction at ADDR and return the
510 address of the next instruction to fetch. */
511
512 static CORE_ADDR
513 fetch_instruction (CORE_ADDR addr, ia64_instruction_type *it, long long *instr)
514 {
515 gdb_byte bundle[BUNDLE_LEN];
516 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
517 long long templ;
518 int val;
519
520 /* Warn about slot numbers greater than 2. We used to generate
521 an error here on the assumption that the user entered an invalid
522 address. But, sometimes GDB itself requests an invalid address.
523 This can (easily) happen when execution stops in a function for
524 which there are no symbols. The prologue scanner will attempt to
525 find the beginning of the function - if the nearest symbol
526 happens to not be aligned on a bundle boundary (16 bytes), the
527 resulting starting address will cause GDB to think that the slot
528 number is too large.
529
530 So we warn about it and set the slot number to zero. It is
531 not necessarily a fatal condition, particularly if debugging
532 at the assembly language level. */
533 if (slotnum > 2)
534 {
535 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
536 "Using slot 0 instead"));
537 slotnum = 0;
538 }
539
540 addr &= ~0x0f;
541
542 val = target_read_memory (addr, bundle, BUNDLE_LEN);
543
544 if (val != 0)
545 return 0;
546
547 *instr = slotN_contents (bundle, slotnum);
548 templ = extract_bit_field (bundle, 0, 5);
549 *it = template_encoding_table[(int)templ][slotnum];
550
551 if (slotnum == 2 || (slotnum == 1 && *it == L))
552 addr += 16;
553 else
554 addr += (slotnum + 1) * SLOT_MULTIPLIER;
555
556 return addr;
557 }
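/* A worked example of the returned "next" address (the addresses are
   hypothetical).  With SLOT_MULTIPLIER == 1 and a bundle at 0x4000:
     slot 0 -> next address 0x4001
     slot 1 -> next address 0x4002, or 0x4010 if slot 1 holds the L half
	       of an L+X instruction
     slot 2 -> next address 0x4010 (start of the following bundle).  */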
558
559 /* There are 5 different break instructions (break.i, break.b,
560 break.m, break.f, and break.x), but they all have the same
561 encoding. (The five bit template in the low five bits of the
562 instruction bundle distinguishes one from another.)
563
564 The runtime architecture manual specifies that break instructions
565 used for debugging purposes must have the upper two bits of the 21
566 bit immediate set to a 0 and a 1 respectively. A breakpoint
567 instruction encodes the most significant bit of its 21 bit
568 immediate at bit 36 of the 41 bit instruction. The penultimate msb
569 is at bit 25 which leads to the pattern below.
570
 571    Originally, I had this set up to do, e.g., a "break.i 0x80000".  But
572 it turns out that 0x80000 was used as the syscall break in the early
573 simulators. So I changed the pattern slightly to do "break.i 0x080001"
574 instead. But that didn't work either (I later found out that this
575 pattern was used by the simulator that I was using.) So I ended up
576 using the pattern seen below.
577
 578    SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
 579    while we need bit-based addressing, as the instruction length is 41 bits and
 580    we must not modify/corrupt the adjacent slots in the same bundle.
 581    Fortunately we may store a larger memory region, including the adjacent bits,
 582    with the original memory content (not the possibly already stored breakpoints
 583    there).  We need to be careful in ia64_memory_remove_breakpoint to always
 584    restore only the specific bits of this instruction, ignoring any adjacent
 585    stored bits.
586
 587    We use the original addressing with the low nibble in the range <0..2>, which
 588    gets incorrectly interpreted by the generic non-ia64 breakpoint_restore_shadows
 589    as a direct byte offset into SHADOW_CONTENTS.  We therefore store the whole
 590    BUNDLE_LEN bytes minus the (up to two) skipped leading bytes, so as not to
 591    reach into the next bundle.
592
 593    If we wanted to store the whole bundle to SHADOW_CONTENTS, we would have
 594    to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
 595    In that case there would be no other place to store
 596    SLOTNUM (`address & 0x0f', a value in the range <0..2>).  We need to know
 597    SLOTNUM in ia64_memory_remove_breakpoint.
598
599 There is one special case where we need to be extra careful:
600 L-X instructions, which are instructions that occupy 2 slots
601 (The L part is always in slot 1, and the X part is always in
602 slot 2). We must refuse to insert breakpoints for an address
603 that points at slot 2 of a bundle where an L-X instruction is
604 present, since there is logically no instruction at that address.
605 However, to make things more interesting, the opcode of L-X
606 instructions is located in slot 2. This means that, to insert
607 a breakpoint at an address that points to slot 1, we actually
608 need to write the breakpoint in slot 2! Slot 1 is actually
609 the extended operand, so writing the breakpoint there would not
610 have the desired effect. Another side-effect of this issue
611 is that we need to make sure that the shadow contents buffer
612 does save byte 15 of our instruction bundle (this is the tail
613 end of slot 2, which wouldn't be saved if we were to insert
614 the breakpoint in slot 1).
615
616 ia64 16-byte bundle layout:
617 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
618
619 The current addressing used by the code below:
620 original PC placed_address placed_size required covered
621 == bp_tgt->shadow_len reqd \subset covered
622 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
623 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
624 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
625
626 L-X instructions are treated a little specially, as explained above:
627 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
628
 629    `objdump -d' and some other tools show somewhat unjustified offsets:
630 original PC byte where starts the instruction objdump offset
631 0xABCDE0 0xABCDE0 0xABCDE0
632 0xABCDE1 0xABCDE5 0xABCDE6
633 0xABCDE2 0xABCDEA 0xABCDEC
634 */
635
636 #define IA64_BREAKPOINT 0x00003333300LL
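/* Consistency checks for the pattern above, matching the rules quoted in
   the big comment: the immediate's most significant bit (instruction bit 36)
   must be clear and the penultimate one (instruction bit 25) must be set for
   a debugger break.  These asserts are illustrative additions.  */

static_assert ((IA64_BREAKPOINT & (1LL << 36)) == 0,
	       "break immediate MSB (bit 36) must be 0");
static_assert ((IA64_BREAKPOINT & (1LL << 25)) != 0,
	       "break immediate penultimate MSB (bit 25) must be 1");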
637
638 static int
639 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
640 struct bp_target_info *bp_tgt)
641 {
642 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
643 gdb_byte bundle[BUNDLE_LEN];
644 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
645 long long instr_breakpoint;
646 int val;
647 int templ;
648
649 if (slotnum > 2)
650 error (_("Can't insert breakpoint for slot numbers greater than 2."));
651
652 addr &= ~0x0f;
653
654 /* Enable the automatic memory restoration from breakpoints while
655 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
656 Otherwise, we could possibly store into the shadow parts of the adjacent
657 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
658 breakpoint instruction bits region. */
659 scoped_restore restore_memory_0
660 = make_scoped_restore_show_memory_breakpoints (0);
661 val = target_read_memory (addr, bundle, BUNDLE_LEN);
662 if (val != 0)
663 return val;
664
665 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
666 for addressing the SHADOW_CONTENTS placement. */
667 shadow_slotnum = slotnum;
668
669 /* Always cover the last byte of the bundle in case we are inserting
670 a breakpoint on an L-X instruction. */
671 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
672
673 templ = extract_bit_field (bundle, 0, 5);
674 if (template_encoding_table[templ][slotnum] == X)
675 {
676 /* X unit types can only be used in slot 2, and are actually
677 part of a 2-slot L-X instruction. We cannot break at this
678 address, as this is the second half of an instruction that
679 lives in slot 1 of that bundle. */
680 gdb_assert (slotnum == 2);
681 error (_("Can't insert breakpoint for non-existing slot X"));
682 }
683 if (template_encoding_table[templ][slotnum] == L)
684 {
685 /* L unit types can only be used in slot 1. But the associated
686 opcode for that instruction is in slot 2, so bump the slot number
687 accordingly. */
688 gdb_assert (slotnum == 1);
689 slotnum = 2;
690 }
691
 692   /* Store the whole bundle, except for the initial bytes skipped by the slot
 693      number, which is interpreted as a byte offset in PLACED_ADDRESS.  */
694 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
695 bp_tgt->shadow_len);
696
697 /* Re-read the same bundle as above except that, this time, read it in order
698 to compute the new bundle inside which we will be inserting the
699 breakpoint. Therefore, disable the automatic memory restoration from
700 breakpoints while we read our instruction bundle. Otherwise, the general
701 restoration mechanism kicks in and we would possibly remove parts of the
702 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
703 the real breakpoint instruction bits region. */
704 scoped_restore restore_memory_1
705 = make_scoped_restore_show_memory_breakpoints (1);
706 val = target_read_memory (addr, bundle, BUNDLE_LEN);
707 if (val != 0)
708 return val;
709
710 /* Breakpoints already present in the code will get detected and not get
711 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
712 location cannot induce the internal error as they are optimized into
713 a single instance by update_global_location_list. */
714 instr_breakpoint = slotN_contents (bundle, slotnum);
715 if (instr_breakpoint == IA64_BREAKPOINT)
716 internal_error (__FILE__, __LINE__,
717 _("Address %s already contains a breakpoint."),
718 paddress (gdbarch, bp_tgt->placed_address));
719 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
720
721 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
722 bp_tgt->shadow_len);
723
724 return val;
725 }
726
727 static int
728 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
729 struct bp_target_info *bp_tgt)
730 {
731 CORE_ADDR addr = bp_tgt->placed_address;
732 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
733 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
734 long long instr_breakpoint, instr_saved;
735 int val;
736 int templ;
737
738 addr &= ~0x0f;
739
740 /* Disable the automatic memory restoration from breakpoints while
741 we read our instruction bundle. Otherwise, the general restoration
742 mechanism kicks in and we would possibly remove parts of the adjacent
743 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
744 breakpoint instruction bits region. */
745 scoped_restore restore_memory_1
746 = make_scoped_restore_show_memory_breakpoints (1);
747 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
748 if (val != 0)
749 return val;
750
751 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
752 for addressing the SHADOW_CONTENTS placement. */
753 shadow_slotnum = slotnum;
754
755 templ = extract_bit_field (bundle_mem, 0, 5);
756 if (template_encoding_table[templ][slotnum] == X)
757 {
758 /* X unit types can only be used in slot 2, and are actually
759 part of a 2-slot L-X instruction. We refuse to insert
760 breakpoints at this address, so there should be no reason
761 for us attempting to remove one there, except if the program's
762 code somehow got modified in memory. */
763 gdb_assert (slotnum == 2);
764 warning (_("Cannot remove breakpoint at address %s from non-existing "
765 "X-type slot, memory has changed underneath"),
766 paddress (gdbarch, bp_tgt->placed_address));
767 return -1;
768 }
769 if (template_encoding_table[templ][slotnum] == L)
770 {
771 /* L unit types can only be used in slot 1. But the breakpoint
772 was actually saved using slot 2, so update the slot number
773 accordingly. */
774 gdb_assert (slotnum == 1);
775 slotnum = 2;
776 }
777
778 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
779
780 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
781 if (instr_breakpoint != IA64_BREAKPOINT)
782 {
783 warning (_("Cannot remove breakpoint at address %s, "
784 "no break instruction at such address."),
785 paddress (gdbarch, bp_tgt->placed_address));
786 return -1;
787 }
788
789 /* Extract the original saved instruction from SLOTNUM normalizing its
790 bit-shift for INSTR_SAVED. */
791 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
792 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
793 bp_tgt->shadow_len);
794 instr_saved = slotN_contents (bundle_saved, slotnum);
795
796 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
797 and not any of the other ones that are stored in SHADOW_CONTENTS. */
798 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
799 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
800
801 return val;
802 }
803
804 /* Implement the breakpoint_kind_from_pc gdbarch method. */
805
806 static int
807 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
808 {
 809   /* A placeholder for the gdbarch method breakpoint_kind_from_pc.  */
810 return 0;
811 }
812
 813 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
 814    instruction slot ranges are bit-granular (41 bits), we have to provide an
 815    extended range as described for ia64_memory_insert_breakpoint.  We also take
 816    care to preserve the `break' instruction's 21-bit (or 62-bit) parameter so
 817    that it matches permanent breakpoints.  */
818
819 static const gdb_byte *
820 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
821 CORE_ADDR *pcptr, int *lenptr)
822 {
823 CORE_ADDR addr = *pcptr;
824 static gdb_byte bundle[BUNDLE_LEN];
825 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
826 long long instr_fetched;
827 int val;
828 int templ;
829
830 if (slotnum > 2)
831 error (_("Can't insert breakpoint for slot numbers greater than 2."));
832
833 addr &= ~0x0f;
834
835 /* Enable the automatic memory restoration from breakpoints while
836 we read our instruction bundle to match bp_loc_is_permanent. */
837 {
838 scoped_restore restore_memory_0
839 = make_scoped_restore_show_memory_breakpoints (0);
840 val = target_read_memory (addr, bundle, BUNDLE_LEN);
841 }
842
843 /* The memory might be unreachable. This can happen, for instance,
844 when the user inserts a breakpoint at an invalid address. */
845 if (val != 0)
846 return NULL;
847
848 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
849 for addressing the SHADOW_CONTENTS placement. */
850 shadow_slotnum = slotnum;
851
 852   /* Always cover the last byte of the bundle for the L-X slot case.  */
853 *lenptr = BUNDLE_LEN - shadow_slotnum;
854
 855   /* Check for an L-type instruction in slot 1; if present, bump the slot
 856      number up to slot 2.  */
857 templ = extract_bit_field (bundle, 0, 5);
858 if (template_encoding_table[templ][slotnum] == X)
859 {
860 gdb_assert (slotnum == 2);
861 error (_("Can't insert breakpoint for non-existing slot X"));
862 }
863 if (template_encoding_table[templ][slotnum] == L)
864 {
865 gdb_assert (slotnum == 1);
866 slotnum = 2;
867 }
868
 869   /* A break instruction has all of its opcode bits cleared except for
 870      the parameter value.  For an L+X slot pair we are at the X slot (slot 2) so
871 we should not touch the L slot - the upper 41 bits of the parameter. */
872 instr_fetched = slotN_contents (bundle, slotnum);
873 instr_fetched &= 0x1003ffffc0LL;
874 replace_slotN_contents (bundle, instr_fetched, slotnum);
875
876 return bundle + shadow_slotnum;
877 }
878
879 static CORE_ADDR
880 ia64_read_pc (readable_regcache *regcache)
881 {
882 ULONGEST psr_value, pc_value;
883 int slot_num;
884
885 regcache->cooked_read (IA64_PSR_REGNUM, &psr_value);
886 regcache->cooked_read (IA64_IP_REGNUM, &pc_value);
887 slot_num = (psr_value >> 41) & 3;
888
889 return pc_value | (slot_num * SLOT_MULTIPLIER);
890 }
891
892 void
893 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
894 {
895 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
896 ULONGEST psr_value;
897
898 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
899 psr_value &= ~(3LL << 41);
900 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
901
902 new_pc &= ~0xfLL;
903
904 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
905 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
906 }
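/* A worked round trip through the two functions above (values are
   hypothetical): ia64_write_pc with new_pc == 0x4002 stores ip = 0x4000 and
   sets psr bits 41..42 to 2; ia64_read_pc then reconstructs
   0x4000 | 2 * SLOT_MULTIPLIER == 0x4002.  */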
907
908 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
909
910 /* Returns the address of the slot that's NSLOTS slots away from
911 the address ADDR. NSLOTS may be positive or negative. */
912 static CORE_ADDR
913 rse_address_add(CORE_ADDR addr, int nslots)
914 {
915 CORE_ADDR new_addr;
916 int mandatory_nat_slots = nslots / 63;
917 int direction = nslots < 0 ? -1 : 1;
918
919 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
920
921 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
922 new_addr += 8 * direction;
923
924 if (IS_NaT_COLLECTION_ADDR(new_addr))
925 new_addr += 8 * direction;
926
927 return new_addr;
928 }
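/* Worked examples for rse_address_add (addresses are hypothetical): the RSE
   backing store reserves every 64th slot (the one whose address has bits
   3..8 all set) as a NaT collection, so register slots must step over it:
     rse_address_add (0x1000, 2) == 0x1010  (no NaT collection crossed)
     rse_address_add (0x11f0, 1) == 0x1200  (0x11f8 is a NaT collection).  */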
929
930 static enum register_status
931 ia64_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
932 int regnum, gdb_byte *buf)
933 {
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
935 enum register_status status;
936
937 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
938 {
939 #ifdef HAVE_LIBUNWIND_IA64_H
940 /* First try and use the libunwind special reg accessor,
941 otherwise fallback to standard logic. */
942 if (!libunwind_is_initialized ()
943 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
944 #endif
945 {
946 /* The fallback position is to assume that r32-r127 are
947 found sequentially in memory starting at $bof. This
948 isn't always true, but without libunwind, this is the
949 best we can do. */
950 ULONGEST cfm;
951 ULONGEST bsp;
952 CORE_ADDR reg;
953
954 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
955 if (status != REG_VALID)
956 return status;
957
958 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
959 if (status != REG_VALID)
960 return status;
961
962 /* The bsp points at the end of the register frame so we
963 subtract the size of frame from it to get start of
964 register frame. */
965 bsp = rse_address_add (bsp, -(cfm & 0x7f));
966
967 if ((cfm & 0x7f) > regnum - V32_REGNUM)
968 {
969 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
970 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
971 store_unsigned_integer (buf, register_size (gdbarch, regnum),
972 byte_order, reg);
973 }
974 else
975 store_unsigned_integer (buf, register_size (gdbarch, regnum),
976 byte_order, 0);
977 }
978 }
979 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
980 {
981 ULONGEST unatN_val;
982 ULONGEST unat;
983
984 status = regcache->cooked_read (IA64_UNAT_REGNUM, &unat);
985 if (status != REG_VALID)
986 return status;
987 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
988 store_unsigned_integer (buf, register_size (gdbarch, regnum),
989 byte_order, unatN_val);
990 }
991 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
992 {
993 ULONGEST natN_val = 0;
994 ULONGEST bsp;
995 ULONGEST cfm;
996 CORE_ADDR gr_addr = 0;
997
998 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
999 if (status != REG_VALID)
1000 return status;
1001
1002 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1003 if (status != REG_VALID)
1004 return status;
1005
1006 /* The bsp points at the end of the register frame so we
1007 subtract the size of frame from it to get start of register frame. */
1008 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1009
1010 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1011 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1012
1013 if (gr_addr != 0)
1014 {
1015 /* Compute address of nat collection bits. */
1016 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1017 ULONGEST nat_collection;
1018 int nat_bit;
1019 /* If our nat collection address is bigger than bsp, we have to get
1020 the nat collection from rnat. Otherwise, we fetch the nat
1021 collection from the computed address. */
1022 if (nat_addr >= bsp)
1023 regcache->cooked_read (IA64_RNAT_REGNUM, &nat_collection);
1024 else
1025 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1026 nat_bit = (gr_addr >> 3) & 0x3f;
1027 natN_val = (nat_collection >> nat_bit) & 1;
1028 }
1029
1030 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1031 byte_order, natN_val);
1032 }
1033 else if (regnum == VBOF_REGNUM)
1034 {
1035 /* A virtual register frame start is provided for user convenience.
1036 It can be calculated as the bsp - sof (sizeof frame). */
1037 ULONGEST bsp, vbsp;
1038 ULONGEST cfm;
1039
1040 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1041 if (status != REG_VALID)
1042 return status;
1043 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1044 if (status != REG_VALID)
1045 return status;
1046
1047 /* The bsp points at the end of the register frame so we
1048 subtract the size of frame from it to get beginning of frame. */
1049 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1050 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1051 byte_order, vbsp);
1052 }
1053 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1054 {
1055 ULONGEST pr;
1056 ULONGEST cfm;
1057 ULONGEST prN_val;
1058
1059 status = regcache->cooked_read (IA64_PR_REGNUM, &pr);
1060 if (status != REG_VALID)
1061 return status;
1062 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1063 if (status != REG_VALID)
1064 return status;
1065
1066 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1067 {
1068 /* Fetch predicate register rename base from current frame
1069 marker for this frame. */
1070 int rrb_pr = (cfm >> 32) & 0x3f;
1071
1072 /* Adjust the register number to account for register rotation. */
1073 regnum = VP16_REGNUM
1074 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1075 }
1076 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1077 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1078 byte_order, prN_val);
1079 }
1080 else
1081 memset (buf, 0, register_size (gdbarch, regnum));
1082
1083 return REG_VALID;
1084 }
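/* The CFM fields that the pseudo-register code above (and the prologue
   scanner below) keeps decoding inline, gathered into one sketch.  The
   helper name is hypothetical.  */

static void
example_decode_cfm (ULONGEST cfm, int *sof, int *sol, int *rrb_pr)
{
  *sof = cfm & 0x7f;		/* Size of the current register frame.  */
  *sol = (cfm >> 7) & 0x7f;	/* Size of its local portion.  */
  *rrb_pr = (cfm >> 32) & 0x3f;	/* Predicate register rename base.  */
}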
1085
1086 static void
1087 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1088 int regnum, const gdb_byte *buf)
1089 {
1090 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1091
1092 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1093 {
1094 ULONGEST bsp;
1095 ULONGEST cfm;
1096 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1097 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1098
1099 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1100
1101 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1102 {
1103 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1104 write_memory (reg_addr, buf, 8);
1105 }
1106 }
1107 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1108 {
1109 ULONGEST unatN_val, unat, unatN_mask;
1110 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1111 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1112 regnum),
1113 byte_order);
1114 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1115 if (unatN_val == 0)
1116 unat &= ~unatN_mask;
1117 else if (unatN_val == 1)
1118 unat |= unatN_mask;
1119 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1120 }
1121 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1122 {
1123 ULONGEST natN_val;
1124 ULONGEST bsp;
1125 ULONGEST cfm;
1126 CORE_ADDR gr_addr = 0;
1127 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1128 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1129
1130 /* The bsp points at the end of the register frame so we
1131 subtract the size of frame from it to get start of register frame. */
1132 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1133
1134 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1135 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1136
1137 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1138 regnum),
1139 byte_order);
1140
1141 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1142 {
1143 /* Compute address of nat collection bits. */
1144 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1145 CORE_ADDR nat_collection;
1146 int natN_bit = (gr_addr >> 3) & 0x3f;
1147 ULONGEST natN_mask = (1LL << natN_bit);
1148 /* If our nat collection address is bigger than bsp, we have to get
1149 the nat collection from rnat. Otherwise, we fetch the nat
1150 collection from the computed address. */
1151 if (nat_addr >= bsp)
1152 {
1153 regcache_cooked_read_unsigned (regcache,
1154 IA64_RNAT_REGNUM,
1155 &nat_collection);
1156 if (natN_val)
1157 nat_collection |= natN_mask;
1158 else
1159 nat_collection &= ~natN_mask;
1160 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1161 nat_collection);
1162 }
1163 else
1164 {
1165 gdb_byte nat_buf[8];
1166 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1167 if (natN_val)
1168 nat_collection |= natN_mask;
1169 else
1170 nat_collection &= ~natN_mask;
1171 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1172 byte_order, nat_collection);
1173 write_memory (nat_addr, nat_buf, 8);
1174 }
1175 }
1176 }
1177 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1178 {
1179 ULONGEST pr;
1180 ULONGEST cfm;
1181 ULONGEST prN_val;
1182 ULONGEST prN_mask;
1183
1184 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1185 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1186
1187 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1188 {
1189 /* Fetch predicate register rename base from current frame
1190 marker for this frame. */
1191 int rrb_pr = (cfm >> 32) & 0x3f;
1192
1193 /* Adjust the register number to account for register rotation. */
1194 regnum = VP16_REGNUM
1195 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1196 }
1197 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1198 byte_order);
1199 prN_mask = (1LL << (regnum - VP0_REGNUM));
1200 if (prN_val == 0)
1201 pr &= ~prN_mask;
1202 else if (prN_val == 1)
1203 pr |= prN_mask;
1204 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1205 }
1206 }
1207
 1208 /* The ia64 needs to convert between various IEEE floating-point formats
1209 and the special ia64 floating point register format. */
1210
1211 static int
1212 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1213 {
1214 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1215 && type->code () == TYPE_CODE_FLT
1216 && type != ia64_ext_type (gdbarch));
1217 }
1218
1219 static int
1220 ia64_register_to_value (struct frame_info *frame, int regnum,
1221 struct type *valtype, gdb_byte *out,
1222 int *optimizedp, int *unavailablep)
1223 {
1224 struct gdbarch *gdbarch = get_frame_arch (frame);
1225 gdb_byte in[IA64_FP_REGISTER_SIZE];
1226
1227 /* Convert to TYPE. */
1228 if (!get_frame_register_bytes (frame, regnum, 0,
1229 gdb::make_array_view (in,
1230 register_size (gdbarch,
1231 regnum)),
1232 optimizedp, unavailablep))
1233 return 0;
1234
1235 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1236 *optimizedp = *unavailablep = 0;
1237 return 1;
1238 }
1239
1240 static void
1241 ia64_value_to_register (struct frame_info *frame, int regnum,
1242 struct type *valtype, const gdb_byte *in)
1243 {
1244 struct gdbarch *gdbarch = get_frame_arch (frame);
1245 gdb_byte out[IA64_FP_REGISTER_SIZE];
1246 target_float_convert (in, valtype, out, ia64_ext_type (gdbarch));
1247 put_frame_register (frame, regnum, out);
1248 }
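/* A usage sketch for the conversion above (the helper name and the choice
   of f6 are arbitrary): reading an IA-64 floating-point register as a
   `double' value goes through the same 82-bit extended format handled by
   ia64_register_to_value.  OUT must hold a target double (8 bytes).  */

static void
example_fr_to_double (struct frame_info *frame, gdb_byte *out)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  int optimized, unavailable;

  if (ia64_register_to_value (frame, IA64_FR0_REGNUM + 6 /* f6 */,
			      builtin_type (gdbarch)->builtin_double,
			      out, &optimized, &unavailable))
    {
      /* OUT now holds f6 converted to the target's double format.  */
    }
}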
1249
1250
1251 /* Limit the number of skipped non-prologue instructions since examining
 1252    the prologue is expensive.  */
1253 static int max_skip_non_prologue_insns = 40;
1254
1255 /* Given PC representing the starting address of a function, and
1256 LIM_PC which is the (sloppy) limit to which to scan when looking
1257 for a prologue, attempt to further refine this limit by using
1258 the line data in the symbol table. If successful, a better guess
1259 on where the prologue ends is returned, otherwise the previous
1260 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1261 which will be set to indicate whether the returned limit may be
1262 used with no further scanning in the event that the function is
1263 frameless. */
1264
1265 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1266 superseded by skip_prologue_using_sal. */
1267
1268 static CORE_ADDR
1269 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1270 {
1271 struct symtab_and_line prologue_sal;
1272 CORE_ADDR start_pc = pc;
1273 CORE_ADDR end_pc;
1274
1275 /* The prologue can not possibly go past the function end itself,
1276 so we can already adjust LIM_PC accordingly. */
1277 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1278 lim_pc = end_pc;
1279
1280 /* Start off not trusting the limit. */
1281 *trust_limit = 0;
1282
1283 prologue_sal = find_pc_line (pc, 0);
1284 if (prologue_sal.line != 0)
1285 {
1286 int i;
1287 CORE_ADDR addr = prologue_sal.end;
1288
 1289       /* Handle the case in which the compiler's optimizer/scheduler
1290 has moved instructions into the prologue. We scan ahead
1291 in the function looking for address ranges whose corresponding
1292 line number is less than or equal to the first one that we
1293 found for the function. (It can be less than when the
1294 scheduler puts a body instruction before the first prologue
1295 instruction.) */
1296 for (i = 2 * max_skip_non_prologue_insns;
1297 i > 0 && (lim_pc == 0 || addr < lim_pc);
1298 i--)
1299 {
1300 struct symtab_and_line sal;
1301
1302 sal = find_pc_line (addr, 0);
1303 if (sal.line == 0)
1304 break;
1305 if (sal.line <= prologue_sal.line
1306 && sal.symtab == prologue_sal.symtab)
1307 {
1308 prologue_sal = sal;
1309 }
1310 addr = sal.end;
1311 }
1312
1313 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1314 {
1315 lim_pc = prologue_sal.end;
1316 if (start_pc == get_pc_function_start (lim_pc))
1317 *trust_limit = 1;
1318 }
1319 }
1320 return lim_pc;
1321 }
1322
1323 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1324 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1325 || (14 <= (_regnum_) && (_regnum_) <= 31))
1326 #define imm9(_instr_) \
1327 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1328 | (((_instr_) & 0x00008000000LL) >> 20) \
1329 | (((_instr_) & 0x00000001fc0LL) >> 6))
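/* A worked decode of the imm9 macro above (the instruction value is
   hypothetical): the 9-bit immediate is scattered over the instruction as
     instruction bit 36      -> sign (bits 8 and up after sign extension)
     instruction bit 27      -> immediate bit 7
     instruction bits 12..6  -> immediate bits 6..0
   so an instruction with bits 36 and 27 set and bits 12..6 equal to 0x70
   decodes to imm9 == -256 + 128 + 112 == -16.  */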
1330
1331 /* Allocate and initialize a frame cache. */
1332
1333 static struct ia64_frame_cache *
1334 ia64_alloc_frame_cache (void)
1335 {
1336 struct ia64_frame_cache *cache;
1337 int i;
1338
1339 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1340
1341 /* Base address. */
1342 cache->base = 0;
1343 cache->pc = 0;
1344 cache->cfm = 0;
1345 cache->prev_cfm = 0;
1346 cache->sof = 0;
1347 cache->sol = 0;
1348 cache->sor = 0;
1349 cache->bsp = 0;
1350 cache->fp_reg = 0;
1351 cache->frameless = 1;
1352
1353 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1354 cache->saved_regs[i] = 0;
1355
1356 return cache;
1357 }
1358
1359 static CORE_ADDR
1360 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1361 struct frame_info *this_frame,
1362 struct ia64_frame_cache *cache)
1363 {
1364 CORE_ADDR next_pc;
1365 CORE_ADDR last_prologue_pc = pc;
1366 ia64_instruction_type it;
1367 long long instr;
1368 int cfm_reg = 0;
1369 int ret_reg = 0;
1370 int fp_reg = 0;
1371 int unat_save_reg = 0;
1372 int pr_save_reg = 0;
1373 int mem_stack_frame_size = 0;
1374 int spill_reg = 0;
1375 CORE_ADDR spill_addr = 0;
1376 char instores[8];
1377 char infpstores[8];
1378 char reg_contents[256];
1379 int trust_limit;
1380 int frameless = 1;
1381 int i;
1382 CORE_ADDR addr;
1383 gdb_byte buf[8];
1384 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1385
1386 memset (instores, 0, sizeof instores);
1387 memset (infpstores, 0, sizeof infpstores);
1388 memset (reg_contents, 0, sizeof reg_contents);
1389
1390 if (cache->after_prologue != 0
1391 && cache->after_prologue <= lim_pc)
1392 return cache->after_prologue;
1393
1394 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1395 next_pc = fetch_instruction (pc, &it, &instr);
1396
1397 /* We want to check if we have a recognizable function start before we
1398 look ahead for a prologue. */
1399 if (pc < lim_pc && next_pc
1400 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1401 {
1402 /* alloc - start of a regular function. */
1403 int sol_bits = (int) ((instr & 0x00007f00000LL) >> 20);
1404 int sof_bits = (int) ((instr & 0x000000fe000LL) >> 13);
1405 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1406
1407 /* Verify that the current cfm matches what we think is the
1408 function start. If we have somehow jumped within a function,
1409 we do not want to interpret the prologue and calculate the
1410 addresses of various registers such as the return address.
1411 We will instead treat the frame as frameless. */
1412 if (!this_frame ||
1413 (sof_bits == (cache->cfm & 0x7f) &&
1414 sol_bits == ((cache->cfm >> 7) & 0x7f)))
1415 frameless = 0;
1416
1417 cfm_reg = rN;
1418 last_prologue_pc = next_pc;
1419 pc = next_pc;
1420 }
1421 else
1422 {
1423 /* Look for a leaf routine. */
1424 if (pc < lim_pc && next_pc
1425 && (it == I || it == M)
1426 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1427 {
1428 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1429 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1430 | ((instr & 0x001f8000000LL) >> 20)
1431 | ((instr & 0x000000fe000LL) >> 13));
1432 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1433 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1434 int qp = (int) (instr & 0x0000000003fLL);
1435 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1436 {
1437 /* mov r2, r12 - beginning of leaf routine. */
1438 fp_reg = rN;
1439 last_prologue_pc = next_pc;
1440 }
1441 }
1442
1443 /* If we don't recognize a regular function or leaf routine, we are
1444 done. */
1445 if (!fp_reg)
1446 {
1447 pc = lim_pc;
1448 if (trust_limit)
1449 last_prologue_pc = lim_pc;
1450 }
1451 }
1452
1453 /* Loop, looking for prologue instructions, keeping track of
1454 where preserved registers were spilled. */
1455 while (pc < lim_pc)
1456 {
1457 next_pc = fetch_instruction (pc, &it, &instr);
1458 if (next_pc == 0)
1459 break;
1460
1461 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1462 {
1463 /* Exit loop upon hitting a non-nop branch instruction. */
1464 if (trust_limit)
1465 lim_pc = pc;
1466 break;
1467 }
1468 else if (((instr & 0x3fLL) != 0LL) &&
1469 (frameless || ret_reg != 0))
1470 {
1471 /* Exit loop upon hitting a predicated instruction if
1472 we already have the return register or if we are frameless. */
1473 if (trust_limit)
1474 lim_pc = pc;
1475 break;
1476 }
1477 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1478 {
1479 /* Move from BR */
1480 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1481 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1482 int qp = (int) (instr & 0x0000000003f);
1483
1484 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1485 {
1486 ret_reg = rN;
1487 last_prologue_pc = next_pc;
1488 }
1489 }
1490 else if ((it == I || it == M)
1491 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1492 {
1493 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1494 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1495 | ((instr & 0x001f8000000LL) >> 20)
1496 | ((instr & 0x000000fe000LL) >> 13));
1497 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1498 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1499 int qp = (int) (instr & 0x0000000003fLL);
1500
1501 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1502 {
1503 /* mov rN, r12 */
1504 fp_reg = rN;
1505 last_prologue_pc = next_pc;
1506 }
1507 else if (qp == 0 && rN == 12 && rM == 12)
1508 {
1509 /* adds r12, -mem_stack_frame_size, r12 */
1510 mem_stack_frame_size -= imm;
1511 last_prologue_pc = next_pc;
1512 }
1513 else if (qp == 0 && rN == 2
1514 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1515 {
1516 CORE_ADDR saved_sp = 0;
1517 /* adds r2, spilloffset, rFramePointer
1518 or
1519 adds r2, spilloffset, r12
1520
1521 Get ready for stf.spill or st8.spill instructions.
1522 The address to start spilling at is loaded into r2.
1523 FIXME: Why r2? That's what gcc currently uses; it
1524 could well be different for other compilers. */
1525
1526 /* Hmm... whether or not this will work will depend on
1527 where the pc is. If it's still early in the prologue
1528 this'll be wrong. FIXME */
1529 if (this_frame)
1530 saved_sp = get_frame_register_unsigned (this_frame,
1531 sp_regnum);
1532 spill_addr = saved_sp
1533 + (rM == 12 ? 0 : mem_stack_frame_size)
1534 + imm;
1535 spill_reg = rN;
1536 last_prologue_pc = next_pc;
1537 }
1538 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1539 rN < 256 && imm == 0)
1540 {
1541 /* mov rN, rM where rM is an input register. */
1542 reg_contents[rN] = rM;
1543 last_prologue_pc = next_pc;
1544 }
1545 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1546 rM == 2)
1547 {
1548 /* mov r12, r2 */
1549 last_prologue_pc = next_pc;
1550 break;
1551 }
1552 }
1553 else if (it == M
1554 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1555 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1556 {
1557 /* stf.spill [rN] = fM, imm9
1558 or
1559 stf.spill [rN] = fM */
1560
1561 int imm = imm9(instr);
1562 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1563 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1564 int qp = (int) (instr & 0x0000000003fLL);
1565 if (qp == 0 && rN == spill_reg && spill_addr != 0
1566 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1567 {
1568 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1569
1570 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1571 spill_addr += imm;
1572 else
1573 spill_addr = 0; /* last one; must be done. */
1574 last_prologue_pc = next_pc;
1575 }
1576 }
1577 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1578 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1579 {
1580 /* mov.m rN = arM
1581 or
1582 mov.i rN = arM */
1583
1584 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1585 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1586 int qp = (int) (instr & 0x0000000003fLL);
1587 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1588 {
1589 /* We have something like "mov.m r3 = ar.unat". Remember the
1590 r3 (or whatever) and watch for a store of this register... */
1591 unat_save_reg = rN;
1592 last_prologue_pc = next_pc;
1593 }
1594 }
1595 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1596 {
1597 /* mov rN = pr */
1598 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1599 int qp = (int) (instr & 0x0000000003fLL);
1600 if (qp == 0 && isScratch (rN))
1601 {
1602 pr_save_reg = rN;
1603 last_prologue_pc = next_pc;
1604 }
1605 }
1606 else if (it == M
1607 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1608 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1609 {
1610 /* st8 [rN] = rM
1611 or
1612 st8 [rN] = rM, imm9 */
1613 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1614 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1615 int qp = (int) (instr & 0x0000000003fLL);
1616 int indirect = rM < 256 ? reg_contents[rM] : 0;
1617 if (qp == 0 && rN == spill_reg && spill_addr != 0
1618 && (rM == unat_save_reg || rM == pr_save_reg))
1619 {
1620 /* We've found a spill of either the UNAT register or the PR
1621 register. (Well, not exactly; what we've actually found is
1622 a spill of the register that UNAT or PR was moved to).
1623 Record that fact and move on... */
1624 if (rM == unat_save_reg)
1625 {
1626 /* Track UNAT register. */
1627 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1628 unat_save_reg = 0;
1629 }
1630 else
1631 {
1632 /* Track PR register. */
1633 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1634 pr_save_reg = 0;
1635 }
1636 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1637 /* st8 [rN] = rM, imm9 */
1638 spill_addr += imm9(instr);
1639 else
1640 spill_addr = 0; /* Must be done spilling. */
1641 last_prologue_pc = next_pc;
1642 }
1643 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1644 {
1645 /* Allow up to one store of each input register. */
1646 instores[rM-32] = 1;
1647 last_prologue_pc = next_pc;
1648 }
1649 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1650 !instores[indirect-32])
1651 {
1652 /* Allow an indirect store of an input register. */
1653 instores[indirect-32] = 1;
1654 last_prologue_pc = next_pc;
1655 }
1656 }
1657 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1658 {
1659 /* One of
1660 st1 [rN] = rM
1661 st2 [rN] = rM
1662 st4 [rN] = rM
1663 st8 [rN] = rM
1664 Note that the st8 case is handled in the clause above.
1665
1666 Advance over stores of input registers. One store per input
1667 register is permitted. */
1668 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1669 int qp = (int) (instr & 0x0000000003fLL);
1670 int indirect = rM < 256 ? reg_contents[rM] : 0;
1671 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1672 {
1673 instores[rM-32] = 1;
1674 last_prologue_pc = next_pc;
1675 }
1676 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1677 !instores[indirect-32])
1678 {
1679 /* Allow an indirect store of an input register. */
1680 instores[indirect-32] = 1;
1681 last_prologue_pc = next_pc;
1682 }
1683 }
1684 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1685 {
1686 /* Either
1687 stfs [rN] = fM
1688 or
1689 stfd [rN] = fM
1690
1691 Advance over stores of floating point input registers. Again
1692 one store per register is permitted. */
1693 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1694 int qp = (int) (instr & 0x0000000003fLL);
1695 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1696 {
1697 infpstores[fM-8] = 1;
1698 last_prologue_pc = next_pc;
1699 }
1700 }
1701 else if (it == M
1702 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1703 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1704 {
1705 /* st8.spill [rN] = rM
1706 or
1707 st8.spill [rN] = rM, imm9 */
1708 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1709 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1710 int qp = (int) (instr & 0x0000000003fLL);
1711 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1712 {
1713 /* We've found a spill of one of the preserved general purpose
1714 regs. Record the spill address and advance the spill
1715 register if appropriate. */
1716 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1717 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1718 /* st8.spill [rN] = rM, imm9 */
1719 spill_addr += imm9(instr);
1720 else
1721 spill_addr = 0; /* Done spilling. */
1722 last_prologue_pc = next_pc;
1723 }
1724 }
1725
1726 pc = next_pc;
1727 }
1728
1729 /* If not frameless and we aren't called by skip_prologue, then we need
1730 to calculate registers for the previous frame which will be needed
1731 later. */
1732
1733 if (!frameless && this_frame)
1734 {
1735 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1736 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1737
1738 /* Extract the size of the rotating portion of the stack
1739 frame and the register rename base from the current
1740 frame marker. */
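      /* (The CFM fields used here and elsewhere in this file are, roughly:
	 bits 0-6 sof, bits 7-13 sol, bits 14-17 sor in units of 8 registers,
	 bits 18-24 rrb.gr, bits 25-31 rrb.fr and bits 32-37 rrb.pr --
	 matching the shifts and masks applied below.)  */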
1741 cfm = cache->cfm;
1742 sor = cache->sor;
1743 sof = cache->sof;
1744 sol = cache->sol;
1745 rrb_gr = (cfm >> 18) & 0x7f;
1746
1747 /* Find the bof (beginning of frame). */
1748 bof = rse_address_add (cache->bsp, -sof);
1749
1750 for (i = 0, addr = bof;
1751 i < sof;
1752 i++, addr += 8)
1753 {
1754 if (IS_NaT_COLLECTION_ADDR (addr))
1755 {
1756 addr += 8;
1757 }
1758 if (i+32 == cfm_reg)
1759 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1760 if (i+32 == ret_reg)
1761 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1762 if (i+32 == fp_reg)
1763 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1764 }
1765
1766 /* For the previous argument registers we require the previous bof.
1767 If we can't find the previous cfm, then we can do nothing. */
1768 cfm = 0;
1769 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1770 {
1771 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1772 8, byte_order);
1773 }
1774 else if (cfm_reg != 0)
1775 {
1776 get_frame_register (this_frame, cfm_reg, buf);
1777 cfm = extract_unsigned_integer (buf, 8, byte_order);
1778 }
1779 cache->prev_cfm = cfm;
1780
1781 if (cfm != 0)
1782 {
1783 sor = ((cfm >> 14) & 0xf) * 8;
1784 sof = (cfm & 0x7f);
1785 sol = (cfm >> 7) & 0x7f;
1786 rrb_gr = (cfm >> 18) & 0x7f;
1787
1788 /* The previous bof only requires subtraction of the sol (size of
1789 locals) due to the overlap between output and input of
1790 subsequent frames. */
1791 bof = rse_address_add (bof, -sol);
1792
1793 for (i = 0, addr = bof;
1794 i < sof;
1795 i++, addr += 8)
1796 {
1797 if (IS_NaT_COLLECTION_ADDR (addr))
1798 {
1799 addr += 8;
1800 }
1801 if (i < sor)
1802 cache->saved_regs[IA64_GR32_REGNUM
1803 + ((i + (sor - rrb_gr)) % sor)]
1804 = addr;
1805 else
1806 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1807 }
1808
1809 }
1810 }
1811
1812 /* Try to trust the lim_pc value whenever possible. */
1813 if (trust_limit && lim_pc >= last_prologue_pc)
1814 last_prologue_pc = lim_pc;
1815
1816 cache->frameless = frameless;
1817 cache->after_prologue = last_prologue_pc;
1818 cache->mem_stack_frame_size = mem_stack_frame_size;
1819 cache->fp_reg = fp_reg;
1820
1821 return last_prologue_pc;
1822 }
1823
1824 CORE_ADDR
1825 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1826 {
1827 struct ia64_frame_cache cache;
1828 cache.base = 0;
1829 cache.after_prologue = 0;
1830 cache.cfm = 0;
1831 cache.bsp = 0;
1832
1833 /* Call examine_prologue with 0 as the third argument since we don't
1834 have a frame to pass down. */
1835 return examine_prologue (pc, pc+1024, 0, &cache);
1836 }
1837
1838
1839 /* Normal frames. */
1840
1841 static struct ia64_frame_cache *
1842 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1843 {
1844 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1845 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1846 struct ia64_frame_cache *cache;
1847 gdb_byte buf[8];
1848 CORE_ADDR cfm;
1849
1850 if (*this_cache)
1851 return (struct ia64_frame_cache *) *this_cache;
1852
1853 cache = ia64_alloc_frame_cache ();
1854 *this_cache = cache;
1855
1856 get_frame_register (this_frame, sp_regnum, buf);
1857 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1858
1859 /* We always want the bsp to point to the end of frame.
1860 This way, we can always get the beginning of frame (bof)
1861 by subtracting frame size. */
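      /* For example, examine_prologue derives the beginning of frame as
	 bof = rse_address_add (cache->bsp, -sof); rse_address_add is used
	 rather than plain subtraction so that any NaT collection slots lying
	 between bof and bsp are stepped over.  */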
1862 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1863 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1864
1865 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1866
1867 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1868 cfm = extract_unsigned_integer (buf, 8, byte_order);
1869
1870 cache->sof = (cfm & 0x7f);
1871 cache->sol = (cfm >> 7) & 0x7f;
1872 cache->sor = ((cfm >> 14) & 0xf) * 8;
1873
1874 cache->cfm = cfm;
1875
1876 cache->pc = get_frame_func (this_frame);
1877
1878 if (cache->pc != 0)
1879 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1880
1881 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1882
1883 return cache;
1884 }
1885
1886 static void
1887 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1888 struct frame_id *this_id)
1889 {
1890 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1891 struct ia64_frame_cache *cache =
1892 ia64_frame_cache (this_frame, this_cache);
1893
1894 /* If this is the outermost frame (base == 0), leave the null frame id; otherwise build a special frame id that also records the bsp. */
1895 if (cache->base != 0)
1896 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1897 if (gdbarch_debug >= 1)
1898 gdb_printf (gdb_stdlog,
1899 "regular frame id: code %s, stack %s, "
1900 "special %s, this_frame %s\n",
1901 paddress (gdbarch, this_id->code_addr),
1902 paddress (gdbarch, this_id->stack_addr),
1903 paddress (gdbarch, cache->bsp),
1904 host_address_to_string (this_frame));
1905 }
1906
1907 static struct value *
1908 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1909 int regnum)
1910 {
1911 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1912 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1913 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1914 gdb_byte buf[8];
1915
1916 gdb_assert (regnum >= 0);
1917
1918 if (!target_has_registers ())
1919 error (_("No registers."));
1920
1921 if (regnum == gdbarch_sp_regnum (gdbarch))
1922 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1923
1924 else if (regnum == IA64_BSP_REGNUM)
1925 {
1926 struct value *val;
1927 CORE_ADDR prev_cfm, bsp, prev_bsp;
1928
1929 /* We want to calculate the previous bsp as the end of the previous
1930 register stack frame. This corresponds to what the hardware bsp
1931 register will be if we pop the frame back which is why we might
1932 have been called. We know the beginning of the current frame is
1933 cache->bsp - cache->sof. This value in the previous frame points
1934 to the start of the output registers. We can calculate the end of
1935 that frame by adding the size of output:
1936 (sof (size of frame) - sol (size of locals)). */
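      /* Concretely: if the previous CFM were to show sof = 10 and sol = 8,
	 the previous frame's output area holds 10 - 8 = 2 registers, so the
	 previous bsp sits 2 slots (plus any intervening NaT collections)
	 above the current frame's bof computed below.  */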
1937 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1938 prev_cfm = extract_unsigned_integer (value_contents_all (val).data (),
1939 8, byte_order);
1940 bsp = rse_address_add (cache->bsp, -(cache->sof));
1941 prev_bsp =
1942 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1943
1944 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1945 }
1946
1947 else if (regnum == IA64_CFM_REGNUM)
1948 {
1949 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1950
1951 if (addr != 0)
1952 return frame_unwind_got_memory (this_frame, regnum, addr);
1953
1954 if (cache->prev_cfm)
1955 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1956
1957 if (cache->frameless)
1958 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1959 IA64_PFS_REGNUM);
1960 return frame_unwind_got_register (this_frame, regnum, 0);
1961 }
1962
1963 else if (regnum == IA64_VFP_REGNUM)
1964 {
1965 /* If the function in question uses an automatic register (r32-r127)
1966 for the frame pointer, it'll be found by ia64_find_saved_register()
1967 above. If the function lacks one of these frame pointers, we can
1968 still provide a value since we know the size of the frame. */
1969 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1970 }
1971
1972 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1973 {
1974 struct value *pr_val;
1975 ULONGEST prN;
1976
1977 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1978 IA64_PR_REGNUM);
1979 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1980 {
1981 /* Fetch predicate register rename base from current frame
1982 marker for this frame. */
1983 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1984
1985 /* Adjust the register number to account for register rotation. */
1986 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1987 }
1988 prN = extract_bit_field (value_contents_all (pr_val).data (),
1989 regnum - VP0_REGNUM, 1);
1990 return frame_unwind_got_constant (this_frame, regnum, prN);
1991 }
1992
1993 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1994 {
1995 struct value *unat_val;
1996 ULONGEST unatN;
1997 unat_val = ia64_frame_prev_register (this_frame, this_cache,
1998 IA64_UNAT_REGNUM);
1999 unatN = extract_bit_field (value_contents_all (unat_val).data (),
2000 regnum - IA64_NAT0_REGNUM, 1);
2001 return frame_unwind_got_constant (this_frame, regnum, unatN);
2002 }
2003
2004 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2005 {
2006 int natval = 0;
2007 /* Find address of general register corresponding to nat bit we're
2008 interested in. */
2009 CORE_ADDR gr_addr;
2010
2011 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2012
2013 if (gr_addr != 0)
2014 {
2015 /* Compute address of nat collection bits. */
2016 CORE_ADDR nat_addr = gr_addr | 0x1f8;
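	  /* Each 64-slot (0x200-byte) chunk of the backing store keeps its
	     NaT collection in the slot at offset 0x1f8, so OR-ing with 0x1f8
	     yields the collection covering gr_addr; bits 3-8 of gr_addr then
	     select the bit within that collection (see nat_bit below).  */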
2017 CORE_ADDR bsp;
2018 CORE_ADDR nat_collection;
2019 int nat_bit;
2020
2021 /* If our nat collection address is bigger than bsp, we have to get
2022 the nat collection from rnat. Otherwise, we fetch the nat
2023 collection from the computed address. */
2024 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2025 bsp = extract_unsigned_integer (buf, 8, byte_order);
2026 if (nat_addr >= bsp)
2027 {
2028 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2029 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2030 }
2031 else
2032 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2033 nat_bit = (gr_addr >> 3) & 0x3f;
2034 natval = (nat_collection >> nat_bit) & 1;
2035 }
2036
2037 return frame_unwind_got_constant (this_frame, regnum, natval);
2038 }
2039
2040 else if (regnum == IA64_IP_REGNUM)
2041 {
2042 CORE_ADDR pc = 0;
2043 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2044
2045 if (addr != 0)
2046 {
2047 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2048 pc = extract_unsigned_integer (buf, 8, byte_order);
2049 }
2050 else if (cache->frameless)
2051 {
2052 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2053 pc = extract_unsigned_integer (buf, 8, byte_order);
2054 }
2055 pc &= ~0xf;
2056 return frame_unwind_got_constant (this_frame, regnum, pc);
2057 }
2058
2059 else if (regnum == IA64_PSR_REGNUM)
2060 {
2061 /* We don't know how to get the complete previous PSR, but we need it
2062 for the slot information when we unwind the pc (pc is formed of IP
2063 register plus slot information from PSR). To get the previous
2064 slot information, we extract it from the low bits of the return address. */
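      /* The psr.ri slot number occupies bits 41-42 (hence the 3LL << 41
	 mask below), while the slot recovered from the return address is
	 carried in the low two bits of the pc value.  */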
2065 ULONGEST slot_num = 0;
2066 CORE_ADDR pc = 0;
2067 CORE_ADDR psr = 0;
2068 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2069
2070 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2071 psr = extract_unsigned_integer (buf, 8, byte_order);
2072
2073 if (addr != 0)
2074 {
2075 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2076 pc = extract_unsigned_integer (buf, 8, byte_order);
2077 }
2078 else if (cache->frameless)
2079 {
2080 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2081 pc = extract_unsigned_integer (buf, 8, byte_order);
2082 }
2083 psr &= ~(3LL << 41);
2084 slot_num = pc & 0x3LL;
2085 psr |= (CORE_ADDR)slot_num << 41;
2086 return frame_unwind_got_constant (this_frame, regnum, psr);
2087 }
2088
2089 else if (regnum == IA64_BR0_REGNUM)
2090 {
2091 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2092
2093 if (addr != 0)
2094 return frame_unwind_got_memory (this_frame, regnum, addr);
2095
2096 return frame_unwind_got_constant (this_frame, regnum, 0);
2097 }
2098
2099 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2100 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2101 {
2102 CORE_ADDR addr = 0;
2103
2104 if (regnum >= V32_REGNUM)
2105 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2106 addr = cache->saved_regs[regnum];
2107 if (addr != 0)
2108 return frame_unwind_got_memory (this_frame, regnum, addr);
2109
2110 if (cache->frameless)
2111 {
2112 struct value *reg_val;
2113 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2114
2115 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2116 with the same code above? */
2117 if (regnum >= V32_REGNUM)
2118 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2119 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2120 IA64_CFM_REGNUM);
2121 prev_cfm = extract_unsigned_integer
2122 (value_contents_all (reg_val).data (), 8, byte_order);
2123 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2124 IA64_BSP_REGNUM);
2125 prev_bsp = extract_unsigned_integer
2126 (value_contents_all (reg_val).data (), 8, byte_order);
2127 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2128
2129 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2130 return frame_unwind_got_memory (this_frame, regnum, addr);
2131 }
2132
2133 return frame_unwind_got_constant (this_frame, regnum, 0);
2134 }
2135
2136 else /* All other registers. */
2137 {
2138 CORE_ADDR addr = 0;
2139
2140 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2141 {
2142 /* Fetch floating point register rename base from current
2143 frame marker for this frame. */
2144 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2145
2146 /* Adjust the floating point register number to account for
2147 register rotation. */
2148 regnum = IA64_FR32_REGNUM
2149 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2150 }
2151
2152 /* If we have stored a memory address, access the register. */
2153 addr = cache->saved_regs[regnum];
2154 if (addr != 0)
2155 return frame_unwind_got_memory (this_frame, regnum, addr);
2156 /* Otherwise, punt and get the current value of the register. */
2157 else
2158 return frame_unwind_got_register (this_frame, regnum, regnum);
2159 }
2160 }
2161
2162 static const struct frame_unwind ia64_frame_unwind =
2163 {
2164 "ia64 prologue",
2165 NORMAL_FRAME,
2166 default_frame_unwind_stop_reason,
2167 &ia64_frame_this_id,
2168 &ia64_frame_prev_register,
2169 NULL,
2170 default_frame_sniffer
2171 };
2172
2173 /* Signal trampolines. */
2174
2175 static void
2176 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2177 struct ia64_frame_cache *cache)
2178 {
2179 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2180 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
2181
2182 if (tdep->sigcontext_register_address)
2183 {
2184 int regno;
2185
2186 cache->saved_regs[IA64_VRAP_REGNUM]
2187 = tdep->sigcontext_register_address (gdbarch, cache->base,
2188 IA64_IP_REGNUM);
2189 cache->saved_regs[IA64_CFM_REGNUM]
2190 = tdep->sigcontext_register_address (gdbarch, cache->base,
2191 IA64_CFM_REGNUM);
2192 cache->saved_regs[IA64_PSR_REGNUM]
2193 = tdep->sigcontext_register_address (gdbarch, cache->base,
2194 IA64_PSR_REGNUM);
2195 cache->saved_regs[IA64_BSP_REGNUM]
2196 = tdep->sigcontext_register_address (gdbarch, cache->base,
2197 IA64_BSP_REGNUM);
2198 cache->saved_regs[IA64_RNAT_REGNUM]
2199 = tdep->sigcontext_register_address (gdbarch, cache->base,
2200 IA64_RNAT_REGNUM);
2201 cache->saved_regs[IA64_CCV_REGNUM]
2202 = tdep->sigcontext_register_address (gdbarch, cache->base,
2203 IA64_CCV_REGNUM);
2204 cache->saved_regs[IA64_UNAT_REGNUM]
2205 = tdep->sigcontext_register_address (gdbarch, cache->base,
2206 IA64_UNAT_REGNUM);
2207 cache->saved_regs[IA64_FPSR_REGNUM]
2208 = tdep->sigcontext_register_address (gdbarch, cache->base,
2209 IA64_FPSR_REGNUM);
2210 cache->saved_regs[IA64_PFS_REGNUM]
2211 = tdep->sigcontext_register_address (gdbarch, cache->base,
2212 IA64_PFS_REGNUM);
2213 cache->saved_regs[IA64_LC_REGNUM]
2214 = tdep->sigcontext_register_address (gdbarch, cache->base,
2215 IA64_LC_REGNUM);
2216
2217 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2218 cache->saved_regs[regno] =
2219 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2220 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2221 cache->saved_regs[regno] =
2222 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2223 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2224 cache->saved_regs[regno] =
2225 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2226 }
2227 }
2228
2229 static struct ia64_frame_cache *
2230 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2231 {
2232 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2233 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2234 struct ia64_frame_cache *cache;
2235 gdb_byte buf[8];
2236
2237 if (*this_cache)
2238 return (struct ia64_frame_cache *) *this_cache;
2239
2240 cache = ia64_alloc_frame_cache ();
2241
2242 get_frame_register (this_frame, sp_regnum, buf);
2243 /* Note that frame size is hard-coded below. We cannot calculate it
2244 via prologue examination. */
2245 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2246
2247 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2248 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2249
2250 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2251 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2252 cache->sof = cache->cfm & 0x7f;
2253
2254 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2255
2256 *this_cache = cache;
2257 return cache;
2258 }
2259
2260 static void
2261 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2262 void **this_cache, struct frame_id *this_id)
2263 {
2264 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2265 struct ia64_frame_cache *cache =
2266 ia64_sigtramp_frame_cache (this_frame, this_cache);
2267
2268 (*this_id) = frame_id_build_special (cache->base,
2269 get_frame_pc (this_frame),
2270 cache->bsp);
2271 if (gdbarch_debug >= 1)
2272 gdb_printf (gdb_stdlog,
2273 "sigtramp frame id: code %s, stack %s, "
2274 "special %s, this_frame %s\n",
2275 paddress (gdbarch, this_id->code_addr),
2276 paddress (gdbarch, this_id->stack_addr),
2277 paddress (gdbarch, cache->bsp),
2278 host_address_to_string (this_frame));
2279 }
2280
2281 static struct value *
2282 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2283 void **this_cache, int regnum)
2284 {
2285 struct ia64_frame_cache *cache =
2286 ia64_sigtramp_frame_cache (this_frame, this_cache);
2287
2288 gdb_assert (regnum >= 0);
2289
2290 if (!target_has_registers ())
2291 error (_("No registers."));
2292
2293 if (regnum == IA64_IP_REGNUM)
2294 {
2295 CORE_ADDR pc = 0;
2296 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2297
2298 if (addr != 0)
2299 {
2300 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2301 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2302 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2303 }
2304 pc &= ~0xf;
2305 return frame_unwind_got_constant (this_frame, regnum, pc);
2306 }
2307
2308 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2309 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2310 {
2311 CORE_ADDR addr = 0;
2312
2313 if (regnum >= V32_REGNUM)
2314 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2315 addr = cache->saved_regs[regnum];
2316 if (addr != 0)
2317 return frame_unwind_got_memory (this_frame, regnum, addr);
2318
2319 return frame_unwind_got_constant (this_frame, regnum, 0);
2320 }
2321
2322 else /* All other registers not listed above. */
2323 {
2324 CORE_ADDR addr = cache->saved_regs[regnum];
2325
2326 if (addr != 0)
2327 return frame_unwind_got_memory (this_frame, regnum, addr);
2328
2329 return frame_unwind_got_constant (this_frame, regnum, 0);
2330 }
2331 }
2332
2333 static int
2334 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2335 struct frame_info *this_frame,
2336 void **this_cache)
2337 {
2338 gdbarch *arch = get_frame_arch (this_frame);
2339 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (arch);
2340 if (tdep->pc_in_sigtramp)
2341 {
2342 CORE_ADDR pc = get_frame_pc (this_frame);
2343
2344 if (tdep->pc_in_sigtramp (pc))
2345 return 1;
2346 }
2347
2348 return 0;
2349 }
2350
2351 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2352 {
2353 "ia64 sigtramp",
2354 SIGTRAMP_FRAME,
2355 default_frame_unwind_stop_reason,
2356 ia64_sigtramp_frame_this_id,
2357 ia64_sigtramp_frame_prev_register,
2358 NULL,
2359 ia64_sigtramp_frame_sniffer
2360 };
2361
2362 \f
2363
2364 static CORE_ADDR
2365 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2366 {
2367 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2368
2369 return cache->base;
2370 }
2371
2372 static const struct frame_base ia64_frame_base =
2373 {
2374 &ia64_frame_unwind,
2375 ia64_frame_base_address,
2376 ia64_frame_base_address,
2377 ia64_frame_base_address
2378 };
2379
2380 #ifdef HAVE_LIBUNWIND_IA64_H
2381
2382 struct ia64_unwind_table_entry
2383 {
2384 unw_word_t start_offset;
2385 unw_word_t end_offset;
2386 unw_word_t info_offset;
2387 };
2388
2389 static __inline__ uint64_t
2390 ia64_rse_slot_num (uint64_t addr)
2391 {
2392 return (addr >> 3) & 0x3f;
2393 }
2394
2395 /* Skip over a designated number of registers in the backing
2396 store, remembering every 64th position is for NAT. */
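/* For instance, skipping 96 registers forward from an address whose slot
   number is 0 advances by 97 slots (97 * 8 bytes), since the NaT collection
   at slot 63 lies in between: delta = 96, delta / 0x3f = 1, giving
   addr + ((96 + 1) << 3).  */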
2397 static __inline__ uint64_t
2398 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2399 {
2400 long delta = ia64_rse_slot_num(addr) + num_regs;
2401
2402 if (num_regs < 0)
2403 delta -= 0x3e;
2404 return addr + ((num_regs + delta/0x3f) << 3);
2405 }
2406
2407 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2408 register number to a libunwind register number. */
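/* For example, IA64_GR0_REGNUM + 5 (gr5) maps to UNW_IA64_GR + 5, while the
   individual predicate registers deliberately map to -1; libunwind sees the
   predicates only as the single UNW_IA64_PR unit.  */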
2409 static int
2410 ia64_gdb2uw_regnum (int regnum)
2411 {
2412 if (regnum == sp_regnum)
2413 return UNW_IA64_SP;
2414 else if (regnum == IA64_BSP_REGNUM)
2415 return UNW_IA64_BSP;
2416 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2417 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2418 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2419 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2420 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2421 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2422 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2423 return -1;
2424 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2425 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2426 else if (regnum == IA64_PR_REGNUM)
2427 return UNW_IA64_PR;
2428 else if (regnum == IA64_IP_REGNUM)
2429 return UNW_REG_IP;
2430 else if (regnum == IA64_CFM_REGNUM)
2431 return UNW_IA64_CFM;
2432 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2433 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2434 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2435 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2436 else
2437 return -1;
2438 }
2439
2440 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2441 register number to an ia64 gdb register number. */
2442 static int
2443 ia64_uw2gdb_regnum (int uw_regnum)
2444 {
2445 if (uw_regnum == UNW_IA64_SP)
2446 return sp_regnum;
2447 else if (uw_regnum == UNW_IA64_BSP)
2448 return IA64_BSP_REGNUM;
2449 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2450 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2451 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2452 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2453 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2454 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2455 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2456 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2457 else if (uw_regnum == UNW_IA64_PR)
2458 return IA64_PR_REGNUM;
2459 else if (uw_regnum == UNW_REG_IP)
2460 return IA64_IP_REGNUM;
2461 else if (uw_regnum == UNW_IA64_CFM)
2462 return IA64_CFM_REGNUM;
2463 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2464 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2465 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2466 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2467 else
2468 return -1;
2469 }
2470
2471 /* Gdb ia64-libunwind-tdep callback function to reveal if register is
2472 a float register or not. */
2473 static int
2474 ia64_is_fpreg (int uw_regnum)
2475 {
2476 return unw_is_fpreg (uw_regnum);
2477 }
2478
2479 /* Libunwind callback accessor function for general registers. */
2480 static int
2481 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2482 int write, void *arg)
2483 {
2484 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2485 unw_word_t bsp, sof, cfm, psr, ip;
2486 struct frame_info *this_frame = (struct frame_info *) arg;
2487 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2488 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
2489
2490 /* We never call any libunwind routines that need to write registers. */
2491 gdb_assert (!write);
2492
2493 switch (uw_regnum)
2494 {
2495 case UNW_REG_IP:
2496 /* Libunwind expects to see the pc value which means the slot number
2497 from the psr must be merged with the ip word address. */
2498 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2499 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2500 *val = ip | ((psr >> 41) & 0x3);
2501 break;
2502
2503 case UNW_IA64_AR_BSP:
2504 /* Libunwind expects to see the beginning of the current
2505 register frame so we must account for the fact that
2506 ptrace() will return a value for bsp that points *after*
2507 the current register frame. */
2508 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2509 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2510 sof = tdep->size_of_register_frame (this_frame, cfm);
2511 *val = ia64_rse_skip_regs (bsp, -sof);
2512 break;
2513
2514 case UNW_IA64_AR_BSPSTORE:
2515 /* Libunwind wants bspstore to be after the current register frame.
2516 This is what ptrace() and gdb treat as the regular bsp value. */
2517 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2518 break;
2519
2520 default:
2521 /* For all other registers, just unwind the value directly. */
2522 *val = get_frame_register_unsigned (this_frame, regnum);
2523 break;
2524 }
2525
2526 if (gdbarch_debug >= 1)
2527 gdb_printf (gdb_stdlog,
2528 " access_reg: from cache: %4s=%s\n",
2529 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2530 ? ia64_register_names[regnum] : "r??"),
2531 paddress (gdbarch, *val));
2532 return 0;
2533 }
2534
2535 /* Libunwind callback accessor function for floating-point registers. */
2536 static int
2537 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2538 unw_fpreg_t *val, int write, void *arg)
2539 {
2540 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2541 struct frame_info *this_frame = (struct frame_info *) arg;
2542
2543 /* We never call any libunwind routines that need to write registers. */
2544 gdb_assert (!write);
2545
2546 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2547
2548 return 0;
2549 }
2550
2551 /* Libunwind callback accessor function for top-level rse registers. */
2552 static int
2553 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2554 unw_word_t *val, int write, void *arg)
2555 {
2556 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2557 unw_word_t bsp, sof, cfm, psr, ip;
2558 struct regcache *regcache = (struct regcache *) arg;
2559 struct gdbarch *gdbarch = regcache->arch ();
2560
2561 /* We never call any libunwind routines that need to write registers. */
2562 gdb_assert (!write);
2563
2564 switch (uw_regnum)
2565 {
2566 case UNW_REG_IP:
2567 /* Libunwind expects to see the pc value which means the slot number
2568 from the psr must be merged with the ip word address. */
2569 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2570 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2571 *val = ip | ((psr >> 41) & 0x3);
2572 break;
2573
2574 case UNW_IA64_AR_BSP:
2575 /* Libunwind expects to see the beginning of the current
2576 register frame so we must account for the fact that
2577 ptrace() will return a value for bsp that points *after*
2578 the current register frame. */
2579 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2580 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2581 sof = (cfm & 0x7f);
2582 *val = ia64_rse_skip_regs (bsp, -sof);
2583 break;
2584
2585 case UNW_IA64_AR_BSPSTORE:
2586 /* Libunwind wants bspstore to be after the current register frame.
2587 This is what ptrace() and gdb treat as the regular bsp value. */
2588 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2589 break;
2590
2591 default:
2592 /* For all other registers, just unwind the value directly. */
2593 regcache_cooked_read_unsigned (regcache, regnum, val);
2594 break;
2595 }
2596
2597 if (gdbarch_debug >= 1)
2598 gdb_printf (gdb_stdlog,
2599 " access_rse_reg: from cache: %4s=%s\n",
2600 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2601 ? ia64_register_names[regnum] : "r??"),
2602 paddress (gdbarch, *val));
2603
2604 return 0;
2605 }
2606
2607 /* Libunwind callback accessor function for top-level fp registers. */
2608 static int
2609 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2610 unw_fpreg_t *val, int write, void *arg)
2611 {
2612 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2613 struct regcache *regcache = (struct regcache *) arg;
2614
2615 /* We never call any libunwind routines that need to write registers. */
2616 gdb_assert (!write);
2617
2618 regcache->cooked_read (regnum, (gdb_byte *) val);
2619
2620 return 0;
2621 }
2622
2623 /* Libunwind callback accessor function for accessing memory. */
2624 static int
2625 ia64_access_mem (unw_addr_space_t as,
2626 unw_word_t addr, unw_word_t *val,
2627 int write, void *arg)
2628 {
2629 if (addr - KERNEL_START < ktab_size)
2630 {
2631 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2632 + (addr - KERNEL_START));
2633
2634 if (write)
2635 *laddr = *val;
2636 else
2637 *val = *laddr;
2638 return 0;
2639 }
2640
2641 /* XXX do we need to normalize byte-order here? */
2642 if (write)
2643 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2644 else
2645 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2646 }
2647
2648 /* Call low-level function to access the kernel unwind table. */
2649 static gdb::optional<gdb::byte_vector>
2650 getunwind_table ()
2651 {
2652 /* FIXME drow/2005-09-10: This code used to call
2653 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2654 for the currently running ia64-linux kernel. That data should
2655 come from the core file and be accessed via the auxv vector; if
2656 we want to preserve the fallback to the running kernel's table, then
2657 we should find a way to override the corefile layer's
2658 xfer_partial method. */
2659
2660 return target_read_alloc (current_inferior ()->top_target (),
2661 TARGET_OBJECT_UNWIND_TABLE, NULL);
2662 }
2663
2664 /* Get the kernel unwind table. */
2665 static int
2666 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2667 {
2668 static struct ia64_table_entry *etab;
2669
2670 if (!ktab)
2671 {
2672 ktab_buf = getunwind_table ();
2673 if (!ktab_buf)
2674 return -UNW_ENOINFO;
2675
2676 ktab = (struct ia64_table_entry *) ktab_buf->data ();
2677 ktab_size = ktab_buf->size ();
2678
2679 for (etab = ktab; etab->start_offset; ++etab)
2680 etab->info_offset += KERNEL_START;
2681 }
2682
2683 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2684 return -UNW_ENOINFO;
2685
2686 di->format = UNW_INFO_FORMAT_TABLE;
2687 di->gp = 0;
2688 di->start_ip = ktab[0].start_offset;
2689 di->end_ip = etab[-1].end_offset;
2690 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2691 di->u.ti.segbase = 0;
2692 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2693 di->u.ti.table_data = (unw_word_t *) ktab;
2694
2695 if (gdbarch_debug >= 1)
2696 gdb_printf (gdb_stdlog, "get_kernel_table: found table `%s': "
2697 "segbase=%s, length=%s, gp=%s\n",
2698 (char *) di->u.ti.name_ptr,
2699 hex_string (di->u.ti.segbase),
2700 pulongest (di->u.ti.table_len),
2701 hex_string (di->gp));
2702 return 0;
2703 }
2704
2705 /* Find the unwind table entry for a specified address. */
2706 static int
2707 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2708 unw_dyn_info_t *dip, void **buf)
2709 {
2710 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2711 Elf_Internal_Ehdr *ehdr;
2712 unw_word_t segbase = 0;
2713 CORE_ADDR load_base;
2714 bfd *bfd;
2715 int i;
2716
2717 bfd = objfile->obfd;
2718
2719 ehdr = elf_tdata (bfd)->elf_header;
2720 phdr = elf_tdata (bfd)->phdr;
2721
2722 load_base = objfile->text_section_offset ();
2723
2724 for (i = 0; i < ehdr->e_phnum; ++i)
2725 {
2726 switch (phdr[i].p_type)
2727 {
2728 case PT_LOAD:
2729 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2730 < phdr[i].p_memsz)
2731 p_text = phdr + i;
2732 break;
2733
2734 case PT_IA_64_UNWIND:
2735 p_unwind = phdr + i;
2736 break;
2737
2738 default:
2739 break;
2740 }
2741 }
2742
2743 if (!p_text || !p_unwind)
2744 return -UNW_ENOINFO;
2745
2746 /* Verify that the segment that contains the IP also contains
2747 the static unwind table. If not, we may be in the Linux kernel's
2748 DSO gate page, in which case the unwind table lives in another segment.
2749 Otherwise, we are dealing with runtime-generated code, for which we
2750 have no info here. */
2751 segbase = p_text->p_vaddr + load_base;
2752
2753 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2754 {
2755 int ok = 0;
2756 for (i = 0; i < ehdr->e_phnum; ++i)
2757 {
2758 if (phdr[i].p_type == PT_LOAD
2759 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2760 {
2761 ok = 1;
2762 /* Get the segbase from the section containing the
2763 libunwind table. */
2764 segbase = phdr[i].p_vaddr + load_base;
2765 }
2766 }
2767 if (!ok)
2768 return -UNW_ENOINFO;
2769 }
2770
2771 dip->start_ip = p_text->p_vaddr + load_base;
2772 dip->end_ip = dip->start_ip + p_text->p_memsz;
2773 dip->gp = ia64_find_global_pointer (objfile->arch (), ip);
2774 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2775 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2776 dip->u.rti.segbase = segbase;
2777 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2778 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2779
2780 return 0;
2781 }
2782
2783 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2784 static int
2785 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2786 int need_unwind_info, void *arg)
2787 {
2788 struct obj_section *sec = find_pc_section (ip);
2789 unw_dyn_info_t di;
2790 int ret;
2791 void *buf = NULL;
2792
2793 if (!sec)
2794 {
2795 /* XXX This only works if the host and the target architecture are
2796 both ia64 and if they have (more or less) the same kernel
2797 version. */
2798 if (get_kernel_table (ip, &di) < 0)
2799 return -UNW_ENOINFO;
2800
2801 if (gdbarch_debug >= 1)
2802 gdb_printf (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2803 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2804 "length=%s,data=%s)\n",
2805 hex_string (ip), (char *)di.u.ti.name_ptr,
2806 hex_string (di.u.ti.segbase),
2807 hex_string (di.start_ip), hex_string (di.end_ip),
2808 hex_string (di.gp),
2809 pulongest (di.u.ti.table_len),
2810 hex_string ((CORE_ADDR)di.u.ti.table_data));
2811 }
2812 else
2813 {
2814 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2815 if (ret < 0)
2816 return ret;
2817
2818 if (gdbarch_debug >= 1)
2819 gdb_printf (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2820 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2821 "length=%s,data=%s)\n",
2822 hex_string (ip), (char *)di.u.rti.name_ptr,
2823 hex_string (di.u.rti.segbase),
2824 hex_string (di.start_ip), hex_string (di.end_ip),
2825 hex_string (di.gp),
2826 pulongest (di.u.rti.table_len),
2827 hex_string (di.u.rti.table_data));
2828 }
2829
2830 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2831 arg);
2832
2833 /* We no longer need the dyn info storage so free it. */
2834 xfree (buf);
2835
2836 return ret;
2837 }
2838
2839 /* Libunwind callback accessor function for cleanup. */
2840 static void
2841 ia64_put_unwind_info (unw_addr_space_t as,
2842 unw_proc_info_t *pip, void *arg)
2843 {
2844 /* Nothing required for now. */
2845 }
2846
2847 /* Libunwind callback accessor function to get head of the dynamic
2848 unwind-info registration list. */
2849 static int
2850 ia64_get_dyn_info_list (unw_addr_space_t as,
2851 unw_word_t *dilap, void *arg)
2852 {
2853 struct obj_section *text_sec;
2854 unw_word_t ip, addr;
2855 unw_dyn_info_t di;
2856 int ret;
2857
2858 if (!libunwind_is_initialized ())
2859 return -UNW_ENOINFO;
2860
2861 for (objfile *objfile : current_program_space->objfiles ())
2862 {
2863 void *buf = NULL;
2864
2865 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2866 ip = text_sec->addr ();
2867 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2868 if (ret >= 0)
2869 {
2870 addr = libunwind_find_dyn_list (as, &di, arg);
2871 /* We no longer need the dyn info storage so free it. */
2872 xfree (buf);
2873
2874 if (addr)
2875 {
2876 if (gdbarch_debug >= 1)
2877 gdb_printf (gdb_stdlog,
2878 "dynamic unwind table in objfile %s "
2879 "at %s (gp=%s)\n",
2880 bfd_get_filename (objfile->obfd),
2881 hex_string (addr), hex_string (di.gp));
2882 *dilap = addr;
2883 return 0;
2884 }
2885 }
2886 }
2887 return -UNW_ENOINFO;
2888 }
2889
2890
2891 /* Frame interface functions for libunwind. */
2892
2893 static void
2894 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2895 struct frame_id *this_id)
2896 {
2897 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2898 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2899 struct frame_id id = outer_frame_id;
2900 gdb_byte buf[8];
2901 CORE_ADDR bsp;
2902
2903 libunwind_frame_this_id (this_frame, this_cache, &id);
2904 if (frame_id_eq (id, outer_frame_id))
2905 {
2906 (*this_id) = outer_frame_id;
2907 return;
2908 }
2909
2910 /* We must add the bsp as the special address for frame comparison
2911 purposes. */
2912 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2913 bsp = extract_unsigned_integer (buf, 8, byte_order);
2914
2915 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2916
2917 if (gdbarch_debug >= 1)
2918 gdb_printf (gdb_stdlog,
2919 "libunwind frame id: code %s, stack %s, "
2920 "special %s, this_frame %s\n",
2921 paddress (gdbarch, id.code_addr),
2922 paddress (gdbarch, id.stack_addr),
2923 paddress (gdbarch, bsp),
2924 host_address_to_string (this_frame));
2925 }
2926
2927 static struct value *
2928 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2929 void **this_cache, int regnum)
2930 {
2931 int reg = regnum;
2932 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2933 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2934 struct value *val;
2935
2936 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2937 reg = IA64_PR_REGNUM;
2938 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2939 reg = IA64_UNAT_REGNUM;
2940
2941 /* Let libunwind do most of the work. */
2942 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2943
2944 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2945 {
2946 ULONGEST prN_val;
2947
2948 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2949 {
2950 int rrb_pr = 0;
2951 ULONGEST cfm;
2952
2953 /* Fetch predicate register rename base from current frame
2954 marker for this frame. */
2955 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2956 rrb_pr = (cfm >> 32) & 0x3f;
2957
2958 /* Adjust the register number to account for register rotation. */
2959 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2960 }
2961 prN_val = extract_bit_field (value_contents_all (val).data (),
2962 regnum - VP0_REGNUM, 1);
2963 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2964 }
2965
2966 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2967 {
2968 ULONGEST unatN_val;
2969
2970 unatN_val = extract_bit_field (value_contents_all (val).data (),
2971 regnum - IA64_NAT0_REGNUM, 1);
2972 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2973 }
2974
2975 else if (regnum == IA64_BSP_REGNUM)
2976 {
2977 struct value *cfm_val;
2978 CORE_ADDR prev_bsp, prev_cfm;
2979
2980 /* We want to calculate the previous bsp as the end of the previous
2981 register stack frame. This corresponds to what the hardware bsp
2982 register will be if we pop the frame back which is why we might
2983 have been called. We know that libunwind will pass us back the
2984 beginning of the current frame so we should just add sof to it. */
2985 prev_bsp = extract_unsigned_integer (value_contents_all (val).data (),
2986 8, byte_order);
2987 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2988 IA64_CFM_REGNUM);
2989 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val).data (),
2990 8, byte_order);
2991 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2992
2993 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
2994 }
2995 else
2996 return val;
2997 }
2998
2999 static int
3000 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3001 struct frame_info *this_frame,
3002 void **this_cache)
3003 {
3004 if (libunwind_is_initialized ()
3005 && libunwind_frame_sniffer (self, this_frame, this_cache))
3006 return 1;
3007
3008 return 0;
3009 }
3010
3011 static const struct frame_unwind ia64_libunwind_frame_unwind =
3012 {
3013 "ia64 libunwind",
3014 NORMAL_FRAME,
3015 default_frame_unwind_stop_reason,
3016 ia64_libunwind_frame_this_id,
3017 ia64_libunwind_frame_prev_register,
3018 NULL,
3019 ia64_libunwind_frame_sniffer,
3020 libunwind_frame_dealloc_cache
3021 };
3022
3023 static void
3024 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3025 void **this_cache,
3026 struct frame_id *this_id)
3027 {
3028 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3029 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3030 gdb_byte buf[8];
3031 CORE_ADDR bsp;
3032 struct frame_id id = outer_frame_id;
3033
3034 libunwind_frame_this_id (this_frame, this_cache, &id);
3035 if (frame_id_eq (id, outer_frame_id))
3036 {
3037 (*this_id) = outer_frame_id;
3038 return;
3039 }
3040
3041 /* We must add the bsp as the special address for frame comparison
3042 purposes. */
3043 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3044 bsp = extract_unsigned_integer (buf, 8, byte_order);
3045
3046 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3047 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3048
3049 if (gdbarch_debug >= 1)
3050 gdb_printf (gdb_stdlog,
3051 "libunwind sigtramp frame id: code %s, "
3052 "stack %s, special %s, this_frame %s\n",
3053 paddress (gdbarch, id.code_addr),
3054 paddress (gdbarch, id.stack_addr),
3055 paddress (gdbarch, bsp),
3056 host_address_to_string (this_frame));
3057 }
3058
3059 static struct value *
3060 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3061 void **this_cache, int regnum)
3062 {
3063 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3064 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3065 struct value *prev_ip_val;
3066 CORE_ADDR prev_ip;
3067
3068 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3069 method of getting previous registers. */
3070 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3071 IA64_IP_REGNUM);
3072 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val).data (),
3073 8, byte_order);
3074
3075 if (prev_ip == 0)
3076 {
3077 void *tmp_cache = NULL;
3078 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3079 regnum);
3080 }
3081 else
3082 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3083 }
3084
3085 static int
3086 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3087 struct frame_info *this_frame,
3088 void **this_cache)
3089 {
3090 if (libunwind_is_initialized ())
3091 {
3092 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3093 return 1;
3094 return 0;
3095 }
3096 else
3097 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3098 }
3099
3100 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3101 {
3102 "ia64 libunwind sigtramp",
3103 SIGTRAMP_FRAME,
3104 default_frame_unwind_stop_reason,
3105 ia64_libunwind_sigtramp_frame_this_id,
3106 ia64_libunwind_sigtramp_frame_prev_register,
3107 NULL,
3108 ia64_libunwind_sigtramp_frame_sniffer
3109 };
3110
3111 /* Set of libunwind callback accessor functions. */
3112 unw_accessors_t ia64_unw_accessors =
3113 {
3114 ia64_find_proc_info_x,
3115 ia64_put_unwind_info,
3116 ia64_get_dyn_info_list,
3117 ia64_access_mem,
3118 ia64_access_reg,
3119 ia64_access_fpreg,
3120 /* resume */
3121 /* get_proc_name */
3122 };
3123
3124 /* Set of special libunwind callback accessor functions specific to accessing
3125 the rse registers. At the top of the stack, we want libunwind to figure out
3126 how to read r32 - r127. Though usually they are found sequentially in
3127 memory starting from $bof, this is not always true. */
3128 unw_accessors_t ia64_unw_rse_accessors =
3129 {
3130 ia64_find_proc_info_x,
3131 ia64_put_unwind_info,
3132 ia64_get_dyn_info_list,
3133 ia64_access_mem,
3134 ia64_access_rse_reg,
3135 ia64_access_rse_fpreg,
3136 /* resume */
3137 /* get_proc_name */
3138 };
3139
3140 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3141 ia64-libunwind-tdep code to use. */
3142 struct libunwind_descr ia64_libunwind_descr =
3143 {
3144 ia64_gdb2uw_regnum,
3145 ia64_uw2gdb_regnum,
3146 ia64_is_fpreg,
3147 &ia64_unw_accessors,
3148 &ia64_unw_rse_accessors,
3149 };
3150
3151 #endif /* HAVE_LIBUNWIND_IA64_H */
3152
3153 static int
3154 ia64_use_struct_convention (struct type *type)
3155 {
3156 struct type *float_elt_type;
3157
3158 /* Don't use the struct convention for anything but structure,
3159 union, or array types. */
3160 if (!(type->code () == TYPE_CODE_STRUCT
3161 || type->code () == TYPE_CODE_UNION
3162 || type->code () == TYPE_CODE_ARRAY))
3163 return 0;
3164
3165 /* HFAs are structures (or arrays) consisting entirely of floating
3166 point values of the same length. Up to 8 of these are returned
3167 in registers. Don't use the struct convention when this is the
3168 case. */
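  /* For example, a struct of three doubles (or a double[4]) is an HFA:
     the element count is at most 8, so it comes back in consecutive
     floating-point registers starting at f8 (see ia64_extract_return_value)
     instead of via the struct convention.  */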
3169 float_elt_type = is_float_or_hfa_type (type);
3170 if (float_elt_type != NULL
3171 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3172 return 0;
3173
3174 /* Other structs of length 32 or less are returned in r8-r11.
3175 Don't use the struct convention for those either. */
3176 return TYPE_LENGTH (type) > 32;
3177 }
3178
3179 /* Return non-zero if TYPE is a structure or union type. */
3180
3181 static int
3182 ia64_struct_type_p (const struct type *type)
3183 {
3184 return (type->code () == TYPE_CODE_STRUCT
3185 || type->code () == TYPE_CODE_UNION);
3186 }
3187
3188 static void
3189 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3190 gdb_byte *valbuf)
3191 {
3192 struct gdbarch *gdbarch = regcache->arch ();
3193 struct type *float_elt_type;
3194
3195 float_elt_type = is_float_or_hfa_type (type);
3196 if (float_elt_type != NULL)
3197 {
3198 gdb_byte from[IA64_FP_REGISTER_SIZE];
3199 int offset = 0;
3200 int regnum = IA64_FR8_REGNUM;
3201 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3202
3203 while (n-- > 0)
3204 {
3205 regcache->cooked_read (regnum, from);
3206 target_float_convert (from, ia64_ext_type (gdbarch),
3207 valbuf + offset, float_elt_type);
3208 offset += TYPE_LENGTH (float_elt_type);
3209 regnum++;
3210 }
3211 }
3212 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3213 {
3214 /* This is an integral value, and its size is less than 8 bytes.
3215 These values are LSB-aligned, so extract the relevant bytes,
3216 and copy them into VALBUF. */
3217 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3218 so I suppose we should also add handling here for integral values
3219 whose size is greater than 8. But I wasn't able to create such
3220 a type, neither in C nor in Ada, so not worrying about these yet. */
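      /* E.g. for a 2-byte integral type, the full 8-byte r8 value is read
	 and only its low-order 2 bytes are stored into VALBUF, in the
	 target's byte order.  */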
3221 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3222 ULONGEST val;
3223
3224 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3225 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3226 }
3227 else
3228 {
3229 ULONGEST val;
3230 int offset = 0;
3231 int regnum = IA64_GR8_REGNUM;
3232 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3233 int n = TYPE_LENGTH (type) / reglen;
3234 int m = TYPE_LENGTH (type) % reglen;
3235
3236 while (n-- > 0)
3237 {
3238 ULONGEST regval;
3239 regcache_cooked_read_unsigned (regcache, regnum, &regval);
3240 memcpy ((char *)valbuf + offset, &regval, reglen);
3241 offset += reglen;
3242 regnum++;
3243 }
3244
3245 if (m)
3246 {
3247 regcache_cooked_read_unsigned (regcache, regnum, &val);
3248 memcpy ((char *)valbuf + offset, &val, m);
3249 }
3250 }
3251 }
3252
3253 static void
3254 ia64_store_return_value (struct type *type, struct regcache *regcache,
3255 const gdb_byte *valbuf)
3256 {
3257 struct gdbarch *gdbarch = regcache->arch ();
3258 struct type *float_elt_type;
3259
3260 float_elt_type = is_float_or_hfa_type (type);
3261 if (float_elt_type != NULL)
3262 {
3263 gdb_byte to[IA64_FP_REGISTER_SIZE];
3264 int offset = 0;
3265 int regnum = IA64_FR8_REGNUM;
3266 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3267
3268 while (n-- > 0)
3269 {
3270 target_float_convert (valbuf + offset, float_elt_type,
3271 to, ia64_ext_type (gdbarch));
3272 regcache->cooked_write (regnum, to);
3273 offset += TYPE_LENGTH (float_elt_type);
3274 regnum++;
3275 }
3276 }
3277 else
3278 {
3279 int offset = 0;
3280 int regnum = IA64_GR8_REGNUM;
3281 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3282 int n = TYPE_LENGTH (type) / reglen;
3283 int m = TYPE_LENGTH (type) % reglen;
3284
3285 while (n-- > 0)
3286 {
3287 ULONGEST val;
3288 memcpy (&val, (char *)valbuf + offset, reglen);
3289 regcache_cooked_write_unsigned (regcache, regnum, val);
3290 offset += reglen;
3291 regnum++;
3292 }
3293
3294 if (m)
3295 {
3296 ULONGEST val;
3297 memcpy (&val, (char *)valbuf + offset, m);
3298 regcache_cooked_write_unsigned (regcache, regnum, val);
3299 }
3300 }
3301 }
3302
3303 static enum return_value_convention
3304 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3305 struct type *valtype, struct regcache *regcache,
3306 gdb_byte *readbuf, const gdb_byte *writebuf)
3307 {
3308 int struct_return = ia64_use_struct_convention (valtype);
3309
3310 if (writebuf != NULL)
3311 {
3312 gdb_assert (!struct_return);
3313 ia64_store_return_value (valtype, regcache, writebuf);
3314 }
3315
3316 if (readbuf != NULL)
3317 {
3318 gdb_assert (!struct_return);
3319 ia64_extract_return_value (valtype, regcache, readbuf);
3320 }
3321
3322 if (struct_return)
3323 return RETURN_VALUE_STRUCT_CONVENTION;
3324 else
3325 return RETURN_VALUE_REGISTER_CONVENTION;
3326 }
3327
3328 static int
3329 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3330 {
3331 switch (t->code ())
3332 {
3333 case TYPE_CODE_FLT:
3334 if (*etp)
3335 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3336 else
3337 {
3338 *etp = t;
3339 return 1;
3340 }
3341 break;
3342 case TYPE_CODE_ARRAY:
3343 return
3344 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3345 etp);
3346 break;
3347 case TYPE_CODE_STRUCT:
3348 {
3349 int i;
3350
3351 for (i = 0; i < t->num_fields (); i++)
3352 if (!is_float_or_hfa_type_recurse
3353 (check_typedef (t->field (i).type ()), etp))
3354 return 0;
3355 return 1;
3356 }
3357 break;
3358 default:
3359 break;
3360 }
3361
3362 return 0;
3363 }
3364
3365 /* Determine if the given type is one of the floating point types or
3366 an HFA (which is a struct, array, or combination thereof whose
3367 bottom-most elements are all of the same floating point type). */
3368
3369 static struct type *
3370 is_float_or_hfa_type (struct type *t)
3371 {
3372 struct type *et = 0;
3373
3374 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3375 }
3376
3377
3378 /* Return 1 if the alignment of T is such that the next even slot
3379 should be used. Return 0 if the next available slot should
3380 be used. (See section 8.5.1 of the IA-64 Software Conventions
3381 and Runtime manual). */
3382
3383 static int
3384 slot_alignment_is_next_even (struct type *t)
3385 {
3386 switch (t->code ())
3387 {
3388 case TYPE_CODE_INT:
3389 case TYPE_CODE_FLT:
3390 if (TYPE_LENGTH (t) > 8)
3391 return 1;
3392 else
3393 return 0;
3394 case TYPE_CODE_ARRAY:
3395 return
3396 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3397 case TYPE_CODE_STRUCT:
3398 {
3399 int i;
3400
3401 for (i = 0; i < t->num_fields (); i++)
3402 if (slot_alignment_is_next_even
3403 (check_typedef (t->field (i).type ())))
3404 return 1;
3405 return 0;
3406 }
3407 default:
3408 return 0;
3409 }
3410 }
3411
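/* Illustration only (hypothetical prototype): for

     void f (int a, long double b);

   A occupies slot 0, and because `long double' is a 16-byte
   TYPE_CODE_FLT (length > 8) it must start on an even slot, B is
   placed in slots 2-3 and slot 1 is left unused.  The same skip
   applies to arrays and structs containing such a member.  */
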
3412 /* Attempt to find (and return) the global pointer for the given
3413 function.
3414
3415    This rather nasty bit of code searches for the .dynamic section
3416    in the objfile corresponding to the pc of the function we're trying
3417    to call.  Once it finds the address at which the .dynamic section
3418 lives in the child process, it scans the Elf64_Dyn entries for a
3419 DT_PLTGOT tag. If it finds one of these, the corresponding
3420 d_un.d_ptr value is the global pointer. */
3421
3422 static CORE_ADDR
3423 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3424 CORE_ADDR faddr)
3425 {
3426 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3427 struct obj_section *faddr_sect;
3428
3429 faddr_sect = find_pc_section (faddr);
3430 if (faddr_sect != NULL)
3431 {
3432 struct obj_section *osect;
3433
3434 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3435 {
3436 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3437 break;
3438 }
3439
3440 if (osect < faddr_sect->objfile->sections_end)
3441 {
3442 CORE_ADDR addr = osect->addr ();
3443 CORE_ADDR endaddr = osect->endaddr ();
3444
3445 while (addr < endaddr)
3446 {
3447 int status;
3448 LONGEST tag;
3449 gdb_byte buf[8];
3450
3451 status = target_read_memory (addr, buf, sizeof (buf));
3452 if (status != 0)
3453 break;
3454 tag = extract_signed_integer (buf, byte_order);
3455
3456 if (tag == DT_PLTGOT)
3457 {
3458 CORE_ADDR global_pointer;
3459
3460 status = target_read_memory (addr + 8, buf, sizeof (buf));
3461 if (status != 0)
3462 break;
3463 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3464 byte_order);
3465
3466 /* The payoff... */
3467 return global_pointer;
3468 }
3469
3470 if (tag == DT_NULL)
3471 break;
3472
3473 addr += 16;
3474 }
3475 }
3476 }
3477 return 0;
3478 }
3479
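/* Illustrative sketch only (not part of GDB): the loop above walks
   16-byte Elf64_Dyn entries -- an 8-byte d_tag followed by an 8-byte
   value -- until it hits the DT_NULL terminator.  Over a host-side
   copy of the .dynamic section (simplified layout, host byte order
   assumed) the same scan would read:

     struct dyn_entry { uint64_t d_tag; uint64_t d_un; };

     static uint64_t
     scan_for_pltgot (const struct dyn_entry *dyn)
     {
       for (; dyn->d_tag != DT_NULL; dyn++)
	 if (dyn->d_tag == DT_PLTGOT)
	   return dyn->d_un;
       return 0;
     }  */
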
3480 /* Attempt to find (and return) the global pointer for the given
3481 function. We first try the find_global_pointer_from_solib routine
3482    from the gdbarch tdep vector, if provided.  If that does not
3483    work, we fall back to ia64_find_global_pointer_from_dynamic_section.  */
3484
3485 static CORE_ADDR
3486 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3487 {
3488 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
3489 CORE_ADDR addr = 0;
3490
3491 if (tdep->find_global_pointer_from_solib)
3492 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3493 if (addr == 0)
3494 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3495 return addr;
3496 }
3497
3498 /* Given a function's address, attempt to find (and return) the
3499 corresponding (canonical) function descriptor. Return 0 if
3500 not found. */
3501 static CORE_ADDR
3502 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3503 {
3504 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3505 struct obj_section *faddr_sect;
3506
3507 /* Return early if faddr is already a function descriptor. */
3508 faddr_sect = find_pc_section (faddr);
3509 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3510 return faddr;
3511
3512 if (faddr_sect != NULL)
3513 {
3514 struct obj_section *osect;
3515 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3516 {
3517 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3518 break;
3519 }
3520
3521 if (osect < faddr_sect->objfile->sections_end)
3522 {
3523 CORE_ADDR addr = osect->addr ();
3524 CORE_ADDR endaddr = osect->endaddr ();
3525
3526 while (addr < endaddr)
3527 {
3528 int status;
3529 LONGEST faddr2;
3530 gdb_byte buf[8];
3531
3532 status = target_read_memory (addr, buf, sizeof (buf));
3533 if (status != 0)
3534 break;
3535 faddr2 = extract_signed_integer (buf, byte_order);
3536
3537 if (faddr == faddr2)
3538 return addr;
3539
3540 addr += 16;
3541 }
3542 }
3543 }
3544 return 0;
3545 }
3546
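/* For reference: an ia64 function descriptor in .opd is a 16-byte
   pair -- the function's entry point in the first doubleword and its
   global pointer (gp) in the second.  The scan above therefore
   compares FADDR against the first doubleword of each descriptor and,
   on a match, returns the descriptor's own address.  */
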
3547 /* Attempt to find a function descriptor corresponding to the
3548 given address. If none is found, construct one on the
3549 stack using the address at fdaptr. */
3550
3551 static CORE_ADDR
3552 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3553 {
3554 struct gdbarch *gdbarch = regcache->arch ();
3555 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3556 CORE_ADDR fdesc;
3557
3558 fdesc = find_extant_func_descr (gdbarch, faddr);
3559
3560 if (fdesc == 0)
3561 {
3562 ULONGEST global_pointer;
3563 gdb_byte buf[16];
3564
3565 fdesc = *fdaptr;
3566 *fdaptr += 16;
3567
3568 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3569
3570 if (global_pointer == 0)
3571 regcache_cooked_read_unsigned (regcache,
3572 IA64_GR1_REGNUM, &global_pointer);
3573
3574 store_unsigned_integer (buf, 8, byte_order, faddr);
3575 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3576
3577 write_memory (fdesc, buf, 16);
3578 }
3579
3580 return fdesc;
3581 }
3582
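/* Note on the synthesized case above: the 16 bytes written at the
   stack address taken from *FDAPTR hold the entry point FADDR in the
   first doubleword and the global pointer (or, failing that, the
   current gp from r1) in the second, mirroring a real .opd entry.  */
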
3583 /* Use the following routine when printing out function pointers
3584 so the user can see the function address rather than just the
3585 function descriptor. */
3586 static CORE_ADDR
3587 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3588 struct target_ops *targ)
3589 {
3590 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3591 struct obj_section *s;
3592 gdb_byte buf[8];
3593
3594 s = find_pc_section (addr);
3595
3596   /* Check if ADDR points to a function descriptor.  */
3597 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3598 return read_memory_unsigned_integer (addr, 8, byte_order);
3599
3600 /* Normally, functions live inside a section that is executable.
3601 So, if ADDR points to a non-executable section, then treat it
3602 as a function descriptor and return the target address iff
3603 the target address itself points to a section that is executable.
3604      First check that all 8 bytes at ADDR are readable.  */
3605 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3606 && target_read_memory (addr, buf, 8) == 0)
3607 {
3608 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3609 struct obj_section *pc_section = find_pc_section (pc);
3610
3611 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3612 return pc;
3613 }
3614
3615 /* There are also descriptors embedded in vtables. */
3616 if (s)
3617 {
3618 struct bound_minimal_symbol minsym;
3619
3620 minsym = lookup_minimal_symbol_by_pc (addr);
3621
3622 if (minsym.minsym
3623 && is_vtable_name (minsym.minsym->linkage_name ()))
3624 return read_memory_unsigned_integer (addr, 8, byte_order);
3625 }
3626
3627 return addr;
3628 }
3629
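/* Illustrative example for ia64_convert_from_func_ptr_addr
   (hypothetical addresses): if ADDR is 0x6000000000001230 and lies in
   .opd, the descriptor stored there might be { entry =
   0x4000000000000640, gp = 0x6000000000000de8 }, in which case the
   routine returns 0x4000000000000640 -- the address the user expects
   to see when printing the function pointer.  */
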
3630 static CORE_ADDR
3631 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3632 {
3633 return sp & ~0xfLL;
3634 }
3635
3636 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3637
3638 static void
3639 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3640 {
3641 ULONGEST cfm, pfs, new_bsp;
3642
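  /* Bit layout used below (architected ia64 fields): CFM.sof lives in
     bits 0:6 and the whole frame marker fits in the low 48 bits kept
     by the 0xffffffffffff mask, while the 0xc000000000000000 mask
     preserves PFS.ppl (bits 62:63) when the caller's CFM is copied
     into PFS.pfm.  */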
3643 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3644
3645 new_bsp = rse_address_add (bsp, sof);
3646 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3647
3648 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3649 pfs &= 0xc000000000000000LL;
3650 pfs |= (cfm & 0xffffffffffffLL);
3651 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3652
3653 cfm &= 0xc000000000000000LL;
3654 cfm |= sof;
3655 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3656 }
3657
3658 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3659 ia64. */
3660
3661 static void
3662 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3663 int slotnum, gdb_byte *buf)
3664 {
3665 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3666 }
3667
3668 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3669
3670 static void
3671 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3672 {
3673 /* Nothing needed. */
3674 }
3675
3676 static CORE_ADDR
3677 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3678 struct regcache *regcache, CORE_ADDR bp_addr,
3679 int nargs, struct value **args, CORE_ADDR sp,
3680 function_call_return_method return_method,
3681 CORE_ADDR struct_addr)
3682 {
3683 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
3684 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3685 int argno;
3686 struct value *arg;
3687 struct type *type;
3688 int len, argoffset;
3689 int nslots, rseslots, memslots, slotnum, nfuncargs;
3690 int floatreg;
3691 ULONGEST bsp;
3692 CORE_ADDR funcdescaddr, global_pointer;
3693 CORE_ADDR func_addr = find_function_addr (function, NULL);
3694
3695 nslots = 0;
3696 nfuncargs = 0;
3697 /* Count the number of slots needed for the arguments. */
3698 for (argno = 0; argno < nargs; argno++)
3699 {
3700 arg = args[argno];
3701 type = check_typedef (value_type (arg));
3702 len = TYPE_LENGTH (type);
3703
3704 if ((nslots & 1) && slot_alignment_is_next_even (type))
3705 nslots++;
3706
3707 if (type->code () == TYPE_CODE_FUNC)
3708 nfuncargs++;
3709
3710 nslots += (len + 7) / 8;
3711 }
3712
3713 /* Divvy up the slots between the RSE and the memory stack. */
3714 rseslots = (nslots > 8) ? 8 : nslots;
3715 memslots = nslots - rseslots;
3716
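  /* For example (hypothetical count): a call needing 11 argument
     slots puts slots 0-7 in the new RSE frame (out0-out7) and the
     remaining three on the memory stack at sp + 16, sp + 24 and
     sp + 32.  */
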
3717 /* Allocate a new RSE frame. */
3718 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3719 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3720
3721   /* We will attempt to find function descriptors in the .opd section,
3722 but if we can't we'll construct them ourselves. That being the
3723 case, we'll need to reserve space on the stack for them. */
3724 funcdescaddr = sp - nfuncargs * 16;
3725 funcdescaddr &= ~0xfLL;
3726
3727   /* Adjust the stack pointer to its new value.  The calling conventions
3728 require us to have 16 bytes of scratch, plus whatever space is
3729 necessary for the memory slots and our function descriptors. */
3730 sp = sp - 16 - (memslots + nfuncargs) * 8;
3731 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3732
3733   /* Place the arguments where they belong.  Each argument is placed
3734      either in the RSE backing store or on the memory stack.
3735 In addition, floating point arguments or HFAs are placed in
3736 floating point registers. */
3737 slotnum = 0;
3738 floatreg = IA64_FR8_REGNUM;
3739 for (argno = 0; argno < nargs; argno++)
3740 {
3741 struct type *float_elt_type;
3742
3743 arg = args[argno];
3744 type = check_typedef (value_type (arg));
3745 len = TYPE_LENGTH (type);
3746
3747 /* Special handling for function parameters. */
3748 if (len == 8
3749 && type->code () == TYPE_CODE_PTR
3750 && TYPE_TARGET_TYPE (type)->code () == TYPE_CODE_FUNC)
3751 {
3752 gdb_byte val_buf[8];
3753 ULONGEST faddr = extract_unsigned_integer
3754 (value_contents (arg).data (), 8, byte_order);
3755 store_unsigned_integer (val_buf, 8, byte_order,
3756 find_func_descr (regcache, faddr,
3757 &funcdescaddr));
3758 if (slotnum < rseslots)
3759 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3760 slotnum, val_buf);
3761 else
3762 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3763 slotnum++;
3764 continue;
3765 }
3766
3767 /* Normal slots. */
3768
3769 /* Skip odd slot if necessary... */
3770 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3771 slotnum++;
3772
3773 argoffset = 0;
3774 while (len > 0)
3775 {
3776 gdb_byte val_buf[8];
3777
3778 memset (val_buf, 0, 8);
3779 if (!ia64_struct_type_p (type) && len < 8)
3780 {
3781 /* Integral types are LSB-aligned, so we have to be careful
3782 to insert the argument on the correct side of the buffer.
3783 This is why we use store_unsigned_integer. */
3784 store_unsigned_integer
3785 (val_buf, 8, byte_order,
3786 extract_unsigned_integer (value_contents (arg).data (), len,
3787 byte_order));
3788 }
3789 else
3790 {
3791 	      /* This is either an 8-byte integral type, or an aggregate.
3792 	         For an 8-byte integral type, there is no problem; we just
3793 	         copy the value over.
3794 
3795 	         For aggregates, the only potentially tricky part is
3796 	         writing the last block if it is less than 8 bytes.
3797 	         In this case, the data is Byte0-aligned.  Happily, this
3798 	         means that we don't need to differentiate the handling
3799 	         of 8-byte blocks and less-than-8-byte blocks.  */
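	      /* For example (hypothetical), a 12-byte struct occupies two
	         slots: the first slot gets bytes 0-7 and the second gets
	         bytes 8-11 in its low-addressed bytes, the rest having
	         been zeroed by the memset above.  */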
3800 memcpy (val_buf, value_contents (arg).data () + argoffset,
3801 (len > 8) ? 8 : len);
3802 }
3803
3804 if (slotnum < rseslots)
3805 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3806 slotnum, val_buf);
3807 else
3808 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3809
3810 argoffset += 8;
3811 len -= 8;
3812 slotnum++;
3813 }
3814
3815 /* Handle floating point types (including HFAs). */
3816 float_elt_type = is_float_or_hfa_type (type);
3817 if (float_elt_type != NULL)
3818 {
3819 argoffset = 0;
3820 len = TYPE_LENGTH (type);
3821 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3822 {
3823 gdb_byte to[IA64_FP_REGISTER_SIZE];
3824 target_float_convert (value_contents (arg).data () + argoffset,
3825 float_elt_type, to,
3826 ia64_ext_type (gdbarch));
3827 regcache->cooked_write (floatreg, to);
3828 floatreg++;
3829 argoffset += TYPE_LENGTH (float_elt_type);
3830 len -= TYPE_LENGTH (float_elt_type);
3831 }
3832 }
3833 }
3834
3835 /* Store the struct return value in r8 if necessary. */
3836 if (return_method == return_method_struct)
3837 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3838 (ULONGEST) struct_addr);
3839
3840 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3841
3842 if (global_pointer != 0)
3843 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3844
3845 /* The following is not necessary on HP-UX, because we're using
3846 a dummy code sequence pushed on the stack to make the call, and
3847 this sequence doesn't need b0 to be set in order for our dummy
3848 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3849      it's needed for other OSes, so we do this unconditionally.  */
3850 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3851
3852 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3853
3854 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3855
3856 return sp;
3857 }
3858
3859 static const struct ia64_infcall_ops ia64_infcall_ops =
3860 {
3861 ia64_allocate_new_rse_frame,
3862 ia64_store_argument_in_slot,
3863 ia64_set_function_addr
3864 };
3865
3866 static struct frame_id
3867 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3868 {
3869 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3870 gdb_byte buf[8];
3871 CORE_ADDR sp, bsp;
3872
3873 get_frame_register (this_frame, sp_regnum, buf);
3874 sp = extract_unsigned_integer (buf, 8, byte_order);
3875
3876 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3877 bsp = extract_unsigned_integer (buf, 8, byte_order);
3878
3879 if (gdbarch_debug >= 1)
3880 gdb_printf (gdb_stdlog,
3881 "dummy frame id: code %s, stack %s, special %s\n",
3882 paddress (gdbarch, get_frame_pc (this_frame)),
3883 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3884
3885 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3886 }
3887
3888 static CORE_ADDR
3889 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3890 {
3891 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3892 gdb_byte buf[8];
3893 CORE_ADDR ip, psr, pc;
3894
3895 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3896 ip = extract_unsigned_integer (buf, 8, byte_order);
3897 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3898 psr = extract_unsigned_integer (buf, 8, byte_order);
3899
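  /* PSR.ri (bits 41:42) holds the slot number of the instruction to
     resume; fold it into the low bits of the bundle address to form
     the slot-encoded PC that GDB uses.  */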
3900 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3901 return pc;
3902 }
3903
3904 static int
3905 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3906 {
3907 info->bytes_per_line = SLOT_MULTIPLIER;
3908 return default_print_insn (memaddr, info);
3909 }
3910
3911 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3912
3913 static int
3914 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3915 {
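  /* The size-of-frame (sof) field occupies the low 7 bits of CFM.  */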
3916 return (cfm & 0x7f);
3917 }
3918
3919 static struct gdbarch *
3920 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3921 {
3922 struct gdbarch *gdbarch;
3923
3924 /* If there is already a candidate, use it. */
3925 arches = gdbarch_list_lookup_by_info (arches, &info);
3926 if (arches != NULL)
3927 return arches->gdbarch;
3928
3929 ia64_gdbarch_tdep *tdep = new ia64_gdbarch_tdep;
3930 gdbarch = gdbarch_alloc (&info, tdep);
3931
3932 tdep->size_of_register_frame = ia64_size_of_register_frame;
3933
3934 /* According to the ia64 specs, instructions that store long double
3935      floats in memory use a long-double format different from the one
3936      used in the floating-point registers.  The memory format matches
3937      the x86 extended float format, which is 80 bits.  An OS may choose
3938      to use this format (e.g. GNU/Linux) or choose to use a different
3939      format for storing long doubles (e.g. HP-UX).  In the latter case,
3940 the setting of the format may be moved/overridden in an
3941 OS-specific tdep file. */
3942 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3943
3944 set_gdbarch_short_bit (gdbarch, 16);
3945 set_gdbarch_int_bit (gdbarch, 32);
3946 set_gdbarch_long_bit (gdbarch, 64);
3947 set_gdbarch_long_long_bit (gdbarch, 64);
3948 set_gdbarch_float_bit (gdbarch, 32);
3949 set_gdbarch_double_bit (gdbarch, 64);
3950 set_gdbarch_long_double_bit (gdbarch, 128);
3951 set_gdbarch_ptr_bit (gdbarch, 64);
3952
3953 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3954 set_gdbarch_num_pseudo_regs (gdbarch,
3955 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3956 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3957 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3958
3959 set_gdbarch_register_name (gdbarch, ia64_register_name);
3960 set_gdbarch_register_type (gdbarch, ia64_register_type);
3961
3962 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3963 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3964 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3965 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3966 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3967 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3968 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3969
3970 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3971
3972 set_gdbarch_return_value (gdbarch, ia64_return_value);
3973
3974 set_gdbarch_memory_insert_breakpoint (gdbarch,
3975 ia64_memory_insert_breakpoint);
3976 set_gdbarch_memory_remove_breakpoint (gdbarch,
3977 ia64_memory_remove_breakpoint);
3978 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3979 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3980 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3981 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3982
3983 /* Settings for calling functions in the inferior. */
3984 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3985 tdep->infcall_ops = ia64_infcall_ops;
3986 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3987 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3988
3989 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
3990 #ifdef HAVE_LIBUNWIND_IA64_H
3991 frame_unwind_append_unwinder (gdbarch,
3992 &ia64_libunwind_sigtramp_frame_unwind);
3993 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
3994 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3995 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
3996 #else
3997 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3998 #endif
3999 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4000 frame_base_set_default (gdbarch, &ia64_frame_base);
4001
4002 /* Settings that should be unnecessary. */
4003 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4004
4005 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4006 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4007 ia64_convert_from_func_ptr_addr);
4008
4009 /* The virtual table contains 16-byte descriptors, not pointers to
4010 descriptors. */
4011 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4012
4013 /* Hook in ABI-specific overrides, if they have been registered. */
4014 gdbarch_init_osabi (info, gdbarch);
4015
4016 return gdbarch;
4017 }
4018
4019 void _initialize_ia64_tdep ();
4020 void
4021 _initialize_ia64_tdep ()
4022 {
4023 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4024 }