1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2012 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "doublest.h"
32 #include "value.h"
33 #include "gdb_assert.h"
34 #include "objfiles.h"
35 #include "elf/common.h" /* for DT_PLTGOT value */
36 #include "elf-bfd.h"
37 #include "dis-asm.h"
38 #include "infcall.h"
39 #include "osabi.h"
40 #include "ia64-tdep.h"
41 #include "cp-abi.h"
42
43 #ifdef HAVE_LIBUNWIND_IA64_H
44 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
45 #include "libunwind-frame.h"
46 #include "libunwind-ia64.h"
47
48 /* Note: KERNEL_START is supposed to be an address which is not going
49 to ever contain any valid unwind info. For ia64 linux, the choice
50 of 0xc000000000000000 is fairly safe since that's uncached space.
51
52 We use KERNEL_START as follows: after obtaining the kernel's
53 unwind table via getunwind(), we project its unwind data into
54 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
55 when ia64_access_mem() sees a memory access to this
56 address-range, we redirect it to ktab instead.
57
 58    None of this hackery is needed with a modern kernel/libc
59 which uses the kernel virtual DSO to provide access to the
60 kernel's unwind info. In that case, ktab_size remains 0 and
61 hence the value of KERNEL_START doesn't matter. */
62
63 #define KERNEL_START 0xc000000000000000ULL
64
65 static size_t ktab_size = 0;
66 struct ia64_table_entry
67 {
68 uint64_t start_offset;
69 uint64_t end_offset;
70 uint64_t info_offset;
71 };
72
73 static struct ia64_table_entry *ktab = NULL;
74
75 #endif
76
77 /* An enumeration of the different IA-64 instruction types. */
78
79 typedef enum instruction_type
80 {
81 A, /* Integer ALU ; I-unit or M-unit */
82 I, /* Non-ALU integer; I-unit */
83 M, /* Memory ; M-unit */
84 F, /* Floating-point ; F-unit */
85 B, /* Branch ; B-unit */
86 L, /* Extended (L+X) ; I-unit */
87 X, /* Extended (L+X) ; I-unit */
88 undefined /* undefined or reserved */
89 } instruction_type;
90
91 /* We represent IA-64 PC addresses as the value of the instruction
92 pointer or'd with some bit combination in the low nibble which
93 represents the slot number in the bundle addressed by the
94 instruction pointer. The problem is that the Linux kernel
95 multiplies its slot numbers (for exceptions) by one while the
96 disassembler multiplies its slot numbers by 6. In addition, I've
97 heard it said that the simulator uses 1 as the multiplier.
98
99 I've fixed the disassembler so that the bytes_per_line field will
100 be the slot multiplier. If bytes_per_line comes in as zero, it
101 is set to six (which is how it was set up initially). -- objdump
102 displays pretty disassembly dumps with this value. For our purposes,
103 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
104 never want to also display the raw bytes the way objdump does. */
105
106 #define SLOT_MULTIPLIER 1
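/* For illustration: with SLOT_MULTIPLIER of 1, a GDB "pc" value such as
   0x40000000000012f2 denotes slot 2 of the 16-byte bundle at
   0x40000000000012f0; the decomposition used throughout this file is

     bundle address = pc & ~0x0f
     slot number    = (pc & 0x0f) / SLOT_MULTIPLIER

   and ia64_read_pc composes such a value the other way round, as
   ip | (slot * SLOT_MULTIPLIER), with the slot taken from PSR bits
   41..42 (psr.ri).  */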
107
108 /* Length in bytes of an instruction bundle. */
109
110 #define BUNDLE_LEN 16
111
112 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
113
114 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
115 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
116 #endif
117
118 static gdbarch_init_ftype ia64_gdbarch_init;
119
120 static gdbarch_register_name_ftype ia64_register_name;
121 static gdbarch_register_type_ftype ia64_register_type;
122 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
123 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
124 static struct type *is_float_or_hfa_type (struct type *t);
125 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
126 CORE_ADDR faddr);
127
128 #define NUM_IA64_RAW_REGS 462
129
130 static int sp_regnum = IA64_GR12_REGNUM;
131 static int fp_regnum = IA64_VFP_REGNUM;
132 static int lr_regnum = IA64_VRAP_REGNUM;
133
134 /* NOTE: we treat the register stack registers r32-r127 as
135 pseudo-registers because they may not be accessible via the ptrace
136 register get/set interfaces. */
137
138 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
139 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
140 V127_REGNUM = V32_REGNUM + 95,
141 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
142 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
143
144 /* Array of register names; there should be ia64_num_regs strings in
145 the initializer. */
146
147 static char *ia64_register_names[] =
148 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
149 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
150 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
151 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164
165 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
166 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
167 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
168 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
169 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
170 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
171 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
172 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
173 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
174 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
175 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
176 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
177 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
178 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
179 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
180 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
181
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190
191 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
192
193 "vfp", "vrap",
194
195 "pr", "ip", "psr", "cfm",
196
197 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
198 "", "", "", "", "", "", "", "",
199 "rsc", "bsp", "bspstore", "rnat",
200 "", "fcr", "", "",
201 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
202 "ccv", "", "", "", "unat", "", "", "",
203 "fpsr", "", "", "", "itc",
204 "", "", "", "", "", "", "", "", "", "",
205 "", "", "", "", "", "", "", "", "",
206 "pfs", "lc", "ec",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "",
214 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
215 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
216 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
217 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
218 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
219 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
220 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
221 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
222 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
223 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
224 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
225 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
226 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
227 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
228 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
229 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
230
231 "bof",
232
233 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
234 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
235 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
236 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
237 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
238 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
239 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
240 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
241 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
242 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
243 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
244 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
245
246 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
247 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
248 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
249 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
250 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
251 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
252 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
253 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
254 };
255
256 struct ia64_frame_cache
257 {
258 CORE_ADDR base; /* frame pointer base for frame */
259 CORE_ADDR pc; /* function start pc for frame */
260 CORE_ADDR saved_sp; /* stack pointer for frame */
261 CORE_ADDR bsp; /* points at r32 for the current frame */
262 CORE_ADDR cfm; /* cfm value for current frame */
263 CORE_ADDR prev_cfm; /* cfm value for previous frame */
264 int frameless;
265 int sof; /* Size of frame (decoded from cfm value). */
266 int sol; /* Size of locals (decoded from cfm value). */
267 int sor; /* Number of rotating registers (decoded from
268 cfm value). */
269 CORE_ADDR after_prologue;
270 /* Address of first instruction after the last
271 prologue instruction; Note that there may
272 be instructions from the function's body
273 intermingled with the prologue. */
274 int mem_stack_frame_size;
275 /* Size of the memory stack frame (may be zero),
276 or -1 if it has not been determined yet. */
277   int fp_reg;		/* Register number (if any) used as a frame pointer
278 for this frame. 0 if no register is being used
279 as the frame pointer. */
280
281 /* Saved registers. */
282 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
283
284 };
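/* An informal note on how the cfm fields above are decoded elsewhere in
   this file: sof is (cfm & 0x7f), sol is ((cfm >> 7) & 0x7f), and the
   predicate rename base rrb.pr is ((cfm >> 32) & 0x3f); see for instance
   ia64_pseudo_register_read and examine_prologue below.  */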
285
286 static int
287 floatformat_valid (const struct floatformat *fmt, const void *from)
288 {
289 return 1;
290 }
291
292 static const struct floatformat floatformat_ia64_ext_little =
293 {
294 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
295 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
296 };
297
298 static const struct floatformat floatformat_ia64_ext_big =
299 {
300 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
301 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
302 };
303
304 static const struct floatformat *floatformats_ia64_ext[2] =
305 {
306 &floatformat_ia64_ext_big,
307 &floatformat_ia64_ext_little
308 };
309
310 static struct type *
311 ia64_ext_type (struct gdbarch *gdbarch)
312 {
313 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
314
315 if (!tdep->ia64_ext_type)
316 tdep->ia64_ext_type
317 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
318 floatformats_ia64_ext);
319
320 return tdep->ia64_ext_type;
321 }
322
323 static int
324 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
325 struct reggroup *group)
326 {
327 int vector_p;
328 int float_p;
329 int raw_p;
330 if (group == all_reggroup)
331 return 1;
332 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
333 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
334 raw_p = regnum < NUM_IA64_RAW_REGS;
335 if (group == float_reggroup)
336 return float_p;
337 if (group == vector_reggroup)
338 return vector_p;
339 if (group == general_reggroup)
340 return (!vector_p && !float_p);
341 if (group == save_reggroup || group == restore_reggroup)
342 return raw_p;
343 return 0;
344 }
345
346 static const char *
347 ia64_register_name (struct gdbarch *gdbarch, int reg)
348 {
349 return ia64_register_names[reg];
350 }
351
352 struct type *
353 ia64_register_type (struct gdbarch *arch, int reg)
354 {
355 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
356 return ia64_ext_type (arch);
357 else
358 return builtin_type (arch)->builtin_long;
359 }
360
361 static int
362 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
363 {
364 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
365 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
366 return reg;
367 }
368
369
370 /* Extract ``len'' bits from an instruction bundle starting at
371 bit ``from''. */
372
373 static long long
374 extract_bit_field (const char *bundle, int from, int len)
375 {
376 long long result = 0LL;
377 int to = from + len;
378 int from_byte = from / 8;
379 int to_byte = to / 8;
380 unsigned char *b = (unsigned char *) bundle;
381 unsigned char c;
382 int lshift;
383 int i;
384
385 c = b[from_byte];
386 if (from_byte == to_byte)
387 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
388 result = c >> (from % 8);
389 lshift = 8 - (from % 8);
390
391 for (i = from_byte+1; i < to_byte; i++)
392 {
393 result |= ((long long) b[i]) << lshift;
394 lshift += 8;
395 }
396
397 if (from_byte < to_byte && (to % 8 != 0))
398 {
399 c = b[to_byte];
400 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
401 result |= ((long long) c) << lshift;
402 }
403
404 return result;
405 }
406
407 /* Replace the specified bits in an instruction bundle. */
408
409 static void
410 replace_bit_field (char *bundle, long long val, int from, int len)
411 {
412 int to = from + len;
413 int from_byte = from / 8;
414 int to_byte = to / 8;
415 unsigned char *b = (unsigned char *) bundle;
416 unsigned char c;
417
418 if (from_byte == to_byte)
419 {
420 unsigned char left, right;
421 c = b[from_byte];
422 left = (c >> (to % 8)) << (to % 8);
423 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
424 c = (unsigned char) (val & 0xff);
425 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
426 c |= right | left;
427 b[from_byte] = c;
428 }
429 else
430 {
431 int i;
432 c = b[from_byte];
433 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
434 c = c | (val << (from % 8));
435 b[from_byte] = c;
436 val >>= 8 - from % 8;
437
438 for (i = from_byte+1; i < to_byte; i++)
439 {
440 c = val & 0xff;
441 val >>= 8;
442 b[i] = c;
443 }
444
445 if (to % 8 != 0)
446 {
447 unsigned char cv = (unsigned char) val;
448 c = b[to_byte];
449 c = c >> (to % 8) << (to % 8);
450 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
451 b[to_byte] = c;
452 }
453 }
454 }
455
456 /* Return the contents of slot N (for N = 0, 1, or 2) in
457    an instruction bundle.  */
458
459 static long long
460 slotN_contents (char *bundle, int slotnum)
461 {
462 return extract_bit_field (bundle, 5+41*slotnum, 41);
463 }
464
465 /* Store an instruction in an instruction bundle. */
466
467 static void
468 replace_slotN_contents (char *bundle, long long instr, int slotnum)
469 {
470 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
471 }
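/* A small usage sketch for the helpers above (illustrative only): the
   5-bit template occupies the low bits of the bundle and slot N starts
   at bit 5 + 41 * N, so for instance

     template = extract_bit_field (bundle, 0, 5);
     slot1    = slotN_contents (bundle, 1);
                which is the same as extract_bit_field (bundle, 5 + 41, 41)

   This is exactly how fetch_instruction below decodes a bundle.  */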
472
473 static const enum instruction_type template_encoding_table[32][3] =
474 {
475 { M, I, I }, /* 00 */
476 { M, I, I }, /* 01 */
477 { M, I, I }, /* 02 */
478 { M, I, I }, /* 03 */
479 { M, L, X }, /* 04 */
480 { M, L, X }, /* 05 */
481 { undefined, undefined, undefined }, /* 06 */
482 { undefined, undefined, undefined }, /* 07 */
483 { M, M, I }, /* 08 */
484 { M, M, I }, /* 09 */
485 { M, M, I }, /* 0A */
486 { M, M, I }, /* 0B */
487 { M, F, I }, /* 0C */
488 { M, F, I }, /* 0D */
489 { M, M, F }, /* 0E */
490 { M, M, F }, /* 0F */
491 { M, I, B }, /* 10 */
492 { M, I, B }, /* 11 */
493 { M, B, B }, /* 12 */
494 { M, B, B }, /* 13 */
495 { undefined, undefined, undefined }, /* 14 */
496 { undefined, undefined, undefined }, /* 15 */
497 { B, B, B }, /* 16 */
498 { B, B, B }, /* 17 */
499 { M, M, B }, /* 18 */
500 { M, M, B }, /* 19 */
501 { undefined, undefined, undefined }, /* 1A */
502 { undefined, undefined, undefined }, /* 1B */
503 { M, F, B }, /* 1C */
504 { M, F, B }, /* 1D */
505 { undefined, undefined, undefined }, /* 1E */
506 { undefined, undefined, undefined }, /* 1F */
507 };
508
509 /* Fetch and (partially) decode an instruction at ADDR and return the
510 address of the next instruction to fetch. */
511
512 static CORE_ADDR
513 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
514 {
515 char bundle[BUNDLE_LEN];
516 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
517 long long template;
518 int val;
519
520 /* Warn about slot numbers greater than 2. We used to generate
521 an error here on the assumption that the user entered an invalid
522 address. But, sometimes GDB itself requests an invalid address.
523 This can (easily) happen when execution stops in a function for
524 which there are no symbols. The prologue scanner will attempt to
525 find the beginning of the function - if the nearest symbol
526 happens to not be aligned on a bundle boundary (16 bytes), the
527 resulting starting address will cause GDB to think that the slot
528 number is too large.
529
530 So we warn about it and set the slot number to zero. It is
531 not necessarily a fatal condition, particularly if debugging
532 at the assembly language level. */
533 if (slotnum > 2)
534 {
535 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
536 "Using slot 0 instead"));
537 slotnum = 0;
538 }
539
540 addr &= ~0x0f;
541
542 val = target_read_memory (addr, bundle, BUNDLE_LEN);
543
544 if (val != 0)
545 return 0;
546
547 *instr = slotN_contents (bundle, slotnum);
548 template = extract_bit_field (bundle, 0, 5);
549 *it = template_encoding_table[(int)template][slotnum];
550
551 if (slotnum == 2 || (slotnum == 1 && *it == L))
552 addr += 16;
553 else
554 addr += (slotnum + 1) * SLOT_MULTIPLIER;
555
556 return addr;
557 }
558
559 /* There are 5 different break instructions (break.i, break.b,
560 break.m, break.f, and break.x), but they all have the same
561 encoding. (The five bit template in the low five bits of the
562 instruction bundle distinguishes one from another.)
563
564 The runtime architecture manual specifies that break instructions
565 used for debugging purposes must have the upper two bits of the 21
566 bit immediate set to a 0 and a 1 respectively. A breakpoint
567 instruction encodes the most significant bit of its 21 bit
568 immediate at bit 36 of the 41 bit instruction. The penultimate msb
569 is at bit 25 which leads to the pattern below.
570
571 Originally, I had this set up to do, e.g, a "break.i 0x80000" But
572 it turns out that 0x80000 was used as the syscall break in the early
573 simulators. So I changed the pattern slightly to do "break.i 0x080001"
574 instead. But that didn't work either (I later found out that this
575 pattern was used by the simulator that I was using.) So I ended up
576 using the pattern seen below.
577
578 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
579    while we need bit-based addressing as the instruction length is 41 bits and
580 we must not modify/corrupt the adjacent slots in the same bundle.
581    Fortunately we may store a larger memory region, including the adjacent bits,
582    with the original memory content (not the breakpoints possibly already stored there).
583 We need to be careful in ia64_memory_remove_breakpoint to always restore
584 only the specific bits of this instruction ignoring any adjacent stored
585 bits.
586
587 We use the original addressing with the low nibble in the range <0..2> which
588 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
589    as the direct byte offset of SHADOW_CONTENTS.  We store the whole BUNDLE_LEN
590    bytes, just without these (up to two) possibly skipped bytes, so as not to
591    spill into the next bundle.
592
593    If we wanted to store the whole bundle to SHADOW_CONTENTS we would have
594    to store the base address (`address & ~0x0f') into PLACED_ADDRESS instead.
595    In that case there would be no other place left to store
596    SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
597    SLOTNUM in ia64_memory_remove_breakpoint.
598
599 There is one special case where we need to be extra careful:
600 L-X instructions, which are instructions that occupy 2 slots
601 (The L part is always in slot 1, and the X part is always in
602 slot 2). We must refuse to insert breakpoints for an address
603 that points at slot 2 of a bundle where an L-X instruction is
604 present, since there is logically no instruction at that address.
605 However, to make things more interesting, the opcode of L-X
606 instructions is located in slot 2. This means that, to insert
607 a breakpoint at an address that points to slot 1, we actually
608 need to write the breakpoint in slot 2! Slot 1 is actually
609 the extended operand, so writing the breakpoint there would not
610 have the desired effect. Another side-effect of this issue
611 is that we need to make sure that the shadow contents buffer
612 does save byte 15 of our instruction bundle (this is the tail
613 end of slot 2, which wouldn't be saved if we were to insert
614 the breakpoint in slot 1).
615
616 ia64 16-byte bundle layout:
617 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
618
619 The current addressing used by the code below:
620 original PC placed_address placed_size required covered
621 == bp_tgt->shadow_len reqd \subset covered
622 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
623 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
624 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
625
626 L-X instructions are treated a little specially, as explained above:
627 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
628
629    `objdump -d' and some other tools show somewhat unjustified offsets:
630 original PC byte where starts the instruction objdump offset
631 0xABCDE0 0xABCDE0 0xABCDE0
632 0xABCDE1 0xABCDE5 0xABCDE6
633 0xABCDE2 0xABCDEA 0xABCDEC
634 */
635
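/* In the 41-bit pattern below, bit 36 (the most significant bit of the
   21-bit immediate) is clear and bit 25 (the penultimate one) is set,
   which satisfies the 0/1 convention for debug break instructions
   described above; all bits outside the immediate fields are zero.  */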
636 #define IA64_BREAKPOINT 0x00003333300LL
637
638 static int
639 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
640 struct bp_target_info *bp_tgt)
641 {
642 CORE_ADDR addr = bp_tgt->placed_address;
643 gdb_byte bundle[BUNDLE_LEN];
644 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
645 long long instr_breakpoint;
646 int val;
647 int template;
648 struct cleanup *cleanup;
649
650 if (slotnum > 2)
651 error (_("Can't insert breakpoint for slot numbers greater than 2."));
652
653 addr &= ~0x0f;
654
655 /* Enable the automatic memory restoration from breakpoints while
656 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
657 Otherwise, we could possibly store into the shadow parts of the adjacent
658 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
659 breakpoint instruction bits region. */
660 cleanup = make_show_memory_breakpoints_cleanup (0);
661 val = target_read_memory (addr, bundle, BUNDLE_LEN);
662 if (val != 0)
663 {
664 do_cleanups (cleanup);
665 return val;
666 }
667
668 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
669 for addressing the SHADOW_CONTENTS placement. */
670 shadow_slotnum = slotnum;
671
672 /* Always cover the last byte of the bundle in case we are inserting
673 a breakpoint on an L-X instruction. */
674 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
675
676 template = extract_bit_field (bundle, 0, 5);
677 if (template_encoding_table[template][slotnum] == X)
678 {
679 /* X unit types can only be used in slot 2, and are actually
680 part of a 2-slot L-X instruction. We cannot break at this
681 address, as this is the second half of an instruction that
682 lives in slot 1 of that bundle. */
683 gdb_assert (slotnum == 2);
684 error (_("Can't insert breakpoint for non-existing slot X"));
685 }
686 if (template_encoding_table[template][slotnum] == L)
687 {
688 /* L unit types can only be used in slot 1. But the associated
689 opcode for that instruction is in slot 2, so bump the slot number
690 accordingly. */
691 gdb_assert (slotnum == 1);
692 slotnum = 2;
693 }
694
695   /* Store the whole bundle, except for the initial bytes skipped by the slot
696      number interpreted as a byte offset in PLACED_ADDRESS.  */
697 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
698 bp_tgt->shadow_len);
699
700 /* Re-read the same bundle as above except that, this time, read it in order
701 to compute the new bundle inside which we will be inserting the
702 breakpoint. Therefore, disable the automatic memory restoration from
703 breakpoints while we read our instruction bundle. Otherwise, the general
704 restoration mechanism kicks in and we would possibly remove parts of the
705 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
706 the real breakpoint instruction bits region. */
707 make_show_memory_breakpoints_cleanup (1);
708 val = target_read_memory (addr, bundle, BUNDLE_LEN);
709 if (val != 0)
710 {
711 do_cleanups (cleanup);
712 return val;
713 }
714
715   /* Breakpoints already present in the code will get detected and not get
716 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
717 location cannot induce the internal error as they are optimized into
718 a single instance by update_global_location_list. */
719 instr_breakpoint = slotN_contents (bundle, slotnum);
720 if (instr_breakpoint == IA64_BREAKPOINT)
721 internal_error (__FILE__, __LINE__,
722 _("Address %s already contains a breakpoint."),
723 paddress (gdbarch, bp_tgt->placed_address));
724 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
725
726 bp_tgt->placed_size = bp_tgt->shadow_len;
727
728 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
729 bp_tgt->shadow_len);
730
731 do_cleanups (cleanup);
732 return val;
733 }
734
735 static int
736 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
737 struct bp_target_info *bp_tgt)
738 {
739 CORE_ADDR addr = bp_tgt->placed_address;
740 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
741 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
742 long long instr_breakpoint, instr_saved;
743 int val;
744 int template;
745 struct cleanup *cleanup;
746
747 addr &= ~0x0f;
748
749 /* Disable the automatic memory restoration from breakpoints while
750 we read our instruction bundle. Otherwise, the general restoration
751 mechanism kicks in and we would possibly remove parts of the adjacent
752 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
753 breakpoint instruction bits region. */
754 cleanup = make_show_memory_breakpoints_cleanup (1);
755 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
756 if (val != 0)
757 {
758 do_cleanups (cleanup);
759 return val;
760 }
761
762 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
763 for addressing the SHADOW_CONTENTS placement. */
764 shadow_slotnum = slotnum;
765
766 template = extract_bit_field (bundle_mem, 0, 5);
767 if (template_encoding_table[template][slotnum] == X)
768 {
769 /* X unit types can only be used in slot 2, and are actually
770 part of a 2-slot L-X instruction. We refuse to insert
771 breakpoints at this address, so there should be no reason
772 for us attempting to remove one there, except if the program's
773 code somehow got modified in memory. */
774 gdb_assert (slotnum == 2);
775 warning (_("Cannot remove breakpoint at address %s from non-existing "
776 "X-type slot, memory has changed underneath"),
777 paddress (gdbarch, bp_tgt->placed_address));
778 do_cleanups (cleanup);
779 return -1;
780 }
781 if (template_encoding_table[template][slotnum] == L)
782 {
783 /* L unit types can only be used in slot 1. But the breakpoint
784 was actually saved using slot 2, so update the slot number
785 accordingly. */
786 gdb_assert (slotnum == 1);
787 slotnum = 2;
788 }
789
790 gdb_assert (bp_tgt->placed_size == BUNDLE_LEN - shadow_slotnum);
791 gdb_assert (bp_tgt->placed_size == bp_tgt->shadow_len);
792
793 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
794 if (instr_breakpoint != IA64_BREAKPOINT)
795 {
796 warning (_("Cannot remove breakpoint at address %s, "
797 "no break instruction at such address."),
798 paddress (gdbarch, bp_tgt->placed_address));
799 do_cleanups (cleanup);
800 return -1;
801 }
802
803 /* Extract the original saved instruction from SLOTNUM normalizing its
804 bit-shift for INSTR_SAVED. */
805 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
806 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
807 bp_tgt->shadow_len);
808 instr_saved = slotN_contents (bundle_saved, slotnum);
809
810 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
811 and not any of the other ones that are stored in SHADOW_CONTENTS. */
812 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
813 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
814
815 do_cleanups (cleanup);
816 return val;
817 }
818
819 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
820    instruction slot ranges are bit-granular (41 bits), we have to provide an
821 extended range as described for ia64_memory_insert_breakpoint. We also take
822 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
823 make a match for permanent breakpoints. */
824
825 static const gdb_byte *
826 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
827 CORE_ADDR *pcptr, int *lenptr)
828 {
829 CORE_ADDR addr = *pcptr;
830 static gdb_byte bundle[BUNDLE_LEN];
831 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
832 long long instr_fetched;
833 int val;
834 int template;
835 struct cleanup *cleanup;
836
837 if (slotnum > 2)
838 error (_("Can't insert breakpoint for slot numbers greater than 2."));
839
840 addr &= ~0x0f;
841
842 /* Enable the automatic memory restoration from breakpoints while
843 we read our instruction bundle to match bp_loc_is_permanent. */
844 cleanup = make_show_memory_breakpoints_cleanup (0);
845 val = target_read_memory (addr, bundle, BUNDLE_LEN);
846 do_cleanups (cleanup);
847
848 /* The memory might be unreachable. This can happen, for instance,
849 when the user inserts a breakpoint at an invalid address. */
850 if (val != 0)
851 return NULL;
852
853 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
854 for addressing the SHADOW_CONTENTS placement. */
855 shadow_slotnum = slotnum;
856
857   /* Always cover the last byte of the bundle for the L-X slot case.  */
858 *lenptr = BUNDLE_LEN - shadow_slotnum;
859
860   /* Check for an L-type instruction in slot 1; if present, bump the slot
861      number up to slot 2.  */
862 template = extract_bit_field (bundle, 0, 5);
863 if (template_encoding_table[template][slotnum] == X)
864 {
865 gdb_assert (slotnum == 2);
866 error (_("Can't insert breakpoint for non-existing slot X"));
867 }
868 if (template_encoding_table[template][slotnum] == L)
869 {
870 gdb_assert (slotnum == 1);
871 slotnum = 2;
872 }
873
874   /* A break instruction has all of its opcode bits cleared except for
875      the parameter value.  For an L+X slot pair we are at the X slot (slot 2),
876      so we should not touch the L slot - the upper 41 bits of the parameter.  */
877 instr_fetched = slotN_contents (bundle, slotnum);
878 instr_fetched &= 0x1003ffffc0LL;
879 replace_slotN_contents (bundle, instr_fetched, slotnum);
880
881 return bundle + shadow_slotnum;
882 }
883
884 static CORE_ADDR
885 ia64_read_pc (struct regcache *regcache)
886 {
887 ULONGEST psr_value, pc_value;
888 int slot_num;
889
890 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
891 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &pc_value);
892 slot_num = (psr_value >> 41) & 3;
893
894 return pc_value | (slot_num * SLOT_MULTIPLIER);
895 }
896
897 void
898 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
899 {
900 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
901 ULONGEST psr_value;
902
903 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
904 psr_value &= ~(3LL << 41);
905 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
906
907 new_pc &= ~0xfLL;
908
909 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
910 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
911 }
912
913 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
914
915 /* Returns the address of the slot that's NSLOTS slots away from
916 the address ADDR. NSLOTS may be positive or negative. */
917 static CORE_ADDR
918 rse_address_add(CORE_ADDR addr, int nslots)
919 {
920 CORE_ADDR new_addr;
921 int mandatory_nat_slots = nslots / 63;
922 int direction = nslots < 0 ? -1 : 1;
923
924 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
925
926 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
927 new_addr += 8 * direction;
928
929 if (IS_NaT_COLLECTION_ADDR(new_addr))
930 new_addr += 8 * direction;
931
932 return new_addr;
933 }
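/* Informal usage note: the callers below use rse_address_add to walk the
   register backing store while transparently skipping RSE NaT collection
   slots (every 64th slot, as tested by IS_NaT_COLLECTION_ADDR).  For
   example, the start of the current register frame (the address of r32) is

     bof = rse_address_add (bsp, -(cfm & 0x7f));

   and the backing-store address of r32 + N is rse_address_add (bof, N).  */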
934
935 static enum register_status
936 ia64_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
937 int regnum, gdb_byte *buf)
938 {
939 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
940 enum register_status status;
941
942 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
943 {
944 #ifdef HAVE_LIBUNWIND_IA64_H
945 /* First try and use the libunwind special reg accessor,
946 otherwise fallback to standard logic. */
947 if (!libunwind_is_initialized ()
948 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
949 #endif
950 {
951 /* The fallback position is to assume that r32-r127 are
952 found sequentially in memory starting at $bof. This
953 isn't always true, but without libunwind, this is the
954 best we can do. */
955 enum register_status status;
956 ULONGEST cfm;
957 ULONGEST bsp;
958 CORE_ADDR reg;
959
960 status = regcache_cooked_read_unsigned (regcache,
961 IA64_BSP_REGNUM, &bsp);
962 if (status != REG_VALID)
963 return status;
964
965 status = regcache_cooked_read_unsigned (regcache,
966 IA64_CFM_REGNUM, &cfm);
967 if (status != REG_VALID)
968 return status;
969
970 /* The bsp points at the end of the register frame so we
971 subtract the size of frame from it to get start of
972 register frame. */
973 bsp = rse_address_add (bsp, -(cfm & 0x7f));
974
975 if ((cfm & 0x7f) > regnum - V32_REGNUM)
976 {
977 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
978 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
979 store_unsigned_integer (buf, register_size (gdbarch, regnum),
980 byte_order, reg);
981 }
982 else
983 store_unsigned_integer (buf, register_size (gdbarch, regnum),
984 byte_order, 0);
985 }
986 }
987 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
988 {
989 ULONGEST unatN_val;
990 ULONGEST unat;
991 status = regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
992 if (status != REG_VALID)
993 return status;
994 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
995 store_unsigned_integer (buf, register_size (gdbarch, regnum),
996 byte_order, unatN_val);
997 }
998 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
999 {
1000 ULONGEST natN_val = 0;
1001 ULONGEST bsp;
1002 ULONGEST cfm;
1003 CORE_ADDR gr_addr = 0;
1004 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1005 if (status != REG_VALID)
1006 return status;
1007 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1008 if (status != REG_VALID)
1009 return status;
1010
1011 /* The bsp points at the end of the register frame so we
1012 subtract the size of frame from it to get start of register frame. */
1013 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1014
1015 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1016 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1017
1018 if (gr_addr != 0)
1019 {
1020 /* Compute address of nat collection bits. */
1021 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1022 CORE_ADDR nat_collection;
1023 int nat_bit;
1024 /* If our nat collection address is bigger than bsp, we have to get
1025 the nat collection from rnat. Otherwise, we fetch the nat
1026 collection from the computed address. */
1027 if (nat_addr >= bsp)
1028 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1029 &nat_collection);
1030 else
1031 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1032 nat_bit = (gr_addr >> 3) & 0x3f;
1033 natN_val = (nat_collection >> nat_bit) & 1;
1034 }
1035
1036 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1037 byte_order, natN_val);
1038 }
1039 else if (regnum == VBOF_REGNUM)
1040 {
1041 /* A virtual register frame start is provided for user convenience.
1042 	 It can be calculated as bsp - sof (size of frame).  */
1043 ULONGEST bsp, vbsp;
1044 ULONGEST cfm;
1045 CORE_ADDR reg;
1046 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1047 if (status != REG_VALID)
1048 return status;
1049 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1050 if (status != REG_VALID)
1051 return status;
1052
1053 /* The bsp points at the end of the register frame so we
1054 subtract the size of frame from it to get beginning of frame. */
1055 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1056 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1057 byte_order, vbsp);
1058 }
1059 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1060 {
1061 ULONGEST pr;
1062 ULONGEST cfm;
1063 ULONGEST prN_val;
1064 CORE_ADDR reg;
1065 status = regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1066 if (status != REG_VALID)
1067 return status;
1068 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1069 if (status != REG_VALID)
1070 return status;
1071
1072 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1073 {
1074 /* Fetch predicate register rename base from current frame
1075 marker for this frame. */
1076 int rrb_pr = (cfm >> 32) & 0x3f;
1077
1078 /* Adjust the register number to account for register rotation. */
1079 regnum = VP16_REGNUM
1080 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1081 }
1082 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1083 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1084 byte_order, prN_val);
1085 }
1086 else
1087 memset (buf, 0, register_size (gdbarch, regnum));
1088
1089 return REG_VALID;
1090 }
1091
1092 static void
1093 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1094 int regnum, const gdb_byte *buf)
1095 {
1096 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1097
1098 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1099 {
1100 ULONGEST bsp;
1101 ULONGEST cfm;
1102 CORE_ADDR reg;
1103 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1104 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1105
1106 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1107
1108 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1109 {
1110 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1111 write_memory (reg_addr, (void *) buf, 8);
1112 }
1113 }
1114 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1115 {
1116 ULONGEST unatN_val, unat, unatN_mask;
1117 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1118 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1119 regnum),
1120 byte_order);
1121 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1122 if (unatN_val == 0)
1123 unat &= ~unatN_mask;
1124 else if (unatN_val == 1)
1125 unat |= unatN_mask;
1126 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1127 }
1128 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1129 {
1130 ULONGEST natN_val;
1131 ULONGEST bsp;
1132 ULONGEST cfm;
1133 CORE_ADDR gr_addr = 0;
1134 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1135 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1136
1137 /* The bsp points at the end of the register frame so we
1138 subtract the size of frame from it to get start of register frame. */
1139 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1140
1141 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1142 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1143
1144 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1145 regnum),
1146 byte_order);
1147
1148 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1149 {
1150 /* Compute address of nat collection bits. */
1151 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1152 CORE_ADDR nat_collection;
1153 int natN_bit = (gr_addr >> 3) & 0x3f;
1154 ULONGEST natN_mask = (1LL << natN_bit);
1155 /* If our nat collection address is bigger than bsp, we have to get
1156 the nat collection from rnat. Otherwise, we fetch the nat
1157 collection from the computed address. */
1158 if (nat_addr >= bsp)
1159 {
1160 regcache_cooked_read_unsigned (regcache,
1161 IA64_RNAT_REGNUM,
1162 &nat_collection);
1163 if (natN_val)
1164 nat_collection |= natN_mask;
1165 else
1166 nat_collection &= ~natN_mask;
1167 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1168 nat_collection);
1169 }
1170 else
1171 {
1172 char nat_buf[8];
1173 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1174 if (natN_val)
1175 nat_collection |= natN_mask;
1176 else
1177 nat_collection &= ~natN_mask;
1178 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1179 byte_order, nat_collection);
1180 write_memory (nat_addr, nat_buf, 8);
1181 }
1182 }
1183 }
1184 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1185 {
1186 ULONGEST pr;
1187 ULONGEST cfm;
1188 ULONGEST prN_val;
1189 ULONGEST prN_mask;
1190
1191 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1192 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1193
1194 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1195 {
1196 /* Fetch predicate register rename base from current frame
1197 marker for this frame. */
1198 int rrb_pr = (cfm >> 32) & 0x3f;
1199
1200 /* Adjust the register number to account for register rotation. */
1201 regnum = VP16_REGNUM
1202 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1203 }
1204 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1205 byte_order);
1206 prN_mask = (1LL << (regnum - VP0_REGNUM));
1207 if (prN_val == 0)
1208 pr &= ~prN_mask;
1209 else if (prN_val == 1)
1210 pr |= prN_mask;
1211 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1212 }
1213 }
1214
1215 /* The ia64 needs to convert between various ieee floating-point formats
1216 and the special ia64 floating point register format. */
1217
1218 static int
1219 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1220 {
1221 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1222 && type != ia64_ext_type (gdbarch));
1223 }
1224
1225 static int
1226 ia64_register_to_value (struct frame_info *frame, int regnum,
1227 struct type *valtype, gdb_byte *out,
1228 int *optimizedp, int *unavailablep)
1229 {
1230 struct gdbarch *gdbarch = get_frame_arch (frame);
1231 char in[MAX_REGISTER_SIZE];
1232
1233 /* Convert to TYPE. */
1234 if (!get_frame_register_bytes (frame, regnum, 0,
1235 register_size (gdbarch, regnum),
1236 in, optimizedp, unavailablep))
1237 return 0;
1238
1239 convert_typed_floating (in, ia64_ext_type (gdbarch), out, valtype);
1240 *optimizedp = *unavailablep = 0;
1241 return 1;
1242 }
1243
1244 static void
1245 ia64_value_to_register (struct frame_info *frame, int regnum,
1246 struct type *valtype, const gdb_byte *in)
1247 {
1248 struct gdbarch *gdbarch = get_frame_arch (frame);
1249 char out[MAX_REGISTER_SIZE];
1250 convert_typed_floating (in, valtype, out, ia64_ext_type (gdbarch));
1251 put_frame_register (frame, regnum, out);
1252 }
1253
1254
1255 /* Limit the number of skipped non-prologue instructions since examining
1256 of the prologue is expensive. */
1257 static int max_skip_non_prologue_insns = 40;
1258
1259 /* Given PC representing the starting address of a function, and
1260 LIM_PC which is the (sloppy) limit to which to scan when looking
1261 for a prologue, attempt to further refine this limit by using
1262 the line data in the symbol table. If successful, a better guess
1263 on where the prologue ends is returned, otherwise the previous
1264 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1265 which will be set to indicate whether the returned limit may be
1266 used with no further scanning in the event that the function is
1267 frameless. */
1268
1269 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1270 superseded by skip_prologue_using_sal. */
1271
1272 static CORE_ADDR
1273 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1274 {
1275 struct symtab_and_line prologue_sal;
1276 CORE_ADDR start_pc = pc;
1277 CORE_ADDR end_pc;
1278
1279 /* The prologue can not possibly go past the function end itself,
1280 so we can already adjust LIM_PC accordingly. */
1281 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1282 lim_pc = end_pc;
1283
1284 /* Start off not trusting the limit. */
1285 *trust_limit = 0;
1286
1287 prologue_sal = find_pc_line (pc, 0);
1288 if (prologue_sal.line != 0)
1289 {
1290 int i;
1291 CORE_ADDR addr = prologue_sal.end;
1292
1293 /* Handle the case in which compiler's optimizer/scheduler
1294 has moved instructions into the prologue. We scan ahead
1295 in the function looking for address ranges whose corresponding
1296 line number is less than or equal to the first one that we
1297 found for the function. (It can be less than when the
1298 scheduler puts a body instruction before the first prologue
1299 instruction.) */
1300 for (i = 2 * max_skip_non_prologue_insns;
1301 i > 0 && (lim_pc == 0 || addr < lim_pc);
1302 i--)
1303 {
1304 struct symtab_and_line sal;
1305
1306 sal = find_pc_line (addr, 0);
1307 if (sal.line == 0)
1308 break;
1309 if (sal.line <= prologue_sal.line
1310 && sal.symtab == prologue_sal.symtab)
1311 {
1312 prologue_sal = sal;
1313 }
1314 addr = sal.end;
1315 }
1316
1317 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1318 {
1319 lim_pc = prologue_sal.end;
1320 if (start_pc == get_pc_function_start (lim_pc))
1321 *trust_limit = 1;
1322 }
1323 }
1324 return lim_pc;
1325 }
1326
1327 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1328 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1329 || (14 <= (_regnum_) && (_regnum_) <= 31))
1330 #define imm9(_instr_) \
1331 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1332 | (((_instr_) & 0x00008000000LL) >> 20) \
1333 | (((_instr_) & 0x00000001fc0LL) >> 6))
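/* A descriptive note on the imm9 macro above: it reassembles the signed
   9-bit post-increment immediate of the spill/store forms decoded in
   examine_prologue, taking the sign from instruction bit 36, bit 7 from
   instruction bit 27, and bits 0..6 from instruction bits 6..12.  */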
1334
1335 /* Allocate and initialize a frame cache. */
1336
1337 static struct ia64_frame_cache *
1338 ia64_alloc_frame_cache (void)
1339 {
1340 struct ia64_frame_cache *cache;
1341 int i;
1342
1343 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1344
1345 /* Base address. */
1346 cache->base = 0;
1347 cache->pc = 0;
1348 cache->cfm = 0;
1349 cache->prev_cfm = 0;
1350 cache->sof = 0;
1351 cache->sol = 0;
1352 cache->sor = 0;
1353 cache->bsp = 0;
1354 cache->fp_reg = 0;
1355 cache->frameless = 1;
1356
1357 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1358 cache->saved_regs[i] = 0;
1359
1360 return cache;
1361 }
1362
1363 static CORE_ADDR
1364 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1365 struct frame_info *this_frame,
1366 struct ia64_frame_cache *cache)
1367 {
1368 CORE_ADDR next_pc;
1369 CORE_ADDR last_prologue_pc = pc;
1370 instruction_type it;
1371 long long instr;
1372 int cfm_reg = 0;
1373 int ret_reg = 0;
1374 int fp_reg = 0;
1375 int unat_save_reg = 0;
1376 int pr_save_reg = 0;
1377 int mem_stack_frame_size = 0;
1378 int spill_reg = 0;
1379 CORE_ADDR spill_addr = 0;
1380 char instores[8];
1381 char infpstores[8];
1382 char reg_contents[256];
1383 int trust_limit;
1384 int frameless = 1;
1385 int i;
1386 CORE_ADDR addr;
1387 char buf[8];
1388 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1389
1390 memset (instores, 0, sizeof instores);
1391 memset (infpstores, 0, sizeof infpstores);
1392 memset (reg_contents, 0, sizeof reg_contents);
1393
1394 if (cache->after_prologue != 0
1395 && cache->after_prologue <= lim_pc)
1396 return cache->after_prologue;
1397
1398 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1399 next_pc = fetch_instruction (pc, &it, &instr);
1400
1401 /* We want to check if we have a recognizable function start before we
1402 look ahead for a prologue. */
1403 if (pc < lim_pc && next_pc
1404 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1405 {
1406 /* alloc - start of a regular function. */
1407 int sor = (int) ((instr & 0x00078000000LL) >> 27);
1408 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1409 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1410 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1411
1412 /* Verify that the current cfm matches what we think is the
1413 function start. If we have somehow jumped within a function,
1414 we do not want to interpret the prologue and calculate the
1415 addresses of various registers such as the return address.
1416 We will instead treat the frame as frameless. */
1417 if (!this_frame ||
1418 (sof == (cache->cfm & 0x7f) &&
1419 sol == ((cache->cfm >> 7) & 0x7f)))
1420 frameless = 0;
1421
1422 cfm_reg = rN;
1423 last_prologue_pc = next_pc;
1424 pc = next_pc;
1425 }
1426 else
1427 {
1428 /* Look for a leaf routine. */
1429 if (pc < lim_pc && next_pc
1430 && (it == I || it == M)
1431 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1432 {
1433 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1434 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1435 | ((instr & 0x001f8000000LL) >> 20)
1436 | ((instr & 0x000000fe000LL) >> 13));
1437 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1438 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1439 int qp = (int) (instr & 0x0000000003fLL);
1440 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1441 {
1442 /* mov r2, r12 - beginning of leaf routine. */
1443 fp_reg = rN;
1444 last_prologue_pc = next_pc;
1445 }
1446 }
1447
1448 /* If we don't recognize a regular function or leaf routine, we are
1449 done. */
1450 if (!fp_reg)
1451 {
1452 pc = lim_pc;
1453 if (trust_limit)
1454 last_prologue_pc = lim_pc;
1455 }
1456 }
1457
1458 /* Loop, looking for prologue instructions, keeping track of
1459 where preserved registers were spilled. */
1460 while (pc < lim_pc)
1461 {
1462 next_pc = fetch_instruction (pc, &it, &instr);
1463 if (next_pc == 0)
1464 break;
1465
1466 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1467 {
1468 /* Exit loop upon hitting a non-nop branch instruction. */
1469 if (trust_limit)
1470 lim_pc = pc;
1471 break;
1472 }
1473 else if (((instr & 0x3fLL) != 0LL) &&
1474 (frameless || ret_reg != 0))
1475 {
1476 /* Exit loop upon hitting a predicated instruction if
1477 we already have the return register or if we are frameless. */
1478 if (trust_limit)
1479 lim_pc = pc;
1480 break;
1481 }
1482 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1483 {
1484 /* Move from BR */
1485 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1486 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1487 int qp = (int) (instr & 0x0000000003f);
1488
1489 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1490 {
1491 ret_reg = rN;
1492 last_prologue_pc = next_pc;
1493 }
1494 }
1495 else if ((it == I || it == M)
1496 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1497 {
1498 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1499 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1500 | ((instr & 0x001f8000000LL) >> 20)
1501 | ((instr & 0x000000fe000LL) >> 13));
1502 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1503 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1504 int qp = (int) (instr & 0x0000000003fLL);
1505
1506 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1507 {
1508 /* mov rN, r12 */
1509 fp_reg = rN;
1510 last_prologue_pc = next_pc;
1511 }
1512 else if (qp == 0 && rN == 12 && rM == 12)
1513 {
1514 /* adds r12, -mem_stack_frame_size, r12 */
1515 mem_stack_frame_size -= imm;
1516 last_prologue_pc = next_pc;
1517 }
1518 else if (qp == 0 && rN == 2
1519 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1520 {
1521 char buf[MAX_REGISTER_SIZE];
1522 CORE_ADDR saved_sp = 0;
1523 /* adds r2, spilloffset, rFramePointer
1524 or
1525 adds r2, spilloffset, r12
1526
1527 Get ready for stf.spill or st8.spill instructions.
1528 The address to start spilling at is loaded into r2.
1529 FIXME: Why r2? That's what gcc currently uses; it
1530 could well be different for other compilers. */
1531
1532 /* Hmm... whether or not this will work will depend on
1533 where the pc is. If it's still early in the prologue
1534 this'll be wrong. FIXME */
1535 if (this_frame)
1536 {
1537 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1538 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1539 get_frame_register (this_frame, sp_regnum, buf);
1540 saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1541 }
1542 spill_addr = saved_sp
1543 + (rM == 12 ? 0 : mem_stack_frame_size)
1544 + imm;
1545 spill_reg = rN;
1546 last_prologue_pc = next_pc;
1547 }
1548 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1549 rN < 256 && imm == 0)
1550 {
1551 /* mov rN, rM where rM is an input register. */
1552 reg_contents[rN] = rM;
1553 last_prologue_pc = next_pc;
1554 }
1555 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1556 rM == 2)
1557 {
1558 /* mov r12, r2 */
1559 last_prologue_pc = next_pc;
1560 break;
1561 }
1562 }
1563 else if (it == M
1564 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1565 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1566 {
1567 /* stf.spill [rN] = fM, imm9
1568 or
1569 stf.spill [rN] = fM */
1570
1571 int imm = imm9(instr);
1572 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1573 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1574 int qp = (int) (instr & 0x0000000003fLL);
1575 if (qp == 0 && rN == spill_reg && spill_addr != 0
1576 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1577 {
1578 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1579
1580 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1581 spill_addr += imm;
1582 else
1583 spill_addr = 0; /* last one; must be done. */
1584 last_prologue_pc = next_pc;
1585 }
1586 }
1587 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1588 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1589 {
1590 /* mov.m rN = arM
1591 or
1592 mov.i rN = arM */
1593
1594 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1595 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1596 int qp = (int) (instr & 0x0000000003fLL);
1597 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1598 {
1599 /* We have something like "mov.m r3 = ar.unat". Remember the
1600 r3 (or whatever) and watch for a store of this register... */
1601 unat_save_reg = rN;
1602 last_prologue_pc = next_pc;
1603 }
1604 }
1605 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1606 {
1607 /* mov rN = pr */
1608 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1609 int qp = (int) (instr & 0x0000000003fLL);
1610 if (qp == 0 && isScratch (rN))
1611 {
1612 pr_save_reg = rN;
1613 last_prologue_pc = next_pc;
1614 }
1615 }
1616 else if (it == M
1617 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1618 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1619 {
1620 /* st8 [rN] = rM
1621 or
1622 st8 [rN] = rM, imm9 */
1623 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1624 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1625 int qp = (int) (instr & 0x0000000003fLL);
1626 int indirect = rM < 256 ? reg_contents[rM] : 0;
1627 if (qp == 0 && rN == spill_reg && spill_addr != 0
1628 && (rM == unat_save_reg || rM == pr_save_reg))
1629 {
1630 /* We've found a spill of either the UNAT register or the PR
1631 register. (Well, not exactly; what we've actually found is
1632 a spill of the register that UNAT or PR was moved to).
1633 Record that fact and move on... */
1634 if (rM == unat_save_reg)
1635 {
1636 /* Track UNAT register. */
1637 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1638 unat_save_reg = 0;
1639 }
1640 else
1641 {
1642 /* Track PR register. */
1643 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1644 pr_save_reg = 0;
1645 }
1646 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1647 /* st8 [rN] = rM, imm9 */
1648 spill_addr += imm9(instr);
1649 else
1650 spill_addr = 0; /* Must be done spilling. */
1651 last_prologue_pc = next_pc;
1652 }
1653 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1654 {
1655 /* Allow up to one store of each input register. */
1656 instores[rM-32] = 1;
1657 last_prologue_pc = next_pc;
1658 }
1659 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1660 !instores[indirect-32])
1661 {
1662 /* Allow an indirect store of an input register. */
1663 instores[indirect-32] = 1;
1664 last_prologue_pc = next_pc;
1665 }
1666 }
1667 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1668 {
1669 /* One of
1670 st1 [rN] = rM
1671 st2 [rN] = rM
1672 st4 [rN] = rM
1673 st8 [rN] = rM
1674 Note that the st8 case is handled in the clause above.
1675
1676 Advance over stores of input registers. One store per input
1677 register is permitted. */
1678 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1679 int qp = (int) (instr & 0x0000000003fLL);
1680 int indirect = rM < 256 ? reg_contents[rM] : 0;
1681 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1682 {
1683 instores[rM-32] = 1;
1684 last_prologue_pc = next_pc;
1685 }
1686 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1687 !instores[indirect-32])
1688 {
1689 /* Allow an indirect store of an input register. */
1690 instores[indirect-32] = 1;
1691 last_prologue_pc = next_pc;
1692 }
1693 }
1694 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1695 {
1696 /* Either
1697 stfs [rN] = fM
1698 or
1699 stfd [rN] = fM
1700
1701 Advance over stores of floating point input registers. Again
1702 one store per register is permitted. */
1703 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1704 int qp = (int) (instr & 0x0000000003fLL);
1705 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1706 {
1707 infpstores[fM-8] = 1;
1708 last_prologue_pc = next_pc;
1709 }
1710 }
1711 else if (it == M
1712 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1713 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1714 {
1715 /* st8.spill [rN] = rM
1716 or
1717 st8.spill [rN] = rM, imm9 */
1718 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1719 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1720 int qp = (int) (instr & 0x0000000003fLL);
1721 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1722 {
1723 /* We've found a spill of one of the preserved general purpose
1724 regs. Record the spill address and advance the spill
1725 register if appropriate. */
1726 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1727 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1728 /* st8.spill [rN] = rM, imm9 */
1729 spill_addr += imm9(instr);
1730 else
1731 spill_addr = 0; /* Done spilling. */
1732 last_prologue_pc = next_pc;
1733 }
1734 }
1735
1736 pc = next_pc;
1737 }
1738
1739 /* If not frameless and we aren't called by skip_prologue, then we need
1740 to calculate registers for the previous frame which will be needed
1741 later. */
1742
1743 if (!frameless && this_frame)
1744 {
1745 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1746 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1747
1748 /* Extract the size of the rotating portion of the stack
1749 frame and the register rename base from the current
1750 frame marker. */
1751 cfm = cache->cfm;
1752 sor = cache->sor;
1753 sof = cache->sof;
1754 sol = cache->sol;
1755 rrb_gr = (cfm >> 18) & 0x7f;
1756
1757 /* Find the bof (beginning of frame). */
1758 bof = rse_address_add (cache->bsp, -sof);
1759
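/* Walk the current register frame in the backing store.  Every 64th
   slot of the backing store holds a NaT collection rather than a
   stacked register, which is why the loop below steps over such
   addresses (illustrative note; see IS_NaT_COLLECTION_ADDR).  */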
1760 for (i = 0, addr = bof;
1761 i < sof;
1762 i++, addr += 8)
1763 {
1764 if (IS_NaT_COLLECTION_ADDR (addr))
1765 {
1766 addr += 8;
1767 }
1768 if (i+32 == cfm_reg)
1769 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1770 if (i+32 == ret_reg)
1771 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1772 if (i+32 == fp_reg)
1773 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1774 }
1775
1776 /* For the previous argument registers we require the previous bof.
1777 If we can't find the previous cfm, then we can do nothing. */
1778 cfm = 0;
1779 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1780 {
1781 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1782 8, byte_order);
1783 }
1784 else if (cfm_reg != 0)
1785 {
1786 get_frame_register (this_frame, cfm_reg, buf);
1787 cfm = extract_unsigned_integer (buf, 8, byte_order);
1788 }
1789 cache->prev_cfm = cfm;
1790
1791 if (cfm != 0)
1792 {
1793 sor = ((cfm >> 14) & 0xf) * 8;
1794 sof = (cfm & 0x7f);
1795 sol = (cfm >> 7) & 0x7f;
1796 rrb_gr = (cfm >> 18) & 0x7f;
1797
1798 /* The previous bof only requires subtraction of the sol (size of
1799 locals) due to the overlap between output and input of
1800 subsequent frames. */
1801 bof = rse_address_add (bof, -sol);
1802
1803 for (i = 0, addr = bof;
1804 i < sof;
1805 i++, addr += 8)
1806 {
1807 if (IS_NaT_COLLECTION_ADDR (addr))
1808 {
1809 addr += 8;
1810 }
1811 if (i < sor)
1812 cache->saved_regs[IA64_GR32_REGNUM
1813 + ((i + (sor - rrb_gr)) % sor)]
1814 = addr;
1815 else
1816 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1817 }
1818
1819 }
1820 }
1821
1822 /* Try and trust the lim_pc value whenever possible. */
1823 if (trust_limit && lim_pc >= last_prologue_pc)
1824 last_prologue_pc = lim_pc;
1825
1826 cache->frameless = frameless;
1827 cache->after_prologue = last_prologue_pc;
1828 cache->mem_stack_frame_size = mem_stack_frame_size;
1829 cache->fp_reg = fp_reg;
1830
1831 return last_prologue_pc;
1832 }
1833
1834 CORE_ADDR
1835 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1836 {
1837 struct ia64_frame_cache cache;
1838 cache.base = 0;
1839 cache.after_prologue = 0;
1840 cache.cfm = 0;
1841 cache.bsp = 0;
1842
1843 /* Call examine_prologue with 0 as the third argument since we don't
1844 have a frame to pass. */
1845 return examine_prologue (pc, pc+1024, 0, &cache);
1846 }
1847
1848
1849 /* Normal frames. */
1850
1851 static struct ia64_frame_cache *
1852 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1853 {
1854 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1855 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1856 struct ia64_frame_cache *cache;
1857 char buf[8];
1858 CORE_ADDR cfm, sof, sol, bsp, psr;
1859 int i;
1860
1861 if (*this_cache)
1862 return *this_cache;
1863
1864 cache = ia64_alloc_frame_cache ();
1865 *this_cache = cache;
1866
1867 get_frame_register (this_frame, sp_regnum, buf);
1868 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1869
1870 /* We always want the bsp to point to the end of frame.
1871 This way, we can always get the beginning of frame (bof)
1872 by subtracting frame size. */
1873 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1874 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1875
1876 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1877 psr = extract_unsigned_integer (buf, 8, byte_order);
1878
1879 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1880 cfm = extract_unsigned_integer (buf, 8, byte_order);
1881
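/* Illustrative summary of the CFM layout decoded below: sof is bits
   0..6, sol is bits 7..13, sor is bits 14..17 (in units of eight
   registers), rrb.gr is bits 18..24, rrb.fr is bits 25..31 and
   rrb.pr is bits 32..37.  */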
1882 cache->sof = (cfm & 0x7f);
1883 cache->sol = (cfm >> 7) & 0x7f;
1884 cache->sor = ((cfm >> 14) & 0xf) * 8;
1885
1886 cache->cfm = cfm;
1887
1888 cache->pc = get_frame_func (this_frame);
1889
1890 if (cache->pc != 0)
1891 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1892
1893 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1894
1895 return cache;
1896 }
1897
1898 static void
1899 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1900 struct frame_id *this_id)
1901 {
1902 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1903 struct ia64_frame_cache *cache =
1904 ia64_frame_cache (this_frame, this_cache);
1905
1906 /* If outermost frame, mark with null frame id. */
1907 if (cache->base != 0)
1908 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1909 if (gdbarch_debug >= 1)
1910 fprintf_unfiltered (gdb_stdlog,
1911 "regular frame id: code %s, stack %s, "
1912 "special %s, this_frame %s\n",
1913 paddress (gdbarch, this_id->code_addr),
1914 paddress (gdbarch, this_id->stack_addr),
1915 paddress (gdbarch, cache->bsp),
1916 host_address_to_string (this_frame));
1917 }
1918
1919 static struct value *
1920 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1921 int regnum)
1922 {
1923 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1924 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1925 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1926 char buf[8];
1927
1928 gdb_assert (regnum >= 0);
1929
1930 if (!target_has_registers)
1931 error (_("No registers."));
1932
1933 if (regnum == gdbarch_sp_regnum (gdbarch))
1934 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1935
1936 else if (regnum == IA64_BSP_REGNUM)
1937 {
1938 struct value *val;
1939 CORE_ADDR prev_cfm, bsp, prev_bsp;
1940
1941 /* We want to calculate the previous bsp as the end of the previous
1942 register stack frame. This corresponds to what the hardware bsp
1943 register will be if we pop the frame back which is why we might
1944 have been called. We know the beginning of the current frame is
1945 cache->bsp - cache->sof. This value in the previous frame points
1946 to the start of the output registers. We can calculate the end of
1947 that frame by adding the size of output:
1948 (sof (size of frame) - sol (size of locals)). */
1949 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1950 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1951 8, byte_order);
1952 bsp = rse_address_add (cache->bsp, -(cache->sof));
1953 prev_bsp =
1954 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
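/* Illustrative example: if the previous frame's cfm gives sof = 10
   and sol = 6, that frame had 10 - 6 = 4 output registers, which
   start at the current bof; its bsp therefore lies 4 register slots
   past the current bof (plus one slot for any intervening NaT
   collection, which rse_address_add accounts for).  */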
1955
1956 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1957 }
1958
1959 else if (regnum == IA64_CFM_REGNUM)
1960 {
1961 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1962
1963 if (addr != 0)
1964 return frame_unwind_got_memory (this_frame, regnum, addr);
1965
1966 if (cache->prev_cfm)
1967 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1968
1969 if (cache->frameless)
1970 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1971 IA64_PFS_REGNUM);
1972 return frame_unwind_got_register (this_frame, regnum, 0);
1973 }
1974
1975 else if (regnum == IA64_VFP_REGNUM)
1976 {
1977 /* If the function in question uses an automatic register (r32-r127)
1978 for the frame pointer, it'll be found by ia64_find_saved_register()
1979 above. If the function lacks one of these frame pointers, we can
1980 still provide a value since we know the size of the frame. */
1981 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1982 }
1983
1984 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1985 {
1986 struct value *pr_val;
1987 ULONGEST prN;
1988
1989 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1990 IA64_PR_REGNUM);
1991 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1992 {
1993 /* Fetch predicate register rename base from current frame
1994 marker for this frame. */
1995 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1996
1997 /* Adjust the register number to account for register rotation. */
1998 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
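/* Illustrative example: with rrb.pr == 2 a request for p16 extracts
   the bit stored for p18; the rotation wraps around within the
   rotating region p16-p63 (48 registers).  */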
1999 }
2000 prN = extract_bit_field (value_contents_all (pr_val),
2001 regnum - VP0_REGNUM, 1);
2002 return frame_unwind_got_constant (this_frame, regnum, prN);
2003 }
2004
2005 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
2006 {
2007 struct value *unat_val;
2008 ULONGEST unatN;
2009 unat_val = ia64_frame_prev_register (this_frame, this_cache,
2010 IA64_UNAT_REGNUM);
2011 unatN = extract_bit_field (value_contents_all (unat_val),
2012 regnum - IA64_NAT0_REGNUM, 1);
2013 return frame_unwind_got_constant (this_frame, regnum, unatN);
2014 }
2015
2016 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2017 {
2018 int natval = 0;
2019 /* Find address of general register corresponding to nat bit we're
2020 interested in. */
2021 CORE_ADDR gr_addr;
2022
2023 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2024
2025 if (gr_addr != 0)
2026 {
2027 /* Compute address of nat collection bits. */
2028 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2029 CORE_ADDR bsp;
2030 CORE_ADDR nat_collection;
2031 int nat_bit;
2032
2033 /* If our nat collection address is bigger than bsp, we have to get
2034 the nat collection from rnat. Otherwise, we fetch the nat
2035 collection from the computed address. */
2036 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2037 bsp = extract_unsigned_integer (buf, 8, byte_order);
2038 if (nat_addr >= bsp)
2039 {
2040 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2041 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2042 }
2043 else
2044 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2045 nat_bit = (gr_addr >> 3) & 0x3f;
2046 natval = (nat_collection >> nat_bit) & 1;
2047 }
2048
2049 return frame_unwind_got_constant (this_frame, regnum, natval);
2050 }
2051
2052 else if (regnum == IA64_IP_REGNUM)
2053 {
2054 CORE_ADDR pc = 0;
2055 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2056
2057 if (addr != 0)
2058 {
2059 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2060 pc = extract_unsigned_integer (buf, 8, byte_order);
2061 }
2062 else if (cache->frameless)
2063 {
2064 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2065 pc = extract_unsigned_integer (buf, 8, byte_order);
2066 }
2067 pc &= ~0xf;
2068 return frame_unwind_got_constant (this_frame, regnum, pc);
2069 }
2070
2071 else if (regnum == IA64_PSR_REGNUM)
2072 {
2073 /* We don't know how to get the complete previous PSR, but we need it
2074 for the slot information when we unwind the pc (pc is formed of IP
2075 register plus slot information from PSR). To get the previous
2076 slot information, we take it from the low bits of the return address. */
2077 ULONGEST slot_num = 0;
2078 CORE_ADDR pc = 0;
2079 CORE_ADDR psr = 0;
2080 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2081
2082 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2083 psr = extract_unsigned_integer (buf, 8, byte_order);
2084
2085 if (addr != 0)
2086 {
2087 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2088 pc = extract_unsigned_integer (buf, 8, byte_order);
2089 }
2090 else if (cache->frameless)
2091 {
2092 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2093 pc = extract_unsigned_integer (buf, 8, byte_order);
2094 }
2095 psr &= ~(3LL << 41);
2096 slot_num = pc & 0x3LL;
2097 psr |= (CORE_ADDR)slot_num << 41;
2098 return frame_unwind_got_constant (this_frame, regnum, psr);
2099 }
2100
2101 else if (regnum == IA64_BR0_REGNUM)
2102 {
2103 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2104
2105 if (addr != 0)
2106 return frame_unwind_got_memory (this_frame, regnum, addr);
2107
2108 return frame_unwind_got_constant (this_frame, regnum, 0);
2109 }
2110
2111 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2112 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2113 {
2114 CORE_ADDR addr = 0;
2115
2116 if (regnum >= V32_REGNUM)
2117 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2118 addr = cache->saved_regs[regnum];
2119 if (addr != 0)
2120 return frame_unwind_got_memory (this_frame, regnum, addr);
2121
2122 if (cache->frameless)
2123 {
2124 struct value *reg_val;
2125 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2126
2127 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2128 with the same code above? */
2129 if (regnum >= V32_REGNUM)
2130 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2131 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2132 IA64_CFM_REGNUM);
2133 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2134 8, byte_order);
2135 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2136 IA64_BSP_REGNUM);
2137 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2138 8, byte_order);
2139 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2140
2141 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2142 return frame_unwind_got_memory (this_frame, regnum, addr);
2143 }
2144
2145 return frame_unwind_got_constant (this_frame, regnum, 0);
2146 }
2147
2148 else /* All other registers. */
2149 {
2150 CORE_ADDR addr = 0;
2151
2152 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2153 {
2154 /* Fetch floating point register rename base from current
2155 frame marker for this frame. */
2156 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2157
2158 /* Adjust the floating point register number to account for
2159 register rotation. */
2160 regnum = IA64_FR32_REGNUM
2161 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2162 }
2163
2164 /* If we have stored a memory address, access the register. */
2165 addr = cache->saved_regs[regnum];
2166 if (addr != 0)
2167 return frame_unwind_got_memory (this_frame, regnum, addr);
2168 /* Otherwise, punt and get the current value of the register. */
2169 else
2170 return frame_unwind_got_register (this_frame, regnum, regnum);
2171 }
2172 }
2173
2174 static const struct frame_unwind ia64_frame_unwind =
2175 {
2176 NORMAL_FRAME,
2177 default_frame_unwind_stop_reason,
2178 &ia64_frame_this_id,
2179 &ia64_frame_prev_register,
2180 NULL,
2181 default_frame_sniffer
2182 };
2183
2184 /* Signal trampolines. */
2185
2186 static void
2187 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2188 struct ia64_frame_cache *cache)
2189 {
2190 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2191 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2192
2193 if (tdep->sigcontext_register_address)
2194 {
2195 int regno;
2196
2197 cache->saved_regs[IA64_VRAP_REGNUM]
2198 = tdep->sigcontext_register_address (gdbarch, cache->base,
2199 IA64_IP_REGNUM);
2200 cache->saved_regs[IA64_CFM_REGNUM]
2201 = tdep->sigcontext_register_address (gdbarch, cache->base,
2202 IA64_CFM_REGNUM);
2203 cache->saved_regs[IA64_PSR_REGNUM]
2204 = tdep->sigcontext_register_address (gdbarch, cache->base,
2205 IA64_PSR_REGNUM);
2206 cache->saved_regs[IA64_BSP_REGNUM]
2207 = tdep->sigcontext_register_address (gdbarch, cache->base,
2208 IA64_BSP_REGNUM);
2209 cache->saved_regs[IA64_RNAT_REGNUM]
2210 = tdep->sigcontext_register_address (gdbarch, cache->base,
2211 IA64_RNAT_REGNUM);
2212 cache->saved_regs[IA64_CCV_REGNUM]
2213 = tdep->sigcontext_register_address (gdbarch, cache->base,
2214 IA64_CCV_REGNUM);
2215 cache->saved_regs[IA64_UNAT_REGNUM]
2216 = tdep->sigcontext_register_address (gdbarch, cache->base,
2217 IA64_UNAT_REGNUM);
2218 cache->saved_regs[IA64_FPSR_REGNUM]
2219 = tdep->sigcontext_register_address (gdbarch, cache->base,
2220 IA64_FPSR_REGNUM);
2221 cache->saved_regs[IA64_PFS_REGNUM]
2222 = tdep->sigcontext_register_address (gdbarch, cache->base,
2223 IA64_PFS_REGNUM);
2224 cache->saved_regs[IA64_LC_REGNUM]
2225 = tdep->sigcontext_register_address (gdbarch, cache->base,
2226 IA64_LC_REGNUM);
2227
2228 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2229 cache->saved_regs[regno] =
2230 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2231 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2232 cache->saved_regs[regno] =
2233 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2234 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2235 cache->saved_regs[regno] =
2236 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2237 }
2238 }
2239
2240 static struct ia64_frame_cache *
2241 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2242 {
2243 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2244 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2245 struct ia64_frame_cache *cache;
2246 CORE_ADDR addr;
2247 char buf[8];
2248 int i;
2249
2250 if (*this_cache)
2251 return *this_cache;
2252
2253 cache = ia64_alloc_frame_cache ();
2254
2255 get_frame_register (this_frame, sp_regnum, buf);
2256 /* Note that frame size is hard-coded below. We cannot calculate it
2257 via prologue examination. */
2258 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2259
2260 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2261 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2262
2263 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2264 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2265 cache->sof = cache->cfm & 0x7f;
2266
2267 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2268
2269 *this_cache = cache;
2270 return cache;
2271 }
2272
2273 static void
2274 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2275 void **this_cache, struct frame_id *this_id)
2276 {
2277 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2278 struct ia64_frame_cache *cache =
2279 ia64_sigtramp_frame_cache (this_frame, this_cache);
2280
2281 (*this_id) = frame_id_build_special (cache->base,
2282 get_frame_pc (this_frame),
2283 cache->bsp);
2284 if (gdbarch_debug >= 1)
2285 fprintf_unfiltered (gdb_stdlog,
2286 "sigtramp frame id: code %s, stack %s, "
2287 "special %s, this_frame %s\n",
2288 paddress (gdbarch, this_id->code_addr),
2289 paddress (gdbarch, this_id->stack_addr),
2290 paddress (gdbarch, cache->bsp),
2291 host_address_to_string (this_frame));
2292 }
2293
2294 static struct value *
2295 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2296 void **this_cache, int regnum)
2297 {
2298 char buf[MAX_REGISTER_SIZE];
2299
2300 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2301 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2302 struct ia64_frame_cache *cache =
2303 ia64_sigtramp_frame_cache (this_frame, this_cache);
2304
2305 gdb_assert (regnum >= 0);
2306
2307 if (!target_has_registers)
2308 error (_("No registers."));
2309
2310 if (regnum == IA64_IP_REGNUM)
2311 {
2312 CORE_ADDR pc = 0;
2313 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2314
2315 if (addr != 0)
2316 {
2317 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2318 pc = extract_unsigned_integer (buf, 8, byte_order);
2319 }
2320 pc &= ~0xf;
2321 return frame_unwind_got_constant (this_frame, regnum, pc);
2322 }
2323
2324 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2325 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2326 {
2327 CORE_ADDR addr = 0;
2328
2329 if (regnum >= V32_REGNUM)
2330 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2331 addr = cache->saved_regs[regnum];
2332 if (addr != 0)
2333 return frame_unwind_got_memory (this_frame, regnum, addr);
2334
2335 return frame_unwind_got_constant (this_frame, regnum, 0);
2336 }
2337
2338 else /* All other registers not listed above. */
2339 {
2340 CORE_ADDR addr = cache->saved_regs[regnum];
2341
2342 if (addr != 0)
2343 return frame_unwind_got_memory (this_frame, regnum, addr);
2344
2345 return frame_unwind_got_constant (this_frame, regnum, 0);
2346 }
2347 }
2348
2349 static int
2350 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2351 struct frame_info *this_frame,
2352 void **this_cache)
2353 {
2354 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2355 if (tdep->pc_in_sigtramp)
2356 {
2357 CORE_ADDR pc = get_frame_pc (this_frame);
2358
2359 if (tdep->pc_in_sigtramp (pc))
2360 return 1;
2361 }
2362
2363 return 0;
2364 }
2365
2366 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2367 {
2368 SIGTRAMP_FRAME,
2369 default_frame_unwind_stop_reason,
2370 ia64_sigtramp_frame_this_id,
2371 ia64_sigtramp_frame_prev_register,
2372 NULL,
2373 ia64_sigtramp_frame_sniffer
2374 };
2375
2376 \f
2377
2378 static CORE_ADDR
2379 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2380 {
2381 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2382
2383 return cache->base;
2384 }
2385
2386 static const struct frame_base ia64_frame_base =
2387 {
2388 &ia64_frame_unwind,
2389 ia64_frame_base_address,
2390 ia64_frame_base_address,
2391 ia64_frame_base_address
2392 };
2393
2394 #ifdef HAVE_LIBUNWIND_IA64_H
2395
2396 struct ia64_unwind_table_entry
2397 {
2398 unw_word_t start_offset;
2399 unw_word_t end_offset;
2400 unw_word_t info_offset;
2401 };
2402
2403 static __inline__ uint64_t
2404 ia64_rse_slot_num (uint64_t addr)
2405 {
2406 return (addr >> 3) & 0x3f;
2407 }
2408
2409 /* Skip over a designated number of registers in the backing
2410 store, remembering every 64th position is for NAT. */
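/* Illustrative example: starting from an address whose slot number
   is 62, skipping 3 registers advances the address by 4 slots (32
   bytes), because the intervening slot 63 holds a NaT collection
   and is stepped over.  */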
2411 static __inline__ uint64_t
2412 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2413 {
2414 long delta = ia64_rse_slot_num(addr) + num_regs;
2415
2416 if (num_regs < 0)
2417 delta -= 0x3e;
2418 return addr + ((num_regs + delta/0x3f) << 3);
2419 }
2420
2421 /* Gdb libunwind-frame callback function to convert from an ia64 gdb register
2422 number to a libunwind register number. */
2423 static int
2424 ia64_gdb2uw_regnum (int regnum)
2425 {
2426 if (regnum == sp_regnum)
2427 return UNW_IA64_SP;
2428 else if (regnum == IA64_BSP_REGNUM)
2429 return UNW_IA64_BSP;
2430 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2431 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2432 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2433 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2434 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2435 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2436 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2437 return -1;
2438 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2439 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2440 else if (regnum == IA64_PR_REGNUM)
2441 return UNW_IA64_PR;
2442 else if (regnum == IA64_IP_REGNUM)
2443 return UNW_REG_IP;
2444 else if (regnum == IA64_CFM_REGNUM)
2445 return UNW_IA64_CFM;
2446 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2447 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2448 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2449 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2450 else
2451 return -1;
2452 }
2453
2454 /* Gdb libunwind-frame callback function to convert from a libunwind register
2455 number to an ia64 gdb register number. */
2456 static int
2457 ia64_uw2gdb_regnum (int uw_regnum)
2458 {
2459 if (uw_regnum == UNW_IA64_SP)
2460 return sp_regnum;
2461 else if (uw_regnum == UNW_IA64_BSP)
2462 return IA64_BSP_REGNUM;
2463 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2464 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2465 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2466 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2467 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2468 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2469 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2470 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2471 else if (uw_regnum == UNW_IA64_PR)
2472 return IA64_PR_REGNUM;
2473 else if (uw_regnum == UNW_REG_IP)
2474 return IA64_IP_REGNUM;
2475 else if (uw_regnum == UNW_IA64_CFM)
2476 return IA64_CFM_REGNUM;
2477 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2478 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2479 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2480 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2481 else
2482 return -1;
2483 }
2484
2485 /* Gdb libunwind-frame callback function to reveal whether a register is
2486 a floating-point register. */
2487 static int
2488 ia64_is_fpreg (int uw_regnum)
2489 {
2490 return unw_is_fpreg (uw_regnum);
2491 }
2492
2493 /* Libunwind callback accessor function for general registers. */
2494 static int
2495 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2496 int write, void *arg)
2497 {
2498 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2499 unw_word_t bsp, sof, sol, cfm, psr, ip;
2500 struct frame_info *this_frame = arg;
2501 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2502 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2503 long new_sof, old_sof;
2504 char buf[MAX_REGISTER_SIZE];
2505
2506 /* We never call any libunwind routines that need to write registers. */
2507 gdb_assert (!write);
2508
2509 switch (uw_regnum)
2510 {
2511 case UNW_REG_IP:
2512 /* Libunwind expects to see the pc value which means the slot number
2513 from the psr must be merged with the ip word address. */
2514 get_frame_register (this_frame, IA64_IP_REGNUM, buf);
2515 ip = extract_unsigned_integer (buf, 8, byte_order);
2516 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2517 psr = extract_unsigned_integer (buf, 8, byte_order);
2518 *val = ip | ((psr >> 41) & 0x3);
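/* Illustrative example: PSR.ri (bits 41..42) is the slot number, so
   an ip of 0xa000000000010000 with psr.ri == 2 produces the pc
   value 0xa000000000010002.  */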
2519 break;
2520
2521 case UNW_IA64_AR_BSP:
2522 /* Libunwind expects to see the beginning of the current
2523 register frame so we must account for the fact that
2524 ptrace() will return a value for bsp that points *after*
2525 the current register frame. */
2526 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2527 bsp = extract_unsigned_integer (buf, 8, byte_order);
2528 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2529 cfm = extract_unsigned_integer (buf, 8, byte_order);
2530 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2531 *val = ia64_rse_skip_regs (bsp, -sof);
2532 break;
2533
2534 case UNW_IA64_AR_BSPSTORE:
2535 /* Libunwind wants bspstore to be after the current register frame.
2536 This is what ptrace() and gdb treat as the regular bsp value. */
2537 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2538 *val = extract_unsigned_integer (buf, 8, byte_order);
2539 break;
2540
2541 default:
2542 /* For all other registers, just unwind the value directly. */
2543 get_frame_register (this_frame, regnum, buf);
2544 *val = extract_unsigned_integer (buf, 8, byte_order);
2545 break;
2546 }
2547
2548 if (gdbarch_debug >= 1)
2549 fprintf_unfiltered (gdb_stdlog,
2550 " access_reg: from cache: %4s=%s\n",
2551 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2552 ? ia64_register_names[regnum] : "r??"),
2553 paddress (gdbarch, *val));
2554 return 0;
2555 }
2556
2557 /* Libunwind callback accessor function for floating-point registers. */
2558 static int
2559 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2560 unw_fpreg_t *val, int write, void *arg)
2561 {
2562 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2563 struct frame_info *this_frame = arg;
2564
2565 /* We never call any libunwind routines that need to write registers. */
2566 gdb_assert (!write);
2567
2568 get_frame_register (this_frame, regnum, (char *) val);
2569
2570 return 0;
2571 }
2572
2573 /* Libunwind callback accessor function for top-level rse registers. */
2574 static int
2575 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2576 unw_word_t *val, int write, void *arg)
2577 {
2578 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2579 unw_word_t bsp, sof, sol, cfm, psr, ip;
2580 struct regcache *regcache = arg;
2581 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2582 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2583 long new_sof, old_sof;
2584 char buf[MAX_REGISTER_SIZE];
2585
2586 /* We never call any libunwind routines that need to write registers. */
2587 gdb_assert (!write);
2588
2589 switch (uw_regnum)
2590 {
2591 case UNW_REG_IP:
2592 /* Libunwind expects to see the pc value which means the slot number
2593 from the psr must be merged with the ip word address. */
2594 regcache_cooked_read (regcache, IA64_IP_REGNUM, buf);
2595 ip = extract_unsigned_integer (buf, 8, byte_order);
2596 regcache_cooked_read (regcache, IA64_PSR_REGNUM, buf);
2597 psr = extract_unsigned_integer (buf, 8, byte_order);
2598 *val = ip | ((psr >> 41) & 0x3);
2599 break;
2600
2601 case UNW_IA64_AR_BSP:
2602 /* Libunwind expects to see the beginning of the current
2603 register frame so we must account for the fact that
2604 ptrace() will return a value for bsp that points *after*
2605 the current register frame. */
2606 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2607 bsp = extract_unsigned_integer (buf, 8, byte_order);
2608 regcache_cooked_read (regcache, IA64_CFM_REGNUM, buf);
2609 cfm = extract_unsigned_integer (buf, 8, byte_order);
2610 sof = (cfm & 0x7f);
2611 *val = ia64_rse_skip_regs (bsp, -sof);
2612 break;
2613
2614 case UNW_IA64_AR_BSPSTORE:
2615 /* Libunwind wants bspstore to be after the current register frame.
2616 This is what ptrace() and gdb treat as the regular bsp value. */
2617 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2618 *val = extract_unsigned_integer (buf, 8, byte_order);
2619 break;
2620
2621 default:
2622 /* For all other registers, just unwind the value directly. */
2623 regcache_cooked_read (regcache, regnum, buf);
2624 *val = extract_unsigned_integer (buf, 8, byte_order);
2625 break;
2626 }
2627
2628 if (gdbarch_debug >= 1)
2629 fprintf_unfiltered (gdb_stdlog,
2630 " access_rse_reg: from cache: %4s=%s\n",
2631 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2632 ? ia64_register_names[regnum] : "r??"),
2633 paddress (gdbarch, *val));
2634
2635 return 0;
2636 }
2637
2638 /* Libunwind callback accessor function for top-level fp registers. */
2639 static int
2640 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2641 unw_fpreg_t *val, int write, void *arg)
2642 {
2643 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2644 struct regcache *regcache = arg;
2645
2646 /* We never call any libunwind routines that need to write registers. */
2647 gdb_assert (!write);
2648
2649 regcache_cooked_read (regcache, regnum, (char *) val);
2650
2651 return 0;
2652 }
2653
2654 /* Libunwind callback accessor function for accessing memory. */
2655 static int
2656 ia64_access_mem (unw_addr_space_t as,
2657 unw_word_t addr, unw_word_t *val,
2658 int write, void *arg)
2659 {
2660 if (addr - KERNEL_START < ktab_size)
2661 {
2662 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2663 + (addr - KERNEL_START));
2664
2665 if (write)
2666 *laddr = *val;
2667 else
2668 *val = *laddr;
2669 return 0;
2670 }
2671
2672 /* XXX do we need to normalize byte-order here? */
2673 if (write)
2674 return target_write_memory (addr, (char *) val, sizeof (unw_word_t));
2675 else
2676 return target_read_memory (addr, (char *) val, sizeof (unw_word_t));
2677 }
2678
2679 /* Call low-level function to access the kernel unwind table. */
2680 static LONGEST
2681 getunwind_table (gdb_byte **buf_p)
2682 {
2683 LONGEST x;
2684
2685 /* FIXME drow/2005-09-10: This code used to call
2686 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2687 for the currently running ia64-linux kernel. That data should
2688 come from the core file and be accessed via the auxv vector; if
2689 we want to preserve the fallback to the running kernel's table, then
2690 we should find a way to override the corefile layer's
2691 xfer_partial method. */
2692
2693 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2694 NULL, buf_p);
2695
2696 return x;
2697 }
2698
2699 /* Get the kernel unwind table. */
2700 static int
2701 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2702 {
2703 static struct ia64_table_entry *etab;
2704
2705 if (!ktab)
2706 {
2707 gdb_byte *ktab_buf;
2708 LONGEST size;
2709
2710 size = getunwind_table (&ktab_buf);
2711 if (size <= 0)
2712 return -UNW_ENOINFO;
2713
2714 ktab = (struct ia64_table_entry *) ktab_buf;
2715 ktab_size = size;
2716
2717 for (etab = ktab; etab->start_offset; ++etab)
2718 etab->info_offset += KERNEL_START;
2719 }
2720
2721 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2722 return -UNW_ENOINFO;
2723
2724 di->format = UNW_INFO_FORMAT_TABLE;
2725 di->gp = 0;
2726 di->start_ip = ktab[0].start_offset;
2727 di->end_ip = etab[-1].end_offset;
2728 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2729 di->u.ti.segbase = 0;
2730 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2731 di->u.ti.table_data = (unw_word_t *) ktab;
2732
2733 if (gdbarch_debug >= 1)
2734 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2735 "segbase=%s, length=%s, gp=%s\n",
2736 (char *) di->u.ti.name_ptr,
2737 hex_string (di->u.ti.segbase),
2738 pulongest (di->u.ti.table_len),
2739 hex_string (di->gp));
2740 return 0;
2741 }
2742
2743 /* Find the unwind table entry for a specified address. */
2744 static int
2745 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2746 unw_dyn_info_t *dip, void **buf)
2747 {
2748 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2749 Elf_Internal_Ehdr *ehdr;
2750 unw_word_t segbase = 0;
2751 CORE_ADDR load_base;
2752 bfd *bfd;
2753 int i;
2754
2755 bfd = objfile->obfd;
2756
2757 ehdr = elf_tdata (bfd)->elf_header;
2758 phdr = elf_tdata (bfd)->phdr;
2759
2760 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2761
2762 for (i = 0; i < ehdr->e_phnum; ++i)
2763 {
2764 switch (phdr[i].p_type)
2765 {
2766 case PT_LOAD:
2767 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2768 < phdr[i].p_memsz)
2769 p_text = phdr + i;
2770 break;
2771
2772 case PT_IA_64_UNWIND:
2773 p_unwind = phdr + i;
2774 break;
2775
2776 default:
2777 break;
2778 }
2779 }
2780
2781 if (!p_text || !p_unwind)
2782 return -UNW_ENOINFO;
2783
2784 /* Verify that the segment that contains the IP also contains
2785 the static unwind table. If not, we may be in the Linux kernel's
2786 DSO gate page in which case the unwind table is another segment.
2787 Otherwise, we are dealing with runtime-generated code, for which we
2788 have no info here. */
2789 segbase = p_text->p_vaddr + load_base;
2790
2791 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2792 {
2793 int ok = 0;
2794 for (i = 0; i < ehdr->e_phnum; ++i)
2795 {
2796 if (phdr[i].p_type == PT_LOAD
2797 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2798 {
2799 ok = 1;
2800 /* Get the segbase from the section containing the
2801 libunwind table. */
2802 segbase = phdr[i].p_vaddr + load_base;
2803 }
2804 }
2805 if (!ok)
2806 return -UNW_ENOINFO;
2807 }
2808
2809 dip->start_ip = p_text->p_vaddr + load_base;
2810 dip->end_ip = dip->start_ip + p_text->p_memsz;
2811 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2812 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2813 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2814 dip->u.rti.segbase = segbase;
2815 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2816 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2817
2818 return 0;
2819 }
2820
2821 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2822 static int
2823 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2824 int need_unwind_info, void *arg)
2825 {
2826 struct obj_section *sec = find_pc_section (ip);
2827 unw_dyn_info_t di;
2828 int ret;
2829 void *buf = NULL;
2830
2831 if (!sec)
2832 {
2833 /* XXX This only works if the host and the target architecture are
2834 both ia64 and if they have (more or less) the same kernel
2835 version. */
2836 if (get_kernel_table (ip, &di) < 0)
2837 return -UNW_ENOINFO;
2838
2839 if (gdbarch_debug >= 1)
2840 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2841 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2842 "length=%s,data=%s)\n",
2843 hex_string (ip), (char *)di.u.ti.name_ptr,
2844 hex_string (di.u.ti.segbase),
2845 hex_string (di.start_ip), hex_string (di.end_ip),
2846 hex_string (di.gp),
2847 pulongest (di.u.ti.table_len),
2848 hex_string ((CORE_ADDR)di.u.ti.table_data));
2849 }
2850 else
2851 {
2852 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2853 if (ret < 0)
2854 return ret;
2855
2856 if (gdbarch_debug >= 1)
2857 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2858 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2859 "length=%s,data=%s)\n",
2860 hex_string (ip), (char *)di.u.rti.name_ptr,
2861 hex_string (di.u.rti.segbase),
2862 hex_string (di.start_ip), hex_string (di.end_ip),
2863 hex_string (di.gp),
2864 pulongest (di.u.rti.table_len),
2865 hex_string (di.u.rti.table_data));
2866 }
2867
2868 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2869 arg);
2870
2871 /* We no longer need the dyn info storage so free it. */
2872 xfree (buf);
2873
2874 return ret;
2875 }
2876
2877 /* Libunwind callback accessor function for cleanup. */
2878 static void
2879 ia64_put_unwind_info (unw_addr_space_t as,
2880 unw_proc_info_t *pip, void *arg)
2881 {
2882 /* Nothing required for now. */
2883 }
2884
2885 /* Libunwind callback accessor function to get head of the dynamic
2886 unwind-info registration list. */
2887 static int
2888 ia64_get_dyn_info_list (unw_addr_space_t as,
2889 unw_word_t *dilap, void *arg)
2890 {
2891 struct obj_section *text_sec;
2892 struct objfile *objfile;
2893 unw_word_t ip, addr;
2894 unw_dyn_info_t di;
2895 int ret;
2896
2897 if (!libunwind_is_initialized ())
2898 return -UNW_ENOINFO;
2899
2900 for (objfile = object_files; objfile; objfile = objfile->next)
2901 {
2902 void *buf = NULL;
2903
2904 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2905 ip = obj_section_addr (text_sec);
2906 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2907 if (ret >= 0)
2908 {
2909 addr = libunwind_find_dyn_list (as, &di, arg);
2910 /* We no longer need the dyn info storage so free it. */
2911 xfree (buf);
2912
2913 if (addr)
2914 {
2915 if (gdbarch_debug >= 1)
2916 fprintf_unfiltered (gdb_stdlog,
2917 "dynamic unwind table in objfile %s "
2918 "at %s (gp=%s)\n",
2919 bfd_get_filename (objfile->obfd),
2920 hex_string (addr), hex_string (di.gp));
2921 *dilap = addr;
2922 return 0;
2923 }
2924 }
2925 }
2926 return -UNW_ENOINFO;
2927 }
2928
2929
2930 /* Frame interface functions for libunwind. */
2931
2932 static void
2933 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2934 struct frame_id *this_id)
2935 {
2936 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2937 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2938 struct frame_id id = outer_frame_id;
2939 char buf[8];
2940 CORE_ADDR bsp;
2941
2942 libunwind_frame_this_id (this_frame, this_cache, &id);
2943 if (frame_id_eq (id, outer_frame_id))
2944 {
2945 (*this_id) = outer_frame_id;
2946 return;
2947 }
2948
2949 /* We must add the bsp as the special address for frame comparison
2950 purposes. */
2951 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2952 bsp = extract_unsigned_integer (buf, 8, byte_order);
2953
2954 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2955
2956 if (gdbarch_debug >= 1)
2957 fprintf_unfiltered (gdb_stdlog,
2958 "libunwind frame id: code %s, stack %s, "
2959 "special %s, this_frame %s\n",
2960 paddress (gdbarch, id.code_addr),
2961 paddress (gdbarch, id.stack_addr),
2962 paddress (gdbarch, bsp),
2963 host_address_to_string (this_frame));
2964 }
2965
2966 static struct value *
2967 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2968 void **this_cache, int regnum)
2969 {
2970 int reg = regnum;
2971 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2972 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2973 struct value *val;
2974
2975 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2976 reg = IA64_PR_REGNUM;
2977 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2978 reg = IA64_UNAT_REGNUM;
2979
2980 /* Let libunwind do most of the work. */
2981 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2982
2983 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2984 {
2985 ULONGEST prN_val;
2986
2987 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2988 {
2989 int rrb_pr = 0;
2990 ULONGEST cfm;
2991 unsigned char buf[MAX_REGISTER_SIZE];
2992
2993 /* Fetch predicate register rename base from current frame
2994 marker for this frame. */
2995 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2996 cfm = extract_unsigned_integer (buf, 8, byte_order);
2997 rrb_pr = (cfm >> 32) & 0x3f;
2998
2999 /* Adjust the register number to account for register rotation. */
3000 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
3001 }
3002 prN_val = extract_bit_field (value_contents_all (val),
3003 regnum - VP0_REGNUM, 1);
3004 return frame_unwind_got_constant (this_frame, regnum, prN_val);
3005 }
3006
3007 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
3008 {
3009 ULONGEST unatN_val;
3010
3011 unatN_val = extract_bit_field (value_contents_all (val),
3012 regnum - IA64_NAT0_REGNUM, 1);
3013 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
3014 }
3015
3016 else if (regnum == IA64_BSP_REGNUM)
3017 {
3018 struct value *cfm_val;
3019 CORE_ADDR prev_bsp, prev_cfm;
3020
3021 /* We want to calculate the previous bsp as the end of the previous
3022 register stack frame. This corresponds to what the hardware bsp
3023 register will be if we pop the frame back which is why we might
3024 have been called. We know that libunwind will pass us back the
3025 beginning of the current frame so we should just add sof to it. */
3026 prev_bsp = extract_unsigned_integer (value_contents_all (val),
3027 8, byte_order);
3028 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
3029 IA64_CFM_REGNUM);
3030 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
3031 8, byte_order);
3032 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
3033
3034 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
3035 }
3036 else
3037 return val;
3038 }
3039
3040 static int
3041 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3042 struct frame_info *this_frame,
3043 void **this_cache)
3044 {
3045 if (libunwind_is_initialized ()
3046 && libunwind_frame_sniffer (self, this_frame, this_cache))
3047 return 1;
3048
3049 return 0;
3050 }
3051
3052 static const struct frame_unwind ia64_libunwind_frame_unwind =
3053 {
3054 NORMAL_FRAME,
3055 default_frame_unwind_stop_reason,
3056 ia64_libunwind_frame_this_id,
3057 ia64_libunwind_frame_prev_register,
3058 NULL,
3059 ia64_libunwind_frame_sniffer,
3060 libunwind_frame_dealloc_cache
3061 };
3062
3063 static void
3064 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3065 void **this_cache,
3066 struct frame_id *this_id)
3067 {
3068 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3069 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3070 char buf[8];
3071 CORE_ADDR bsp;
3072 struct frame_id id = outer_frame_id;
3073 CORE_ADDR prev_ip;
3074
3075 libunwind_frame_this_id (this_frame, this_cache, &id);
3076 if (frame_id_eq (id, outer_frame_id))
3077 {
3078 (*this_id) = outer_frame_id;
3079 return;
3080 }
3081
3082 /* We must add the bsp as the special address for frame comparison
3083 purposes. */
3084 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3085 bsp = extract_unsigned_integer (buf, 8, byte_order);
3086
3087 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3088 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3089
3090 if (gdbarch_debug >= 1)
3091 fprintf_unfiltered (gdb_stdlog,
3092 "libunwind sigtramp frame id: code %s, "
3093 "stack %s, special %s, this_frame %s\n",
3094 paddress (gdbarch, id.code_addr),
3095 paddress (gdbarch, id.stack_addr),
3096 paddress (gdbarch, bsp),
3097 host_address_to_string (this_frame));
3098 }
3099
3100 static struct value *
3101 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3102 void **this_cache, int regnum)
3103 {
3104 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3105 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3106 struct value *prev_ip_val;
3107 CORE_ADDR prev_ip;
3108
3109 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3110 method of getting previous registers. */
3111 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3112 IA64_IP_REGNUM);
3113 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3114 8, byte_order);
3115
3116 if (prev_ip == 0)
3117 {
3118 void *tmp_cache = NULL;
3119 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3120 regnum);
3121 }
3122 else
3123 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3124 }
3125
3126 static int
3127 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3128 struct frame_info *this_frame,
3129 void **this_cache)
3130 {
3131 if (libunwind_is_initialized ())
3132 {
3133 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3134 return 1;
3135 return 0;
3136 }
3137 else
3138 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3139 }
3140
3141 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3142 {
3143 SIGTRAMP_FRAME,
3144 default_frame_unwind_stop_reason,
3145 ia64_libunwind_sigtramp_frame_this_id,
3146 ia64_libunwind_sigtramp_frame_prev_register,
3147 NULL,
3148 ia64_libunwind_sigtramp_frame_sniffer
3149 };
3150
3151 /* Set of libunwind callback accessor functions. */
3152 static unw_accessors_t ia64_unw_accessors =
3153 {
3154 ia64_find_proc_info_x,
3155 ia64_put_unwind_info,
3156 ia64_get_dyn_info_list,
3157 ia64_access_mem,
3158 ia64_access_reg,
3159 ia64_access_fpreg,
3160 /* resume */
3161 /* get_proc_name */
3162 };
3163
3164 /* Set of special libunwind callback accessor functions specific to accessing
3165 the rse registers. At the top of the stack, we want libunwind to figure out
3166 how to read r32 - r127. Though usually they are found sequentially in
3167 memory starting from $bof, this is not always true. */
3168 static unw_accessors_t ia64_unw_rse_accessors =
3169 {
3170 ia64_find_proc_info_x,
3171 ia64_put_unwind_info,
3172 ia64_get_dyn_info_list,
3173 ia64_access_mem,
3174 ia64_access_rse_reg,
3175 ia64_access_rse_fpreg,
3176 /* resume */
3177 /* get_proc_name */
3178 };
3179
3180 /* Set of ia64 gdb libunwind-frame callbacks and data for generic
3181 libunwind-frame code to use. */
3182 static struct libunwind_descr ia64_libunwind_descr =
3183 {
3184 ia64_gdb2uw_regnum,
3185 ia64_uw2gdb_regnum,
3186 ia64_is_fpreg,
3187 &ia64_unw_accessors,
3188 &ia64_unw_rse_accessors,
3189 };
3190
3191 #endif /* HAVE_LIBUNWIND_IA64_H */
3192
3193 static int
3194 ia64_use_struct_convention (struct type *type)
3195 {
3196 struct type *float_elt_type;
3197
3198 /* Don't use the struct convention for anything but structure,
3199 union, or array types. */
3200 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3201 || TYPE_CODE (type) == TYPE_CODE_UNION
3202 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3203 return 0;
3204
3205 /* HFAs are structures (or arrays) consisting entirely of floating
3206 point values of the same length. Up to 8 of these are returned
3207 in registers. Don't use the struct convention when this is the
3208 case. */
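/* Illustrative examples: a struct of two doubles is an HFA of two
   8-byte floats and is returned in floating-point registers, so the
   struct convention is not used for it; a struct of four 8-byte
   integers (32 bytes) comes back in r8-r11; anything larger falls
   back to the struct-return convention.  */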
3209 float_elt_type = is_float_or_hfa_type (type);
3210 if (float_elt_type != NULL
3211 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3212 return 0;
3213
3214 /* Other structs of length 32 or less are returned in r8-r11.
3215 Don't use the struct convention for those either. */
3216 return TYPE_LENGTH (type) > 32;
3217 }
3218
3219 /* Return non-zero if TYPE is a structure or union type. */
3220
3221 static int
3222 ia64_struct_type_p (const struct type *type)
3223 {
3224 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3225 || TYPE_CODE (type) == TYPE_CODE_UNION);
3226 }
3227
3228 static void
3229 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3230 gdb_byte *valbuf)
3231 {
3232 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3233 struct type *float_elt_type;
3234
3235 float_elt_type = is_float_or_hfa_type (type);
3236 if (float_elt_type != NULL)
3237 {
3238 char from[MAX_REGISTER_SIZE];
3239 int offset = 0;
3240 int regnum = IA64_FR8_REGNUM;
3241 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3242
3243 while (n-- > 0)
3244 {
3245 regcache_cooked_read (regcache, regnum, from);
3246 convert_typed_floating (from, ia64_ext_type (gdbarch),
3247 (char *)valbuf + offset, float_elt_type);
3248 offset += TYPE_LENGTH (float_elt_type);
3249 regnum++;
3250 }
3251 }
3252 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3253 {
3254 /* This is an integral value, and its size is less than 8 bytes.
3255 These values are LSB-aligned, so extract the relevant bytes,
3256 and copy them into VALBUF. */
3257 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3258 so I suppose we should also add handling here for integral values
3259 whose size is greater than 8. But I wasn't able to create such
3260 a type, neither in C nor in Ada, so not worrying about these yet. */
3261 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3262 ULONGEST val;
3263
3264 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3265 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3266 }
3267 else
3268 {
3269 ULONGEST val;
3270 int offset = 0;
3271 int regnum = IA64_GR8_REGNUM;
3272 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3273 int n = TYPE_LENGTH (type) / reglen;
3274 int m = TYPE_LENGTH (type) % reglen;
3275
3276 while (n-- > 0)
3277 {
3278 ULONGEST val;
3279 regcache_cooked_read_unsigned (regcache, regnum, &val);
3280 memcpy ((char *)valbuf + offset, &val, reglen);
3281 offset += reglen;
3282 regnum++;
3283 }
3284
3285 if (m)
3286 {
3287 regcache_cooked_read_unsigned (regcache, regnum, &val);
3288 memcpy ((char *)valbuf + offset, &val, m);
3289 }
3290 }
3291 }
3292
3293 static void
3294 ia64_store_return_value (struct type *type, struct regcache *regcache,
3295 const gdb_byte *valbuf)
3296 {
3297 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3298 struct type *float_elt_type;
3299
3300 float_elt_type = is_float_or_hfa_type (type);
3301 if (float_elt_type != NULL)
3302 {
3303 char to[MAX_REGISTER_SIZE];
3304 int offset = 0;
3305 int regnum = IA64_FR8_REGNUM;
3306 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3307
3308 while (n-- > 0)
3309 {
3310 convert_typed_floating ((char *)valbuf + offset, float_elt_type,
3311 to, ia64_ext_type (gdbarch));
3312 regcache_cooked_write (regcache, regnum, to);
3313 offset += TYPE_LENGTH (float_elt_type);
3314 regnum++;
3315 }
3316 }
3317 else
3318 {
3319 ULONGEST val;
3320 int offset = 0;
3321 int regnum = IA64_GR8_REGNUM;
3322 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3323 int n = TYPE_LENGTH (type) / reglen;
3324 int m = TYPE_LENGTH (type) % reglen;
3325
3326 while (n-- > 0)
3327 {
3328 ULONGEST val;
3329 memcpy (&val, (char *)valbuf + offset, reglen);
3330 regcache_cooked_write_unsigned (regcache, regnum, val);
3331 offset += reglen;
3332 regnum++;
3333 }
3334
3335 if (m)
3336 {
3337 memcpy (&val, (char *)valbuf + offset, m);
3338 regcache_cooked_write_unsigned (regcache, regnum, val);
3339 }
3340 }
3341 }
3342
3343 static enum return_value_convention
3344 ia64_return_value (struct gdbarch *gdbarch, struct type *func_type,
3345 struct type *valtype, struct regcache *regcache,
3346 gdb_byte *readbuf, const gdb_byte *writebuf)
3347 {
3348 int struct_return = ia64_use_struct_convention (valtype);
3349
3350 if (writebuf != NULL)
3351 {
3352 gdb_assert (!struct_return);
3353 ia64_store_return_value (valtype, regcache, writebuf);
3354 }
3355
3356 if (readbuf != NULL)
3357 {
3358 gdb_assert (!struct_return);
3359 ia64_extract_return_value (valtype, regcache, readbuf);
3360 }
3361
3362 if (struct_return)
3363 return RETURN_VALUE_STRUCT_CONVENTION;
3364 else
3365 return RETURN_VALUE_REGISTER_CONVENTION;
3366 }
3367
3368 static int
3369 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3370 {
3371 switch (TYPE_CODE (t))
3372 {
3373 case TYPE_CODE_FLT:
3374 if (*etp)
3375 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3376 else
3377 {
3378 *etp = t;
3379 return 1;
3380 }
3381 break;
3382 case TYPE_CODE_ARRAY:
3383 return
3384 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3385 etp);
3386 break;
3387 case TYPE_CODE_STRUCT:
3388 {
3389 int i;
3390
3391 for (i = 0; i < TYPE_NFIELDS (t); i++)
3392 if (!is_float_or_hfa_type_recurse
3393 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3394 return 0;
3395 return 1;
3396 }
3397 break;
3398 default:
3399 return 0;
3400 break;
3401 }
3402 }
3403
3404 /* Determine if the given type is one of the floating point types or
3405 an HFA (which is a struct, array, or combination thereof whose
3406 bottom-most elements are all of the same floating point type). */
3407
3408 static struct type *
3409 is_float_or_hfa_type (struct type *t)
3410 {
3411 struct type *et = 0;
3412
3413 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3414 }
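/* For illustration only (an assumption drawn from the definition
   above, not text from the runtime manual): a type such as

       struct hfa     { double x, y, z; }    -- an HFA of doubles
       struct not_hfa { double d; int i; }   -- not an HFA

   qualifies because every bottom-most field is the same floating
   point type, whereas mixing a double with an int does not.  */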
3415
3416
3417 /* Return 1 if the alignment of T is such that the next even slot
3418 should be used. Return 0 if the next available slot should
3419 be used. (See section 8.5.1 of the IA-64 Software Conventions
3420 and Runtime manual). */
3421
3422 static int
3423 slot_alignment_is_next_even (struct type *t)
3424 {
3425 switch (TYPE_CODE (t))
3426 {
3427 case TYPE_CODE_INT:
3428 case TYPE_CODE_FLT:
3429 if (TYPE_LENGTH (t) > 8)
3430 return 1;
3431 else
3432 return 0;
3433 case TYPE_CODE_ARRAY:
3434 return
3435 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3436 case TYPE_CODE_STRUCT:
3437 {
3438 int i;
3439
3440 for (i = 0; i < TYPE_NFIELDS (t); i++)
3441 if (slot_alignment_is_next_even
3442 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3443 return 1;
3444 return 0;
3445 }
3446 default:
3447 return 0;
3448 }
3449 }
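/* Illustrative note (my reading of the rules above, not a quote from
   the manual): a 16-byte long double argument, or any struct or array
   containing one, makes this predicate return 1 and is promoted to
   the next even argument slot, while a plain 8-byte long returns 0
   and simply takes the next available slot.  */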
3450
3451 /* Attempt to find (and return) the global pointer for the given
3452 function.
3453
3454 This is a rather nasty bit of code that searches for the .dynamic section
3455 in the objfile corresponding to the pc of the function we're trying
3456 to call. Once it finds the address at which the .dynamic section
3457 lives in the child process, it scans the Elf64_Dyn entries for a
3458 DT_PLTGOT tag. If it finds one of these, the corresponding
3459 d_un.d_ptr value is the global pointer. */
3460
3461 static CORE_ADDR
3462 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3463 CORE_ADDR faddr)
3464 {
3465 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3466 struct obj_section *faddr_sect;
3467
3468 faddr_sect = find_pc_section (faddr);
3469 if (faddr_sect != NULL)
3470 {
3471 struct obj_section *osect;
3472
3473 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3474 {
3475 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3476 break;
3477 }
3478
3479 if (osect < faddr_sect->objfile->sections_end)
3480 {
3481 CORE_ADDR addr, endaddr;
3482
3483 addr = obj_section_addr (osect);
3484 endaddr = obj_section_endaddr (osect);
3485
3486 while (addr < endaddr)
3487 {
3488 int status;
3489 LONGEST tag;
3490 char buf[8];
3491
3492 status = target_read_memory (addr, buf, sizeof (buf));
3493 if (status != 0)
3494 break;
3495 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3496
3497 if (tag == DT_PLTGOT)
3498 {
3499 CORE_ADDR global_pointer;
3500
3501 status = target_read_memory (addr + 8, buf, sizeof (buf));
3502 if (status != 0)
3503 break;
3504 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3505 byte_order);
3506
3507 /* The payoff... */
3508 return global_pointer;
3509 }
3510
3511 if (tag == DT_NULL)
3512 break;
3513
3514 addr += 16;
3515 }
3516 }
3517 }
3518 return 0;
3519 }
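/* A minimal sketch of the on-target layout walked by the loop above,
   assuming the standard ELF64 dynamic-entry definition; each entry is
   16 bytes, which is why the scan reads the tag at ADDR, the value at
   ADDR + 8, and then advances ADDR by 16:

       struct elf64_dyn_sketch
       {
         int64_t d_tag;      -- DT_PLTGOT, DT_NULL, ...
         uint64_t d_un;      -- d_val or d_ptr; the global pointer
                                when d_tag == DT_PLTGOT
       };
*/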
3520
3521 /* Attempt to find (and return) the global pointer for the given
3522 function. We first try the find_global_pointer_from_solib routine
3523 from the gdbarch tdep vector, if provided. If that does not
3524 work, we fall back to ia64_find_global_pointer_from_dynamic_section. */
3525
3526 static CORE_ADDR
3527 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3528 {
3529 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3530 CORE_ADDR addr = 0;
3531
3532 if (tdep->find_global_pointer_from_solib)
3533 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3534 if (addr == 0)
3535 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3536 return addr;
3537 }
3538
3539 /* Given a function's address, attempt to find (and return) the
3540 corresponding (canonical) function descriptor. Return 0 if
3541 not found. */
3542 static CORE_ADDR
3543 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3544 {
3545 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3546 struct obj_section *faddr_sect;
3547
3548 /* Return early if faddr is already a function descriptor. */
3549 faddr_sect = find_pc_section (faddr);
3550 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3551 return faddr;
3552
3553 if (faddr_sect != NULL)
3554 {
3555 struct obj_section *osect;
3556 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3557 {
3558 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3559 break;
3560 }
3561
3562 if (osect < faddr_sect->objfile->sections_end)
3563 {
3564 CORE_ADDR addr, endaddr;
3565
3566 addr = obj_section_addr (osect);
3567 endaddr = obj_section_endaddr (osect);
3568
3569 while (addr < endaddr)
3570 {
3571 int status;
3572 LONGEST faddr2;
3573 char buf[8];
3574
3575 status = target_read_memory (addr, buf, sizeof (buf));
3576 if (status != 0)
3577 break;
3578 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3579
3580 if (faddr == faddr2)
3581 return addr;
3582
3583 addr += 16;
3584 }
3585 }
3586 }
3587 return 0;
3588 }
3589
3590 /* Attempt to find a function descriptor corresponding to the
3591 given address. If none is found, construct one on the
3592 stack using the address at fdaptr. */
3593
3594 static CORE_ADDR
3595 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3596 {
3597 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3598 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3599 CORE_ADDR fdesc;
3600
3601 fdesc = find_extant_func_descr (gdbarch, faddr);
3602
3603 if (fdesc == 0)
3604 {
3605 ULONGEST global_pointer;
3606 char buf[16];
3607
3608 fdesc = *fdaptr;
3609 *fdaptr += 16;
3610
3611 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3612
3613 if (global_pointer == 0)
3614 regcache_cooked_read_unsigned (regcache,
3615 IA64_GR1_REGNUM, &global_pointer);
3616
3617 store_unsigned_integer (buf, 8, byte_order, faddr);
3618 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3619
3620 write_memory (fdesc, buf, 16);
3621 }
3622
3623 return fdesc;
3624 }
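/* For reference, the 16-byte descriptor written above uses the layout
   relied upon throughout this file (a summary of the code, not an
   official typedef):

       offset 0:  8-byte function entry point (FADDR)
       offset 8:  8-byte global pointer (gp) for that function

   ia64_convert_from_func_ptr_addr below performs the reverse mapping,
   reading the entry point back out of a descriptor.  */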
3625
3626 /* Use the following routine when printing out function pointers
3627 so the user can see the function address rather than just the
3628 function descriptor. */
3629 static CORE_ADDR
3630 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3631 struct target_ops *targ)
3632 {
3633 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3634 struct obj_section *s;
3635 gdb_byte buf[8];
3636
3637 s = find_pc_section (addr);
3638
3639 /* Check if ADDR points to a function descriptor. */
3640 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3641 return read_memory_unsigned_integer (addr, 8, byte_order);
3642
3643 /* Normally, functions live inside a section that is executable.
3644 So, if ADDR points to a non-executable section, then treat it
3645 as a function descriptor and return the target address iff
3646 the target address itself points to a section that is executable.
3647 First check that all 8 bytes of that memory are readable. */
3648 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3649 && target_read_memory (addr, buf, 8) == 0)
3650 {
3651 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3652 struct obj_section *pc_section = find_pc_section (pc);
3653
3654 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3655 return pc;
3656 }
3657
3658 /* There are also descriptors embedded in vtables. */
3659 if (s)
3660 {
3661 struct minimal_symbol *minsym;
3662
3663 minsym = lookup_minimal_symbol_by_pc (addr);
3664
3665 if (minsym && is_vtable_name (SYMBOL_LINKAGE_NAME (minsym)))
3666 return read_memory_unsigned_integer (addr, 8, byte_order);
3667 }
3668
3669 return addr;
3670 }
3671
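/* Round SP down to a 16-byte boundary; the memory stack is kept
   16-byte aligned (ia64_push_dummy_call applies the same mask by
   hand below).  */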
3672 static CORE_ADDR
3673 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3674 {
3675 return sp & ~0xfLL;
3676 }
3677
3678 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3679
3680 static void
3681 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3682 {
3683 ULONGEST cfm, pfs, new_bsp;
3684
3685 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3686
3687 new_bsp = rse_address_add (bsp, sof);
3688 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3689
3690 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3691 pfs &= 0xc000000000000000LL;
3692 pfs |= (cfm & 0xffffffffffffLL);
3693 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3694
3695 cfm &= 0xc000000000000000LL;
3696 cfm |= sof;
3697 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3698 }
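/* Note on the masks above (my reading of the architecture manual,
   not something stated elsewhere in this file): 0xc000000000000000
   keeps only bits 63:62 of AR.PFS, the previous privilege level,
   while the low bits copied in from CFM carry the frame marker (sof,
   sol, sor and the rotating register bases).  The new CFM ends up
   describing a frame of SOF registers with everything else clear.  */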
3699
3700 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3701 ia64. */
3702
3703 static void
3704 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3705 int slotnum, gdb_byte *buf)
3706 {
3707 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3708 }
3709
3710 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3711
3712 static void
3713 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3714 {
3715 /* Nothing needed. */
3716 }
3717
3718 static CORE_ADDR
3719 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3720 struct regcache *regcache, CORE_ADDR bp_addr,
3721 int nargs, struct value **args, CORE_ADDR sp,
3722 int struct_return, CORE_ADDR struct_addr)
3723 {
3724 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3725 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3726 int argno;
3727 struct value *arg;
3728 struct type *type;
3729 int len, argoffset;
3730 int nslots, rseslots, memslots, slotnum, nfuncargs;
3731 int floatreg;
3732 ULONGEST bsp;
3733 CORE_ADDR funcdescaddr, pc, global_pointer;
3734 CORE_ADDR func_addr = find_function_addr (function, NULL);
3735
3736 nslots = 0;
3737 nfuncargs = 0;
3738 /* Count the number of slots needed for the arguments. */
3739 for (argno = 0; argno < nargs; argno++)
3740 {
3741 arg = args[argno];
3742 type = check_typedef (value_type (arg));
3743 len = TYPE_LENGTH (type);
3744
3745 if ((nslots & 1) && slot_alignment_is_next_even (type))
3746 nslots++;
3747
3748 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3749 nfuncargs++;
3750
3751 nslots += (len + 7) / 8;
3752 }
3753
3754 /* Divvy up the slots between the RSE and the memory stack. */
3755 rseslots = (nslots > 8) ? 8 : nslots;
3756 memslots = nslots - rseslots;
3757
3758 /* Allocate a new RSE frame. */
3759 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3760 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3761
3762 /* We will attempt to find function descriptors in the .opd section,
3763 but if we can't we'll construct them ourselves. That being the
3764 case, we'll need to reserve space on the stack for them. */
3765 funcdescaddr = sp - nfuncargs * 16;
3766 funcdescaddr &= ~0xfLL;
3767
3768 /* Adjust the stack pointer to its new value. The calling conventions
3769 require us to have 16 bytes of scratch, plus whatever space is
3770 necessary for the memory slots and our function descriptors. */
3771 sp = sp - 16 - (memslots + nfuncargs) * 8;
3772 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
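/* For example (numbers chosen only for illustration): with nslots
   == 10 and nfuncargs == 1, rseslots == 8 and memslots == 2, so SP
   drops by 16 + (2 + 1) * 8 == 40 bytes and is then kept 16-byte
   aligned by the mask above.  */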
3773
3774 /* Place the arguments where they belong. The arguments will be
3775 either placed in the RSE backing store or on the memory stack.
3776 In addition, floating point arguments or HFAs are placed in
3777 floating point registers. */
3778 slotnum = 0;
3779 floatreg = IA64_FR8_REGNUM;
3780 for (argno = 0; argno < nargs; argno++)
3781 {
3782 struct type *float_elt_type;
3783
3784 arg = args[argno];
3785 type = check_typedef (value_type (arg));
3786 len = TYPE_LENGTH (type);
3787
3788 /* Special handling for function parameters. */
3789 if (len == 8
3790 && TYPE_CODE (type) == TYPE_CODE_PTR
3791 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3792 {
3793 char val_buf[8];
3794 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3795 8, byte_order);
3796 store_unsigned_integer (val_buf, 8, byte_order,
3797 find_func_descr (regcache, faddr,
3798 &funcdescaddr));
3799 if (slotnum < rseslots)
3800 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3801 slotnum, val_buf);
3802 else
3803 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3804 slotnum++;
3805 continue;
3806 }
3807
3808 /* Normal slots. */
3809
3810 /* Skip odd slot if necessary... */
3811 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3812 slotnum++;
3813
3814 argoffset = 0;
3815 while (len > 0)
3816 {
3817 char val_buf[8];
3818
3819 memset (val_buf, 0, 8);
3820 if (!ia64_struct_type_p (type) && len < 8)
3821 {
3822 /* Integral types are LSB-aligned, so we have to be careful
3823 to insert the argument on the correct side of the buffer.
3824 This is why we use store_unsigned_integer. */
3825 store_unsigned_integer
3826 (val_buf, 8, byte_order,
3827 extract_unsigned_integer (value_contents (arg), len,
3828 byte_order));
3829 }
3830 else
3831 {
3832 /* This is either an integral type of 8 bytes or more, or an
3833 aggregate. For such an integral type, there is no problem; we
3834 just copy the value over.
3835
3836 For aggregates, the only potentially tricky portion
3837 is to write the last one if it is less than 8 bytes.
3838 In this case, the data is Byte0-aligned. Happy news,
3839 this means that we don't need to differentiate the
3840 handling of 8-byte blocks and less-than-8-byte blocks. */
3841 memcpy (val_buf, value_contents (arg) + argoffset,
3842 (len > 8) ? 8 : len);
3843 }
3844
3845 if (slotnum < rseslots)
3846 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3847 slotnum, val_buf);
3848 else
3849 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3850
3851 argoffset += 8;
3852 len -= 8;
3853 slotnum++;
3854 }
3855
3856 /* Handle floating point types (including HFAs). */
3857 float_elt_type = is_float_or_hfa_type (type);
3858 if (float_elt_type != NULL)
3859 {
3860 argoffset = 0;
3861 len = TYPE_LENGTH (type);
3862 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3863 {
3864 char to[MAX_REGISTER_SIZE];
3865 convert_typed_floating (value_contents (arg) + argoffset,
3866 float_elt_type, to,
3867 ia64_ext_type (gdbarch));
3868 regcache_cooked_write (regcache, floatreg, (void *)to);
3869 floatreg++;
3870 argoffset += TYPE_LENGTH (float_elt_type);
3871 len -= TYPE_LENGTH (float_elt_type);
3872 }
3873 }
3874 }
3875
3876 /* Store the struct return value in r8 if necessary. */
3877 if (struct_return)
3878 {
3879 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3880 (ULONGEST) struct_addr);
3881 }
3882
3883 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3884
3885 if (global_pointer != 0)
3886 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3887
3888 /* The following is not necessary on HP-UX, because we're using
3889 a dummy code sequence pushed on the stack to make the call, and
3890 this sequence doesn't need b0 to be set in order for our dummy
3891 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3892 it's needed for other OSes, so we do this unconditionally. */
3893 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3894
3895 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3896
3897 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3898
3899 return sp;
3900 }
3901
3902 static const struct ia64_infcall_ops ia64_infcall_ops =
3903 {
3904 ia64_allocate_new_rse_frame,
3905 ia64_store_argument_in_slot,
3906 ia64_set_function_addr
3907 };
3908
3909 static struct frame_id
3910 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3911 {
3912 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3913 char buf[8];
3914 CORE_ADDR sp, bsp;
3915
3916 get_frame_register (this_frame, sp_regnum, buf);
3917 sp = extract_unsigned_integer (buf, 8, byte_order);
3918
3919 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3920 bsp = extract_unsigned_integer (buf, 8, byte_order);
3921
3922 if (gdbarch_debug >= 1)
3923 fprintf_unfiltered (gdb_stdlog,
3924 "dummy frame id: code %s, stack %s, special %s\n",
3925 paddress (gdbarch, get_frame_pc (this_frame)),
3926 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3927
3928 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3929 }
3930
3931 static CORE_ADDR
3932 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3933 {
3934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3935 char buf[8];
3936 CORE_ADDR ip, psr, pc;
3937
3938 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3939 ip = extract_unsigned_integer (buf, 8, byte_order);
3940 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3941 psr = extract_unsigned_integer (buf, 8, byte_order);
3942
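/* PSR.ri (bits 41:42) holds the slot number of the instruction to
   restart; fold it into the low bits of the bundle address to form
   the slot-encoded PC convention used throughout this file.  */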
3943 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3944 return pc;
3945 }
3946
3947 static int
3948 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3949 {
3950 info->bytes_per_line = SLOT_MULTIPLIER;
3951 return print_insn_ia64 (memaddr, info);
3952 }
3953
3954 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3955
3956 static int
3957 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3958 {
3959 return (cfm & 0x7f);
3960 }
3961
3962 static struct gdbarch *
3963 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3964 {
3965 struct gdbarch *gdbarch;
3966 struct gdbarch_tdep *tdep;
3967
3968 /* If there is already a candidate, use it. */
3969 arches = gdbarch_list_lookup_by_info (arches, &info);
3970 if (arches != NULL)
3971 return arches->gdbarch;
3972
3973 tdep = xzalloc (sizeof (struct gdbarch_tdep));
3974 gdbarch = gdbarch_alloc (&info, tdep);
3975
3976 tdep->size_of_register_frame = ia64_size_of_register_frame;
3977
3978 /* According to the ia64 specs, instructions that store long double
3979 floats in memory use a long-double format different from that
3980 used in the floating registers. The memory format matches the
3981 x86 extended float format, which is 80 bits. An OS may choose to
3982 use this format (e.g. GNU/Linux) or choose to use a different
3983 format for storing long doubles (e.g. HP-UX). In the latter case,
3984 the setting of the format may be moved/overridden in an
3985 OS-specific tdep file. */
3986 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3987
3988 set_gdbarch_short_bit (gdbarch, 16);
3989 set_gdbarch_int_bit (gdbarch, 32);
3990 set_gdbarch_long_bit (gdbarch, 64);
3991 set_gdbarch_long_long_bit (gdbarch, 64);
3992 set_gdbarch_float_bit (gdbarch, 32);
3993 set_gdbarch_double_bit (gdbarch, 64);
3994 set_gdbarch_long_double_bit (gdbarch, 128);
3995 set_gdbarch_ptr_bit (gdbarch, 64);
3996
3997 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3998 set_gdbarch_num_pseudo_regs (gdbarch,
3999 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
4000 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
4001 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
4002
4003 set_gdbarch_register_name (gdbarch, ia64_register_name);
4004 set_gdbarch_register_type (gdbarch, ia64_register_type);
4005
4006 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
4007 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
4008 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
4009 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
4010 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
4011 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
4012 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
4013
4014 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
4015
4016 set_gdbarch_return_value (gdbarch, ia64_return_value);
4017
4018 set_gdbarch_memory_insert_breakpoint (gdbarch,
4019 ia64_memory_insert_breakpoint);
4020 set_gdbarch_memory_remove_breakpoint (gdbarch,
4021 ia64_memory_remove_breakpoint);
4022 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
4023 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
4024 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
4025
4026 /* Settings for calling functions in the inferior. */
4027 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
4028 tdep->infcall_ops = ia64_infcall_ops;
4029 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
4030 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
4031
4032 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
4033 #ifdef HAVE_LIBUNWIND_IA64_H
4034 frame_unwind_append_unwinder (gdbarch,
4035 &ia64_libunwind_sigtramp_frame_unwind);
4036 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4037 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4038 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4039 #else
4040 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4041 #endif
4042 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4043 frame_base_set_default (gdbarch, &ia64_frame_base);
4044
4045 /* Settings that should be unnecessary. */
4046 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4047
4048 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4049 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4050 ia64_convert_from_func_ptr_addr);
4051
4052 /* The virtual table contains 16-byte descriptors, not pointers to
4053 descriptors. */
4054 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4055
4056 /* Hook in ABI-specific overrides, if they have been registered. */
4057 gdbarch_init_osabi (info, gdbarch);
4058
4059 return gdbarch;
4060 }
4061
4062 extern initialize_file_ftype _initialize_ia64_tdep; /* -Wmissing-prototypes */
4063
4064 void
4065 _initialize_ia64_tdep (void)
4066 {
4067 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4068 }