/* Target-dependent code for AMD64.

   Copyright (C) 2001-2021 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
23 #include "opcode/i386.h"
25 #include "arch-utils.h"
27 #include "dummy-frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42 #include "gdbsupport/x86-xstate.h"
44 #include "target-descriptions.h"
45 #include "arch/amd64.h"
49 #include "gdbsupport/byte-vector.h"
52 #include "amd64-ravenscar-thread.h"
/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */
61 /* Register information. */
63 static const char * const amd64_register_names
[] =
65 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
67 /* %r8 is indeed register number 8. */
68 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
69 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
71 /* %st0 is register number 24. */
72 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
73 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
75 /* %xmm0 is register number 40. */
76 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
77 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
81 static const char * const amd64_ymm_names
[] =
83 "ymm0", "ymm1", "ymm2", "ymm3",
84 "ymm4", "ymm5", "ymm6", "ymm7",
85 "ymm8", "ymm9", "ymm10", "ymm11",
86 "ymm12", "ymm13", "ymm14", "ymm15"
89 static const char * const amd64_ymm_avx512_names
[] =
91 "ymm16", "ymm17", "ymm18", "ymm19",
92 "ymm20", "ymm21", "ymm22", "ymm23",
93 "ymm24", "ymm25", "ymm26", "ymm27",
94 "ymm28", "ymm29", "ymm30", "ymm31"
97 static const char * const amd64_ymmh_names
[] =
99 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
100 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
101 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
102 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
105 static const char * const amd64_ymmh_avx512_names
[] =
107 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
108 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
109 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
110 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
113 static const char * const amd64_mpx_names
[] =
115 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
118 static const char * const amd64_k_names
[] =
120 "k0", "k1", "k2", "k3",
121 "k4", "k5", "k6", "k7"
124 static const char * const amd64_zmmh_names
[] =
126 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
127 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
128 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
129 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
130 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
131 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
132 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
133 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
136 static const char * const amd64_zmm_names
[] =
138 "zmm0", "zmm1", "zmm2", "zmm3",
139 "zmm4", "zmm5", "zmm6", "zmm7",
140 "zmm8", "zmm9", "zmm10", "zmm11",
141 "zmm12", "zmm13", "zmm14", "zmm15",
142 "zmm16", "zmm17", "zmm18", "zmm19",
143 "zmm20", "zmm21", "zmm22", "zmm23",
144 "zmm24", "zmm25", "zmm26", "zmm27",
145 "zmm28", "zmm29", "zmm30", "zmm31"
148 static const char * const amd64_xmm_avx512_names
[] = {
149 "xmm16", "xmm17", "xmm18", "xmm19",
150 "xmm20", "xmm21", "xmm22", "xmm23",
151 "xmm24", "xmm25", "xmm26", "xmm27",
152 "xmm28", "xmm29", "xmm30", "xmm31"
155 static const char * const amd64_pkeys_names
[] = {
159 /* DWARF Register Number Mapping as defined in the System V psABI,
162 static int amd64_dwarf_regmap
[] =
164 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
165 AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
,
166 AMD64_RCX_REGNUM
, AMD64_RBX_REGNUM
,
167 AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
169 /* Frame Pointer Register RBP. */
172 /* Stack Pointer Register RSP. */
175 /* Extended Integer Registers 8 - 15. */
176 AMD64_R8_REGNUM
, /* %r8 */
177 AMD64_R9_REGNUM
, /* %r9 */
178 AMD64_R10_REGNUM
, /* %r10 */
179 AMD64_R11_REGNUM
, /* %r11 */
180 AMD64_R12_REGNUM
, /* %r12 */
181 AMD64_R13_REGNUM
, /* %r13 */
182 AMD64_R14_REGNUM
, /* %r14 */
183 AMD64_R15_REGNUM
, /* %r15 */
185 /* Return Address RA. Mapped to RIP. */
188 /* SSE Registers 0 - 7. */
189 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
190 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
191 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
192 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
194 /* Extended SSE Registers 8 - 15. */
195 AMD64_XMM0_REGNUM
+ 8, AMD64_XMM0_REGNUM
+ 9,
196 AMD64_XMM0_REGNUM
+ 10, AMD64_XMM0_REGNUM
+ 11,
197 AMD64_XMM0_REGNUM
+ 12, AMD64_XMM0_REGNUM
+ 13,
198 AMD64_XMM0_REGNUM
+ 14, AMD64_XMM0_REGNUM
+ 15,
200 /* Floating Point Registers 0-7. */
201 AMD64_ST0_REGNUM
+ 0, AMD64_ST0_REGNUM
+ 1,
202 AMD64_ST0_REGNUM
+ 2, AMD64_ST0_REGNUM
+ 3,
203 AMD64_ST0_REGNUM
+ 4, AMD64_ST0_REGNUM
+ 5,
204 AMD64_ST0_REGNUM
+ 6, AMD64_ST0_REGNUM
+ 7,
206 /* MMX Registers 0 - 7.
207 We have to handle those registers specifically, as their register
208 number within GDB depends on the target (or they may even not be
209 available at all). */
210 -1, -1, -1, -1, -1, -1, -1, -1,
212 /* Control and Status Flags Register. */
215 /* Selector Registers. */
225 /* Segment Base Address Registers. */
231 /* Special Selector Registers. */
235 /* Floating Point Control Registers. */
241 static const int amd64_dwarf_regmap_len
=
242 (sizeof (amd64_dwarf_regmap
) / sizeof (amd64_dwarf_regmap
[0]));
244 /* Convert DWARF register number REG to the appropriate register
245 number used by GDB. */
248 amd64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
250 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
251 int ymm0_regnum
= tdep
->ymm0_regnum
;
254 if (reg
>= 0 && reg
< amd64_dwarf_regmap_len
)
255 regnum
= amd64_dwarf_regmap
[reg
];
258 && i386_xmm_regnum_p (gdbarch
, regnum
))
259 regnum
+= ymm0_regnum
- I387_XMM0_REGNUM (tdep
);
264 /* Map architectural register numbers to gdb register numbers. */
266 static const int amd64_arch_regmap
[16] =
268 AMD64_RAX_REGNUM
, /* %rax */
269 AMD64_RCX_REGNUM
, /* %rcx */
270 AMD64_RDX_REGNUM
, /* %rdx */
271 AMD64_RBX_REGNUM
, /* %rbx */
272 AMD64_RSP_REGNUM
, /* %rsp */
273 AMD64_RBP_REGNUM
, /* %rbp */
274 AMD64_RSI_REGNUM
, /* %rsi */
275 AMD64_RDI_REGNUM
, /* %rdi */
276 AMD64_R8_REGNUM
, /* %r8 */
277 AMD64_R9_REGNUM
, /* %r9 */
278 AMD64_R10_REGNUM
, /* %r10 */
279 AMD64_R11_REGNUM
, /* %r11 */
280 AMD64_R12_REGNUM
, /* %r12 */
281 AMD64_R13_REGNUM
, /* %r13 */
282 AMD64_R14_REGNUM
, /* %r14 */
283 AMD64_R15_REGNUM
/* %r15 */
286 static const int amd64_arch_regmap_len
=
287 (sizeof (amd64_arch_regmap
) / sizeof (amd64_arch_regmap
[0]));
289 /* Convert architectural register number REG to the appropriate register
290 number used by GDB. */
293 amd64_arch_reg_to_regnum (int reg
)
295 gdb_assert (reg
>= 0 && reg
< amd64_arch_regmap_len
);
297 return amd64_arch_regmap
[reg
];
300 /* Register names for byte pseudo-registers. */
302 static const char * const amd64_byte_names
[] =
304 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
305 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
306 "ah", "bh", "ch", "dh"
309 /* Number of lower byte registers. */
310 #define AMD64_NUM_LOWER_BYTE_REGS 16
312 /* Register names for word pseudo-registers. */
314 static const char * const amd64_word_names
[] =
316 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
317 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
320 /* Register names for dword pseudo-registers. */
322 static const char * const amd64_dword_names
[] =
324 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
325 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
329 /* Return the name of register REGNUM. */
332 amd64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
334 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
335 if (i386_byte_regnum_p (gdbarch
, regnum
))
336 return amd64_byte_names
[regnum
- tdep
->al_regnum
];
337 else if (i386_zmm_regnum_p (gdbarch
, regnum
))
338 return amd64_zmm_names
[regnum
- tdep
->zmm0_regnum
];
339 else if (i386_ymm_regnum_p (gdbarch
, regnum
))
340 return amd64_ymm_names
[regnum
- tdep
->ymm0_regnum
];
341 else if (i386_ymm_avx512_regnum_p (gdbarch
, regnum
))
342 return amd64_ymm_avx512_names
[regnum
- tdep
->ymm16_regnum
];
343 else if (i386_word_regnum_p (gdbarch
, regnum
))
344 return amd64_word_names
[regnum
- tdep
->ax_regnum
];
345 else if (i386_dword_regnum_p (gdbarch
, regnum
))
346 return amd64_dword_names
[regnum
- tdep
->eax_regnum
];
348 return i386_pseudo_register_name (gdbarch
, regnum
);
351 static struct value
*
352 amd64_pseudo_register_read_value (struct gdbarch
*gdbarch
,
353 readable_regcache
*regcache
,
356 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
358 value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
359 VALUE_LVAL (result_value
) = lval_register
;
360 VALUE_REGNUM (result_value
) = regnum
;
361 gdb_byte
*buf
= value_contents_raw (result_value
).data ();
363 if (i386_byte_regnum_p (gdbarch
, regnum
))
365 int gpnum
= regnum
- tdep
->al_regnum
;
367 /* Extract (always little endian). */
368 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
370 gpnum
-= AMD64_NUM_LOWER_BYTE_REGS
;
371 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
373 /* Special handling for AH, BH, CH, DH. */
374 register_status status
= regcache
->raw_read (gpnum
, raw_buf
);
375 if (status
== REG_VALID
)
376 memcpy (buf
, raw_buf
+ 1, 1);
378 mark_value_bytes_unavailable (result_value
, 0,
379 TYPE_LENGTH (value_type (result_value
)));
383 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
384 register_status status
= regcache
->raw_read (gpnum
, raw_buf
);
385 if (status
== REG_VALID
)
386 memcpy (buf
, raw_buf
, 1);
388 mark_value_bytes_unavailable (result_value
, 0,
389 TYPE_LENGTH (value_type (result_value
)));
392 else if (i386_dword_regnum_p (gdbarch
, regnum
))
394 int gpnum
= regnum
- tdep
->eax_regnum
;
395 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
396 /* Extract (always little endian). */
397 register_status status
= regcache
->raw_read (gpnum
, raw_buf
);
398 if (status
== REG_VALID
)
399 memcpy (buf
, raw_buf
, 4);
401 mark_value_bytes_unavailable (result_value
, 0,
402 TYPE_LENGTH (value_type (result_value
)));
405 i386_pseudo_register_read_into_value (gdbarch
, regcache
, regnum
,
412 amd64_pseudo_register_write (struct gdbarch
*gdbarch
,
413 struct regcache
*regcache
,
414 int regnum
, const gdb_byte
*buf
)
416 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
418 if (i386_byte_regnum_p (gdbarch
, regnum
))
420 int gpnum
= regnum
- tdep
->al_regnum
;
422 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
424 gpnum
-= AMD64_NUM_LOWER_BYTE_REGS
;
425 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
427 /* Read ... AH, BH, CH, DH. */
428 regcache
->raw_read (gpnum
, raw_buf
);
429 /* ... Modify ... (always little endian). */
430 memcpy (raw_buf
+ 1, buf
, 1);
432 regcache
->raw_write (gpnum
, raw_buf
);
436 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
439 regcache
->raw_read (gpnum
, raw_buf
);
440 /* ... Modify ... (always little endian). */
441 memcpy (raw_buf
, buf
, 1);
443 regcache
->raw_write (gpnum
, raw_buf
);
446 else if (i386_dword_regnum_p (gdbarch
, regnum
))
448 int gpnum
= regnum
- tdep
->eax_regnum
;
449 gdb_byte raw_buf
[register_size (gdbarch
, gpnum
)];
452 regcache
->raw_read (gpnum
, raw_buf
);
453 /* ... Modify ... (always little endian). */
454 memcpy (raw_buf
, buf
, 4);
456 regcache
->raw_write (gpnum
, raw_buf
);
459 i386_pseudo_register_write (gdbarch
, regcache
, regnum
, buf
);
462 /* Implement the 'ax_pseudo_register_collect' gdbarch method. */
465 amd64_ax_pseudo_register_collect (struct gdbarch
*gdbarch
,
466 struct agent_expr
*ax
, int regnum
)
468 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
470 if (i386_byte_regnum_p (gdbarch
, regnum
))
472 int gpnum
= regnum
- tdep
->al_regnum
;
474 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
475 ax_reg_mask (ax
, gpnum
- AMD64_NUM_LOWER_BYTE_REGS
);
477 ax_reg_mask (ax
, gpnum
);
480 else if (i386_dword_regnum_p (gdbarch
, regnum
))
482 int gpnum
= regnum
- tdep
->eax_regnum
;
484 ax_reg_mask (ax
, gpnum
);
488 return i386_ax_pseudo_register_collect (gdbarch
, ax
, regnum
);
493 /* Register classes as defined in the psABI. */
507 /* Return the union class of CLASS1 and CLASS2. See the psABI for
510 static enum amd64_reg_class
511 amd64_merge_classes (enum amd64_reg_class class1
, enum amd64_reg_class class2
)
513 /* Rule (a): If both classes are equal, this is the resulting class. */
514 if (class1
== class2
)
517 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
518 is the other class. */
519 if (class1
== AMD64_NO_CLASS
)
521 if (class2
== AMD64_NO_CLASS
)
524 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
525 if (class1
== AMD64_MEMORY
|| class2
== AMD64_MEMORY
)
528 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
529 if (class1
== AMD64_INTEGER
|| class2
== AMD64_INTEGER
)
530 return AMD64_INTEGER
;
532 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
533 MEMORY is used as class. */
534 if (class1
== AMD64_X87
|| class1
== AMD64_X87UP
535 || class1
== AMD64_COMPLEX_X87
|| class2
== AMD64_X87
536 || class2
== AMD64_X87UP
|| class2
== AMD64_COMPLEX_X87
)
539 /* Rule (f): Otherwise class SSE is used. */
543 static void amd64_classify (struct type
*type
, enum amd64_reg_class theclass
[2]);
545 /* Return true if TYPE is a structure or union with unaligned fields. */
548 amd64_has_unaligned_fields (struct type
*type
)
550 if (type
->code () == TYPE_CODE_STRUCT
551 || type
->code () == TYPE_CODE_UNION
)
553 for (int i
= 0; i
< type
->num_fields (); i
++)
555 struct type
*subtype
= check_typedef (type
->field (i
).type ());
557 /* Ignore static fields, empty fields (for example nested
558 empty structures), and bitfields (these are handled by
560 if (field_is_static (&type
->field (i
))
561 || (TYPE_FIELD_BITSIZE (type
, i
) == 0
562 && TYPE_LENGTH (subtype
) == 0)
563 || TYPE_FIELD_PACKED (type
, i
))
566 int bitpos
= type
->field (i
).loc_bitpos ();
571 int align
= type_align (subtype
);
573 error (_("could not determine alignment of type"));
575 int bytepos
= bitpos
/ 8;
576 if (bytepos
% align
!= 0)
579 if (amd64_has_unaligned_fields (subtype
))
587 /* Classify field I of TYPE starting at BITOFFSET according to the rules for
588 structures and union types, and store the result in THECLASS. */
591 amd64_classify_aggregate_field (struct type
*type
, int i
,
592 enum amd64_reg_class theclass
[2],
593 unsigned int bitoffset
)
595 struct type
*subtype
= check_typedef (type
->field (i
).type ());
596 enum amd64_reg_class subclass
[2];
597 int bitsize
= TYPE_FIELD_BITSIZE (type
, i
);
600 bitsize
= TYPE_LENGTH (subtype
) * 8;
602 /* Ignore static fields, or empty fields, for example nested
604 if (field_is_static (&type
->field (i
)) || bitsize
== 0)
607 int bitpos
= bitoffset
+ type
->field (i
).loc_bitpos ();
608 int pos
= bitpos
/ 64;
609 int endpos
= (bitpos
+ bitsize
- 1) / 64;
611 if (subtype
->code () == TYPE_CODE_STRUCT
612 || subtype
->code () == TYPE_CODE_UNION
)
614 /* Each field of an object is classified recursively. */
616 for (j
= 0; j
< subtype
->num_fields (); j
++)
617 amd64_classify_aggregate_field (subtype
, j
, theclass
, bitpos
);
621 gdb_assert (pos
== 0 || pos
== 1);
623 amd64_classify (subtype
, subclass
);
624 theclass
[pos
] = amd64_merge_classes (theclass
[pos
], subclass
[0]);
625 if (bitsize
<= 64 && pos
== 0 && endpos
== 1)
626 /* This is a bit of an odd case: We have a field that would
627 normally fit in one of the two eightbytes, except that
628 it is placed in a way that this field straddles them.
629 This has been seen with a structure containing an array.
631 The ABI is a bit unclear in this case, but we assume that
632 this field's class (stored in subclass[0]) must also be merged
633 into class[1]. In other words, our field has a piece stored
634 in the second eight-byte, and thus its class applies to
635 the second eight-byte as well.
637 In the case where the field length exceeds 8 bytes,
638 it should not be necessary to merge the field class
639 into class[1]. As LEN > 8, subclass[1] is necessarily
640 different from AMD64_NO_CLASS. If subclass[1] is equal
641 to subclass[0], then the normal class[1]/subclass[1]
642 merging will take care of everything. For subclass[1]
643 to be different from subclass[0], I can only see the case
644 where we have a SSE/SSEUP or X87/X87UP pair, which both
645 use up all 16 bytes of the aggregate, and are already
646 handled just fine (because each portion sits on its own
648 theclass
[1] = amd64_merge_classes (theclass
[1], subclass
[0]);
650 theclass
[1] = amd64_merge_classes (theclass
[1], subclass
[1]);
653 /* Classify TYPE according to the rules for aggregate (structures and
654 arrays) and union types, and store the result in CLASS. */
657 amd64_classify_aggregate (struct type
*type
, enum amd64_reg_class theclass
[2])
659 /* 1. If the size of an object is larger than two eightbytes, or it has
660 unaligned fields, it has class memory. */
661 if (TYPE_LENGTH (type
) > 16 || amd64_has_unaligned_fields (type
))
663 theclass
[0] = theclass
[1] = AMD64_MEMORY
;
667 /* 2. Both eightbytes get initialized to class NO_CLASS. */
668 theclass
[0] = theclass
[1] = AMD64_NO_CLASS
;
670 /* 3. Each field of an object is classified recursively so that
671 always two fields are considered. The resulting class is
672 calculated according to the classes of the fields in the
675 if (type
->code () == TYPE_CODE_ARRAY
)
677 struct type
*subtype
= check_typedef (TYPE_TARGET_TYPE (type
));
679 /* All fields in an array have the same type. */
680 amd64_classify (subtype
, theclass
);
681 if (TYPE_LENGTH (type
) > 8 && theclass
[1] == AMD64_NO_CLASS
)
682 theclass
[1] = theclass
[0];
688 /* Structure or union. */
689 gdb_assert (type
->code () == TYPE_CODE_STRUCT
690 || type
->code () == TYPE_CODE_UNION
);
692 for (i
= 0; i
< type
->num_fields (); i
++)
693 amd64_classify_aggregate_field (type
, i
, theclass
, 0);
696 /* 4. Then a post merger cleanup is done: */
698 /* Rule (a): If one of the classes is MEMORY, the whole argument is
700 if (theclass
[0] == AMD64_MEMORY
|| theclass
[1] == AMD64_MEMORY
)
701 theclass
[0] = theclass
[1] = AMD64_MEMORY
;
703 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
705 if (theclass
[0] == AMD64_SSEUP
)
706 theclass
[0] = AMD64_SSE
;
707 if (theclass
[1] == AMD64_SSEUP
&& theclass
[0] != AMD64_SSE
)
708 theclass
[1] = AMD64_SSE
;
711 /* Classify TYPE, and store the result in CLASS. */
714 amd64_classify (struct type
*type
, enum amd64_reg_class theclass
[2])
716 enum type_code code
= type
->code ();
717 int len
= TYPE_LENGTH (type
);
719 theclass
[0] = theclass
[1] = AMD64_NO_CLASS
;
721 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
722 long, long long, and pointers are in the INTEGER class. Similarly,
723 range types, used by languages such as Ada, are also in the INTEGER
725 if ((code
== TYPE_CODE_INT
|| code
== TYPE_CODE_ENUM
726 || code
== TYPE_CODE_BOOL
|| code
== TYPE_CODE_RANGE
727 || code
== TYPE_CODE_CHAR
728 || code
== TYPE_CODE_PTR
|| TYPE_IS_REFERENCE (type
))
729 && (len
== 1 || len
== 2 || len
== 4 || len
== 8))
730 theclass
[0] = AMD64_INTEGER
;
732 /* Arguments of types _Float16, float, double, _Decimal32, _Decimal64 and
733 __m64 are in class SSE. */
734 else if ((code
== TYPE_CODE_FLT
|| code
== TYPE_CODE_DECFLOAT
)
735 && (len
== 2 || len
== 4 || len
== 8))
737 theclass
[0] = AMD64_SSE
;
739 /* Arguments of types __float128, _Decimal128 and __m128 are split into
740 two halves. The least significant ones belong to class SSE, the most
741 significant one to class SSEUP. */
742 else if (code
== TYPE_CODE_DECFLOAT
&& len
== 16)
743 /* FIXME: __float128, __m128. */
744 theclass
[0] = AMD64_SSE
, theclass
[1] = AMD64_SSEUP
;
746 /* The 64-bit mantissa of arguments of type long double belongs to
747 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
749 else if (code
== TYPE_CODE_FLT
&& len
== 16)
750 /* Class X87 and X87UP. */
751 theclass
[0] = AMD64_X87
, theclass
[1] = AMD64_X87UP
;
753 /* Arguments of complex T - where T is one of the types _Float16, float or
754 double - get treated as if they are implemented as:
762 else if (code
== TYPE_CODE_COMPLEX
&& (len
== 8 || len
== 4))
763 theclass
[0] = AMD64_SSE
;
764 else if (code
== TYPE_CODE_COMPLEX
&& len
== 16)
765 theclass
[0] = theclass
[1] = AMD64_SSE
;
767 /* A variable of type complex long double is classified as type
769 else if (code
== TYPE_CODE_COMPLEX
&& len
== 32)
770 theclass
[0] = AMD64_COMPLEX_X87
;
773 else if (code
== TYPE_CODE_ARRAY
|| code
== TYPE_CODE_STRUCT
774 || code
== TYPE_CODE_UNION
)
775 amd64_classify_aggregate (type
, theclass
);
778 static enum return_value_convention
779 amd64_return_value (struct gdbarch
*gdbarch
, struct value
*function
,
780 struct type
*type
, struct regcache
*regcache
,
781 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
783 enum amd64_reg_class theclass
[2];
784 int len
= TYPE_LENGTH (type
);
785 static int integer_regnum
[] = { AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
};
786 static int sse_regnum
[] = { AMD64_XMM0_REGNUM
, AMD64_XMM1_REGNUM
};
791 gdb_assert (!(readbuf
&& writebuf
));
793 /* 1. Classify the return type with the classification algorithm. */
794 amd64_classify (type
, theclass
);
796 /* 2. If the type has class MEMORY, then the caller provides space
797 for the return value and passes the address of this storage in
798 %rdi as if it were the first argument to the function. In effect,
799 this address becomes a hidden first argument.
801 On return %rax will contain the address that has been passed in
802 by the caller in %rdi. */
803 if (theclass
[0] == AMD64_MEMORY
)
805 /* As indicated by the comment above, the ABI guarantees that we
806 can always find the return value just after the function has
813 regcache_raw_read_unsigned (regcache
, AMD64_RAX_REGNUM
, &addr
);
814 read_memory (addr
, readbuf
, TYPE_LENGTH (type
));
817 return RETURN_VALUE_ABI_RETURNS_ADDRESS
;
820 /* 8. If the class is COMPLEX_X87, the real part of the value is
821 returned in %st0 and the imaginary part in %st1. */
822 if (theclass
[0] == AMD64_COMPLEX_X87
)
826 regcache
->raw_read (AMD64_ST0_REGNUM
, readbuf
);
827 regcache
->raw_read (AMD64_ST1_REGNUM
, readbuf
+ 16);
832 i387_return_value (gdbarch
, regcache
);
833 regcache
->raw_write (AMD64_ST0_REGNUM
, writebuf
);
834 regcache
->raw_write (AMD64_ST1_REGNUM
, writebuf
+ 16);
836 /* Fix up the tag word such that both %st(0) and %st(1) are
838 regcache_raw_write_unsigned (regcache
, AMD64_FTAG_REGNUM
, 0xfff);
841 return RETURN_VALUE_REGISTER_CONVENTION
;
844 gdb_assert (theclass
[1] != AMD64_MEMORY
);
845 gdb_assert (len
<= 16);
847 for (i
= 0; len
> 0; i
++, len
-= 8)
855 /* 3. If the class is INTEGER, the next available register
856 of the sequence %rax, %rdx is used. */
857 regnum
= integer_regnum
[integer_reg
++];
861 /* 4. If the class is SSE, the next available SSE register
862 of the sequence %xmm0, %xmm1 is used. */
863 regnum
= sse_regnum
[sse_reg
++];
867 /* 5. If the class is SSEUP, the eightbyte is passed in the
868 upper half of the last used SSE register. */
869 gdb_assert (sse_reg
> 0);
870 regnum
= sse_regnum
[sse_reg
- 1];
875 /* 6. If the class is X87, the value is returned on the X87
876 stack in %st0 as 80-bit x87 number. */
877 regnum
= AMD64_ST0_REGNUM
;
879 i387_return_value (gdbarch
, regcache
);
883 /* 7. If the class is X87UP, the value is returned together
884 with the previous X87 value in %st0. */
885 gdb_assert (i
> 0 && theclass
[0] == AMD64_X87
);
886 regnum
= AMD64_ST0_REGNUM
;
895 gdb_assert (!"Unexpected register class.");
898 gdb_assert (regnum
!= -1);
901 regcache
->raw_read_part (regnum
, offset
, std::min (len
, 8),
904 regcache
->raw_write_part (regnum
, offset
, std::min (len
, 8),
908 return RETURN_VALUE_REGISTER_CONVENTION
;
913 amd64_push_arguments (struct regcache
*regcache
, int nargs
, struct value
**args
,
914 CORE_ADDR sp
, function_call_return_method return_method
)
916 static int integer_regnum
[] =
918 AMD64_RDI_REGNUM
, /* %rdi */
919 AMD64_RSI_REGNUM
, /* %rsi */
920 AMD64_RDX_REGNUM
, /* %rdx */
921 AMD64_RCX_REGNUM
, /* %rcx */
922 AMD64_R8_REGNUM
, /* %r8 */
923 AMD64_R9_REGNUM
/* %r9 */
925 static int sse_regnum
[] =
927 /* %xmm0 ... %xmm7 */
928 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
929 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
930 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
931 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
933 struct value
**stack_args
= XALLOCAVEC (struct value
*, nargs
);
934 int num_stack_args
= 0;
935 int num_elements
= 0;
941 /* Reserve a register for the "hidden" argument. */
942 if (return_method
== return_method_struct
)
945 for (i
= 0; i
< nargs
; i
++)
947 struct type
*type
= value_type (args
[i
]);
948 int len
= TYPE_LENGTH (type
);
949 enum amd64_reg_class theclass
[2];
950 int needed_integer_regs
= 0;
951 int needed_sse_regs
= 0;
954 /* Classify argument. */
955 amd64_classify (type
, theclass
);
957 /* Calculate the number of integer and SSE registers needed for
959 for (j
= 0; j
< 2; j
++)
961 if (theclass
[j
] == AMD64_INTEGER
)
962 needed_integer_regs
++;
963 else if (theclass
[j
] == AMD64_SSE
)
967 /* Check whether enough registers are available, and if the
968 argument should be passed in registers at all. */
969 if (integer_reg
+ needed_integer_regs
> ARRAY_SIZE (integer_regnum
)
970 || sse_reg
+ needed_sse_regs
> ARRAY_SIZE (sse_regnum
)
971 || (needed_integer_regs
== 0 && needed_sse_regs
== 0))
973 /* The argument will be passed on the stack. */
974 num_elements
+= ((len
+ 7) / 8);
975 stack_args
[num_stack_args
++] = args
[i
];
979 /* The argument will be passed in registers. */
980 const gdb_byte
*valbuf
= value_contents (args
[i
]).data ();
983 gdb_assert (len
<= 16);
985 for (j
= 0; len
> 0; j
++, len
-= 8)
993 regnum
= integer_regnum
[integer_reg
++];
997 regnum
= sse_regnum
[sse_reg
++];
1001 gdb_assert (sse_reg
> 0);
1002 regnum
= sse_regnum
[sse_reg
- 1];
1006 case AMD64_NO_CLASS
:
1010 gdb_assert (!"Unexpected register class.");
1013 gdb_assert (regnum
!= -1);
1014 memset (buf
, 0, sizeof buf
);
1015 memcpy (buf
, valbuf
+ j
* 8, std::min (len
, 8));
1016 regcache
->raw_write_part (regnum
, offset
, 8, buf
);
1021 /* Allocate space for the arguments on the stack. */
1022 sp
-= num_elements
* 8;
1024 /* The psABI says that "The end of the input argument area shall be
1025 aligned on a 16 byte boundary." */
1028 /* Write out the arguments to the stack. */
1029 for (i
= 0; i
< num_stack_args
; i
++)
1031 struct type
*type
= value_type (stack_args
[i
]);
1032 const gdb_byte
*valbuf
= value_contents (stack_args
[i
]).data ();
1033 int len
= TYPE_LENGTH (type
);
1035 write_memory (sp
+ element
* 8, valbuf
, len
);
1036 element
+= ((len
+ 7) / 8);
1039 /* The psABI says that "For calls that may call functions that use
1040 varargs or stdargs (prototype-less calls or calls to functions
1041 containing ellipsis (...) in the declaration) %al is used as
1042 hidden argument to specify the number of SSE registers used. */
1043 regcache_raw_write_unsigned (regcache
, AMD64_RAX_REGNUM
, sse_reg
);
1048 amd64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1049 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1050 int nargs
, struct value
**args
, CORE_ADDR sp
,
1051 function_call_return_method return_method
,
1052 CORE_ADDR struct_addr
)
1054 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1057 /* BND registers can be in arbitrary values at the moment of the
1058 inferior call. This can cause boundary violations that are not
1059 due to a real bug or even desired by the user. The best to be done
1060 is set the BND registers to allow access to the whole memory, INIT
1061 state, before pushing the inferior call. */
1062 i387_reset_bnd_regs (gdbarch
, regcache
);
1064 /* Pass arguments. */
1065 sp
= amd64_push_arguments (regcache
, nargs
, args
, sp
, return_method
);
1067 /* Pass "hidden" argument". */
1068 if (return_method
== return_method_struct
)
1070 store_unsigned_integer (buf
, 8, byte_order
, struct_addr
);
1071 regcache
->cooked_write (AMD64_RDI_REGNUM
, buf
);
1074 /* Store return address. */
1076 store_unsigned_integer (buf
, 8, byte_order
, bp_addr
);
1077 write_memory (sp
, buf
, 8);
1079 /* Finally, update the stack pointer... */
1080 store_unsigned_integer (buf
, 8, byte_order
, sp
);
1081 regcache
->cooked_write (AMD64_RSP_REGNUM
, buf
);
1083 /* ...and fake a frame pointer. */
1084 regcache
->cooked_write (AMD64_RBP_REGNUM
, buf
);
1089 /* Displaced instruction handling. */
1091 /* A partially decoded instruction.
1092 This contains enough details for displaced stepping purposes. */
1096 /* The number of opcode bytes. */
1098 /* The offset of the REX/VEX instruction encoding prefix or -1 if
1100 int enc_prefix_offset
;
1101 /* The offset to the first opcode byte. */
1103 /* The offset to the modrm byte or -1 if not present. */
1106 /* The raw instruction. */
1110 struct amd64_displaced_step_copy_insn_closure
1111 : public displaced_step_copy_insn_closure
1113 amd64_displaced_step_copy_insn_closure (int insn_buf_len
)
1114 : insn_buf (insn_buf_len
, 0)
1117 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1122 /* Details of the instruction. */
1123 struct amd64_insn insn_details
;
1125 /* The possibly modified insn. */
1126 gdb::byte_vector insn_buf
;
1129 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1130 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1131 at which point delete these in favor of libopcodes' versions). */
1133 static const unsigned char onebyte_has_modrm
[256] = {
1134 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1135 /* ------------------------------- */
1136 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1137 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1138 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1139 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1140 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1141 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1142 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1143 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1144 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1145 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1146 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1147 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1148 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1149 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1150 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1151 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1152 /* ------------------------------- */
1153 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* Lookup table for two-byte (0x0f-escaped) opcodes: non-zero iff the
   opcode at that index is followed by a ModRM byte.  Keep in sync with
   ../opcodes/i386-dis.c (see warning above onebyte_has_modrm).  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
/* Forward declaration; the definition appears below.  Needed because
   amd64_displaced_step_copy_insn uses it before its definition.  */

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1182 rex_prefix_p (gdb_byte pfx
)
1184 return REX_PREFIX_P (pfx
);
1187 /* True if PFX is the start of the 2-byte VEX prefix. */
1190 vex2_prefix_p (gdb_byte pfx
)
1195 /* True if PFX is the start of the 3-byte VEX prefix. */
1198 vex3_prefix_p (gdb_byte pfx
)
1203 /* Skip the legacy instruction prefixes in INSN.
1204 We assume INSN is properly sentineled so we don't have to worry
1205 about falling off the end of the buffer. */
1208 amd64_skip_prefixes (gdb_byte
*insn
)
1214 case DATA_PREFIX_OPCODE
:
1215 case ADDR_PREFIX_OPCODE
:
1216 case CS_PREFIX_OPCODE
:
1217 case DS_PREFIX_OPCODE
:
1218 case ES_PREFIX_OPCODE
:
1219 case FS_PREFIX_OPCODE
:
1220 case GS_PREFIX_OPCODE
:
1221 case SS_PREFIX_OPCODE
:
1222 case LOCK_PREFIX_OPCODE
:
1223 case REPE_PREFIX_OPCODE
:
1224 case REPNE_PREFIX_OPCODE
:
1236 /* Return an integer register (other than RSP) that is unused as an input
1238 In order to not require adding a rex prefix if the insn doesn't already
1239 have one, the result is restricted to RAX ... RDI, sans RSP.
1240 The register numbering of the result follows architecture ordering,
1244 amd64_get_unused_input_int_reg (const struct amd64_insn
*details
)
1246 /* 1 bit for each reg */
1247 int used_regs_mask
= 0;
1249 /* There can be at most 3 int regs used as inputs in an insn, and we have
1250 7 to choose from (RAX ... RDI, sans RSP).
1251 This allows us to take a conservative approach and keep things simple.
1252 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1253 that implicitly specify RAX. */
1256 used_regs_mask
|= 1 << EAX_REG_NUM
;
1257 /* Similarily avoid RDX, implicit operand in divides. */
1258 used_regs_mask
|= 1 << EDX_REG_NUM
;
1260 used_regs_mask
|= 1 << ESP_REG_NUM
;
1262 /* If the opcode is one byte long and there's no ModRM byte,
1263 assume the opcode specifies a register. */
1264 if (details
->opcode_len
== 1 && details
->modrm_offset
== -1)
1265 used_regs_mask
|= 1 << (details
->raw_insn
[details
->opcode_offset
] & 7);
1267 /* Mark used regs in the modrm/sib bytes. */
1268 if (details
->modrm_offset
!= -1)
1270 int modrm
= details
->raw_insn
[details
->modrm_offset
];
1271 int mod
= MODRM_MOD_FIELD (modrm
);
1272 int reg
= MODRM_REG_FIELD (modrm
);
1273 int rm
= MODRM_RM_FIELD (modrm
);
1274 int have_sib
= mod
!= 3 && rm
== 4;
1276 /* Assume the reg field of the modrm byte specifies a register. */
1277 used_regs_mask
|= 1 << reg
;
1281 int base
= SIB_BASE_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
1282 int idx
= SIB_INDEX_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
1283 used_regs_mask
|= 1 << base
;
1284 used_regs_mask
|= 1 << idx
;
1288 used_regs_mask
|= 1 << rm
;
1292 gdb_assert (used_regs_mask
< 256);
1293 gdb_assert (used_regs_mask
!= 255);
1295 /* Finally, find a free reg. */
1299 for (i
= 0; i
< 8; ++i
)
1301 if (! (used_regs_mask
& (1 << i
)))
1305 /* We shouldn't get here. */
1306 internal_error (__FILE__
, __LINE__
, _("unable to find free reg"));
1310 /* Extract the details of INSN that we need. */
1313 amd64_get_insn_details (gdb_byte
*insn
, struct amd64_insn
*details
)
1315 gdb_byte
*start
= insn
;
1318 details
->raw_insn
= insn
;
1320 details
->opcode_len
= -1;
1321 details
->enc_prefix_offset
= -1;
1322 details
->opcode_offset
= -1;
1323 details
->modrm_offset
= -1;
1325 /* Skip legacy instruction prefixes. */
1326 insn
= amd64_skip_prefixes (insn
);
1328 /* Skip REX/VEX instruction encoding prefixes. */
1329 if (rex_prefix_p (*insn
))
1331 details
->enc_prefix_offset
= insn
- start
;
1334 else if (vex2_prefix_p (*insn
))
1336 /* Don't record the offset in this case because this prefix has
1337 no REX.B equivalent. */
1340 else if (vex3_prefix_p (*insn
))
1342 details
->enc_prefix_offset
= insn
- start
;
1346 details
->opcode_offset
= insn
- start
;
1348 if (*insn
== TWO_BYTE_OPCODE_ESCAPE
)
1350 /* Two or three-byte opcode. */
1352 need_modrm
= twobyte_has_modrm
[*insn
];
1354 /* Check for three-byte opcode. */
1364 details
->opcode_len
= 3;
1367 details
->opcode_len
= 2;
1373 /* One-byte opcode. */
1374 need_modrm
= onebyte_has_modrm
[*insn
];
1375 details
->opcode_len
= 1;
1381 details
->modrm_offset
= insn
- start
;
1385 /* Update %rip-relative addressing in INSN.
1387 %rip-relative addressing only uses a 32-bit displacement.
1388 32 bits is not enough to be guaranteed to cover the distance between where
1389 the real instruction is and where its copy is.
1390 Convert the insn to use base+disp addressing.
1391 We set base = pc + insn_length so we can leave disp unchanged. */
1394 fixup_riprel (struct gdbarch
*gdbarch
,
1395 amd64_displaced_step_copy_insn_closure
*dsc
,
1396 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1398 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1399 int modrm_offset
= insn_details
->modrm_offset
;
1400 gdb_byte
*insn
= insn_details
->raw_insn
+ modrm_offset
;
1403 int arch_tmp_regno
, tmp_regno
;
1404 ULONGEST orig_value
;
1406 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1409 /* Compute the rip-relative address. */
1410 insn_length
= gdb_buffered_insn_length (gdbarch
, dsc
->insn_buf
.data (),
1411 dsc
->insn_buf
.size (), from
);
1412 rip_base
= from
+ insn_length
;
1414 /* We need a register to hold the address.
1415 Pick one not used in the insn.
1416 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1417 arch_tmp_regno
= amd64_get_unused_input_int_reg (insn_details
);
1418 tmp_regno
= amd64_arch_reg_to_regnum (arch_tmp_regno
);
1420 /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1). */
1421 static constexpr gdb_byte VEX3_NOT_B
= 0x20;
1423 /* REX.B should be unset (VEX.!B set) as we were using rip-relative
1424 addressing, but ensure it's unset (set for VEX) anyway, tmp_regno
1426 if (insn_details
->enc_prefix_offset
!= -1)
1428 gdb_byte
*pfx
= &dsc
->insn_buf
[insn_details
->enc_prefix_offset
];
1429 if (rex_prefix_p (pfx
[0]))
1431 else if (vex3_prefix_p (pfx
[0]))
1432 pfx
[1] |= VEX3_NOT_B
;
1434 gdb_assert_not_reached ("unhandled prefix");
1437 regcache_cooked_read_unsigned (regs
, tmp_regno
, &orig_value
);
1438 dsc
->tmp_regno
= tmp_regno
;
1439 dsc
->tmp_save
= orig_value
;
1442 /* Convert the ModRM field to be base+disp. */
1443 dsc
->insn_buf
[modrm_offset
] &= ~0xc7;
1444 dsc
->insn_buf
[modrm_offset
] |= 0x80 + arch_tmp_regno
;
1446 regcache_cooked_write_unsigned (regs
, tmp_regno
, rip_base
);
1448 displaced_debug_printf ("%%rip-relative addressing used.");
1449 displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
1450 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
),
1451 paddress (gdbarch
, rip_base
));
1455 fixup_displaced_copy (struct gdbarch
*gdbarch
,
1456 amd64_displaced_step_copy_insn_closure
*dsc
,
1457 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1459 const struct amd64_insn
*details
= &dsc
->insn_details
;
1461 if (details
->modrm_offset
!= -1)
1463 gdb_byte modrm
= details
->raw_insn
[details
->modrm_offset
];
1465 if ((modrm
& 0xc7) == 0x05)
1467 /* The insn uses rip-relative addressing.
1469 fixup_riprel (gdbarch
, dsc
, from
, to
, regs
);
1474 displaced_step_copy_insn_closure_up
1475 amd64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
1476 CORE_ADDR from
, CORE_ADDR to
,
1477 struct regcache
*regs
)
1479 int len
= gdbarch_max_insn_length (gdbarch
);
1480 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1481 continually watch for running off the end of the buffer. */
1482 int fixup_sentinel_space
= len
;
1483 std::unique_ptr
<amd64_displaced_step_copy_insn_closure
> dsc
1484 (new amd64_displaced_step_copy_insn_closure (len
+ fixup_sentinel_space
));
1485 gdb_byte
*buf
= &dsc
->insn_buf
[0];
1486 struct amd64_insn
*details
= &dsc
->insn_details
;
1488 read_memory (from
, buf
, len
);
1490 /* Set up the sentinel space so we don't have to worry about running
1491 off the end of the buffer. An excessive number of leading prefixes
1492 could otherwise cause this. */
1493 memset (buf
+ len
, 0, fixup_sentinel_space
);
1495 amd64_get_insn_details (buf
, details
);
1497 /* GDB may get control back after the insn after the syscall.
1498 Presumably this is a kernel bug.
1499 If this is a syscall, make sure there's a nop afterwards. */
1503 if (amd64_syscall_p (details
, &syscall_length
))
1504 buf
[details
->opcode_offset
+ syscall_length
] = NOP_OPCODE
;
1507 /* Modify the insn to cope with the address where it will be executed from.
1508 In particular, handle any rip-relative addressing. */
1509 fixup_displaced_copy (gdbarch
, dsc
.get (), from
, to
, regs
);
1511 write_memory (to
, buf
, len
);
1513 displaced_debug_printf ("copy %s->%s: %s",
1514 paddress (gdbarch
, from
), paddress (gdbarch
, to
),
1515 displaced_step_dump_bytes (buf
, len
).c_str ());
1517 /* This is a work around for a problem with g++ 4.8. */
1518 return displaced_step_copy_insn_closure_up (dsc
.release ());
1522 amd64_absolute_jmp_p (const struct amd64_insn
*details
)
1524 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1526 if (insn
[0] == 0xff)
1528 /* jump near, absolute indirect (/4) */
1529 if ((insn
[1] & 0x38) == 0x20)
1532 /* jump far, absolute indirect (/5) */
1533 if ((insn
[1] & 0x38) == 0x28)
1540 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1543 amd64_jmp_p (const struct amd64_insn
*details
)
1545 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1547 /* jump short, relative. */
1548 if (insn
[0] == 0xeb)
1551 /* jump near, relative. */
1552 if (insn
[0] == 0xe9)
1555 return amd64_absolute_jmp_p (details
);
1559 amd64_absolute_call_p (const struct amd64_insn
*details
)
1561 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1563 if (insn
[0] == 0xff)
1565 /* Call near, absolute indirect (/2) */
1566 if ((insn
[1] & 0x38) == 0x10)
1569 /* Call far, absolute indirect (/3) */
1570 if ((insn
[1] & 0x38) == 0x18)
1578 amd64_ret_p (const struct amd64_insn
*details
)
1580 /* NOTE: gcc can emit "repz ; ret". */
1581 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1585 case 0xc2: /* ret near, pop N bytes */
1586 case 0xc3: /* ret near */
1587 case 0xca: /* ret far, pop N bytes */
1588 case 0xcb: /* ret far */
1589 case 0xcf: /* iret */
1598 amd64_call_p (const struct amd64_insn
*details
)
1600 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1602 if (amd64_absolute_call_p (details
))
1605 /* call near, relative */
1606 if (insn
[0] == 0xe8)
1612 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1613 length in bytes. Otherwise, return zero. */
1616 amd64_syscall_p (const struct amd64_insn
*details
, int *lengthp
)
1618 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1620 if (insn
[0] == 0x0f && insn
[1] == 0x05)
1629 /* Classify the instruction at ADDR using PRED.
1630 Throw an error if the memory can't be read. */
1633 amd64_classify_insn_at (struct gdbarch
*gdbarch
, CORE_ADDR addr
,
1634 int (*pred
) (const struct amd64_insn
*))
1636 struct amd64_insn details
;
1638 int len
, classification
;
1640 len
= gdbarch_max_insn_length (gdbarch
);
1641 buf
= (gdb_byte
*) alloca (len
);
1643 read_code (addr
, buf
, len
);
1644 amd64_get_insn_details (buf
, &details
);
1646 classification
= pred (&details
);
1648 return classification
;
1651 /* The gdbarch insn_is_call method. */
1654 amd64_insn_is_call (struct gdbarch
*gdbarch
, CORE_ADDR addr
)
1656 return amd64_classify_insn_at (gdbarch
, addr
, amd64_call_p
);
1659 /* The gdbarch insn_is_ret method. */
1662 amd64_insn_is_ret (struct gdbarch
*gdbarch
, CORE_ADDR addr
)
1664 return amd64_classify_insn_at (gdbarch
, addr
, amd64_ret_p
);
1667 /* The gdbarch insn_is_jump method. */
1670 amd64_insn_is_jump (struct gdbarch
*gdbarch
, CORE_ADDR addr
)
1672 return amd64_classify_insn_at (gdbarch
, addr
, amd64_jmp_p
);
1675 /* Fix up the state of registers and memory after having single-stepped
1676 a displaced instruction. */
1679 amd64_displaced_step_fixup (struct gdbarch
*gdbarch
,
1680 struct displaced_step_copy_insn_closure
*dsc_
,
1681 CORE_ADDR from
, CORE_ADDR to
,
1682 struct regcache
*regs
)
1684 amd64_displaced_step_copy_insn_closure
*dsc
1685 = (amd64_displaced_step_copy_insn_closure
*) dsc_
;
1686 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1687 /* The offset we applied to the instruction's address. */
1688 ULONGEST insn_offset
= to
- from
;
1689 gdb_byte
*insn
= dsc
->insn_buf
.data ();
1690 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1692 displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
1693 paddress (gdbarch
, from
), paddress (gdbarch
, to
),
1696 /* If we used a tmp reg, restore it. */
1700 displaced_debug_printf ("restoring reg %d to %s",
1701 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
));
1702 regcache_cooked_write_unsigned (regs
, dsc
->tmp_regno
, dsc
->tmp_save
);
1705 /* The list of issues to contend with here is taken from
1706 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1707 Yay for Free Software! */
1709 /* Relocate the %rip back to the program's instruction stream,
1712 /* Except in the case of absolute or indirect jump or call
1713 instructions, or a return instruction, the new rip is relative to
1714 the displaced instruction; make it relative to the original insn.
1715 Well, signal handler returns don't need relocation either, but we use the
1716 value of %rip to recognize those; see below. */
1717 if (! amd64_absolute_jmp_p (insn_details
)
1718 && ! amd64_absolute_call_p (insn_details
)
1719 && ! amd64_ret_p (insn_details
))
1724 regcache_cooked_read_unsigned (regs
, AMD64_RIP_REGNUM
, &orig_rip
);
1726 /* A signal trampoline system call changes the %rip, resuming
1727 execution of the main program after the signal handler has
1728 returned. That makes them like 'return' instructions; we
1729 shouldn't relocate %rip.
1731 But most system calls don't, and we do need to relocate %rip.
1733 Our heuristic for distinguishing these cases: if stepping
1734 over the system call instruction left control directly after
1735 the instruction, the we relocate --- control almost certainly
1736 doesn't belong in the displaced copy. Otherwise, we assume
1737 the instruction has put control where it belongs, and leave
1738 it unrelocated. Goodness help us if there are PC-relative
1740 if (amd64_syscall_p (insn_details
, &insn_len
)
1741 && orig_rip
!= to
+ insn_len
1742 /* GDB can get control back after the insn after the syscall.
1743 Presumably this is a kernel bug.
1744 Fixup ensures its a nop, we add one to the length for it. */
1745 && orig_rip
!= to
+ insn_len
+ 1)
1746 displaced_debug_printf ("syscall changed %%rip; not relocating");
1749 ULONGEST rip
= orig_rip
- insn_offset
;
1751 /* If we just stepped over a breakpoint insn, we don't backup
1752 the pc on purpose; this is to match behaviour without
1755 regcache_cooked_write_unsigned (regs
, AMD64_RIP_REGNUM
, rip
);
1757 displaced_debug_printf ("relocated %%rip from %s to %s",
1758 paddress (gdbarch
, orig_rip
),
1759 paddress (gdbarch
, rip
));
1763 /* If the instruction was PUSHFL, then the TF bit will be set in the
1764 pushed value, and should be cleared. We'll leave this for later,
1765 since GDB already messes up the TF flag when stepping over a
1768 /* If the instruction was a call, the return address now atop the
1769 stack is the address following the copied instruction. We need
1770 to make it the address following the original instruction. */
1771 if (amd64_call_p (insn_details
))
1775 const ULONGEST retaddr_len
= 8;
1777 regcache_cooked_read_unsigned (regs
, AMD64_RSP_REGNUM
, &rsp
);
1778 retaddr
= read_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
);
1779 retaddr
= (retaddr
- insn_offset
) & 0xffffffffffffffffULL
;
1780 write_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
, retaddr
);
1782 displaced_debug_printf ("relocated return addr at %s to %s",
1783 paddress (gdbarch
, rsp
),
1784 paddress (gdbarch
, retaddr
));
1788 /* If the instruction INSN uses RIP-relative addressing, return the
1789 offset into the raw INSN where the displacement to be adjusted is
1790 found. Returns 0 if the instruction doesn't use RIP-relative
1794 rip_relative_offset (struct amd64_insn
*insn
)
1796 if (insn
->modrm_offset
!= -1)
1798 gdb_byte modrm
= insn
->raw_insn
[insn
->modrm_offset
];
1800 if ((modrm
& 0xc7) == 0x05)
1802 /* The displacement is found right after the ModRM byte. */
1803 return insn
->modrm_offset
+ 1;
1811 append_insns (CORE_ADDR
*to
, ULONGEST len
, const gdb_byte
*buf
)
1813 target_write_memory (*to
, buf
, len
);
1818 amd64_relocate_instruction (struct gdbarch
*gdbarch
,
1819 CORE_ADDR
*to
, CORE_ADDR oldloc
)
1821 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1822 int len
= gdbarch_max_insn_length (gdbarch
);
1823 /* Extra space for sentinels. */
1824 int fixup_sentinel_space
= len
;
1825 gdb_byte
*buf
= (gdb_byte
*) xmalloc (len
+ fixup_sentinel_space
);
1826 struct amd64_insn insn_details
;
1828 LONGEST rel32
, newrel
;
1832 read_memory (oldloc
, buf
, len
);
1834 /* Set up the sentinel space so we don't have to worry about running
1835 off the end of the buffer. An excessive number of leading prefixes
1836 could otherwise cause this. */
1837 memset (buf
+ len
, 0, fixup_sentinel_space
);
1840 amd64_get_insn_details (insn
, &insn_details
);
1842 insn_length
= gdb_buffered_insn_length (gdbarch
, insn
, len
, oldloc
);
1844 /* Skip legacy instruction prefixes. */
1845 insn
= amd64_skip_prefixes (insn
);
1847 /* Adjust calls with 32-bit relative addresses as push/jump, with
1848 the address pushed being the location where the original call in
1849 the user program would return to. */
1850 if (insn
[0] == 0xe8)
1852 gdb_byte push_buf
[32];
1856 /* Where "ret" in the original code will return to. */
1857 ret_addr
= oldloc
+ insn_length
;
1859 /* If pushing an address higher than or equal to 0x80000000,
1860 avoid 'pushq', as that sign extends its 32-bit operand, which
1861 would be incorrect. */
1862 if (ret_addr
<= 0x7fffffff)
1864 push_buf
[0] = 0x68; /* pushq $... */
1865 store_unsigned_integer (&push_buf
[1], 4, byte_order
, ret_addr
);
1870 push_buf
[i
++] = 0x48; /* sub $0x8,%rsp */
1871 push_buf
[i
++] = 0x83;
1872 push_buf
[i
++] = 0xec;
1873 push_buf
[i
++] = 0x08;
1875 push_buf
[i
++] = 0xc7; /* movl $imm,(%rsp) */
1876 push_buf
[i
++] = 0x04;
1877 push_buf
[i
++] = 0x24;
1878 store_unsigned_integer (&push_buf
[i
], 4, byte_order
,
1879 ret_addr
& 0xffffffff);
1882 push_buf
[i
++] = 0xc7; /* movl $imm,4(%rsp) */
1883 push_buf
[i
++] = 0x44;
1884 push_buf
[i
++] = 0x24;
1885 push_buf
[i
++] = 0x04;
1886 store_unsigned_integer (&push_buf
[i
], 4, byte_order
,
1890 gdb_assert (i
<= sizeof (push_buf
));
1891 /* Push the push. */
1892 append_insns (to
, i
, push_buf
);
1894 /* Convert the relative call to a relative jump. */
1897 /* Adjust the destination offset. */
1898 rel32
= extract_signed_integer (insn
+ 1, 4, byte_order
);
1899 newrel
= (oldloc
- *to
) + rel32
;
1900 store_signed_integer (insn
+ 1, 4, byte_order
, newrel
);
1902 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1903 hex_string (rel32
), paddress (gdbarch
, oldloc
),
1904 hex_string (newrel
), paddress (gdbarch
, *to
));
1906 /* Write the adjusted jump into its displaced location. */
1907 append_insns (to
, 5, insn
);
1911 offset
= rip_relative_offset (&insn_details
);
1914 /* Adjust jumps with 32-bit relative addresses. Calls are
1915 already handled above. */
1916 if (insn
[0] == 0xe9)
1918 /* Adjust conditional jumps. */
1919 else if (insn
[0] == 0x0f && (insn
[1] & 0xf0) == 0x80)
1925 rel32
= extract_signed_integer (insn
+ offset
, 4, byte_order
);
1926 newrel
= (oldloc
- *to
) + rel32
;
1927 store_signed_integer (insn
+ offset
, 4, byte_order
, newrel
);
1928 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1929 hex_string (rel32
), paddress (gdbarch
, oldloc
),
1930 hex_string (newrel
), paddress (gdbarch
, *to
));
1933 /* Write the adjusted instruction into its displaced location. */
1934 append_insns (to
, insn_length
, buf
);
1938 /* The maximum number of saved registers. This should include %rip. */
1939 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1941 struct amd64_frame_cache
1946 CORE_ADDR sp_offset
;
1949 /* Saved registers. */
1950 CORE_ADDR saved_regs
[AMD64_NUM_SAVED_REGS
];
1954 /* Do we have a frame? */
1958 /* Initialize a frame cache. */
1961 amd64_init_frame_cache (struct amd64_frame_cache
*cache
)
1968 cache
->sp_offset
= -8;
1971 /* Saved registers. We initialize these to -1 since zero is a valid
1972 offset (that's where %rbp is supposed to be stored).
1973 The values start out as being offsets, and are later converted to
1974 addresses (at which point -1 is interpreted as an address, still meaning
1976 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
1977 cache
->saved_regs
[i
] = -1;
1978 cache
->saved_sp
= 0;
1979 cache
->saved_sp_reg
= -1;
1981 /* Frameless until proven otherwise. */
1982 cache
->frameless_p
= 1;
1985 /* Allocate and initialize a frame cache. */
1987 static struct amd64_frame_cache
*
1988 amd64_alloc_frame_cache (void)
1990 struct amd64_frame_cache
*cache
;
1992 cache
= FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache
);
1993 amd64_init_frame_cache (cache
);
1997 /* GCC 4.4 and later, can put code in the prologue to realign the
1998 stack pointer. Check whether PC points to such code, and update
1999 CACHE accordingly. Return the first instruction after the code
2000 sequence or CURRENT_PC, whichever is smaller. If we don't
2001 recognize the code, return PC. */
2004 amd64_analyze_stack_align (CORE_ADDR pc
, CORE_ADDR current_pc
,
2005 struct amd64_frame_cache
*cache
)
2007 /* There are 2 code sequences to re-align stack before the frame
2010 1. Use a caller-saved saved register:
2016 2. Use a callee-saved saved register:
2023 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2025 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2026 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2031 int offset
, offset_and
;
2033 if (target_read_code (pc
, buf
, sizeof buf
))
2036 /* Check caller-saved saved register. The first instruction has
2037 to be "leaq 8(%rsp), %reg". */
2038 if ((buf
[0] & 0xfb) == 0x48
2043 /* MOD must be binary 10 and R/M must be binary 100. */
2044 if ((buf
[2] & 0xc7) != 0x44)
2047 /* REG has register number. */
2048 reg
= (buf
[2] >> 3) & 7;
2050 /* Check the REX.R bit. */
2058 /* Check callee-saved saved register. The first instruction
2059 has to be "pushq %reg". */
2061 if ((buf
[0] & 0xf8) == 0x50)
2063 else if ((buf
[0] & 0xf6) == 0x40
2064 && (buf
[1] & 0xf8) == 0x50)
2066 /* Check the REX.B bit. */
2067 if ((buf
[0] & 1) != 0)
2076 reg
+= buf
[offset
] & 0x7;
2080 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2081 if ((buf
[offset
] & 0xfb) != 0x48
2082 || buf
[offset
+ 1] != 0x8d
2083 || buf
[offset
+ 3] != 0x24
2084 || buf
[offset
+ 4] != 0x10)
2087 /* MOD must be binary 10 and R/M must be binary 100. */
2088 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
2091 /* REG has register number. */
2092 r
= (buf
[offset
+ 2] >> 3) & 7;
2094 /* Check the REX.R bit. */
2095 if (buf
[offset
] == 0x4c)
2098 /* Registers in pushq and leaq have to be the same. */
2105 /* Rigister can't be %rsp nor %rbp. */
2106 if (reg
== 4 || reg
== 5)
2109 /* The next instruction has to be "andq $-XXX, %rsp". */
2110 if (buf
[offset
] != 0x48
2111 || buf
[offset
+ 2] != 0xe4
2112 || (buf
[offset
+ 1] != 0x81 && buf
[offset
+ 1] != 0x83))
2115 offset_and
= offset
;
2116 offset
+= buf
[offset
+ 1] == 0x81 ? 7 : 4;
2118 /* The next instruction has to be "pushq -8(%reg)". */
2120 if (buf
[offset
] == 0xff)
2122 else if ((buf
[offset
] & 0xf6) == 0x40
2123 && buf
[offset
+ 1] == 0xff)
2125 /* Check the REX.B bit. */
2126 if ((buf
[offset
] & 0x1) != 0)
2133 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2135 if (buf
[offset
+ 1] != 0xf8
2136 || (buf
[offset
] & 0xf8) != 0x70)
2139 /* R/M has register. */
2140 r
+= buf
[offset
] & 7;
2142 /* Registers in leaq and pushq have to be the same. */
2146 if (current_pc
> pc
+ offset_and
)
2147 cache
->saved_sp_reg
= amd64_arch_reg_to_regnum (reg
);
2149 return std::min (pc
+ offset
+ 2, current_pc
);
2152 /* Similar to amd64_analyze_stack_align for x32. */
2155 amd64_x32_analyze_stack_align (CORE_ADDR pc
, CORE_ADDR current_pc
,
2156 struct amd64_frame_cache
*cache
)
2158 /* There are 2 code sequences to re-align stack before the frame
2161 1. Use a caller-saved saved register:
2169 [addr32] leal 8(%rsp), %reg
2171 [addr32] pushq -8(%reg)
2173 2. Use a callee-saved saved register:
2183 [addr32] leal 16(%rsp), %reg
2185 [addr32] pushq -8(%reg)
2187 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2189 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2190 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2192 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2194 0x83 0xe4 0xf0 andl $-16, %esp
2195 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2200 int offset
, offset_and
;
2202 if (target_read_memory (pc
, buf
, sizeof buf
))
2205 /* Skip optional addr32 prefix. */
2206 offset
= buf
[0] == 0x67 ? 1 : 0;
2208 /* Check caller-saved saved register. The first instruction has
2209 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2210 if (((buf
[offset
] & 0xfb) == 0x48 || (buf
[offset
] & 0xfb) == 0x40)
2211 && buf
[offset
+ 1] == 0x8d
2212 && buf
[offset
+ 3] == 0x24
2213 && buf
[offset
+ 4] == 0x8)
2215 /* MOD must be binary 10 and R/M must be binary 100. */
2216 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
2219 /* REG has register number. */
2220 reg
= (buf
[offset
+ 2] >> 3) & 7;
2222 /* Check the REX.R bit. */
2223 if ((buf
[offset
] & 0x4) != 0)
2230 /* Check callee-saved saved register. The first instruction
2231 has to be "pushq %reg". */
2233 if ((buf
[offset
] & 0xf6) == 0x40
2234 && (buf
[offset
+ 1] & 0xf8) == 0x50)
2236 /* Check the REX.B bit. */
2237 if ((buf
[offset
] & 1) != 0)
2242 else if ((buf
[offset
] & 0xf8) != 0x50)
2246 reg
+= buf
[offset
] & 0x7;
2250 /* Skip optional addr32 prefix. */
2251 if (buf
[offset
] == 0x67)
2254 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2255 "leal 16(%rsp), %reg". */
2256 if (((buf
[offset
] & 0xfb) != 0x48 && (buf
[offset
] & 0xfb) != 0x40)
2257 || buf
[offset
+ 1] != 0x8d
2258 || buf
[offset
+ 3] != 0x24
2259 || buf
[offset
+ 4] != 0x10)
2262 /* MOD must be binary 10 and R/M must be binary 100. */
2263 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
2266 /* REG has register number. */
2267 r
= (buf
[offset
+ 2] >> 3) & 7;
2269 /* Check the REX.R bit. */
2270 if ((buf
[offset
] & 0x4) != 0)
2273 /* Registers in pushq and leaq have to be the same. */
2280 /* Rigister can't be %rsp nor %rbp. */
2281 if (reg
== 4 || reg
== 5)
2284 /* The next instruction may be "andq $-XXX, %rsp" or
2285 "andl $-XXX, %esp". */
2286 if (buf
[offset
] != 0x48)
2289 if (buf
[offset
+ 2] != 0xe4
2290 || (buf
[offset
+ 1] != 0x81 && buf
[offset
+ 1] != 0x83))
2293 offset_and
= offset
;
2294 offset
+= buf
[offset
+ 1] == 0x81 ? 7 : 4;
2296 /* Skip optional addr32 prefix. */
2297 if (buf
[offset
] == 0x67)
2300 /* The next instruction has to be "pushq -8(%reg)". */
2302 if (buf
[offset
] == 0xff)
2304 else if ((buf
[offset
] & 0xf6) == 0x40
2305 && buf
[offset
+ 1] == 0xff)
2307 /* Check the REX.B bit. */
2308 if ((buf
[offset
] & 0x1) != 0)
2315 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2317 if (buf
[offset
+ 1] != 0xf8
2318 || (buf
[offset
] & 0xf8) != 0x70)
2321 /* R/M has register. */
2322 r
+= buf
[offset
] & 7;
2324 /* Registers in leaq and pushq have to be the same. */
2328 if (current_pc
> pc
+ offset_and
)
2329 cache
->saved_sp_reg
= amd64_arch_reg_to_regnum (reg
);
2331 return std::min (pc
+ offset
+ 2, current_pc
);
2334 /* Do a limited analysis of the prologue at PC and update CACHE
2335 accordingly. Bail out early if CURRENT_PC is reached. Return the
2336 address where the analysis stopped.
2338 We will handle only functions beginning with:
2341 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2343 or (for the X32 ABI):
2346 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2348 The `endbr64` instruction can be found before these sequences, and will be
2351 Any function that doesn't start with one of these sequences will be
2352 assumed to have no prologue and thus no valid frame pointer in
2356 amd64_analyze_prologue (struct gdbarch
*gdbarch
,
2357 CORE_ADDR pc
, CORE_ADDR current_pc
,
2358 struct amd64_frame_cache
*cache
)
2360 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2361 /* The `endbr64` instruction. */
2362 static const gdb_byte endbr64
[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
2363 /* There are two variations of movq %rsp, %rbp. */
2364 static const gdb_byte mov_rsp_rbp_1
[3] = { 0x48, 0x89, 0xe5 };
2365 static const gdb_byte mov_rsp_rbp_2
[3] = { 0x48, 0x8b, 0xec };
2366 /* Ditto for movl %esp, %ebp. */
2367 static const gdb_byte mov_esp_ebp_1
[2] = { 0x89, 0xe5 };
2368 static const gdb_byte mov_esp_ebp_2
[2] = { 0x8b, 0xec };
2373 if (current_pc
<= pc
)
2376 if (gdbarch_ptr_bit (gdbarch
) == 32)
2377 pc
= amd64_x32_analyze_stack_align (pc
, current_pc
, cache
);
2379 pc
= amd64_analyze_stack_align (pc
, current_pc
, cache
);
2381 op
= read_code_unsigned_integer (pc
, 1, byte_order
);
2383 /* Check for the `endbr64` instruction, skip it if found. */
2384 if (op
== endbr64
[0])
2386 read_code (pc
+ 1, buf
, 3);
2388 if (memcmp (buf
, &endbr64
[1], 3) == 0)
2391 op
= read_code_unsigned_integer (pc
, 1, byte_order
);
2394 if (current_pc
<= pc
)
2397 if (op
== 0x55) /* pushq %rbp */
2399 /* Take into account that we've executed the `pushq %rbp' that
2400 starts this instruction sequence. */
2401 cache
->saved_regs
[AMD64_RBP_REGNUM
] = 0;
2402 cache
->sp_offset
+= 8;
2404 /* If that's all, return now. */
2405 if (current_pc
<= pc
+ 1)
2408 read_code (pc
+ 1, buf
, 3);
2410 /* Check for `movq %rsp, %rbp'. */
2411 if (memcmp (buf
, mov_rsp_rbp_1
, 3) == 0
2412 || memcmp (buf
, mov_rsp_rbp_2
, 3) == 0)
2414 /* OK, we actually have a frame. */
2415 cache
->frameless_p
= 0;
2419 /* For X32, also check for `movl %esp, %ebp'. */
2420 if (gdbarch_ptr_bit (gdbarch
) == 32)
2422 if (memcmp (buf
, mov_esp_ebp_1
, 2) == 0
2423 || memcmp (buf
, mov_esp_ebp_2
, 2) == 0)
2425 /* OK, we actually have a frame. */
2426 cache
->frameless_p
= 0;
2437 /* Work around false termination of prologue - GCC PR debug/48827.
2439 START_PC is the first instruction of a function, PC is its minimal already
2440 determined advanced address. Function returns PC if it has nothing to do.
2444 <-- here is 0 lines advance - the false prologue end marker.
2445 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2446 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2447 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2448 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2449 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2450 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2451 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2452 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2456 amd64_skip_xmm_prologue (CORE_ADDR pc
, CORE_ADDR start_pc
)
2458 struct symtab_and_line start_pc_sal
, next_sal
;
2459 gdb_byte buf
[4 + 8 * 7];
2465 start_pc_sal
= find_pc_sect_line (start_pc
, NULL
, 0);
2466 if (start_pc_sal
.symtab
== NULL
2467 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2468 (SYMTAB_COMPUNIT (start_pc_sal
.symtab
))) < 6
2469 || start_pc_sal
.pc
!= start_pc
|| pc
>= start_pc_sal
.end
)
2472 next_sal
= find_pc_sect_line (start_pc_sal
.end
, NULL
, 0);
2473 if (next_sal
.line
!= start_pc_sal
.line
)
2476 /* START_PC can be from overlayed memory, ignored here. */
2477 if (target_read_code (next_sal
.pc
- 4, buf
, sizeof (buf
)) != 0)
2481 if (buf
[0] != 0x84 || buf
[1] != 0xc0)
2488 for (xmmreg
= 0; xmmreg
< 8; xmmreg
++)
2490 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2491 if (buf
[offset
] != 0x0f || buf
[offset
+ 1] != 0x29
2492 || (buf
[offset
+ 2] & 0x3f) != (xmmreg
<< 3 | 0x5))
2496 if ((buf
[offset
+ 2] & 0xc0) == 0x40)
2498 /* 8-bit displacement. */
2502 else if ((buf
[offset
+ 2] & 0xc0) == 0x80)
2504 /* 32-bit displacement. */
2512 if (offset
- 4 != buf
[3])
2515 return next_sal
.end
;
2518 /* Return PC of first real instruction. */
2521 amd64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR start_pc
)
2523 struct amd64_frame_cache cache
;
2525 CORE_ADDR func_addr
;
2527 if (find_pc_partial_function (start_pc
, NULL
, &func_addr
, NULL
))
2529 CORE_ADDR post_prologue_pc
2530 = skip_prologue_using_sal (gdbarch
, func_addr
);
2531 struct compunit_symtab
*cust
= find_pc_compunit_symtab (func_addr
);
2533 /* LLVM backend (Clang/Flang) always emits a line note before the
2534 prologue and another one after. We trust clang and newer Intel
2535 compilers to emit usable line notes. */
2536 if (post_prologue_pc
2538 && COMPUNIT_PRODUCER (cust
) != NULL
2539 && (producer_is_llvm (COMPUNIT_PRODUCER (cust
))
2540 || producer_is_icc_ge_19 (COMPUNIT_PRODUCER (cust
)))))
2541 return std::max (start_pc
, post_prologue_pc
);
2544 amd64_init_frame_cache (&cache
);
2545 pc
= amd64_analyze_prologue (gdbarch
, start_pc
, 0xffffffffffffffffLL
,
2547 if (cache
.frameless_p
)
2550 return amd64_skip_xmm_prologue (pc
, start_pc
);
2554 /* Normal frames. */
2557 amd64_frame_cache_1 (struct frame_info
*this_frame
,
2558 struct amd64_frame_cache
*cache
)
2560 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2561 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2565 cache
->pc
= get_frame_func (this_frame
);
2567 amd64_analyze_prologue (gdbarch
, cache
->pc
, get_frame_pc (this_frame
),
2570 if (cache
->frameless_p
)
2572 /* We didn't find a valid frame. If we're at the start of a
2573 function, or somewhere half-way its prologue, the function's
2574 frame probably hasn't been fully setup yet. Try to
2575 reconstruct the base address for the stack frame by looking
2576 at the stack pointer. For truly "frameless" functions this
2579 if (cache
->saved_sp_reg
!= -1)
2581 /* Stack pointer has been saved. */
2582 get_frame_register (this_frame
, cache
->saved_sp_reg
, buf
);
2583 cache
->saved_sp
= extract_unsigned_integer (buf
, 8, byte_order
);
2585 /* We're halfway aligning the stack. */
2586 cache
->base
= ((cache
->saved_sp
- 8) & 0xfffffffffffffff0LL
) - 8;
2587 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->saved_sp
- 8;
2589 /* This will be added back below. */
2590 cache
->saved_regs
[AMD64_RIP_REGNUM
] -= cache
->base
;
2594 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2595 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
)
2601 get_frame_register (this_frame
, AMD64_RBP_REGNUM
, buf
);
2602 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
);
2605 /* Now that we have the base address for the stack frame we can
2606 calculate the value of %rsp in the calling frame. */
2607 cache
->saved_sp
= cache
->base
+ 16;
2609 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2610 frame we find it at the same offset from the reconstructed base
2611 address. If we're halfway aligning the stack, %rip is handled
2612 differently (see above). */
2613 if (!cache
->frameless_p
|| cache
->saved_sp_reg
== -1)
2614 cache
->saved_regs
[AMD64_RIP_REGNUM
] = 8;
2616 /* Adjust all the saved registers such that they contain addresses
2617 instead of offsets. */
2618 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
2619 if (cache
->saved_regs
[i
] != -1)
2620 cache
->saved_regs
[i
] += cache
->base
;
2625 static struct amd64_frame_cache
*
2626 amd64_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2628 struct amd64_frame_cache
*cache
;
2631 return (struct amd64_frame_cache
*) *this_cache
;
2633 cache
= amd64_alloc_frame_cache ();
2634 *this_cache
= cache
;
2638 amd64_frame_cache_1 (this_frame
, cache
);
2640 catch (const gdb_exception_error
&ex
)
2642 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2649 static enum unwind_stop_reason
2650 amd64_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2653 struct amd64_frame_cache
*cache
=
2654 amd64_frame_cache (this_frame
, this_cache
);
2657 return UNWIND_UNAVAILABLE
;
2659 /* This marks the outermost frame. */
2660 if (cache
->base
== 0)
2661 return UNWIND_OUTERMOST
;
2663 return UNWIND_NO_REASON
;
2667 amd64_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
2668 struct frame_id
*this_id
)
2670 struct amd64_frame_cache
*cache
=
2671 amd64_frame_cache (this_frame
, this_cache
);
2674 (*this_id
) = frame_id_build_unavailable_stack (cache
->pc
);
2675 else if (cache
->base
== 0)
2677 /* This marks the outermost frame. */
2681 (*this_id
) = frame_id_build (cache
->base
+ 16, cache
->pc
);
2684 static struct value
*
2685 amd64_frame_prev_register (struct frame_info
*this_frame
, void **this_cache
,
2688 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2689 struct amd64_frame_cache
*cache
=
2690 amd64_frame_cache (this_frame
, this_cache
);
2692 gdb_assert (regnum
>= 0);
2694 if (regnum
== gdbarch_sp_regnum (gdbarch
) && cache
->saved_sp
)
2695 return frame_unwind_got_constant (this_frame
, regnum
, cache
->saved_sp
);
2697 if (regnum
< AMD64_NUM_SAVED_REGS
&& cache
->saved_regs
[regnum
] != -1)
2698 return frame_unwind_got_memory (this_frame
, regnum
,
2699 cache
->saved_regs
[regnum
]);
2701 return frame_unwind_got_register (this_frame
, regnum
, regnum
);
2704 static const struct frame_unwind amd64_frame_unwind
=
2708 amd64_frame_unwind_stop_reason
,
2709 amd64_frame_this_id
,
2710 amd64_frame_prev_register
,
2712 default_frame_sniffer
2715 /* Generate a bytecode expression to get the value of the saved PC. */
2718 amd64_gen_return_address (struct gdbarch
*gdbarch
,
2719 struct agent_expr
*ax
, struct axs_value
*value
,
2722 /* The following sequence assumes the traditional use of the base
2724 ax_reg (ax
, AMD64_RBP_REGNUM
);
2726 ax_simple (ax
, aop_add
);
2727 value
->type
= register_type (gdbarch
, AMD64_RIP_REGNUM
);
2728 value
->kind
= axs_lvalue_memory
;
2732 /* Signal trampolines. */
2734 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2735 64-bit variants. This would require using identical frame caches
2736 on both platforms. */
2738 static struct amd64_frame_cache
*
2739 amd64_sigtramp_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2741 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2742 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
2743 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2744 struct amd64_frame_cache
*cache
;
2750 return (struct amd64_frame_cache
*) *this_cache
;
2752 cache
= amd64_alloc_frame_cache ();
2756 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2757 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
) - 8;
2759 addr
= tdep
->sigcontext_addr (this_frame
);
2760 gdb_assert (tdep
->sc_reg_offset
);
2761 gdb_assert (tdep
->sc_num_regs
<= AMD64_NUM_SAVED_REGS
);
2762 for (i
= 0; i
< tdep
->sc_num_regs
; i
++)
2763 if (tdep
->sc_reg_offset
[i
] != -1)
2764 cache
->saved_regs
[i
] = addr
+ tdep
->sc_reg_offset
[i
];
2768 catch (const gdb_exception_error
&ex
)
2770 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2774 *this_cache
= cache
;
2778 static enum unwind_stop_reason
2779 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2782 struct amd64_frame_cache
*cache
=
2783 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2786 return UNWIND_UNAVAILABLE
;
2788 return UNWIND_NO_REASON
;
2792 amd64_sigtramp_frame_this_id (struct frame_info
*this_frame
,
2793 void **this_cache
, struct frame_id
*this_id
)
2795 struct amd64_frame_cache
*cache
=
2796 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2799 (*this_id
) = frame_id_build_unavailable_stack (get_frame_pc (this_frame
));
2800 else if (cache
->base
== 0)
2802 /* This marks the outermost frame. */
2806 (*this_id
) = frame_id_build (cache
->base
+ 16, get_frame_pc (this_frame
));
2809 static struct value
*
2810 amd64_sigtramp_frame_prev_register (struct frame_info
*this_frame
,
2811 void **this_cache
, int regnum
)
2813 /* Make sure we've initialized the cache. */
2814 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
2816 return amd64_frame_prev_register (this_frame
, this_cache
, regnum
);
2820 amd64_sigtramp_frame_sniffer (const struct frame_unwind
*self
,
2821 struct frame_info
*this_frame
,
2824 gdbarch
*arch
= get_frame_arch (this_frame
);
2825 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (arch
);
2827 /* We shouldn't even bother if we don't have a sigcontext_addr
2829 if (tdep
->sigcontext_addr
== NULL
)
2832 if (tdep
->sigtramp_p
!= NULL
)
2834 if (tdep
->sigtramp_p (this_frame
))
2838 if (tdep
->sigtramp_start
!= 0)
2840 CORE_ADDR pc
= get_frame_pc (this_frame
);
2842 gdb_assert (tdep
->sigtramp_end
!= 0);
2843 if (pc
>= tdep
->sigtramp_start
&& pc
< tdep
->sigtramp_end
)
2850 static const struct frame_unwind amd64_sigtramp_frame_unwind
=
2854 amd64_sigtramp_frame_unwind_stop_reason
,
2855 amd64_sigtramp_frame_this_id
,
2856 amd64_sigtramp_frame_prev_register
,
2858 amd64_sigtramp_frame_sniffer
2863 amd64_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
2865 struct amd64_frame_cache
*cache
=
2866 amd64_frame_cache (this_frame
, this_cache
);
2871 static const struct frame_base amd64_frame_base
=
2873 &amd64_frame_unwind
,
2874 amd64_frame_base_address
,
2875 amd64_frame_base_address
,
2876 amd64_frame_base_address
2879 /* Normal frames, but in a function epilogue. */
2881 /* Implement the stack_frame_destroyed_p gdbarch method.
2883 The epilogue is defined here as the 'ret' instruction, which will
2884 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2885 the function's stack frame. */
2888 amd64_stack_frame_destroyed_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
2891 struct compunit_symtab
*cust
;
2893 cust
= find_pc_compunit_symtab (pc
);
2894 if (cust
!= NULL
&& COMPUNIT_EPILOGUE_UNWIND_VALID (cust
))
2897 if (target_read_memory (pc
, &insn
, 1))
2898 return 0; /* Can't read memory at pc. */
2900 if (insn
!= 0xc3) /* 'ret' instruction. */
2907 amd64_epilogue_frame_sniffer (const struct frame_unwind
*self
,
2908 struct frame_info
*this_frame
,
2909 void **this_prologue_cache
)
2911 if (frame_relative_level (this_frame
) == 0)
2912 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame
),
2913 get_frame_pc (this_frame
));
2918 static struct amd64_frame_cache
*
2919 amd64_epilogue_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2921 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2922 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2923 struct amd64_frame_cache
*cache
;
2927 return (struct amd64_frame_cache
*) *this_cache
;
2929 cache
= amd64_alloc_frame_cache ();
2930 *this_cache
= cache
;
2934 /* Cache base will be %esp plus cache->sp_offset (-8). */
2935 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2936 cache
->base
= extract_unsigned_integer (buf
, 8,
2937 byte_order
) + cache
->sp_offset
;
2939 /* Cache pc will be the frame func. */
2940 cache
->pc
= get_frame_pc (this_frame
);
2942 /* The saved %esp will be at cache->base plus 16. */
2943 cache
->saved_sp
= cache
->base
+ 16;
2945 /* The saved %eip will be at cache->base plus 8. */
2946 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->base
+ 8;
2950 catch (const gdb_exception_error
&ex
)
2952 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
2959 static enum unwind_stop_reason
2960 amd64_epilogue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
2963 struct amd64_frame_cache
*cache
2964 = amd64_epilogue_frame_cache (this_frame
, this_cache
);
2967 return UNWIND_UNAVAILABLE
;
2969 return UNWIND_NO_REASON
;
2973 amd64_epilogue_frame_this_id (struct frame_info
*this_frame
,
2975 struct frame_id
*this_id
)
2977 struct amd64_frame_cache
*cache
= amd64_epilogue_frame_cache (this_frame
,
2981 (*this_id
) = frame_id_build_unavailable_stack (cache
->pc
);
2983 (*this_id
) = frame_id_build (cache
->base
+ 8, cache
->pc
);
2986 static const struct frame_unwind amd64_epilogue_frame_unwind
=
2990 amd64_epilogue_frame_unwind_stop_reason
,
2991 amd64_epilogue_frame_this_id
,
2992 amd64_frame_prev_register
,
2994 amd64_epilogue_frame_sniffer
2997 static struct frame_id
2998 amd64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
3002 fp
= get_frame_register_unsigned (this_frame
, AMD64_RBP_REGNUM
);
3004 return frame_id_build (fp
+ 16, get_frame_pc (this_frame
));
3007 /* 16 byte align the SP per frame requirements. */
3010 amd64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
3012 return sp
& -(CORE_ADDR
)16;
3016 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
3017 in the floating-point register set REGSET to register cache
3018 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
3021 amd64_supply_fpregset (const struct regset
*regset
, struct regcache
*regcache
,
3022 int regnum
, const void *fpregs
, size_t len
)
3024 struct gdbarch
*gdbarch
= regcache
->arch ();
3025 const i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3027 gdb_assert (len
>= tdep
->sizeof_fpregset
);
3028 amd64_supply_fxsave (regcache
, regnum
, fpregs
);
3031 /* Collect register REGNUM from the register cache REGCACHE and store
3032 it in the buffer specified by FPREGS and LEN as described by the
3033 floating-point register set REGSET. If REGNUM is -1, do this for
3034 all registers in REGSET. */
3037 amd64_collect_fpregset (const struct regset
*regset
,
3038 const struct regcache
*regcache
,
3039 int regnum
, void *fpregs
, size_t len
)
3041 struct gdbarch
*gdbarch
= regcache
->arch ();
3042 const i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3044 gdb_assert (len
>= tdep
->sizeof_fpregset
);
3045 amd64_collect_fxsave (regcache
, regnum
, fpregs
);
3048 const struct regset amd64_fpregset
=
3050 NULL
, amd64_supply_fpregset
, amd64_collect_fpregset
3054 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3055 %rdi. We expect its value to be a pointer to the jmp_buf structure
3056 from which we extract the address that we will land at. This
3057 address is copied into PC. This routine returns non-zero on
3061 amd64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
3065 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
3066 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3067 int jb_pc_offset
= tdep
->jb_pc_offset
;
3068 int len
= TYPE_LENGTH (builtin_type (gdbarch
)->builtin_func_ptr
);
3070 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3071 longjmp will land. */
3072 if (jb_pc_offset
== -1)
3075 get_frame_register (frame
, AMD64_RDI_REGNUM
, buf
);
3076 jb_addr
= extract_typed_address
3077 (buf
, builtin_type (gdbarch
)->builtin_data_ptr
);
3078 if (target_read_memory (jb_addr
+ jb_pc_offset
, buf
, len
))
3081 *pc
= extract_typed_address (buf
, builtin_type (gdbarch
)->builtin_func_ptr
);
3086 static const int amd64_record_regmap
[] =
3088 AMD64_RAX_REGNUM
, AMD64_RCX_REGNUM
, AMD64_RDX_REGNUM
, AMD64_RBX_REGNUM
,
3089 AMD64_RSP_REGNUM
, AMD64_RBP_REGNUM
, AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
3090 AMD64_R8_REGNUM
, AMD64_R9_REGNUM
, AMD64_R10_REGNUM
, AMD64_R11_REGNUM
,
3091 AMD64_R12_REGNUM
, AMD64_R13_REGNUM
, AMD64_R14_REGNUM
, AMD64_R15_REGNUM
,
3092 AMD64_RIP_REGNUM
, AMD64_EFLAGS_REGNUM
, AMD64_CS_REGNUM
, AMD64_SS_REGNUM
,
3093 AMD64_DS_REGNUM
, AMD64_ES_REGNUM
, AMD64_FS_REGNUM
, AMD64_GS_REGNUM
3096 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3099 amd64_in_indirect_branch_thunk (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
3101 return x86_in_indirect_branch_thunk (pc
, amd64_register_names
,
3107 amd64_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
,
3108 const target_desc
*default_tdesc
)
3110 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3111 const struct target_desc
*tdesc
= info
.target_desc
;
3112 static const char *const stap_integer_prefixes
[] = { "$", NULL
};
3113 static const char *const stap_register_prefixes
[] = { "%", NULL
};
3114 static const char *const stap_register_indirection_prefixes
[] = { "(",
3116 static const char *const stap_register_indirection_suffixes
[] = { ")",
3119 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3120 floating-point registers. */
3121 tdep
->sizeof_fpregset
= I387_SIZEOF_FXSAVE
;
3122 tdep
->fpregset
= &amd64_fpregset
;
3124 if (! tdesc_has_registers (tdesc
))
3125 tdesc
= default_tdesc
;
3126 tdep
->tdesc
= tdesc
;
3128 tdep
->num_core_regs
= AMD64_NUM_GREGS
+ I387_NUM_REGS
;
3129 tdep
->register_names
= amd64_register_names
;
3131 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.avx512") != NULL
)
3133 tdep
->zmmh_register_names
= amd64_zmmh_names
;
3134 tdep
->k_register_names
= amd64_k_names
;
3135 tdep
->xmm_avx512_register_names
= amd64_xmm_avx512_names
;
3136 tdep
->ymm16h_register_names
= amd64_ymmh_avx512_names
;
3138 tdep
->num_zmm_regs
= 32;
3139 tdep
->num_xmm_avx512_regs
= 16;
3140 tdep
->num_ymm_avx512_regs
= 16;
3142 tdep
->zmm0h_regnum
= AMD64_ZMM0H_REGNUM
;
3143 tdep
->k0_regnum
= AMD64_K0_REGNUM
;
3144 tdep
->xmm16_regnum
= AMD64_XMM16_REGNUM
;
3145 tdep
->ymm16h_regnum
= AMD64_YMM16H_REGNUM
;
3148 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.avx") != NULL
)
3150 tdep
->ymmh_register_names
= amd64_ymmh_names
;
3151 tdep
->num_ymm_regs
= 16;
3152 tdep
->ymm0h_regnum
= AMD64_YMM0H_REGNUM
;
3155 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.mpx") != NULL
)
3157 tdep
->mpx_register_names
= amd64_mpx_names
;
3158 tdep
->bndcfgu_regnum
= AMD64_BNDCFGU_REGNUM
;
3159 tdep
->bnd0r_regnum
= AMD64_BND0R_REGNUM
;
3162 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.segments") != NULL
)
3164 tdep
->fsbase_regnum
= AMD64_FSBASE_REGNUM
;
3167 if (tdesc_find_feature (tdesc
, "org.gnu.gdb.i386.pkeys") != NULL
)
3169 tdep
->pkeys_register_names
= amd64_pkeys_names
;
3170 tdep
->pkru_regnum
= AMD64_PKRU_REGNUM
;
3171 tdep
->num_pkeys_regs
= 1;
3174 tdep
->num_byte_regs
= 20;
3175 tdep
->num_word_regs
= 16;
3176 tdep
->num_dword_regs
= 16;
3177 /* Avoid wiring in the MMX registers for now. */
3178 tdep
->num_mmx_regs
= 0;
3180 set_gdbarch_pseudo_register_read_value (gdbarch
,
3181 amd64_pseudo_register_read_value
);
3182 set_gdbarch_pseudo_register_write (gdbarch
,
3183 amd64_pseudo_register_write
);
3184 set_gdbarch_ax_pseudo_register_collect (gdbarch
,
3185 amd64_ax_pseudo_register_collect
);
3187 set_tdesc_pseudo_register_name (gdbarch
, amd64_pseudo_register_name
);
3189 /* AMD64 has an FPU and 16 SSE registers. */
3190 tdep
->st0_regnum
= AMD64_ST0_REGNUM
;
3191 tdep
->num_xmm_regs
= 16;
3193 /* This is what all the fuss is about. */
3194 set_gdbarch_long_bit (gdbarch
, 64);
3195 set_gdbarch_long_long_bit (gdbarch
, 64);
3196 set_gdbarch_ptr_bit (gdbarch
, 64);
3198 /* In contrast to the i386, on AMD64 a `long double' actually takes
3199 up 128 bits, even though it's still based on the i387 extended
3200 floating-point format which has only 80 significant bits. */
3201 set_gdbarch_long_double_bit (gdbarch
, 128);
3203 set_gdbarch_num_regs (gdbarch
, AMD64_NUM_REGS
);
3205 /* Register numbers of various important registers. */
3206 set_gdbarch_sp_regnum (gdbarch
, AMD64_RSP_REGNUM
); /* %rsp */
3207 set_gdbarch_pc_regnum (gdbarch
, AMD64_RIP_REGNUM
); /* %rip */
3208 set_gdbarch_ps_regnum (gdbarch
, AMD64_EFLAGS_REGNUM
); /* %eflags */
3209 set_gdbarch_fp0_regnum (gdbarch
, AMD64_ST0_REGNUM
); /* %st(0) */
3211 /* The "default" register numbering scheme for AMD64 is referred to
3212 as the "DWARF Register Number Mapping" in the System V psABI.
3213 The preferred debugging format for all known AMD64 targets is
3214 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3215 DWARF-1), but we provide the same mapping just in case. This
3216 mapping is also used for stabs, which GCC does support. */
3217 set_gdbarch_stab_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
3218 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
3220 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3221 be in use on any of the supported AMD64 targets. */
3223 /* Call dummy code. */
3224 set_gdbarch_push_dummy_call (gdbarch
, amd64_push_dummy_call
);
3225 set_gdbarch_frame_align (gdbarch
, amd64_frame_align
);
3226 set_gdbarch_frame_red_zone_size (gdbarch
, 128);
3228 set_gdbarch_convert_register_p (gdbarch
, i387_convert_register_p
);
3229 set_gdbarch_register_to_value (gdbarch
, i387_register_to_value
);
3230 set_gdbarch_value_to_register (gdbarch
, i387_value_to_register
);
3232 set_gdbarch_return_value (gdbarch
, amd64_return_value
);
3234 set_gdbarch_skip_prologue (gdbarch
, amd64_skip_prologue
);
3236 tdep
->record_regmap
= amd64_record_regmap
;
3238 set_gdbarch_dummy_id (gdbarch
, amd64_dummy_id
);
3240 /* Hook the function epilogue frame unwinder. This unwinder is
3241 appended to the list first, so that it supercedes the other
3242 unwinders in function epilogues. */
3243 frame_unwind_prepend_unwinder (gdbarch
, &amd64_epilogue_frame_unwind
);
3245 /* Hook the prologue-based frame unwinders. */
3246 frame_unwind_append_unwinder (gdbarch
, &amd64_sigtramp_frame_unwind
);
3247 frame_unwind_append_unwinder (gdbarch
, &amd64_frame_unwind
);
3248 frame_base_set_default (gdbarch
, &amd64_frame_base
);
3250 set_gdbarch_get_longjmp_target (gdbarch
, amd64_get_longjmp_target
);
3252 set_gdbarch_relocate_instruction (gdbarch
, amd64_relocate_instruction
);
3254 set_gdbarch_gen_return_address (gdbarch
, amd64_gen_return_address
);
3256 /* SystemTap variables and functions. */
3257 set_gdbarch_stap_integer_prefixes (gdbarch
, stap_integer_prefixes
);
3258 set_gdbarch_stap_register_prefixes (gdbarch
, stap_register_prefixes
);
3259 set_gdbarch_stap_register_indirection_prefixes (gdbarch
,
3260 stap_register_indirection_prefixes
);
3261 set_gdbarch_stap_register_indirection_suffixes (gdbarch
,
3262 stap_register_indirection_suffixes
);
3263 set_gdbarch_stap_is_single_operand (gdbarch
,
3264 i386_stap_is_single_operand
);
3265 set_gdbarch_stap_parse_special_token (gdbarch
,
3266 i386_stap_parse_special_token
);
3267 set_gdbarch_insn_is_call (gdbarch
, amd64_insn_is_call
);
3268 set_gdbarch_insn_is_ret (gdbarch
, amd64_insn_is_ret
);
3269 set_gdbarch_insn_is_jump (gdbarch
, amd64_insn_is_jump
);
3271 set_gdbarch_in_indirect_branch_thunk (gdbarch
,
3272 amd64_in_indirect_branch_thunk
);
3274 register_amd64_ravenscar_ops (gdbarch
);
3277 /* Initialize ARCH for x86-64, no osabi. */
3280 amd64_none_init_abi (gdbarch_info info
, gdbarch
*arch
)
3282 amd64_init_abi (info
, arch
, amd64_target_description (X86_XSTATE_SSE_MASK
,
3286 static struct type
*
3287 amd64_x32_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
3289 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3291 switch (regnum
- tdep
->eax_regnum
)
3293 case AMD64_RBP_REGNUM
: /* %ebp */
3294 case AMD64_RSP_REGNUM
: /* %esp */
3295 return builtin_type (gdbarch
)->builtin_data_ptr
;
3296 case AMD64_RIP_REGNUM
: /* %eip */
3297 return builtin_type (gdbarch
)->builtin_func_ptr
;
3300 return i386_pseudo_register_type (gdbarch
, regnum
);
3304 amd64_x32_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
,
3305 const target_desc
*default_tdesc
)
3307 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3309 amd64_init_abi (info
, gdbarch
, default_tdesc
);
3311 tdep
->num_dword_regs
= 17;
3312 set_tdesc_pseudo_register_type (gdbarch
, amd64_x32_pseudo_register_type
);
3314 set_gdbarch_long_bit (gdbarch
, 32);
3315 set_gdbarch_ptr_bit (gdbarch
, 32);
3318 /* Initialize ARCH for x64-32, no osabi. */
3321 amd64_x32_none_init_abi (gdbarch_info info
, gdbarch
*arch
)
3323 amd64_x32_init_abi (info
, arch
,
3324 amd64_target_description (X86_XSTATE_SSE_MASK
, true));
3327 /* Return the target description for a specified XSAVE feature mask. */
3329 const struct target_desc
*
3330 amd64_target_description (uint64_t xcr0
, bool segments
)
3332 static target_desc
*amd64_tdescs \
3333 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3334 target_desc
**tdesc
;
3336 tdesc
= &amd64_tdescs
[(xcr0
& X86_XSTATE_AVX
) ? 1 : 0]
3337 [(xcr0
& X86_XSTATE_MPX
) ? 1 : 0]
3338 [(xcr0
& X86_XSTATE_AVX512
) ? 1 : 0]
3339 [(xcr0
& X86_XSTATE_PKRU
) ? 1 : 0]
3343 *tdesc
= amd64_create_target_description (xcr0
, false, false,
3349 void _initialize_amd64_tdep ();
3351 _initialize_amd64_tdep ()
3353 gdbarch_register_osabi (bfd_arch_i386
, bfd_mach_x86_64
, GDB_OSABI_NONE
,
3354 amd64_none_init_abi
);
3355 gdbarch_register_osabi (bfd_arch_i386
, bfd_mach_x64_32
, GDB_OSABI_NONE
,
3356 amd64_x32_none_init_abi
);
3360 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3361 sense that the instruction pointer and data pointer are simply
3362 64-bit offsets into the code segment and the data segment instead
3363 of a selector offset pair. The functions below store the upper 32
3364 bits of these pointers (instead of just the 16-bits of the segment
3367 /* Fill register REGNUM in REGCACHE with the appropriate
3368 floating-point or SSE register value from *FXSAVE. If REGNUM is
3369 -1, do this for all registers. This function masks off any of the
3370 reserved bits in *FXSAVE. */
3373 amd64_supply_fxsave (struct regcache
*regcache
, int regnum
,
3376 struct gdbarch
*gdbarch
= regcache
->arch ();
3377 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3379 i387_supply_fxsave (regcache
, regnum
, fxsave
);
3382 && gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3384 const gdb_byte
*regs
= (const gdb_byte
*) fxsave
;
3386 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3387 regcache
->raw_supply (I387_FISEG_REGNUM (tdep
), regs
+ 12);
3388 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3389 regcache
->raw_supply (I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3393 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3396 amd64_supply_xsave (struct regcache
*regcache
, int regnum
,
3399 struct gdbarch
*gdbarch
= regcache
->arch ();
3400 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3402 i387_supply_xsave (regcache
, regnum
, xsave
);
3405 && gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3407 const gdb_byte
*regs
= (const gdb_byte
*) xsave
;
3410 clear_bv
= i387_xsave_get_clear_bv (gdbarch
, xsave
);
3412 /* If the FISEG and FOSEG registers have not been initialised yet
3413 (their CLEAR_BV bit is set) then their default values of zero will
3414 have already been setup by I387_SUPPLY_XSAVE. */
3415 if (!(clear_bv
& X86_XSTATE_X87
))
3417 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3418 regcache
->raw_supply (I387_FISEG_REGNUM (tdep
), regs
+ 12);
3419 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3420 regcache
->raw_supply (I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3425 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3426 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3427 all registers. This function doesn't touch any of the reserved
3431 amd64_collect_fxsave (const struct regcache
*regcache
, int regnum
,
3434 struct gdbarch
*gdbarch
= regcache
->arch ();
3435 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3436 gdb_byte
*regs
= (gdb_byte
*) fxsave
;
3438 i387_collect_fxsave (regcache
, regnum
, fxsave
);
3440 if (gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3442 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3443 regcache
->raw_collect (I387_FISEG_REGNUM (tdep
), regs
+ 12);
3444 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3445 regcache
->raw_collect (I387_FOSEG_REGNUM (tdep
), regs
+ 20);
3449 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3452 amd64_collect_xsave (const struct regcache
*regcache
, int regnum
,
3453 void *xsave
, int gcore
)
3455 struct gdbarch
*gdbarch
= regcache
->arch ();
3456 i386_gdbarch_tdep
*tdep
= (i386_gdbarch_tdep
*) gdbarch_tdep (gdbarch
);
3457 gdb_byte
*regs
= (gdb_byte
*) xsave
;
3459 i387_collect_xsave (regcache
, regnum
, xsave
, gcore
);
3461 if (gdbarch_bfd_arch_info (gdbarch
)->bits_per_word
== 64)
3463 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
3464 regcache
->raw_collect (I387_FISEG_REGNUM (tdep
),
3466 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
3467 regcache
->raw_collect (I387_FOSEG_REGNUM (tdep
),