Replace regcache_raw_read with regcache->raw_read

gdb/amd64-tdep.c

/* Target-dependent code for AMD64.

   Copyright (C) 2001-2018 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "infrun.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"
#include "disasm.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"
#include "x86-xstate.h"
#include <algorithm>
#include "target-descriptions.h"
#include "arch/amd64.h"
#include "producer.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/byte-vector.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

static const char *amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

static const char *amd64_pkeys_names[] = {
  "pkru"
};

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (ymm0_regnum >= 0
      && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
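
/* For example, DWARF register 4 maps to AMD64_RSI_REGNUM, and DWARF
   register 17 (%xmm0) is remapped onto the %ymm0 pseudo register when
   the target description provides AVX, so that the full vector
   contents remain visible through the DWARF mapping.  */
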
/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
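
/* For example, a ModRM reg field of 5 denotes %rbp, so
   amd64_arch_reg_to_regnum (5) yields AMD64_RBP_REGNUM.  */
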
/* Register names for byte pseudo-registers.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};

/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  struct regcache *regcache,
				  int regnum)
{
  gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum register_status status;
  struct value *result_value;
  gdb_byte *buf;

  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Special handling for AH, BH, CH, DH.  */
	  status = regcache->raw_read (gpnum - AMD64_NUM_LOWER_BYTE_REGS,
				       raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
      else
	{
	  status = regcache->raw_read (gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      /* Extract (always little endian).  */
      status = regcache->raw_read (gpnum, raw_buf);
      if (status == REG_VALID)
	memcpy (buf, raw_buf, 4);
      else
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
    }
  else
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}
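
/* For example, reading the pseudo register %ah above does a raw read
   of %rax and extracts byte 1, while reading %eax extracts the low
   four bytes of %rax.  */
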
static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Read ...  AH, BH, CH, DH.  */
	  regcache_raw_read (regcache,
			     gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache,
			      gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	}
      else
	{
	  /* Read ...  */
	  regcache_raw_read (regcache, gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache, gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      /* Read ...  */
      regcache_raw_read (regcache, gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache_raw_write (regcache, gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}
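
/* Note the read-modify-write above: writing 0x12 to %ah replaces only
   byte 1 of %rax.  Writing a dword register such as %eax preserves the
   upper 32 bits of %rax here, unlike a hardware write to %eax, which
   zero-extends into the full 64-bit register.  */
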
/* Implement the 'ax_pseudo_register_collect' gdbarch method.  */

static int
amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
				  struct agent_expr *ax, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
      else
	ax_reg_mask (ax, gpnum);
      return 0;
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      ax_reg_mask (ax, gpnum);
      return 0;
    }
  else
    return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
}

\f

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
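
/* For example, merging AMD64_NO_CLASS with AMD64_SSE yields AMD64_SSE
   by rule (b), while merging AMD64_INTEGER with AMD64_SSE yields
   AMD64_INTEGER by rule (d).  */
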
static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
	  if (pos == 0)
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}
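
/* For example, given

     struct s { char c; double d; };

   the first eightbyte merges NO_CLASS with INTEGER (from C), giving
   AMD64_INTEGER, and the second eightbyte holds D, giving AMD64_SSE.  */
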
/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
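
/* For example, "int" and "long" fall in the INTEGER class, "float" and
   "double" in the SSE class, and a 16-byte "long double" is classified
   as X87 plus X87UP.  */
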
static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
        returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
	  regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
	  regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, std::min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, std::min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
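
/* For example, struct s { long l; double d; } is classified
   {AMD64_INTEGER, AMD64_SSE}, so a function returning it leaves L in
   %rax and D in %xmm0.  */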
\f

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used".  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
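
/* For example, for a call f (1, 2.0, s) where S is
   struct { long a; long b; }, the integer 1 is passed in %rdi, 2.0 in
   %xmm0, S in the pair %rsi/%rdx, and %al is set to 1, the number of
   SSE registers used.  */
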
static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  /* The BND registers may hold arbitrary values at the moment of the
     inferior call.  This can cause boundary violations that are not
     due to a real bug or even desired by the user.  The best we can do
     is reset them to the INIT state, allowing access to the whole of
     memory, before pushing the inferior call.  */
  i387_reset_bnd_regs (gdbarch, regcache);

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
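
/* After amd64_push_dummy_call the inferior stack looks as it would on
   entry to the called function: %rsp was 16-byte aligned before the
   return address (BP_ADDR) was pushed, %rsp now points at that return
   address, and %rbp holds the same value.  */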
\f
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct amd64_displaced_step_closure : public displaced_step_closure
{
  amd64_displaced_step_closure (int insn_buf_len)
  : insn_buf (insn_buf_len, 0)
  {}

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used = 0;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* True if PFX is the start of the 2-byte VEX prefix.  */

static bool
vex2_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc5;
}

/* True if PFX is the start of the 3-byte VEX prefix.  */

static bool
vex3_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc4;
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}
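
/* For example, given the bytes f0 48 01 08 (lock add %rcx,(%rax)),
   amd64_skip_prefixes steps over the 0xf0 LOCK prefix and returns a
   pointer to the 0x48 REX.W byte.  */
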
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. by avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, an implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
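
/* For example, for add %rcx,(%rax) the mask covers %rcx (the ModRM reg
   field) and %rax (the rm field) in addition to the always-avoided
   %rax, %rdx and %rsp, so the first free register is %rbx
   (architectural number 3).  */
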
/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
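
/* For example, decoding the bytes 48 89 05 d2 04 00 00
   (mov %rax,0x4d2(%rip)) yields enc_prefix_offset == 0 (the REX.W
   prefix), opcode_offset == 1, opcode_len == 1 and modrm_offset == 2;
   the ModRM byte 0x05 selects %rip-relative addressing.  */
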
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, amd64_displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should already be unset (or VEX.!B set), since the insn was
     using %rip-relative addressing, but clear it (set it for VEX)
     anyway: TMP_REGNO is not in r8-r15, so the extension bit must not
     be set.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
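
/* Continuing the mov %rax,0x4d2(%rip) example above: the lowest free
   register is %rcx (architectural number 1), so the ModRM byte 0x05
   becomes 0x81 (mod = 10, rm = 001), i.e. mov %rax,0x4d2(%rcx), with
   %rcx holding the original post-instruction %rip value.  */
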
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      amd64_displaced_step_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  amd64_displaced_step_closure *dsc
    = new amd64_displaced_step_closure (len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}
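
/* For example, jmp *%rax encodes as ff e0; the ModRM byte 0xe0 has reg
   field /4 (0xe0 & 0x38 == 0x20), so amd64_absolute_jmp_p returns 1.  */
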
/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;
  gdb_byte *buf;
  int len, classification;

  len = gdbarch_max_insn_length (gdbarch);
  buf = (gdb_byte *) alloca (len);

  read_code (addr, buf, len);
  amd64_get_insn_details (buf, &details);

  classification = pred (&details);

  return classification;
}

/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  amd64_displaced_step_closure *dsc = (amd64_displaced_step_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: fixup (%s, %s), "
			"insn = 0x%02x 0x%02x ...\n",
			paddress (gdbarch, from), paddress (gdbarch, to),
			insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures it's a nop; we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: syscall changed %%rip; "
				"not relocating\n");
	}
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: "
				"relocated %%rip from %s to %s\n",
				paddress (gdbarch, orig_rip),
				paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: relocated return addr at %s "
			    "to %s\n",
			    paddress (gdbarch, rsp),
			    paddress (gdbarch, retaddr));
    }
}
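
/* To illustrate the relocation above: if an ordinary 3-byte insn was
   copied from FROM to TO, the step leaves %rip == TO + 3, and
   subtracting INSN_OFFSET (TO - FROM) restores %rip to FROM + 3.  */
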
/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}
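
/* For example, for the bytes 48 8b 05 d2 04 00 00
   (mov 0x4d2(%rip),%rax) the ModRM byte is at offset 2, so
   rip_relative_offset returns 3, the offset of the 32-bit
   displacement.  */
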
1766 static void
1767 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1768 {
1769 target_write_memory (*to, buf, len);
1770 *to += len;
1771 }
1772
1773 static void
1774 amd64_relocate_instruction (struct gdbarch *gdbarch,
1775 CORE_ADDR *to, CORE_ADDR oldloc)
1776 {
1777 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1778 int len = gdbarch_max_insn_length (gdbarch);
1779 /* Extra space for sentinels. */
1780 int fixup_sentinel_space = len;
1781 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
1782 struct amd64_insn insn_details;
1783 int offset = 0;
1784 LONGEST rel32, newrel;
1785 gdb_byte *insn;
1786 int insn_length;
1787
1788 read_memory (oldloc, buf, len);
1789
1790 /* Set up the sentinel space so we don't have to worry about running
1791 off the end of the buffer. An excessive number of leading prefixes
1792 could otherwise cause this. */
1793 memset (buf + len, 0, fixup_sentinel_space);
1794
1795 insn = buf;
1796 amd64_get_insn_details (insn, &insn_details);
1797
1798 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1799
1800 /* Skip legacy instruction prefixes. */
1801 insn = amd64_skip_prefixes (insn);
1802
1803 /* Adjust calls with 32-bit relative addresses as push/jump, with
1804 the address pushed being the location where the original call in
1805 the user program would return to. */
1806 if (insn[0] == 0xe8)
1807 {
1808 gdb_byte push_buf[32];
1809 CORE_ADDR ret_addr;
1810 int i = 0;
1811
1812 /* Where "ret" in the original code will return to. */
1813 ret_addr = oldloc + insn_length;
1814
1815 /* If pushing an address higher than or equal to 0x80000000,
1816 avoid 'pushq', as that sign extends its 32-bit operand, which
1817 would be incorrect. */
1818 if (ret_addr <= 0x7fffffff)
1819 {
1820 push_buf[0] = 0x68; /* pushq $... */
1821 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1822 i = 5;
1823 }
1824 else
1825 {
1826 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1827 push_buf[i++] = 0x83;
1828 push_buf[i++] = 0xec;
1829 push_buf[i++] = 0x08;
1830
1831 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1832 push_buf[i++] = 0x04;
1833 push_buf[i++] = 0x24;
1834 store_unsigned_integer (&push_buf[i], 4, byte_order,
1835 ret_addr & 0xffffffff);
1836 i += 4;
1837
1838 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1839 push_buf[i++] = 0x44;
1840 push_buf[i++] = 0x24;
1841 push_buf[i++] = 0x04;
1842 store_unsigned_integer (&push_buf[i], 4, byte_order,
1843 ret_addr >> 32);
1844 i += 4;
1845 }
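/* For illustration (hypothetical value): with RET_ADDR 0x123456789a the
   sequence built above is 48 83 ec 08 (sub $0x8,%rsp), c7 04 24 9a 78 56 34
   (movl $0x3456789a,(%rsp)) and c7 44 24 04 12 00 00 00
   (movl $0x12,4(%rsp)), i.e. a 64-bit push with no sign extension. */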
1846 gdb_assert (i <= sizeof (push_buf));
1847 /* Push the push. */
1848 append_insns (to, i, push_buf);
1849
1850 /* Convert the relative call to a relative jump. */
1851 insn[0] = 0xe9;
1852
1853 /* Adjust the destination offset: the copy at *TO must reach the same target as the original at OLDLOC. */
1854 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1855 newrel = (oldloc - *to) + rel32;
1856 store_signed_integer (insn + 1, 4, byte_order, newrel);
1857
1858 if (debug_displaced)
1859 fprintf_unfiltered (gdb_stdlog,
1860 "Adjusted insn rel32=%s at %s to"
1861 " rel32=%s at %s\n",
1862 hex_string (rel32), paddress (gdbarch, oldloc),
1863 hex_string (newrel), paddress (gdbarch, *to));
1864
1865 /* Write the adjusted jump into its displaced location. */
1866 append_insns (to, 5, insn);
1867 return;
1868 }
1869
1870 offset = rip_relative_offset (&insn_details);
1871 if (!offset)
1872 {
1873 /* Adjust jumps with 32-bit relative addresses. Calls are
1874 already handled above. */
1875 if (insn[0] == 0xe9)
1876 offset = 1;
1877 /* Adjust conditional jumps (two-byte Jcc rel32, opcodes 0x0f 0x80..0x8f). */
1878 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1879 offset = 2;
1880 }
1881
1882 if (offset)
1883 {
1884 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1885 newrel = (oldloc - *to) + rel32;
1886 store_signed_integer (insn + offset, 4, byte_order, newrel);
1887 if (debug_displaced)
1888 fprintf_unfiltered (gdb_stdlog,
1889 "Adjusted insn rel32=%s at %s to"
1890 " rel32=%s at %s\n",
1891 hex_string (rel32), paddress (gdbarch, oldloc),
1892 hex_string (newrel), paddress (gdbarch, *to));
1893 }
1894
1895 /* Write the adjusted instruction into its displaced location. */
1896 append_insns (to, insn_length, buf);
1897 }
1898
1899 \f
1900 /* The maximum number of saved registers. This should include %rip. */
1901 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1902
1903 struct amd64_frame_cache
1904 {
1905 /* Base address. */
1906 CORE_ADDR base;
1907 int base_p;
1908 CORE_ADDR sp_offset;
1909 CORE_ADDR pc;
1910
1911 /* Saved registers. */
1912 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1913 CORE_ADDR saved_sp;
1914 int saved_sp_reg;
1915
1916 /* Do we have a frame? */
1917 int frameless_p;
1918 };
1919
1920 /* Initialize a frame cache. */
1921
1922 static void
1923 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1924 {
1925 int i;
1926
1927 /* Base address. */
1928 cache->base = 0;
1929 cache->base_p = 0;
1930 cache->sp_offset = -8;
1931 cache->pc = 0;
1932
1933 /* Saved registers. We initialize these to -1 since zero is a valid
1934 offset (that's where %rbp is supposed to be stored).
1935 The values start out as being offsets, and are later converted to
1936 addresses (at which point -1 is interpreted as an address, still meaning
1937 "invalid"). */
1938 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1939 cache->saved_regs[i] = -1;
1940 cache->saved_sp = 0;
1941 cache->saved_sp_reg = -1;
1942
1943 /* Frameless until proven otherwise. */
1944 cache->frameless_p = 1;
1945 }
1946
1947 /* Allocate and initialize a frame cache. */
1948
1949 static struct amd64_frame_cache *
1950 amd64_alloc_frame_cache (void)
1951 {
1952 struct amd64_frame_cache *cache;
1953
1954 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1955 amd64_init_frame_cache (cache);
1956 return cache;
1957 }
1958
1959 /* GCC 4.4 and later can put code in the prologue to realign the
1960 stack pointer. Check whether PC points to such code, and update
1961 CACHE accordingly. Return the first instruction after the code
1962 sequence or CURRENT_PC, whichever is smaller. If we don't
1963 recognize the code, return PC. */
1964
1965 static CORE_ADDR
1966 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1967 struct amd64_frame_cache *cache)
1968 {
1969 /* There are 2 code sequences to re-align the stack before the frame
1970 gets set up:
1971
1972 1. Use a caller-saved register:
1973
1974 leaq 8(%rsp), %reg
1975 andq $-XXX, %rsp
1976 pushq -8(%reg)
1977
1978 2. Use a callee-saved register:
1979
1980 pushq %reg
1981 leaq 16(%rsp), %reg
1982 andq $-XXX, %rsp
1983 pushq -8(%reg)
1984
1985 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1986
1987 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1988 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1989 */
1990
1991 gdb_byte buf[18];
1992 int reg, r;
1993 int offset, offset_and;
1994
1995 if (target_read_code (pc, buf, sizeof buf))
1996 return pc;
1997
1998 /* Check the caller-saved register case. The first instruction has
1999 to be "leaq 8(%rsp), %reg". */
2000 if ((buf[0] & 0xfb) == 0x48
2001 && buf[1] == 0x8d
2002 && buf[3] == 0x24
2003 && buf[4] == 0x8)
2004 {
2005 /* MOD must be binary 10 and R/M must be binary 100. */
2006 if ((buf[2] & 0xc7) != 0x44)
2007 return pc;
2008
2009 /* REG has register number. */
2010 reg = (buf[2] >> 3) & 7;
2011
2012 /* Check the REX.R bit. */
2013 if (buf[0] == 0x4c)
2014 reg += 8;
2015
2016 offset = 5;
2017 }
2018 else
2019 {
2020 /* Check the callee-saved register case. The first instruction
2021 has to be "pushq %reg". */
2022 reg = 0;
2023 if ((buf[0] & 0xf8) == 0x50)
2024 offset = 0;
2025 else if ((buf[0] & 0xf6) == 0x40
2026 && (buf[1] & 0xf8) == 0x50)
2027 {
2028 /* Check the REX.B bit. */
2029 if ((buf[0] & 1) != 0)
2030 reg = 8;
2031
2032 offset = 1;
2033 }
2034 else
2035 return pc;
2036
2037 /* Get register. */
2038 reg += buf[offset] & 0x7;
2039
2040 offset++;
2041
2042 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2043 if ((buf[offset] & 0xfb) != 0x48
2044 || buf[offset + 1] != 0x8d
2045 || buf[offset + 3] != 0x24
2046 || buf[offset + 4] != 0x10)
2047 return pc;
2048
2049 /* MOD must be binary 10 and R/M must be binary 100. */
2050 if ((buf[offset + 2] & 0xc7) != 0x44)
2051 return pc;
2052
2053 /* REG has register number. */
2054 r = (buf[offset + 2] >> 3) & 7;
2055
2056 /* Check the REX.R bit. */
2057 if (buf[offset] == 0x4c)
2058 r += 8;
2059
2060 /* Registers in pushq and leaq have to be the same. */
2061 if (reg != r)
2062 return pc;
2063
2064 offset += 5;
2065 }
2066
2067 /* Register can't be %rsp or %rbp. */
2068 if (reg == 4 || reg == 5)
2069 return pc;
2070
2071 /* The next instruction has to be "andq $-XXX, %rsp". */
2072 if (buf[offset] != 0x48
2073 || buf[offset + 2] != 0xe4
2074 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2075 return pc;
2076
2077 offset_and = offset;
2078 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2079
2080 /* The next instruction has to be "pushq -8(%reg)". */
2081 r = 0;
2082 if (buf[offset] == 0xff)
2083 offset++;
2084 else if ((buf[offset] & 0xf6) == 0x40
2085 && buf[offset + 1] == 0xff)
2086 {
2087 /* Check the REX.B bit. */
2088 if ((buf[offset] & 0x1) != 0)
2089 r = 8;
2090 offset += 2;
2091 }
2092 else
2093 return pc;
2094
2095 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2096 01. */
2097 if (buf[offset + 1] != 0xf8
2098 || (buf[offset] & 0xf8) != 0x70)
2099 return pc;
2100
2101 /* R/M has register. */
2102 r += buf[offset] & 7;
2103
2104 /* Registers in leaq and pushq have to be the same. */
2105 if (reg != r)
2106 return pc;
2107
2108 if (current_pc > pc + offset_and)
2109 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2110
2111 return std::min (pc + offset + 2, current_pc);
2112 }
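/* A worked example for the function above (illustrative bytes), using
   the caller-saved variant with %r10:
   4c 8d 54 24 08 leaq 0x8(%rsp), %r10
   48 83 e4 f0 andq $-16, %rsp
   41 ff 72 f8 pushq -0x8(%r10)
   Both REG and R decode to 10 here, so SAVED_SP_REG is set to the
   register number of %r10 once CURRENT_PC is past the start of the
   andq. */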
2113
2114 /* Similar to amd64_analyze_stack_align for x32. */
2115
2116 static CORE_ADDR
2117 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2118 struct amd64_frame_cache *cache)
2119 {
2120 /* There are 2 code sequences to re-align the stack before the frame
2121 gets set up:
2122
2123 1. Use a caller-saved register:
2124
2125 leaq 8(%rsp), %reg
2126 andq $-XXX, %rsp
2127 pushq -8(%reg)
2128
2129 or
2130
2131 [addr32] leal 8(%rsp), %reg
2132 andl $-XXX, %esp
2133 [addr32] pushq -8(%reg)
2134
2135 2. Use a callee-saved register:
2136
2137 pushq %reg
2138 leaq 16(%rsp), %reg
2139 andq $-XXX, %rsp
2140 pushq -8(%reg)
2141
2142 or
2143
2144 pushq %reg
2145 [addr32] leal 16(%rsp), %reg
2146 andl $-XXX, %esp
2147 [addr32] pushq -8(%reg)
2148
2149 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2150
2151 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2152 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2153
2154 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2155
2156 0x83 0xe4 0xf0 andl $-16, %esp
2157 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2158 */
2159
2160 gdb_byte buf[19];
2161 int reg, r;
2162 int offset, offset_and;
2163
2164 if (target_read_memory (pc, buf, sizeof buf))
2165 return pc;
2166
2167 /* Skip optional addr32 prefix. */
2168 offset = buf[0] == 0x67 ? 1 : 0;
2169
2170 /* Check the caller-saved register case. The first instruction has
2171 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2172 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2173 && buf[offset + 1] == 0x8d
2174 && buf[offset + 3] == 0x24
2175 && buf[offset + 4] == 0x8)
2176 {
2177 /* MOD must be binary 10 and R/M must be binary 100. */
2178 if ((buf[offset + 2] & 0xc7) != 0x44)
2179 return pc;
2180
2181 /* REG has register number. */
2182 reg = (buf[offset + 2] >> 3) & 7;
2183
2184 /* Check the REX.R bit. */
2185 if ((buf[offset] & 0x4) != 0)
2186 reg += 8;
2187
2188 offset += 5;
2189 }
2190 else
2191 {
2192 /* Check the callee-saved register case. The first instruction
2193 has to be "pushq %reg". */
2194 reg = 0;
2195 if ((buf[offset] & 0xf6) == 0x40
2196 && (buf[offset + 1] & 0xf8) == 0x50)
2197 {
2198 /* Check the REX.B bit. */
2199 if ((buf[offset] & 1) != 0)
2200 reg = 8;
2201
2202 offset += 1;
2203 }
2204 else if ((buf[offset] & 0xf8) != 0x50)
2205 return pc;
2206
2207 /* Get register. */
2208 reg += buf[offset] & 0x7;
2209
2210 offset++;
2211
2212 /* Skip optional addr32 prefix. */
2213 if (buf[offset] == 0x67)
2214 offset++;
2215
2216 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2217 "leal 16(%rsp), %reg". */
2218 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2219 || buf[offset + 1] != 0x8d
2220 || buf[offset + 3] != 0x24
2221 || buf[offset + 4] != 0x10)
2222 return pc;
2223
2224 /* MOD must be binary 10 and R/M must be binary 100. */
2225 if ((buf[offset + 2] & 0xc7) != 0x44)
2226 return pc;
2227
2228 /* REG has register number. */
2229 r = (buf[offset + 2] >> 3) & 7;
2230
2231 /* Check the REX.R bit. */
2232 if ((buf[offset] & 0x4) != 0)
2233 r += 8;
2234
2235 /* Registers in pushq and leaq have to be the same. */
2236 if (reg != r)
2237 return pc;
2238
2239 offset += 5;
2240 }
2241
2242 /* Register can't be %rsp or %rbp. */
2243 if (reg == 4 || reg == 5)
2244 return pc;
2245
2246 /* The next instruction may be "andq $-XXX, %rsp" or
2247 "andl $-XXX, %esp". */
2248 if (buf[offset] != 0x48)
2249 offset--;
2250
2251 if (buf[offset + 2] != 0xe4
2252 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2253 return pc;
2254
2255 offset_and = offset;
2256 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2257
2258 /* Skip optional addr32 prefix. */
2259 if (buf[offset] == 0x67)
2260 offset++;
2261
2262 /* The next instruction has to be "pushq -8(%reg)". */
2263 r = 0;
2264 if (buf[offset] == 0xff)
2265 offset++;
2266 else if ((buf[offset] & 0xf6) == 0x40
2267 && buf[offset + 1] == 0xff)
2268 {
2269 /* Check the REX.B bit. */
2270 if ((buf[offset] & 0x1) != 0)
2271 r = 8;
2272 offset += 2;
2273 }
2274 else
2275 return pc;
2276
2277 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2278 01. */
2279 if (buf[offset + 1] != 0xf8
2280 || (buf[offset] & 0xf8) != 0x70)
2281 return pc;
2282
2283 /* R/M has register. */
2284 r += buf[offset] & 7;
2285
2286 /* Registers in leaq and pushq have to be the same. */
2287 if (reg != r)
2288 return pc;
2289
2290 if (current_pc > pc + offset_and)
2291 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2292
2293 return std::min (pc + offset + 2, current_pc);
2294 }
2295
2296 /* Do a limited analysis of the prologue at PC and update CACHE
2297 accordingly. Bail out early if CURRENT_PC is reached. Return the
2298 address where the analysis stopped.
2299
2300 We will handle only functions beginning with:
2301
2302 pushq %rbp 0x55
2303 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2304
2305 or (for the X32 ABI):
2306
2307 pushq %rbp 0x55
2308 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2309
2310 Any function that doesn't start with one of these sequences will be
2311 assumed to have no prologue and thus no valid frame pointer in
2312 %rbp. */
2313
2314 static CORE_ADDR
2315 amd64_analyze_prologue (struct gdbarch *gdbarch,
2316 CORE_ADDR pc, CORE_ADDR current_pc,
2317 struct amd64_frame_cache *cache)
2318 {
2319 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2320 /* There are two variations of movq %rsp, %rbp. */
2321 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2322 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2323 /* Ditto for movl %esp, %ebp. */
2324 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2325 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2326
2327 gdb_byte buf[3];
2328 gdb_byte op;
2329
2330 if (current_pc <= pc)
2331 return current_pc;
2332
2333 if (gdbarch_ptr_bit (gdbarch) == 32)
2334 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2335 else
2336 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2337
2338 op = read_code_unsigned_integer (pc, 1, byte_order);
2339
2340 if (op == 0x55) /* pushq %rbp */
2341 {
2342 /* Take into account that we've executed the `pushq %rbp' that
2343 starts this instruction sequence. */
2344 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2345 cache->sp_offset += 8;
2346
2347 /* If that's all, return now. */
2348 if (current_pc <= pc + 1)
2349 return current_pc;
2350
2351 read_code (pc + 1, buf, 3);
2352
2353 /* Check for `movq %rsp, %rbp'. */
2354 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2355 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2356 {
2357 /* OK, we actually have a frame. */
2358 cache->frameless_p = 0;
2359 return pc + 4;
2360 }
2361
2362 /* For X32, also check for `movl %esp, %ebp'. */
2363 if (gdbarch_ptr_bit (gdbarch) == 32)
2364 {
2365 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2366 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2367 {
2368 /* OK, we actually have a frame. */
2369 cache->frameless_p = 0;
2370 return pc + 3;
2371 }
2372 }
2373
2374 return pc + 1;
2375 }
2376
2377 return pc;
2378 }
2379
2380 /* Work around false termination of the prologue - GCC PR debug/48827.
2381
2382 START_PC is the first instruction of a function; PC is the end of the
2383 prologue as determined so far. Return PC if there is nothing to do.
2384
2385 84 c0 test %al,%al
2386 74 23 je after
2387 <-- here is 0 lines advance - the false prologue end marker.
2388 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2389 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2390 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2391 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2392 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2393 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2394 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2395 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2396 after: */
2397
2398 static CORE_ADDR
2399 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2400 {
2401 struct symtab_and_line start_pc_sal, next_sal;
2402 gdb_byte buf[4 + 8 * 7];
2403 int offset, xmmreg;
2404
2405 if (pc == start_pc)
2406 return pc;
2407
2408 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2409 if (start_pc_sal.symtab == NULL
2410 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2411 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
2412 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2413 return pc;
2414
2415 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2416 if (next_sal.line != start_pc_sal.line)
2417 return pc;
2418
2419 /* START_PC may be from overlaid memory; overlays are ignored here. */
2420 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2421 return pc;
2422
2423 /* test %al,%al */
2424 if (buf[0] != 0x84 || buf[1] != 0xc0)
2425 return pc;
2426 /* je AFTER */
2427 if (buf[2] != 0x74)
2428 return pc;
2429
2430 offset = 4;
2431 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2432 {
2433 /* 0x0f 0x29 0b??rrr101 movaps %xmm<rrr>,-0x??(%rbp) */
2434 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2435 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2436 return pc;
2437
2438 /* 0b01?????? */
2439 if ((buf[offset + 2] & 0xc0) == 0x40)
2440 {
2441 /* 8-bit displacement. */
2442 offset += 4;
2443 }
2444 /* 0b10?????? */
2445 else if ((buf[offset + 2] & 0xc0) == 0x80)
2446 {
2447 /* 32-bit displacement. */
2448 offset += 7;
2449 }
2450 else
2451 return pc;
2452 }
2453
2454 /* je AFTER */
2455 if (offset - 4 != buf[3])
2456 return pc;
2457
2458 return next_sal.end;
2459 }
2460
2461 /* Return PC of first real instruction. */
2462
2463 static CORE_ADDR
2464 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2465 {
2466 struct amd64_frame_cache cache;
2467 CORE_ADDR pc;
2468 CORE_ADDR func_addr;
2469
2470 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2471 {
2472 CORE_ADDR post_prologue_pc
2473 = skip_prologue_using_sal (gdbarch, func_addr);
2474 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2475
2476 /* Clang always emits a line note before the prologue and another
2477 one after. We trust clang to emit usable line notes. */
2478 if (post_prologue_pc
2479 && (cust != NULL
2480 && COMPUNIT_PRODUCER (cust) != NULL
2481 && startswith (COMPUNIT_PRODUCER (cust), "clang ")))
2482 return std::max (start_pc, post_prologue_pc);
2483 }
2484
2485 amd64_init_frame_cache (&cache);
2486 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2487 &cache);
2488 if (cache.frameless_p)
2489 return start_pc;
2490
2491 return amd64_skip_xmm_prologue (pc, start_pc);
2492 }
2493 \f
2494
2495 /* Normal frames. */
2496
2497 static void
2498 amd64_frame_cache_1 (struct frame_info *this_frame,
2499 struct amd64_frame_cache *cache)
2500 {
2501 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2502 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2503 gdb_byte buf[8];
2504 int i;
2505
2506 cache->pc = get_frame_func (this_frame);
2507 if (cache->pc != 0)
2508 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2509 cache);
2510
2511 if (cache->frameless_p)
2512 {
2513 /* We didn't find a valid frame. If we're at the start of a
2514 function, or somewhere halfway through its prologue, the
2515 function's frame probably hasn't been fully set up yet. Try to
2516 reconstruct the base address for the stack frame by looking
2517 at the stack pointer. For truly "frameless" functions this
2518 might work too. */
2519
2520 if (cache->saved_sp_reg != -1)
2521 {
2522 /* Stack pointer has been saved. */
2523 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2524 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2525
2526 /* We're halfway through aligning the stack. */
2527 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2528 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2529
2530 /* This will be added back below. */
2531 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2532 }
2533 else
2534 {
2535 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2536 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2537 + cache->sp_offset;
2538 }
2539 }
2540 else
2541 {
2542 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2543 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2544 }
2545
2546 /* Now that we have the base address for the stack frame we can
2547 calculate the value of %rsp in the calling frame. */
2548 cache->saved_sp = cache->base + 16;
2549
2550 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2551 frame we find it at the same offset from the reconstructed base
2552 address. If we're halfway through aligning the stack, %rip is handled
2553 differently (see above). */
2554 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2555 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2556
2557 /* Adjust all the saved registers such that they contain addresses
2558 instead of offsets. */
2559 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2560 if (cache->saved_regs[i] != -1)
2561 cache->saved_regs[i] += cache->base;
2562
2563 cache->base_p = 1;
2564 }
2565
2566 static struct amd64_frame_cache *
2567 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2568 {
2569 struct amd64_frame_cache *cache;
2570
2571 if (*this_cache)
2572 return (struct amd64_frame_cache *) *this_cache;
2573
2574 cache = amd64_alloc_frame_cache ();
2575 *this_cache = cache;
2576
2577 TRY
2578 {
2579 amd64_frame_cache_1 (this_frame, cache);
2580 }
2581 CATCH (ex, RETURN_MASK_ERROR)
2582 {
2583 if (ex.error != NOT_AVAILABLE_ERROR)
2584 throw_exception (ex);
2585 }
2586 END_CATCH
2587
2588 return cache;
2589 }
2590
2591 static enum unwind_stop_reason
2592 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2593 void **this_cache)
2594 {
2595 struct amd64_frame_cache *cache =
2596 amd64_frame_cache (this_frame, this_cache);
2597
2598 if (!cache->base_p)
2599 return UNWIND_UNAVAILABLE;
2600
2601 /* This marks the outermost frame. */
2602 if (cache->base == 0)
2603 return UNWIND_OUTERMOST;
2604
2605 return UNWIND_NO_REASON;
2606 }
2607
2608 static void
2609 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2610 struct frame_id *this_id)
2611 {
2612 struct amd64_frame_cache *cache =
2613 amd64_frame_cache (this_frame, this_cache);
2614
2615 if (!cache->base_p)
2616 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2617 else if (cache->base == 0)
2618 {
2619 /* This marks the outermost frame. */
2620 return;
2621 }
2622 else
2623 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2624 }
2625
2626 static struct value *
2627 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2628 int regnum)
2629 {
2630 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2631 struct amd64_frame_cache *cache =
2632 amd64_frame_cache (this_frame, this_cache);
2633
2634 gdb_assert (regnum >= 0);
2635
2636 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2637 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2638
2639 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2640 return frame_unwind_got_memory (this_frame, regnum,
2641 cache->saved_regs[regnum]);
2642
2643 return frame_unwind_got_register (this_frame, regnum, regnum);
2644 }
2645
2646 static const struct frame_unwind amd64_frame_unwind =
2647 {
2648 NORMAL_FRAME,
2649 amd64_frame_unwind_stop_reason,
2650 amd64_frame_this_id,
2651 amd64_frame_prev_register,
2652 NULL,
2653 default_frame_sniffer
2654 };
2655 \f
2656 /* Generate a bytecode expression to get the value of the saved PC. */
2657
2658 static void
2659 amd64_gen_return_address (struct gdbarch *gdbarch,
2660 struct agent_expr *ax, struct axs_value *value,
2661 CORE_ADDR scope)
2662 {
2663 /* The following sequence assumes the traditional use of the base
2664 register. */
2665 ax_reg (ax, AMD64_RBP_REGNUM);
2666 ax_const_l (ax, 8);
2667 ax_simple (ax, aop_add);
2668 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2669 value->kind = axs_lvalue_memory;
2670 }
2671 \f
2672
2673 /* Signal trampolines. */
2674
2675 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2676 64-bit variants. This would require using identical frame caches
2677 on both platforms. */
2678
2679 static struct amd64_frame_cache *
2680 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2681 {
2682 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2683 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2684 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2685 struct amd64_frame_cache *cache;
2686 CORE_ADDR addr;
2687 gdb_byte buf[8];
2688 int i;
2689
2690 if (*this_cache)
2691 return (struct amd64_frame_cache *) *this_cache;
2692
2693 cache = amd64_alloc_frame_cache ();
2694
2695 TRY
2696 {
2697 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2698 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2699
2700 addr = tdep->sigcontext_addr (this_frame);
2701 gdb_assert (tdep->sc_reg_offset);
2702 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2703 for (i = 0; i < tdep->sc_num_regs; i++)
2704 if (tdep->sc_reg_offset[i] != -1)
2705 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2706
2707 cache->base_p = 1;
2708 }
2709 CATCH (ex, RETURN_MASK_ERROR)
2710 {
2711 if (ex.error != NOT_AVAILABLE_ERROR)
2712 throw_exception (ex);
2713 }
2714 END_CATCH
2715
2716 *this_cache = cache;
2717 return cache;
2718 }
2719
2720 static enum unwind_stop_reason
2721 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2722 void **this_cache)
2723 {
2724 struct amd64_frame_cache *cache =
2725 amd64_sigtramp_frame_cache (this_frame, this_cache);
2726
2727 if (!cache->base_p)
2728 return UNWIND_UNAVAILABLE;
2729
2730 return UNWIND_NO_REASON;
2731 }
2732
2733 static void
2734 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2735 void **this_cache, struct frame_id *this_id)
2736 {
2737 struct amd64_frame_cache *cache =
2738 amd64_sigtramp_frame_cache (this_frame, this_cache);
2739
2740 if (!cache->base_p)
2741 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2742 else if (cache->base == 0)
2743 {
2744 /* This marks the outermost frame. */
2745 return;
2746 }
2747 else
2748 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2749 }
2750
2751 static struct value *
2752 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2753 void **this_cache, int regnum)
2754 {
2755 /* Make sure we've initialized the cache. */
2756 amd64_sigtramp_frame_cache (this_frame, this_cache);
2757
2758 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2759 }
2760
2761 static int
2762 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2763 struct frame_info *this_frame,
2764 void **this_cache)
2765 {
2766 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2767
2768 /* We shouldn't even bother if we don't have a sigcontext_addr
2769 handler. */
2770 if (tdep->sigcontext_addr == NULL)
2771 return 0;
2772
2773 if (tdep->sigtramp_p != NULL)
2774 {
2775 if (tdep->sigtramp_p (this_frame))
2776 return 1;
2777 }
2778
2779 if (tdep->sigtramp_start != 0)
2780 {
2781 CORE_ADDR pc = get_frame_pc (this_frame);
2782
2783 gdb_assert (tdep->sigtramp_end != 0);
2784 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2785 return 1;
2786 }
2787
2788 return 0;
2789 }
2790
2791 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2792 {
2793 SIGTRAMP_FRAME,
2794 amd64_sigtramp_frame_unwind_stop_reason,
2795 amd64_sigtramp_frame_this_id,
2796 amd64_sigtramp_frame_prev_register,
2797 NULL,
2798 amd64_sigtramp_frame_sniffer
2799 };
2800 \f
2801
2802 static CORE_ADDR
2803 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2804 {
2805 struct amd64_frame_cache *cache =
2806 amd64_frame_cache (this_frame, this_cache);
2807
2808 return cache->base;
2809 }
2810
2811 static const struct frame_base amd64_frame_base =
2812 {
2813 &amd64_frame_unwind,
2814 amd64_frame_base_address,
2815 amd64_frame_base_address,
2816 amd64_frame_base_address
2817 };
2818
2819 /* Normal frames, but in a function epilogue. */
2820
2821 /* Implement the stack_frame_destroyed_p gdbarch method.
2822
2823 The epilogue is defined here as the 'ret' instruction, which will
2824 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2825 the function's stack frame. */
2826
2827 static int
2828 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2829 {
2830 gdb_byte insn;
2831 struct compunit_symtab *cust;
2832
2833 cust = find_pc_compunit_symtab (pc);
2834 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2835 return 0;
2836
2837 if (target_read_memory (pc, &insn, 1))
2838 return 0; /* Can't read memory at pc. */
2839
2840 if (insn != 0xc3) /* 'ret' instruction. */
2841 return 0;
2842
2843 return 1;
2844 }
2845
2846 static int
2847 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2848 struct frame_info *this_frame,
2849 void **this_prologue_cache)
2850 {
2851 if (frame_relative_level (this_frame) == 0)
2852 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
2853 get_frame_pc (this_frame));
2854 else
2855 return 0;
2856 }
2857
2858 static struct amd64_frame_cache *
2859 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2860 {
2861 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2862 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2863 struct amd64_frame_cache *cache;
2864 gdb_byte buf[8];
2865
2866 if (*this_cache)
2867 return (struct amd64_frame_cache *) *this_cache;
2868
2869 cache = amd64_alloc_frame_cache ();
2870 *this_cache = cache;
2871
2872 TRY
2873 {
2874 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2875 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2876 cache->base = extract_unsigned_integer (buf, 8,
2877 byte_order) + cache->sp_offset;
2878
2879 /* Cache pc will be this frame's PC. */
2880 cache->pc = get_frame_pc (this_frame);
2881
2882 /* The saved %rsp will be at cache->base plus 16. */
2883 cache->saved_sp = cache->base + 16;
2884
2885 /* The saved %rip will be at cache->base plus 8. */
2886 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2887
2888 cache->base_p = 1;
2889 }
2890 CATCH (ex, RETURN_MASK_ERROR)
2891 {
2892 if (ex.error != NOT_AVAILABLE_ERROR)
2893 throw_exception (ex);
2894 }
2895 END_CATCH
2896
2897 return cache;
2898 }
2899
2900 static enum unwind_stop_reason
2901 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2902 void **this_cache)
2903 {
2904 struct amd64_frame_cache *cache
2905 = amd64_epilogue_frame_cache (this_frame, this_cache);
2906
2907 if (!cache->base_p)
2908 return UNWIND_UNAVAILABLE;
2909
2910 return UNWIND_NO_REASON;
2911 }
2912
2913 static void
2914 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2915 void **this_cache,
2916 struct frame_id *this_id)
2917 {
2918 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2919 this_cache);
2920
2921 if (!cache->base_p)
2922 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2923 else
2924 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2925 }
2926
2927 static const struct frame_unwind amd64_epilogue_frame_unwind =
2928 {
2929 NORMAL_FRAME,
2930 amd64_epilogue_frame_unwind_stop_reason,
2931 amd64_epilogue_frame_this_id,
2932 amd64_frame_prev_register,
2933 NULL,
2934 amd64_epilogue_frame_sniffer
2935 };
2936
2937 static struct frame_id
2938 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2939 {
2940 CORE_ADDR fp;
2941
2942 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2943
2944 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2945 }
2946
2947 /* 16-byte align the SP per frame requirements. */
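/* For example, an SP of 0x7fffffffe468 aligns down to 0x7fffffffe460,
   while an already aligned 0x7fffffffe460 is returned unchanged. */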
2948
2949 static CORE_ADDR
2950 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2951 {
2952 return sp & -(CORE_ADDR)16;
2953 }
2954 \f
2955
2956 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2957 in the floating-point register set REGSET to register cache
2958 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2959
2960 static void
2961 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2962 int regnum, const void *fpregs, size_t len)
2963 {
2964 struct gdbarch *gdbarch = regcache->arch ();
2965 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2966
2967 gdb_assert (len >= tdep->sizeof_fpregset);
2968 amd64_supply_fxsave (regcache, regnum, fpregs);
2969 }
2970
2971 /* Collect register REGNUM from the register cache REGCACHE and store
2972 it in the buffer specified by FPREGS and LEN as described by the
2973 floating-point register set REGSET. If REGNUM is -1, do this for
2974 all registers in REGSET. */
2975
2976 static void
2977 amd64_collect_fpregset (const struct regset *regset,
2978 const struct regcache *regcache,
2979 int regnum, void *fpregs, size_t len)
2980 {
2981 struct gdbarch *gdbarch = regcache->arch ();
2982 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2983
2984 gdb_assert (len >= tdep->sizeof_fpregset);
2985 amd64_collect_fxsave (regcache, regnum, fpregs);
2986 }
2987
2988 const struct regset amd64_fpregset =
2989 {
2990 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2991 };
2992 \f
2993
2994 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2995 %rdi. We expect its value to be a pointer to the jmp_buf structure
2996 from which we extract the address that we will land at. This
2997 address is copied into PC. This routine returns non-zero on
2998 success. */
2999
3000 static int
3001 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
3002 {
3003 gdb_byte buf[8];
3004 CORE_ADDR jb_addr;
3005 struct gdbarch *gdbarch = get_frame_arch (frame);
3006 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
3007 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
3008
3009 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3010 longjmp will land. */
3011 if (jb_pc_offset == -1)
3012 return 0;
3013
3014 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
3015 jb_addr = extract_typed_address
3016 (buf, builtin_type (gdbarch)->builtin_data_ptr);
3017 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3018 return 0;
3019
3020 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
3021
3022 return 1;
3023 }
3024
3025 static const int amd64_record_regmap[] =
3026 {
3027 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
3028 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
3029 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
3030 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
3031 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3032 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3033 };
3034
3035 void
3036 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3037 const target_desc *default_tdesc)
3038 {
3039 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3040 const struct target_desc *tdesc = info.target_desc;
3041 static const char *const stap_integer_prefixes[] = { "$", NULL };
3042 static const char *const stap_register_prefixes[] = { "%", NULL };
3043 static const char *const stap_register_indirection_prefixes[] = { "(",
3044 NULL };
3045 static const char *const stap_register_indirection_suffixes[] = { ")",
3046 NULL };
3047
3048 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3049 floating-point registers. */
3050 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3051 tdep->fpregset = &amd64_fpregset;
3052
3053 if (! tdesc_has_registers (tdesc))
3054 tdesc = default_tdesc;
3055 tdep->tdesc = tdesc;
3056
3057 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3058 tdep->register_names = amd64_register_names;
3059
3060 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3061 {
3062 tdep->zmmh_register_names = amd64_zmmh_names;
3063 tdep->k_register_names = amd64_k_names;
3064 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3065 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3066
3067 tdep->num_zmm_regs = 32;
3068 tdep->num_xmm_avx512_regs = 16;
3069 tdep->num_ymm_avx512_regs = 16;
3070
3071 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3072 tdep->k0_regnum = AMD64_K0_REGNUM;
3073 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3074 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3075 }
3076
3077 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3078 {
3079 tdep->ymmh_register_names = amd64_ymmh_names;
3080 tdep->num_ymm_regs = 16;
3081 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3082 }
3083
3084 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3085 {
3086 tdep->mpx_register_names = amd64_mpx_names;
3087 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3088 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3089 }
3090
3091 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3092 {
3093 const struct tdesc_feature *feature =
3094 tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments");
3095 struct tdesc_arch_data *tdesc_data_segments =
3096 (struct tdesc_arch_data *) info.tdep_info;
3097
3098 tdesc_numbered_register (feature, tdesc_data_segments,
3099 AMD64_FSBASE_REGNUM, "fs_base");
3100 tdesc_numbered_register (feature, tdesc_data_segments,
3101 AMD64_GSBASE_REGNUM, "gs_base");
3102 }
3103
3104 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3105 {
3106 tdep->pkeys_register_names = amd64_pkeys_names;
3107 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3108 tdep->num_pkeys_regs = 1;
3109 }
3110
3111 tdep->num_byte_regs = 20;
3112 tdep->num_word_regs = 16;
3113 tdep->num_dword_regs = 16;
3114 /* Avoid wiring in the MMX registers for now. */
3115 tdep->num_mmx_regs = 0;
3116
3117 set_gdbarch_pseudo_register_read_value (gdbarch,
3118 amd64_pseudo_register_read_value);
3119 set_gdbarch_pseudo_register_write (gdbarch,
3120 amd64_pseudo_register_write);
3121 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3122 amd64_ax_pseudo_register_collect);
3123
3124 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3125
3126 /* AMD64 has an FPU and 16 SSE registers. */
3127 tdep->st0_regnum = AMD64_ST0_REGNUM;
3128 tdep->num_xmm_regs = 16;
3129
3130 /* This is what all the fuss is about. */
3131 set_gdbarch_long_bit (gdbarch, 64);
3132 set_gdbarch_long_long_bit (gdbarch, 64);
3133 set_gdbarch_ptr_bit (gdbarch, 64);
3134
3135 /* In contrast to the i386, on AMD64 a `long double' actually takes
3136 up 128 bits, even though it's still based on the i387 extended
3137 floating-point format which has only 80 significant bits. */
3138 set_gdbarch_long_double_bit (gdbarch, 128);
3139
3140 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3141
3142 /* Register numbers of various important registers. */
3143 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3144 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3145 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3146 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3147
3148 /* The "default" register numbering scheme for AMD64 is referred to
3149 as the "DWARF Register Number Mapping" in the System V psABI.
3150 The preferred debugging format for all known AMD64 targets is
3151 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3152 DWARF-1), but we provide the same mapping just in case. This
3153 mapping is also used for stabs, which GCC does support. */
3154 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3155 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3156
3157 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3158 be in use on any of the supported AMD64 targets. */
3159
3160 /* Call dummy code. */
3161 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3162 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3163 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3164
3165 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3166 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3167 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3168
3169 set_gdbarch_return_value (gdbarch, amd64_return_value);
3170
3171 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3172
3173 tdep->record_regmap = amd64_record_regmap;
3174
3175 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3176
3177 /* Hook the function epilogue frame unwinder. This unwinder is
3178 prepended to the unwinder list, so that it supersedes the other
3179 unwinders in function epilogues. */
3180 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3181
3182 /* Hook the prologue-based frame unwinders. */
3183 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3184 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3185 frame_base_set_default (gdbarch, &amd64_frame_base);
3186
3187 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3188
3189 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3190
3191 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3192
3193 /* SystemTap variables and functions. */
3194 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3195 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3196 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3197 stap_register_indirection_prefixes);
3198 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3199 stap_register_indirection_suffixes);
3200 set_gdbarch_stap_is_single_operand (gdbarch,
3201 i386_stap_is_single_operand);
3202 set_gdbarch_stap_parse_special_token (gdbarch,
3203 i386_stap_parse_special_token);
3204 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3205 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3206 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3207 }
3208 \f
3209
3210 static struct type *
3211 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3212 {
3213 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3214
3215 switch (regnum - tdep->eax_regnum)
3216 {
3217 case AMD64_RBP_REGNUM: /* %ebp */
3218 case AMD64_RSP_REGNUM: /* %esp */
3219 return builtin_type (gdbarch)->builtin_data_ptr;
3220 case AMD64_RIP_REGNUM: /* %eip */
3221 return builtin_type (gdbarch)->builtin_func_ptr;
3222 }
3223
3224 return i386_pseudo_register_type (gdbarch, regnum);
3225 }
3226
3227 void
3228 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3229 const target_desc *default_tdesc)
3230 {
3231 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3232
3233 amd64_init_abi (info, gdbarch, default_tdesc);
3234
3235 tdep->num_dword_regs = 17;
3236 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3237
3238 set_gdbarch_long_bit (gdbarch, 32);
3239 set_gdbarch_ptr_bit (gdbarch, 32);
3240 }
3241
3242 /* Return the target description for a specified XSAVE feature mask. */
3243
3244 const struct target_desc *
3245 amd64_target_description (uint64_t xcr0)
3246 {
3247 static target_desc *amd64_tdescs
3248 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/] = {};
3249 target_desc **tdesc;
3250
3251 tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
3252 [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
3253 [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
3254 [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0];
3255
3256 if (*tdesc == NULL)
3257 *tdesc = amd64_create_target_description (xcr0, false, false);
3258
3259 return *tdesc;
3260 }
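/* For instance, amd64_target_description (X86_XSTATE_AVX_MASK) builds
   the AVX-enabled description on first use and returns the cached
   pointer thereafter; this pairing is what the self tests below
   exercise. */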
3261
3262 void
3263 _initialize_amd64_tdep (void)
3264 {
3265 #if GDB_SELF_TEST
3266 struct
3267 {
3268 const char *xml;
3269 uint64_t mask;
3270 } xml_masks[] = {
3271 { "i386/amd64.xml", X86_XSTATE_SSE_MASK },
3272 { "i386/amd64-avx.xml", X86_XSTATE_AVX_MASK },
3273 { "i386/amd64-mpx.xml", X86_XSTATE_MPX_MASK },
3274 { "i386/amd64-avx-mpx.xml", X86_XSTATE_AVX_MPX_MASK },
3275 { "i386/amd64-avx-avx512.xml", X86_XSTATE_AVX_AVX512_MASK },
3276 { "i386/amd64-avx-mpx-avx512-pku.xml",
3277 X86_XSTATE_AVX_MPX_AVX512_PKU_MASK },
3278 };
3279
3280 for (auto &a : xml_masks)
3281 {
3282 auto tdesc = amd64_target_description (a.mask);
3283
3284 selftests::record_xml_tdesc (a.xml, tdesc);
3285 }
3286 #endif /* GDB_SELF_TEST */
3287 }
3288 \f
3289
3290 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3291 sense that the instruction pointer and data pointer are simply
3292 64-bit offsets into the code segment and the data segment instead
3293 of a selector/offset pair. The functions below store the upper 32
3294 bits of these pointers (instead of just the 16 bits of the segment
3295 selector). */
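/* Concretely, in the 64-bit FXSAVE layout the FPU instruction pointer
   occupies bytes 8-15 and the FPU operand pointer bytes 16-23, so the
   offsets 12 and 20 used below address the upper halves of those
   fields. */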
3296
3297 /* Fill register REGNUM in REGCACHE with the appropriate
3298 floating-point or SSE register value from *FXSAVE. If REGNUM is
3299 -1, do this for all registers. This function masks off any of the
3300 reserved bits in *FXSAVE. */
3301
3302 void
3303 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3304 const void *fxsave)
3305 {
3306 struct gdbarch *gdbarch = regcache->arch ();
3307 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3308
3309 i387_supply_fxsave (regcache, regnum, fxsave);
3310
3311 if (fxsave
3312 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3313 {
3314 const gdb_byte *regs = (const gdb_byte *) fxsave;
3315
3316 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3317 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3318 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3319 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3320 }
3321 }
3322
3323 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3324
3325 void
3326 amd64_supply_xsave (struct regcache *regcache, int regnum,
3327 const void *xsave)
3328 {
3329 struct gdbarch *gdbarch = regcache->arch ();
3330 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3331
3332 i387_supply_xsave (regcache, regnum, xsave);
3333
3334 if (xsave
3335 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3336 {
3337 const gdb_byte *regs = (const gdb_byte *) xsave;
3338
3339 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3340 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3341 regs + 12);
3342 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3343 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3344 regs + 20);
3345 }
3346 }
3347
3348 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3349 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3350 all registers. This function doesn't touch any of the reserved
3351 bits in *FXSAVE. */
3352
3353 void
3354 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3355 void *fxsave)
3356 {
3357 struct gdbarch *gdbarch = regcache->arch ();
3358 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3359 gdb_byte *regs = (gdb_byte *) fxsave;
3360
3361 i387_collect_fxsave (regcache, regnum, fxsave);
3362
3363 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3364 {
3365 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3366 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3367 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3368 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3369 }
3370 }
3371
3372 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3373
3374 void
3375 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3376 void *xsave, int gcore)
3377 {
3378 struct gdbarch *gdbarch = regcache->arch ();
3379 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3380 gdb_byte *regs = (gdb_byte *) xsave;
3381
3382 i387_collect_xsave (regcache, regnum, xsave, gcore);
3383
3384 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3385 {
3386 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3387 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3388 regs + 12);
3389 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3390 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3391 regs + 20);
3392 }
3393 }