/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
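/* Note: 0xe9 is the 5-byte "jmp rel32" encoding, and 0x66 0xe9 the
   operand-size-prefixed 4-byte "jmp rel16" used when only a short
   jump fits; the zeroed displacement bytes are patched in when a jump
   pad or trampoline is wired up.  */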
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

private:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
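/* Note (illustrative): with glibc's x86-64 <sys/reg.h> numbering, GS is
   index 26, so X86_64_USER_REGS is 27 eight-byte `struct user' slots;
   the 32-bit zero-extension memset in x86_fill_gregset below clears
   exactly that prefix of the buffer.  */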
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
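/* Register 0 is the accumulator in both descriptions ("rax" in the
   amd64 tdescs, "eax" in the i386 ones), so checking that its size is
   8 bytes is a reliable 64-bit test.  */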
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}
void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}


static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors are zeroed
     in the end before detaching the forked-off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
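/* Worked example: the xstate buffer below is read as a uint64_t array,
   so the XCR0 mask lives at index 464 / sizeof (uint64_t) == 58.  */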
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
bool
x86_target::supports_tracepoints ()
{
  return true;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
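/* Usage example: push_opcode (&buf[i], "48 83 ec 18") stores the four
   bytes 0x48 0x83 0xec 0x18 (sub $0x18,%rsp) at BUF and returns 4.  */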
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
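/* To summarize, the amd64 jump pad built above is laid out as:
   save registers -> take spin lock -> call gdb_collect -> drop lock
   -> restore registers -> relocated original instruction -> jmp back.
   Only the jump *to* the pad, at the tracepoint address itself, is
   left for the caller to write, atomically.  */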
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
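/* Rationale for the trampoline path above: overwriting a 4-byte
   instruction only leaves room for the 4-byte "66 e9" jmp rel16,
   whose roughly +/-32KB reach will not normally span to the jump pad,
   so the short jump lands in a nearby trampoline that in turn takes a
   5-byte rel32 jump to the pad.  */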
bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   in bytes.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
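/* Usage example: EMIT_ASM (amd64_pop, "pop %rax") assembles the
   instruction between the start_/end_ labels into this very function,
   then add_insns copies those bytes into the inferior at
   current_insn_ptr; the leading "jmp end_..." ensures the template
   bytes are never executed inside gdbserver itself.  */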
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
1780 EMIT_ASM (amd64_log_not
,
1781 "test %rax,%rax\n\t"
static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
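/* The displacement is relative to the end of the jump instruction,
   hence TO - (FROM + SIZE); FROM points at the 4 placeholder
   displacement bytes emitted via the ".byte 0xe9/0x0f 0x85, 0, 0, 0, 0"
   sequences above.  */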
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      break;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2300 EMIT_ASM32 (i386_add
,
2301 "add (%esp),%eax\n\t"
2302 "adc 0x4(%esp),%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2307 i386_emit_sub (void)
2309 EMIT_ASM32 (i386_sub
,
2310 "subl %eax,(%esp)\n\t"
2311 "sbbl %ebx,4(%esp)\n\t"
2317 i386_emit_mul (void)
2323 i386_emit_lsh (void)
2329 i386_emit_rsh_signed (void)
2335 i386_emit_rsh_unsigned (void)
2341 i386_emit_ext (int arg
)
2346 EMIT_ASM32 (i386_ext_8
,
2349 "movl %eax,%ebx\n\t"
2353 EMIT_ASM32 (i386_ext_16
,
2355 "movl %eax,%ebx\n\t"
2359 EMIT_ASM32 (i386_ext_32
,
2360 "movl %eax,%ebx\n\t"
static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
)
2588 unsigned char buf
[16];
2590 CORE_ADDR buildaddr
;
2592 EMIT_ASM32 (i386_reg_a
,
2594 buildaddr
= current_insn_ptr
;
2596 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2597 memcpy (&buf
[i
], ®
, sizeof (reg
));
2599 append_insns (&buildaddr
, i
, buf
);
2600 current_insn_ptr
= buildaddr
;
2601 EMIT_ASM32 (i386_reg_b
,
2602 "mov %eax,4(%esp)\n\t"
2603 "mov 8(%ebp),%eax\n\t"
2605 i386_emit_call (get_raw_reg_func_addr ());
2606 EMIT_ASM32 (i386_reg_c
,
2608 "lea 0x8(%esp),%esp");
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      break;
    }
}
static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}