1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
41 #include "gdbsupport/agent.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
/* Fallback target descriptions used when the connected GDB does not
   support XML target descriptions; lazily created in
   x86_linux_read_description.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;
/* Templates for the jump instructions written into fast-tracepoint
   jump pads.  jump_insn is a near jump (0xe9) with a 32-bit relative
   displacement; small_jump_insn carries an operand-size prefix (0x66)
   so the displacement is only 16 bits.  The zero bytes are patched
   with the actual offset before use.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  These literal
   target descriptions are handed to GDBs that do not announce
   xmlRegisters support.  The visible copies were truncated: the
   closing "</target>" and the terminating quote were missing, which
   would leave the string literals unterminated.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
74 #include <sys/procfs.h>
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
99 class x86_target
: public linux_process_target
103 const regs_info
*get_regs_info () override
;
105 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
107 bool supports_z_point_type (char z_type
) override
;
109 void process_qsupported (char **features
, int count
) override
;
111 bool supports_tracepoints () override
;
115 void low_arch_setup () override
;
117 bool low_cannot_fetch_register (int regno
) override
;
119 bool low_cannot_store_register (int regno
) override
;
121 bool low_supports_breakpoints () override
;
123 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
125 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
127 int low_decr_pc_after_break () override
;
129 bool low_breakpoint_at (CORE_ADDR pc
) override
;
131 int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
132 int size
, raw_breakpoint
*bp
) override
;
134 int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
135 int size
, raw_breakpoint
*bp
) override
;
137 bool low_stopped_by_watchpoint () override
;
139 CORE_ADDR
low_stopped_data_address () override
;
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
147 int direction
) override
;
149 arch_process_info
*low_new_process () override
;
151 void low_delete_process (arch_process_info
*info
) override
;
153 void low_new_thread (lwp_info
*) override
;
155 void low_delete_thread (arch_lwp_info
*) override
;
157 void low_new_fork (process_info
*parent
, process_info
*child
) override
;
159 void low_prepare_to_resume (lwp_info
*lwp
) override
;
163 /* Update all the target description of all processes; a new GDB
164 connected, and it may or not support xml target descriptions. */
165 void update_xmltarget ();
168 /* The singleton target ops object. */
170 static x86_target the_x86_target
;
172 /* Per-process arch-specific data we want to keep. */
174 struct arch_process_info
176 struct x86_debug_reg_state debug_reg_state
;
181 /* Mapping between the general-purpose registers in `struct user'
182 format and GDB's register array layout.
183 Note that the transfer layout uses 64-bit regs. */
184 static /*const*/ int i386_regmap
[] =
186 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
187 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
188 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
189 DS
* 8, ES
* 8, FS
* 8, GS
* 8
192 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
194 /* So code below doesn't have to care, i386 or amd64. */
195 #define ORIG_EAX ORIG_RAX
198 static const int x86_64_regmap
[] =
200 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
201 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
202 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
203 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
204 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
205 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
206 -1, -1, -1, -1, -1, -1, -1, -1,
207 -1, -1, -1, -1, -1, -1, -1, -1,
208 -1, -1, -1, -1, -1, -1, -1, -1,
210 -1, -1, -1, -1, -1, -1, -1, -1,
212 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
217 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
218 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
219 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
220 -1, -1, -1, -1, -1, -1, -1, -1,
221 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
222 -1, -1, -1, -1, -1, -1, -1, -1,
223 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
224 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
225 -1, -1, -1, -1, -1, -1, -1, -1,
226 -1, -1, -1, -1, -1, -1, -1, -1,
227 -1, -1, -1, -1, -1, -1, -1, -1,
231 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
232 #define X86_64_USER_REGS (GS + 1)
234 #else /* ! __x86_64__ */
236 /* Mapping between the general-purpose registers in `struct user'
237 format and GDB's register array layout. */
238 static /*const*/ int i386_regmap
[] =
240 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
241 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
242 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
243 DS
* 4, ES
* 4, FS
* 4, GS
* 4
246 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
254 /* Returns true if the current inferior belongs to a x86-64 process,
258 is_64bit_tdesc (void)
260 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
262 return register_size (regcache
->tdesc
, 0) == 8;
268 /* Called by libthread_db. */
271 ps_get_thread_area (struct ps_prochandle
*ph
,
272 lwpid_t lwpid
, int idx
, void **base
)
275 int use_64bit
= is_64bit_tdesc ();
282 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
286 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
297 unsigned int desc
[4];
299 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
300 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
303 /* Ensure we properly extend the value to 64-bits for x86_64. */
304 *base
= (void *) (uintptr_t) desc
[1];
309 /* Get the thread area address. This is used to recognize which
310 thread is which when tracing with the in-process agent library. We
311 don't read anything from the address, and treat it as opaque; it's
312 the address itself that we assume is unique per-thread. */
315 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
318 int use_64bit
= is_64bit_tdesc ();
323 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
325 *addr
= (CORE_ADDR
) (uintptr_t) base
;
334 struct lwp_info
*lwp
= find_lwp_pid (ptid_t (lwpid
));
335 struct thread_info
*thr
= get_lwp_thread (lwp
);
336 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
337 unsigned int desc
[4];
339 const int reg_thread_area
= 3; /* bits to scale down register value. */
342 collect_register_by_name (regcache
, "gs", &gs
);
344 idx
= gs
>> reg_thread_area
;
346 if (ptrace (PTRACE_GET_THREAD_AREA
,
348 (void *) (long) idx
, (unsigned long) &desc
) < 0)
359 x86_target::low_cannot_store_register (int regno
)
362 if (is_64bit_tdesc ())
366 return regno
>= I386_NUM_REGS
;
370 x86_target::low_cannot_fetch_register (int regno
)
373 if (is_64bit_tdesc ())
377 return regno
>= I386_NUM_REGS
;
381 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
386 if (register_size (regcache
->tdesc
, 0) == 8)
388 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
389 if (x86_64_regmap
[i
] != -1)
390 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
392 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
395 int lwpid
= lwpid_of (current_thread
);
397 collect_register_by_name (regcache
, "fs_base", &base
);
398 ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_SET_FS
);
400 collect_register_by_name (regcache
, "gs_base", &base
);
401 ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_SET_GS
);
408 /* 32-bit inferior registers need to be zero-extended.
409 Callers would read uninitialized memory otherwise. */
410 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
413 for (i
= 0; i
< I386_NUM_REGS
; i
++)
414 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
416 collect_register_by_name (regcache
, "orig_eax",
417 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
420 /* Sign extend EAX value to avoid potential syscall restart
423 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
424 for a detailed explanation. */
425 if (register_size (regcache
->tdesc
, 0) == 4)
427 void *ptr
= ((gdb_byte
*) buf
428 + i386_regmap
[find_regno (regcache
->tdesc
, "eax")]);
430 *(int64_t *) ptr
= *(int32_t *) ptr
;
436 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
441 if (register_size (regcache
->tdesc
, 0) == 8)
443 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
444 if (x86_64_regmap
[i
] != -1)
445 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
447 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
450 int lwpid
= lwpid_of (current_thread
);
452 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
453 supply_register_by_name (regcache
, "fs_base", &base
);
455 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_GS
) == 0)
456 supply_register_by_name (regcache
, "gs_base", &base
);
463 for (i
= 0; i
< I386_NUM_REGS
; i
++)
464 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
466 supply_register_by_name (regcache
, "orig_eax",
467 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
471 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
474 i387_cache_to_fxsave (regcache
, buf
);
476 i387_cache_to_fsave (regcache
, buf
);
481 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
484 i387_fxsave_to_cache (regcache
, buf
);
486 i387_fsave_to_cache (regcache
, buf
);
493 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
495 i387_cache_to_fxsave (regcache
, buf
);
499 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
501 i387_fxsave_to_cache (regcache
, buf
);
507 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
509 i387_cache_to_xsave (regcache
, buf
);
513 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
515 i387_xsave_to_cache (regcache
, buf
);
518 /* ??? The non-biarch i386 case stores all the i387 regs twice.
519 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
520 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
521 doesn't work. IWBN to avoid the duplication in the case where it
522 does work. Maybe the arch_setup routine could check whether it works
523 and update the supported regsets accordingly. */
525 static struct regset_info x86_regsets
[] =
527 #ifdef HAVE_PTRACE_GETREGS
528 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
530 x86_fill_gregset
, x86_store_gregset
},
531 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
532 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
534 # ifdef HAVE_PTRACE_GETFPXREGS
535 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
537 x86_fill_fpxregset
, x86_store_fpxregset
},
540 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
542 x86_fill_fpregset
, x86_store_fpregset
},
543 #endif /* HAVE_PTRACE_GETREGS */
548 x86_target::low_supports_breakpoints ()
554 x86_target::low_get_pc (regcache
*regcache
)
556 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
562 collect_register_by_name (regcache
, "rip", &pc
);
563 return (CORE_ADDR
) pc
;
569 collect_register_by_name (regcache
, "eip", &pc
);
570 return (CORE_ADDR
) pc
;
575 x86_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
577 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
583 supply_register_by_name (regcache
, "rip", &newpc
);
589 supply_register_by_name (regcache
, "eip", &newpc
);
594 x86_target::low_decr_pc_after_break ()
600 static const gdb_byte x86_breakpoint
[] = { 0xCC };
601 #define x86_breakpoint_len 1
604 x86_target::low_breakpoint_at (CORE_ADDR pc
)
608 read_memory (pc
, &c
, 1);
615 /* Low-level function vector. */
616 struct x86_dr_low_type x86_dr_low
=
618 x86_linux_dr_set_control
,
619 x86_linux_dr_set_addr
,
620 x86_linux_dr_get_addr
,
621 x86_linux_dr_get_status
,
622 x86_linux_dr_get_control
,
626 /* Breakpoint/Watchpoint support. */
629 x86_target::supports_z_point_type (char z_type
)
635 case Z_PACKET_WRITE_WP
:
636 case Z_PACKET_ACCESS_WP
:
644 x86_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
645 int size
, raw_breakpoint
*bp
)
647 struct process_info
*proc
= current_process ();
651 case raw_bkpt_type_hw
:
652 case raw_bkpt_type_write_wp
:
653 case raw_bkpt_type_access_wp
:
655 enum target_hw_bp_type hw_type
656 = raw_bkpt_type_to_target_hw_bp_type (type
);
657 struct x86_debug_reg_state
*state
658 = &proc
->priv
->arch_private
->debug_reg_state
;
660 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
670 x86_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
671 int size
, raw_breakpoint
*bp
)
673 struct process_info
*proc
= current_process ();
677 case raw_bkpt_type_hw
:
678 case raw_bkpt_type_write_wp
:
679 case raw_bkpt_type_access_wp
:
681 enum target_hw_bp_type hw_type
682 = raw_bkpt_type_to_target_hw_bp_type (type
);
683 struct x86_debug_reg_state
*state
684 = &proc
->priv
->arch_private
->debug_reg_state
;
686 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
695 x86_target::low_stopped_by_watchpoint ()
697 struct process_info
*proc
= current_process ();
698 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
702 x86_target::low_stopped_data_address ()
704 struct process_info
*proc
= current_process ();
706 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
712 /* Called when a new process is created. */
715 x86_target::low_new_process ()
717 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
719 x86_low_init_dregs (&info
->debug_reg_state
);
724 /* Called when a process is being deleted. */
727 x86_target::low_delete_process (arch_process_info
*info
)
733 x86_target::low_new_thread (lwp_info
*lwp
)
735 /* This comes from nat/. */
736 x86_linux_new_thread (lwp
);
740 x86_target::low_delete_thread (arch_lwp_info
*alwp
)
742 /* This comes from nat/. */
743 x86_linux_delete_thread (alwp
);
746 /* Target routine for new_fork. */
749 x86_target::low_new_fork (process_info
*parent
, process_info
*child
)
751 /* These are allocated by linux_add_process. */
752 gdb_assert (parent
->priv
!= NULL
753 && parent
->priv
->arch_private
!= NULL
);
754 gdb_assert (child
->priv
!= NULL
755 && child
->priv
->arch_private
!= NULL
);
757 /* Linux kernel before 2.6.33 commit
758 72f674d203cd230426437cdcf7dd6f681dad8b0d
759 will inherit hardware debug registers from parent
760 on fork/vfork/clone. Newer Linux kernels create such tasks with
761 zeroed debug registers.
763 GDB core assumes the child inherits the watchpoints/hw
764 breakpoints of the parent, and will remove them all from the
765 forked off process. Copy the debug registers mirrors into the
766 new process so that all breakpoints and watchpoints can be
767 removed together. The debug registers mirror will become zeroed
768 in the end before detaching the forked off process, thus making
769 this compatible with older Linux kernels too. */
771 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
775 x86_target::low_prepare_to_resume (lwp_info
*lwp
)
777 /* This comes from nat/. */
778 x86_linux_prepare_to_resume (lwp
);
781 /* See nat/x86-dregs.h. */
783 struct x86_debug_reg_state
*
784 x86_debug_reg_state (pid_t pid
)
786 struct process_info
*proc
= find_process_pid (pid
);
788 return &proc
->priv
->arch_private
->debug_reg_state
;
791 /* When GDBSERVER is built as a 64-bit application on linux, the
792 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
793 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
794 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
795 conversion in-place ourselves. */
797 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
798 layout of the inferiors' architecture. Returns true if any
799 conversion was done; false otherwise. If DIRECTION is 1, then copy
800 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
804 x86_target::low_siginfo_fixup (siginfo_t
*ptrace
, gdb_byte
*inf
, int direction
)
807 unsigned int machine
;
808 int tid
= lwpid_of (current_thread
);
809 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
811 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
812 if (!is_64bit_tdesc ())
813 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
815 /* No fixup for native x32 GDB. */
816 else if (!is_elf64
&& sizeof (void *) == 8)
817 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
826 /* Format of XSAVE extended state is:
830 sw_usable_bytes[464..511]
831 xstate_hdr_bytes[512..575]
836 Same memory layout will be used for the coredump NT_X86_XSTATE
837 representing the XSAVE extended state registers.
839 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
840 extended state mask, which is the same as the extended control register
841 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
842 together with the mask saved in the xstate_hdr_bytes to determine what
843 states the processor/OS supports and what state, used or initialized,
844 the process/thread is in. */
845 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
847 /* Does the current host support the GETFPXREGS request? The header
848 file may or may not define it, and even if it is defined, the
849 kernel will return EIO if it's running on a pre-SSE processor. */
850 int have_ptrace_getfpxregs
=
851 #ifdef HAVE_PTRACE_GETFPXREGS
858 /* Get Linux/x86 target description from running target. */
860 static const struct target_desc
*
861 x86_linux_read_description (void)
863 unsigned int machine
;
867 static uint64_t xcr0
;
868 struct regset_info
*regset
;
870 tid
= lwpid_of (current_thread
);
872 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
874 if (sizeof (void *) == 4)
877 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
879 else if (machine
== EM_X86_64
)
880 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
884 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
885 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
887 elf_fpxregset_t fpxregs
;
889 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
891 have_ptrace_getfpxregs
= 0;
892 have_ptrace_getregset
= 0;
893 return i386_linux_read_description (X86_XSTATE_X87
);
896 have_ptrace_getfpxregs
= 1;
902 x86_xcr0
= X86_XSTATE_SSE_MASK
;
906 if (machine
== EM_X86_64
)
907 return tdesc_amd64_linux_no_xml
;
910 return tdesc_i386_linux_no_xml
;
913 if (have_ptrace_getregset
== -1)
915 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
918 iov
.iov_base
= xstateregs
;
919 iov
.iov_len
= sizeof (xstateregs
);
921 /* Check if PTRACE_GETREGSET works. */
922 if (ptrace (PTRACE_GETREGSET
, tid
,
923 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
924 have_ptrace_getregset
= 0;
927 have_ptrace_getregset
= 1;
929 /* Get XCR0 from XSAVE extended state. */
930 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
931 / sizeof (uint64_t))];
933 /* Use PTRACE_GETREGSET if it is available. */
934 for (regset
= x86_regsets
;
935 regset
->fill_function
!= NULL
; regset
++)
936 if (regset
->get_request
== PTRACE_GETREGSET
)
937 regset
->size
= X86_XSTATE_SIZE (xcr0
);
938 else if (regset
->type
!= GENERAL_REGS
)
943 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
944 xcr0_features
= (have_ptrace_getregset
945 && (xcr0
& X86_XSTATE_ALL_MASK
));
950 if (machine
== EM_X86_64
)
953 const target_desc
*tdesc
= NULL
;
957 tdesc
= amd64_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
,
962 tdesc
= amd64_linux_read_description (X86_XSTATE_SSE_MASK
, !is_elf64
);
968 const target_desc
*tdesc
= NULL
;
971 tdesc
= i386_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
);
974 tdesc
= i386_linux_read_description (X86_XSTATE_SSE
);
979 gdb_assert_not_reached ("failed to return tdesc");
982 /* Update all the target description of all processes; a new GDB
983 connected, and it may or not support xml target descriptions. */
986 x86_target::update_xmltarget ()
988 struct thread_info
*saved_thread
= current_thread
;
990 /* Before changing the register cache's internal layout, flush the
991 contents of the current valid caches back to the threads, and
992 release the current regcache objects. */
995 for_each_process ([this] (process_info
*proc
) {
998 /* Look up any thread of this process. */
999 current_thread
= find_any_thread_of_pid (pid
);
1004 current_thread
= saved_thread
;
1007 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1008 PTRACE_GETREGSET. */
1011 x86_target::process_qsupported (char **features
, int count
)
1015 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1016 with "i386" in qSupported query, it supports x86 XML target
1019 for (i
= 0; i
< count
; i
++)
1021 const char *feature
= features
[i
];
1023 if (startswith (feature
, "xmlRegisters="))
1025 char *copy
= xstrdup (feature
+ 13);
1028 for (char *p
= strtok_r (copy
, ",", &saveptr
);
1030 p
= strtok_r (NULL
, ",", &saveptr
))
1032 if (strcmp (p
, "i386") == 0)
1042 update_xmltarget ();
1045 /* Common for x86/x86-64. */
1047 static struct regsets_info x86_regsets_info
=
1049 x86_regsets
, /* regsets */
1050 0, /* num_regsets */
1051 NULL
, /* disabled_regsets */
1055 static struct regs_info amd64_linux_regs_info
=
1057 NULL
, /* regset_bitmap */
1058 NULL
, /* usrregs_info */
1062 static struct usrregs_info i386_linux_usrregs_info
=
1068 static struct regs_info i386_linux_regs_info
=
1070 NULL
, /* regset_bitmap */
1071 &i386_linux_usrregs_info
,
1076 x86_target::get_regs_info ()
1079 if (is_64bit_tdesc ())
1080 return &amd64_linux_regs_info
;
1083 return &i386_linux_regs_info
;
1086 /* Initialize the target description for the architecture of the
1090 x86_target::low_arch_setup ()
1092 current_process ()->tdesc
= x86_linux_read_description ();
1095 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1096 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1099 x86_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
1101 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
1107 collect_register_by_name (regcache
, "orig_rax", &l_sysno
);
1108 *sysno
= (int) l_sysno
;
1111 collect_register_by_name (regcache
, "orig_eax", sysno
);
1115 x86_target::supports_tracepoints ()
1121 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1123 target_write_memory (*to
, buf
, len
);
/* Decode OP, a string of space-separated hex byte values ("48 83 ec
   18"), into BUF.  Returns the number of bytes written.  Parsing
   stops at the first token strtoul cannot consume.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1149 /* Build a jump pad that saves registers and calls a collection
1150 function. Writes a jump instruction to the jump pad to
1151 JJUMPAD_INSN. The caller is responsible to write it in at the
1152 tracepoint address. */
1155 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1156 CORE_ADDR collector
,
1159 CORE_ADDR
*jump_entry
,
1160 CORE_ADDR
*trampoline
,
1161 ULONGEST
*trampoline_size
,
1162 unsigned char *jjump_pad_insn
,
1163 ULONGEST
*jjump_pad_insn_size
,
1164 CORE_ADDR
*adjusted_insn_addr
,
1165 CORE_ADDR
*adjusted_insn_addr_end
,
1168 unsigned char buf
[40];
1172 CORE_ADDR buildaddr
= *jump_entry
;
1174 /* Build the jump pad. */
1176 /* First, do tracepoint data collection. Save registers. */
1178 /* Need to ensure stack pointer saved first. */
1179 buf
[i
++] = 0x54; /* push %rsp */
1180 buf
[i
++] = 0x55; /* push %rbp */
1181 buf
[i
++] = 0x57; /* push %rdi */
1182 buf
[i
++] = 0x56; /* push %rsi */
1183 buf
[i
++] = 0x52; /* push %rdx */
1184 buf
[i
++] = 0x51; /* push %rcx */
1185 buf
[i
++] = 0x53; /* push %rbx */
1186 buf
[i
++] = 0x50; /* push %rax */
1187 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1188 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1189 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1190 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1191 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1192 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1193 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1194 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1195 buf
[i
++] = 0x9c; /* pushfq */
1196 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1198 memcpy (buf
+ i
, &tpaddr
, 8);
1200 buf
[i
++] = 0x57; /* push %rdi */
1201 append_insns (&buildaddr
, i
, buf
);
1203 /* Stack space for the collecting_t object. */
1205 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1206 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1207 memcpy (buf
+ i
, &tpoint
, 8);
1209 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1210 i
+= push_opcode (&buf
[i
],
1211 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1212 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1213 append_insns (&buildaddr
, i
, buf
);
1217 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1218 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1220 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1221 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1222 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1223 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1224 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1225 append_insns (&buildaddr
, i
, buf
);
1227 /* Set up the gdb_collect call. */
1228 /* At this point, (stack pointer + 0x18) is the base of our saved
1232 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1233 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1235 /* tpoint address may be 64-bit wide. */
1236 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1237 memcpy (buf
+ i
, &tpoint
, 8);
1239 append_insns (&buildaddr
, i
, buf
);
1241 /* The collector function being in the shared library, may be
1242 >31-bits away off the jump pad. */
1244 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1245 memcpy (buf
+ i
, &collector
, 8);
1247 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1248 append_insns (&buildaddr
, i
, buf
);
1250 /* Clear the spin-lock. */
1252 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1253 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1254 memcpy (buf
+ i
, &lockaddr
, 8);
1256 append_insns (&buildaddr
, i
, buf
);
1258 /* Remove stack that had been used for the collect_t object. */
1260 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1261 append_insns (&buildaddr
, i
, buf
);
1263 /* Restore register state. */
1265 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1269 buf
[i
++] = 0x9d; /* popfq */
1270 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1271 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1272 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1273 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1274 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1275 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1276 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1277 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1278 buf
[i
++] = 0x58; /* pop %rax */
1279 buf
[i
++] = 0x5b; /* pop %rbx */
1280 buf
[i
++] = 0x59; /* pop %rcx */
1281 buf
[i
++] = 0x5a; /* pop %rdx */
1282 buf
[i
++] = 0x5e; /* pop %rsi */
1283 buf
[i
++] = 0x5f; /* pop %rdi */
1284 buf
[i
++] = 0x5d; /* pop %rbp */
1285 buf
[i
++] = 0x5c; /* pop %rsp */
1286 append_insns (&buildaddr
, i
, buf
);
1288 /* Now, adjust the original instruction to execute in the jump
1290 *adjusted_insn_addr
= buildaddr
;
1291 relocate_instruction (&buildaddr
, tpaddr
);
1292 *adjusted_insn_addr_end
= buildaddr
;
1294 /* Finally, write a jump back to the program. */
1296 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1297 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1300 "E.Jump back from jump pad too far from tracepoint "
1301 "(offset 0x%" PRIx64
" > int32).", loffset
);
1305 offset
= (int) loffset
;
1306 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1307 memcpy (buf
+ 1, &offset
, 4);
1308 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1310 /* The jump pad is now built. Wire in a jump to our jump pad. This
1311 is always done last (by our caller actually), so that we can
1312 install fast tracepoints with threads running. This relies on
1313 the agent's atomic write support. */
1314 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1315 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1318 "E.Jump pad too far from tracepoint "
1319 "(offset 0x%" PRIx64
" > int32).", loffset
);
1323 offset
= (int) loffset
;
1325 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1326 memcpy (buf
+ 1, &offset
, 4);
1327 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1328 *jjump_pad_insn_size
= sizeof (jump_insn
);
1330 /* Return the end address of our pad. */
1331 *jump_entry
= buildaddr
;
1336 #endif /* __x86_64__ */
1338 /* Build a jump pad that saves registers and calls a collection
1339 function. Writes a jump instruction to the jump pad to
1340 JJUMPAD_INSN. The caller is responsible to write it in at the
1341 tracepoint address. */
1344 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1345 CORE_ADDR collector
,
1348 CORE_ADDR
*jump_entry
,
1349 CORE_ADDR
*trampoline
,
1350 ULONGEST
*trampoline_size
,
1351 unsigned char *jjump_pad_insn
,
1352 ULONGEST
*jjump_pad_insn_size
,
1353 CORE_ADDR
*adjusted_insn_addr
,
1354 CORE_ADDR
*adjusted_insn_addr_end
,
1357 unsigned char buf
[0x100];
1359 CORE_ADDR buildaddr
= *jump_entry
;
1361 /* Build the jump pad. */
1363 /* First, do tracepoint data collection. Save registers. */
1365 buf
[i
++] = 0x60; /* pushad */
1366 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1367 *((int *)(buf
+ i
)) = (int) tpaddr
;
1369 buf
[i
++] = 0x9c; /* pushf */
1370 buf
[i
++] = 0x1e; /* push %ds */
1371 buf
[i
++] = 0x06; /* push %es */
1372 buf
[i
++] = 0x0f; /* push %fs */
1374 buf
[i
++] = 0x0f; /* push %gs */
1376 buf
[i
++] = 0x16; /* push %ss */
1377 buf
[i
++] = 0x0e; /* push %cs */
1378 append_insns (&buildaddr
, i
, buf
);
1380 /* Stack space for the collecting_t object. */
1382 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1384 /* Build the object. */
1385 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1386 memcpy (buf
+ i
, &tpoint
, 4);
1388 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1390 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1391 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1392 append_insns (&buildaddr
, i
, buf
);
1394 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1395 If we cared for it, this could be using xchg alternatively. */
1398 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1399 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1401 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1403 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1404 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1405 append_insns (&buildaddr
, i
, buf
);
1408 /* Set up arguments to the gdb_collect call. */
1410 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1411 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1412 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1413 append_insns (&buildaddr
, i
, buf
);
1416 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1417 append_insns (&buildaddr
, i
, buf
);
1420 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1421 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1423 append_insns (&buildaddr
, i
, buf
);
1425 buf
[0] = 0xe8; /* call <reladdr> */
1426 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1427 memcpy (buf
+ 1, &offset
, 4);
1428 append_insns (&buildaddr
, 5, buf
);
1429 /* Clean up after the call. */
1430 buf
[0] = 0x83; /* add $0x8,%esp */
1433 append_insns (&buildaddr
, 3, buf
);
1436 /* Clear the spin-lock. This would need the LOCK prefix on older
1439 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1440 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1441 memcpy (buf
+ i
, &lockaddr
, 4);
1443 append_insns (&buildaddr
, i
, buf
);
1446 /* Remove stack that had been used for the collect_t object. */
1448 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1449 append_insns (&buildaddr
, i
, buf
);
1452 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1455 buf
[i
++] = 0x17; /* pop %ss */
1456 buf
[i
++] = 0x0f; /* pop %gs */
1458 buf
[i
++] = 0x0f; /* pop %fs */
1460 buf
[i
++] = 0x07; /* pop %es */
1461 buf
[i
++] = 0x1f; /* pop %ds */
1462 buf
[i
++] = 0x9d; /* popf */
1463 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1466 buf
[i
++] = 0x61; /* popad */
1467 append_insns (&buildaddr
, i
, buf
);
1469 /* Now, adjust the original instruction to execute in the jump
1471 *adjusted_insn_addr
= buildaddr
;
1472 relocate_instruction (&buildaddr
, tpaddr
);
1473 *adjusted_insn_addr_end
= buildaddr
;
1475 /* Write the jump back to the program. */
1476 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1477 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1478 memcpy (buf
+ 1, &offset
, 4);
1479 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1481 /* The jump pad is now built. Wire in a jump to our jump pad. This
1482 is always done last (by our caller actually), so that we can
1483 install fast tracepoints with threads running. This relies on
1484 the agent's atomic write support. */
1487 /* Create a trampoline. */
1488 *trampoline_size
= sizeof (jump_insn
);
1489 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1491 /* No trampoline space available. */
1493 "E.Cannot allocate trampoline space needed for fast "
1494 "tracepoints on 4-byte instructions.");
1498 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1499 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1500 memcpy (buf
+ 1, &offset
, 4);
1501 target_write_memory (*trampoline
, buf
, sizeof (jump_insn
));
1503 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1504 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1505 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1506 memcpy (buf
+ 2, &offset
, 2);
1507 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1508 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1512 /* Else use a 32-bit relative jump instruction. */
1513 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1514 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1515 memcpy (buf
+ 1, &offset
, 4);
1516 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1517 *jjump_pad_insn_size
= sizeof (jump_insn
);
1520 /* Return the end address of our pad. */
1521 *jump_entry
= buildaddr
;
1527 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1528 CORE_ADDR collector
,
1531 CORE_ADDR
*jump_entry
,
1532 CORE_ADDR
*trampoline
,
1533 ULONGEST
*trampoline_size
,
1534 unsigned char *jjump_pad_insn
,
1535 ULONGEST
*jjump_pad_insn_size
,
1536 CORE_ADDR
*adjusted_insn_addr
,
1537 CORE_ADDR
*adjusted_insn_addr_end
,
1541 if (is_64bit_tdesc ())
1542 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1543 collector
, lockaddr
,
1544 orig_size
, jump_entry
,
1545 trampoline
, trampoline_size
,
1547 jjump_pad_insn_size
,
1549 adjusted_insn_addr_end
,
1553 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1554 collector
, lockaddr
,
1555 orig_size
, jump_entry
,
1556 trampoline
, trampoline_size
,
1558 jjump_pad_insn_size
,
1560 adjusted_insn_addr_end
,
1564 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1568 x86_get_min_fast_tracepoint_insn_len (void)
1570 static int warned_about_fast_tracepoints
= 0;
1573 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1574 used for fast tracepoints. */
1575 if (is_64bit_tdesc ())
1579 if (agent_loaded_p ())
1581 char errbuf
[IPA_BUFSIZ
];
1585 /* On x86, if trampolines are available, then 4-byte jump instructions
1586 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1587 with a 4-byte offset are used instead. */
1588 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1592 /* GDB has no channel to explain to user why a shorter fast
1593 tracepoint is not possible, but at least make GDBserver
1594 mention that something has gone awry. */
1595 if (!warned_about_fast_tracepoints
)
1597 warning ("4-byte fast tracepoints not available; %s", errbuf
);
1598 warned_about_fast_tracepoints
= 1;
1605 /* Indicate that the minimum length is currently unknown since the IPA
1606 has not loaded yet. */
1612 add_insns (unsigned char *start
, int len
)
1614 CORE_ADDR buildaddr
= current_insn_ptr
;
1617 debug_printf ("Adding %d bytes of insn at %s\n",
1618 len
, paddress (buildaddr
));
1620 append_insns (&buildaddr
, len
, start
);
1621 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant for a 64-bit gdbserver: assemble the insns in
   .code32 mode, then switch the assembler back to .code64.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1665 amd64_emit_prologue (void)
1667 EMIT_ASM (amd64_prologue
,
1669 "movq %rsp,%rbp\n\t"
1670 "sub $0x20,%rsp\n\t"
1671 "movq %rdi,-8(%rbp)\n\t"
1672 "movq %rsi,-16(%rbp)");
1677 amd64_emit_epilogue (void)
1679 EMIT_ASM (amd64_epilogue
,
1680 "movq -16(%rbp),%rdi\n\t"
1681 "movq %rax,(%rdi)\n\t"
1688 amd64_emit_add (void)
1690 EMIT_ASM (amd64_add
,
1691 "add (%rsp),%rax\n\t"
1692 "lea 0x8(%rsp),%rsp");
1696 amd64_emit_sub (void)
1698 EMIT_ASM (amd64_sub
,
1699 "sub %rax,(%rsp)\n\t"
1704 amd64_emit_mul (void)
1710 amd64_emit_lsh (void)
1716 amd64_emit_rsh_signed (void)
1722 amd64_emit_rsh_unsigned (void)
1728 amd64_emit_ext (int arg
)
1733 EMIT_ASM (amd64_ext_8
,
1739 EMIT_ASM (amd64_ext_16
,
1744 EMIT_ASM (amd64_ext_32
,
1753 amd64_emit_log_not (void)
1755 EMIT_ASM (amd64_log_not
,
1756 "test %rax,%rax\n\t"
1762 amd64_emit_bit_and (void)
1764 EMIT_ASM (amd64_and
,
1765 "and (%rsp),%rax\n\t"
1766 "lea 0x8(%rsp),%rsp");
1770 amd64_emit_bit_or (void)
1773 "or (%rsp),%rax\n\t"
1774 "lea 0x8(%rsp),%rsp");
1778 amd64_emit_bit_xor (void)
1780 EMIT_ASM (amd64_xor
,
1781 "xor (%rsp),%rax\n\t"
1782 "lea 0x8(%rsp),%rsp");
1786 amd64_emit_bit_not (void)
1788 EMIT_ASM (amd64_bit_not
,
1789 "xorq $0xffffffffffffffff,%rax");
1793 amd64_emit_equal (void)
1795 EMIT_ASM (amd64_equal
,
1796 "cmp %rax,(%rsp)\n\t"
1797 "je .Lamd64_equal_true\n\t"
1799 "jmp .Lamd64_equal_end\n\t"
1800 ".Lamd64_equal_true:\n\t"
1802 ".Lamd64_equal_end:\n\t"
1803 "lea 0x8(%rsp),%rsp");
1807 amd64_emit_less_signed (void)
1809 EMIT_ASM (amd64_less_signed
,
1810 "cmp %rax,(%rsp)\n\t"
1811 "jl .Lamd64_less_signed_true\n\t"
1813 "jmp .Lamd64_less_signed_end\n\t"
1814 ".Lamd64_less_signed_true:\n\t"
1816 ".Lamd64_less_signed_end:\n\t"
1817 "lea 0x8(%rsp),%rsp");
1821 amd64_emit_less_unsigned (void)
1823 EMIT_ASM (amd64_less_unsigned
,
1824 "cmp %rax,(%rsp)\n\t"
1825 "jb .Lamd64_less_unsigned_true\n\t"
1827 "jmp .Lamd64_less_unsigned_end\n\t"
1828 ".Lamd64_less_unsigned_true:\n\t"
1830 ".Lamd64_less_unsigned_end:\n\t"
1831 "lea 0x8(%rsp),%rsp");
1835 amd64_emit_ref (int size
)
1840 EMIT_ASM (amd64_ref1
,
1844 EMIT_ASM (amd64_ref2
,
1848 EMIT_ASM (amd64_ref4
,
1849 "movl (%rax),%eax");
1852 EMIT_ASM (amd64_ref8
,
1853 "movq (%rax),%rax");
1859 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1861 EMIT_ASM (amd64_if_goto
,
1865 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1873 amd64_emit_goto (int *offset_p
, int *size_p
)
1875 EMIT_ASM (amd64_goto
,
1876 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1884 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1886 int diff
= (to
- (from
+ size
));
1887 unsigned char buf
[sizeof (int)];
1895 memcpy (buf
, &diff
, sizeof (int));
1896 target_write_memory (from
, buf
, sizeof (int));
1900 amd64_emit_const (LONGEST num
)
1902 unsigned char buf
[16];
1904 CORE_ADDR buildaddr
= current_insn_ptr
;
1907 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1908 memcpy (&buf
[i
], &num
, sizeof (num
));
1910 append_insns (&buildaddr
, i
, buf
);
1911 current_insn_ptr
= buildaddr
;
1915 amd64_emit_call (CORE_ADDR fn
)
1917 unsigned char buf
[16];
1919 CORE_ADDR buildaddr
;
1922 /* The destination function being in the shared library, may be
1923 >31-bits away off the compiled code pad. */
1925 buildaddr
= current_insn_ptr
;
1927 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1931 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1933 /* Offset is too large for a call. Use callq, but that requires
1934 a register, so avoid it if possible. Use r10, since it is
1935 call-clobbered, we don't have to push/pop it. */
1936 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1938 memcpy (buf
+ i
, &fn
, 8);
1940 buf
[i
++] = 0xff; /* callq *%r10 */
1945 int offset32
= offset64
; /* we know we can't overflow here. */
1947 buf
[i
++] = 0xe8; /* call <reladdr> */
1948 memcpy (buf
+ i
, &offset32
, 4);
1952 append_insns (&buildaddr
, i
, buf
);
1953 current_insn_ptr
= buildaddr
;
1957 amd64_emit_reg (int reg
)
1959 unsigned char buf
[16];
1961 CORE_ADDR buildaddr
;
1963 /* Assume raw_regs is still in %rdi. */
1964 buildaddr
= current_insn_ptr
;
1966 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1967 memcpy (&buf
[i
], ®
, sizeof (reg
));
1969 append_insns (&buildaddr
, i
, buf
);
1970 current_insn_ptr
= buildaddr
;
1971 amd64_emit_call (get_raw_reg_func_addr ());
1975 amd64_emit_pop (void)
1977 EMIT_ASM (amd64_pop
,
1982 amd64_emit_stack_flush (void)
1984 EMIT_ASM (amd64_stack_flush
,
1989 amd64_emit_zero_ext (int arg
)
1994 EMIT_ASM (amd64_zero_ext_8
,
1998 EMIT_ASM (amd64_zero_ext_16
,
1999 "and $0xffff,%rax");
2002 EMIT_ASM (amd64_zero_ext_32
,
2003 "mov $0xffffffff,%rcx\n\t"
2012 amd64_emit_swap (void)
2014 EMIT_ASM (amd64_swap
,
2021 amd64_emit_stack_adjust (int n
)
2023 unsigned char buf
[16];
2025 CORE_ADDR buildaddr
= current_insn_ptr
;
2028 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2032 /* This only handles adjustments up to 16, but we don't expect any more. */
2034 append_insns (&buildaddr
, i
, buf
);
2035 current_insn_ptr
= buildaddr
;
2038 /* FN's prototype is `LONGEST(*fn)(int)'. */
2041 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2043 unsigned char buf
[16];
2045 CORE_ADDR buildaddr
;
2047 buildaddr
= current_insn_ptr
;
2049 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2050 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2052 append_insns (&buildaddr
, i
, buf
);
2053 current_insn_ptr
= buildaddr
;
2054 amd64_emit_call (fn
);
2057 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2060 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2062 unsigned char buf
[16];
2064 CORE_ADDR buildaddr
;
2066 buildaddr
= current_insn_ptr
;
2068 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2069 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2071 append_insns (&buildaddr
, i
, buf
);
2072 current_insn_ptr
= buildaddr
;
2073 EMIT_ASM (amd64_void_call_2_a
,
2074 /* Save away a copy of the stack top. */
2076 /* Also pass top as the second argument. */
2078 amd64_emit_call (fn
);
2079 EMIT_ASM (amd64_void_call_2_b
,
2080 /* Restore the stack top, %rax may have been trashed. */
2085 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2088 "cmp %rax,(%rsp)\n\t"
2089 "jne .Lamd64_eq_fallthru\n\t"
2090 "lea 0x8(%rsp),%rsp\n\t"
2092 /* jmp, but don't trust the assembler to choose the right jump */
2093 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2094 ".Lamd64_eq_fallthru:\n\t"
2095 "lea 0x8(%rsp),%rsp\n\t"
2105 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2108 "cmp %rax,(%rsp)\n\t"
2109 "je .Lamd64_ne_fallthru\n\t"
2110 "lea 0x8(%rsp),%rsp\n\t"
2112 /* jmp, but don't trust the assembler to choose the right jump */
2113 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2114 ".Lamd64_ne_fallthru:\n\t"
2115 "lea 0x8(%rsp),%rsp\n\t"
2125 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2128 "cmp %rax,(%rsp)\n\t"
2129 "jnl .Lamd64_lt_fallthru\n\t"
2130 "lea 0x8(%rsp),%rsp\n\t"
2132 /* jmp, but don't trust the assembler to choose the right jump */
2133 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2134 ".Lamd64_lt_fallthru:\n\t"
2135 "lea 0x8(%rsp),%rsp\n\t"
2145 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2148 "cmp %rax,(%rsp)\n\t"
2149 "jnle .Lamd64_le_fallthru\n\t"
2150 "lea 0x8(%rsp),%rsp\n\t"
2152 /* jmp, but don't trust the assembler to choose the right jump */
2153 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2154 ".Lamd64_le_fallthru:\n\t"
2155 "lea 0x8(%rsp),%rsp\n\t"
2165 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2168 "cmp %rax,(%rsp)\n\t"
2169 "jng .Lamd64_gt_fallthru\n\t"
2170 "lea 0x8(%rsp),%rsp\n\t"
2172 /* jmp, but don't trust the assembler to choose the right jump */
2173 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2174 ".Lamd64_gt_fallthru:\n\t"
2175 "lea 0x8(%rsp),%rsp\n\t"
2185 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2188 "cmp %rax,(%rsp)\n\t"
2189 "jnge .Lamd64_ge_fallthru\n\t"
2190 ".Lamd64_ge_jump:\n\t"
2191 "lea 0x8(%rsp),%rsp\n\t"
2193 /* jmp, but don't trust the assembler to choose the right jump */
2194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2195 ".Lamd64_ge_fallthru:\n\t"
2196 "lea 0x8(%rsp),%rsp\n\t"
2205 struct emit_ops amd64_emit_ops
=
2207 amd64_emit_prologue
,
2208 amd64_emit_epilogue
,
2213 amd64_emit_rsh_signed
,
2214 amd64_emit_rsh_unsigned
,
2222 amd64_emit_less_signed
,
2223 amd64_emit_less_unsigned
,
2227 amd64_write_goto_address
,
2232 amd64_emit_stack_flush
,
2233 amd64_emit_zero_ext
,
2235 amd64_emit_stack_adjust
,
2236 amd64_emit_int_call_1
,
2237 amd64_emit_void_call_2
,
2246 #endif /* __x86_64__ */
2249 i386_emit_prologue (void)
2251 EMIT_ASM32 (i386_prologue
,
2255 /* At this point, the raw regs base address is at 8(%ebp), and the
2256 value pointer is at 12(%ebp). */
2260 i386_emit_epilogue (void)
2262 EMIT_ASM32 (i386_epilogue
,
2263 "mov 12(%ebp),%ecx\n\t"
2264 "mov %eax,(%ecx)\n\t"
2265 "mov %ebx,0x4(%ecx)\n\t"
2273 i386_emit_add (void)
2275 EMIT_ASM32 (i386_add
,
2276 "add (%esp),%eax\n\t"
2277 "adc 0x4(%esp),%ebx\n\t"
2278 "lea 0x8(%esp),%esp");
2282 i386_emit_sub (void)
2284 EMIT_ASM32 (i386_sub
,
2285 "subl %eax,(%esp)\n\t"
2286 "sbbl %ebx,4(%esp)\n\t"
2292 i386_emit_mul (void)
2298 i386_emit_lsh (void)
2304 i386_emit_rsh_signed (void)
2310 i386_emit_rsh_unsigned (void)
2316 i386_emit_ext (int arg
)
2321 EMIT_ASM32 (i386_ext_8
,
2324 "movl %eax,%ebx\n\t"
2328 EMIT_ASM32 (i386_ext_16
,
2330 "movl %eax,%ebx\n\t"
2334 EMIT_ASM32 (i386_ext_32
,
2335 "movl %eax,%ebx\n\t"
2344 i386_emit_log_not (void)
2346 EMIT_ASM32 (i386_log_not
,
2348 "test %eax,%eax\n\t"
2355 i386_emit_bit_and (void)
2357 EMIT_ASM32 (i386_and
,
2358 "and (%esp),%eax\n\t"
2359 "and 0x4(%esp),%ebx\n\t"
2360 "lea 0x8(%esp),%esp");
2364 i386_emit_bit_or (void)
2366 EMIT_ASM32 (i386_or
,
2367 "or (%esp),%eax\n\t"
2368 "or 0x4(%esp),%ebx\n\t"
2369 "lea 0x8(%esp),%esp");
2373 i386_emit_bit_xor (void)
2375 EMIT_ASM32 (i386_xor
,
2376 "xor (%esp),%eax\n\t"
2377 "xor 0x4(%esp),%ebx\n\t"
2378 "lea 0x8(%esp),%esp");
2382 i386_emit_bit_not (void)
2384 EMIT_ASM32 (i386_bit_not
,
2385 "xor $0xffffffff,%eax\n\t"
2386 "xor $0xffffffff,%ebx\n\t");
2390 i386_emit_equal (void)
2392 EMIT_ASM32 (i386_equal
,
2393 "cmpl %ebx,4(%esp)\n\t"
2394 "jne .Li386_equal_false\n\t"
2395 "cmpl %eax,(%esp)\n\t"
2396 "je .Li386_equal_true\n\t"
2397 ".Li386_equal_false:\n\t"
2399 "jmp .Li386_equal_end\n\t"
2400 ".Li386_equal_true:\n\t"
2402 ".Li386_equal_end:\n\t"
2404 "lea 0x8(%esp),%esp");
2408 i386_emit_less_signed (void)
2410 EMIT_ASM32 (i386_less_signed
,
2411 "cmpl %ebx,4(%esp)\n\t"
2412 "jl .Li386_less_signed_true\n\t"
2413 "jne .Li386_less_signed_false\n\t"
2414 "cmpl %eax,(%esp)\n\t"
2415 "jl .Li386_less_signed_true\n\t"
2416 ".Li386_less_signed_false:\n\t"
2418 "jmp .Li386_less_signed_end\n\t"
2419 ".Li386_less_signed_true:\n\t"
2421 ".Li386_less_signed_end:\n\t"
2423 "lea 0x8(%esp),%esp");
2427 i386_emit_less_unsigned (void)
2429 EMIT_ASM32 (i386_less_unsigned
,
2430 "cmpl %ebx,4(%esp)\n\t"
2431 "jb .Li386_less_unsigned_true\n\t"
2432 "jne .Li386_less_unsigned_false\n\t"
2433 "cmpl %eax,(%esp)\n\t"
2434 "jb .Li386_less_unsigned_true\n\t"
2435 ".Li386_less_unsigned_false:\n\t"
2437 "jmp .Li386_less_unsigned_end\n\t"
2438 ".Li386_less_unsigned_true:\n\t"
2440 ".Li386_less_unsigned_end:\n\t"
2442 "lea 0x8(%esp),%esp");
2446 i386_emit_ref (int size
)
2451 EMIT_ASM32 (i386_ref1
,
2455 EMIT_ASM32 (i386_ref2
,
2459 EMIT_ASM32 (i386_ref4
,
2460 "movl (%eax),%eax");
2463 EMIT_ASM32 (i386_ref8
,
2464 "movl 4(%eax),%ebx\n\t"
2465 "movl (%eax),%eax");
2471 i386_emit_if_goto (int *offset_p
, int *size_p
)
2473 EMIT_ASM32 (i386_if_goto
,
2479 /* Don't trust the assembler to choose the right jump */
2480 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2483 *offset_p
= 11; /* be sure that this matches the sequence above */
2489 i386_emit_goto (int *offset_p
, int *size_p
)
2491 EMIT_ASM32 (i386_goto
,
2492 /* Don't trust the assembler to choose the right jump */
2493 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2501 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2503 int diff
= (to
- (from
+ size
));
2504 unsigned char buf
[sizeof (int)];
2506 /* We're only doing 4-byte sizes at the moment. */
2513 memcpy (buf
, &diff
, sizeof (int));
2514 target_write_memory (from
, buf
, sizeof (int));
2518 i386_emit_const (LONGEST num
)
2520 unsigned char buf
[16];
2522 CORE_ADDR buildaddr
= current_insn_ptr
;
2525 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2526 lo
= num
& 0xffffffff;
2527 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2529 hi
= ((num
>> 32) & 0xffffffff);
2532 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2533 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2538 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2540 append_insns (&buildaddr
, i
, buf
);
2541 current_insn_ptr
= buildaddr
;
2545 i386_emit_call (CORE_ADDR fn
)
2547 unsigned char buf
[16];
2549 CORE_ADDR buildaddr
;
2551 buildaddr
= current_insn_ptr
;
2553 buf
[i
++] = 0xe8; /* call <reladdr> */
2554 offset
= ((int) fn
) - (buildaddr
+ 5);
2555 memcpy (buf
+ 1, &offset
, 4);
2556 append_insns (&buildaddr
, 5, buf
);
2557 current_insn_ptr
= buildaddr
;
2561 i386_emit_reg (int reg
)
2563 unsigned char buf
[16];
2565 CORE_ADDR buildaddr
;
2567 EMIT_ASM32 (i386_reg_a
,
2569 buildaddr
= current_insn_ptr
;
2571 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2572 memcpy (&buf
[i
], ®
, sizeof (reg
));
2574 append_insns (&buildaddr
, i
, buf
);
2575 current_insn_ptr
= buildaddr
;
2576 EMIT_ASM32 (i386_reg_b
,
2577 "mov %eax,4(%esp)\n\t"
2578 "mov 8(%ebp),%eax\n\t"
2580 i386_emit_call (get_raw_reg_func_addr ());
2581 EMIT_ASM32 (i386_reg_c
,
2583 "lea 0x8(%esp),%esp");
2587 i386_emit_pop (void)
2589 EMIT_ASM32 (i386_pop
,
2595 i386_emit_stack_flush (void)
2597 EMIT_ASM32 (i386_stack_flush
,
2603 i386_emit_zero_ext (int arg
)
2608 EMIT_ASM32 (i386_zero_ext_8
,
2609 "and $0xff,%eax\n\t"
2613 EMIT_ASM32 (i386_zero_ext_16
,
2614 "and $0xffff,%eax\n\t"
2618 EMIT_ASM32 (i386_zero_ext_32
,
2627 i386_emit_swap (void)
2629 EMIT_ASM32 (i386_swap
,
2639 i386_emit_stack_adjust (int n
)
2641 unsigned char buf
[16];
2643 CORE_ADDR buildaddr
= current_insn_ptr
;
2646 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2650 append_insns (&buildaddr
, i
, buf
);
2651 current_insn_ptr
= buildaddr
;
2654 /* FN's prototype is `LONGEST(*fn)(int)'. */
2657 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2659 unsigned char buf
[16];
2661 CORE_ADDR buildaddr
;
2663 EMIT_ASM32 (i386_int_call_1_a
,
2664 /* Reserve a bit of stack space. */
2666 /* Put the one argument on the stack. */
2667 buildaddr
= current_insn_ptr
;
2669 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2672 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2674 append_insns (&buildaddr
, i
, buf
);
2675 current_insn_ptr
= buildaddr
;
2676 i386_emit_call (fn
);
2677 EMIT_ASM32 (i386_int_call_1_c
,
2679 "lea 0x8(%esp),%esp");
2682 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2685 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2687 unsigned char buf
[16];
2689 CORE_ADDR buildaddr
;
2691 EMIT_ASM32 (i386_void_call_2_a
,
2692 /* Preserve %eax only; we don't have to worry about %ebx. */
2694 /* Reserve a bit of stack space for arguments. */
2695 "sub $0x10,%esp\n\t"
2696 /* Copy "top" to the second argument position. (Note that
2697 we can't assume function won't scribble on its
2698 arguments, so don't try to restore from this.) */
2699 "mov %eax,4(%esp)\n\t"
2700 "mov %ebx,8(%esp)");
2701 /* Put the first argument on the stack. */
2702 buildaddr
= current_insn_ptr
;
2704 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2707 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2709 append_insns (&buildaddr
, i
, buf
);
2710 current_insn_ptr
= buildaddr
;
2711 i386_emit_call (fn
);
2712 EMIT_ASM32 (i386_void_call_2_b
,
2713 "lea 0x10(%esp),%esp\n\t"
2714 /* Restore original stack top. */
2720 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2723 /* Check low half first, more likely to be decider */
2724 "cmpl %eax,(%esp)\n\t"
2725 "jne .Leq_fallthru\n\t"
2726 "cmpl %ebx,4(%esp)\n\t"
2727 "jne .Leq_fallthru\n\t"
2728 "lea 0x8(%esp),%esp\n\t"
2731 /* jmp, but don't trust the assembler to choose the right jump */
2732 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2733 ".Leq_fallthru:\n\t"
2734 "lea 0x8(%esp),%esp\n\t"
2745 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2748 /* Check low half first, more likely to be decider */
2749 "cmpl %eax,(%esp)\n\t"
2751 "cmpl %ebx,4(%esp)\n\t"
2752 "je .Lne_fallthru\n\t"
2754 "lea 0x8(%esp),%esp\n\t"
2757 /* jmp, but don't trust the assembler to choose the right jump */
2758 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2759 ".Lne_fallthru:\n\t"
2760 "lea 0x8(%esp),%esp\n\t"
2771 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2774 "cmpl %ebx,4(%esp)\n\t"
2776 "jne .Llt_fallthru\n\t"
2777 "cmpl %eax,(%esp)\n\t"
2778 "jnl .Llt_fallthru\n\t"
2780 "lea 0x8(%esp),%esp\n\t"
2783 /* jmp, but don't trust the assembler to choose the right jump */
2784 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2785 ".Llt_fallthru:\n\t"
2786 "lea 0x8(%esp),%esp\n\t"
2797 i386_emit_le_goto (int *offset_p
, int *size_p
)
2800 "cmpl %ebx,4(%esp)\n\t"
2802 "jne .Lle_fallthru\n\t"
2803 "cmpl %eax,(%esp)\n\t"
2804 "jnle .Lle_fallthru\n\t"
2806 "lea 0x8(%esp),%esp\n\t"
2809 /* jmp, but don't trust the assembler to choose the right jump */
2810 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2811 ".Lle_fallthru:\n\t"
2812 "lea 0x8(%esp),%esp\n\t"
2823 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2826 "cmpl %ebx,4(%esp)\n\t"
2828 "jne .Lgt_fallthru\n\t"
2829 "cmpl %eax,(%esp)\n\t"
2830 "jng .Lgt_fallthru\n\t"
2832 "lea 0x8(%esp),%esp\n\t"
2835 /* jmp, but don't trust the assembler to choose the right jump */
2836 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2837 ".Lgt_fallthru:\n\t"
2838 "lea 0x8(%esp),%esp\n\t"
2849 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2852 "cmpl %ebx,4(%esp)\n\t"
2854 "jne .Lge_fallthru\n\t"
2855 "cmpl %eax,(%esp)\n\t"
2856 "jnge .Lge_fallthru\n\t"
2858 "lea 0x8(%esp),%esp\n\t"
2861 /* jmp, but don't trust the assembler to choose the right jump */
2862 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2863 ".Lge_fallthru:\n\t"
2864 "lea 0x8(%esp),%esp\n\t"
2874 struct emit_ops i386_emit_ops
=
2882 i386_emit_rsh_signed
,
2883 i386_emit_rsh_unsigned
,
2891 i386_emit_less_signed
,
2892 i386_emit_less_unsigned
,
2896 i386_write_goto_address
,
2901 i386_emit_stack_flush
,
2904 i386_emit_stack_adjust
,
2905 i386_emit_int_call_1
,
2906 i386_emit_void_call_2
,
2916 static struct emit_ops
*
2920 if (is_64bit_tdesc ())
2921 return &amd64_emit_ops
;
2924 return &i386_emit_ops
;
2927 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2930 x86_target::sw_breakpoint_from_kind (int kind
, int *size
)
2932 *size
= x86_breakpoint_len
;
2933 return x86_breakpoint
;
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
   */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2952 x86_get_ipa_tdesc_idx (void)
2954 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
2955 const struct target_desc
*tdesc
= regcache
->tdesc
;
2958 return amd64_get_ipa_tdesc_idx (tdesc
);
2961 if (tdesc
== tdesc_i386_linux_no_xml
)
2962 return X86_TDESC_SSE
;
2964 return i386_get_ipa_tdesc_idx (tdesc
);
2967 /* This is initialized assuming an amd64 target.
2968 x86_arch_setup will correct it for i386 or amd64 targets. */
2970 struct linux_target_ops the_low_target
=
2972 x86_get_thread_area
,
2973 x86_install_fast_tracepoint_jump_pad
,
2975 x86_get_min_fast_tracepoint_insn_len
,
2976 x86_supports_range_stepping
,
2977 x86_supports_hardware_single_step
,
2978 x86_get_syscall_trapinfo
,
2979 x86_get_ipa_tdesc_idx
,
2982 /* The linux target ops object. */
2984 linux_process_target
*the_linux_target
= &the_x86_target
;
2987 initialize_low_arch (void)
2989 /* Initialize the Linux target descriptions. */
2991 tdesc_amd64_linux_no_xml
= allocate_target_description ();
2992 copy_target_description (tdesc_amd64_linux_no_xml
,
2993 amd64_linux_read_description (X86_XSTATE_SSE_MASK
,
2995 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
2998 tdesc_i386_linux_no_xml
= allocate_target_description ();
2999 copy_target_description (tdesc_i386_linux_no_xml
,
3000 i386_linux_read_description (X86_XSTATE_SSE_MASK
));
3001 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3003 initialize_regsets_info (&x86_regsets_info
);