/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"
#include "nat/amd64-linux-siginfo.h"
#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#include "elf/common.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
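
/* Illustrative sketch (not part of the original file): how these
   constants combine with ptrace.  For a ptrace-stopped LWP,
   PTRACE_ARCH_PRCTL with ARCH_GET_FS stores that thread's FS segment
   base through the address argument, mirroring what arch_prctl(2)
   does for the calling thread.  TID is assumed to be an attached,
   stopped thread; headers such as <sys/ptrace.h> are assumed.  */
#if 0
static int
example_read_fs_base (int tid, unsigned long *fs_base)
{
  /* The address argument receives the base; the data argument
     selects which base to read.  */
  if (ptrace (PTRACE_ARCH_PRCTL, tid, fs_base, ARCH_GET_FS) != 0)
    return -1;			/* not attached, or kernel too old */
  return 0;
}
#endif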
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
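
/* Illustrative sketch (not part of the original file): a regmap entry
   is the byte offset of that register inside the ptrace GPR block, so
   fetching GDB register REGNO out of such a block is just a
   register-sized copy starting at BUF + i386_regmap[REGNO].  A minimal
   sketch; the real code goes through collect_register instead.  */
#if 0
#include <string.h>

static void
example_fetch_reg (const char *buf, int regno, void *value, int size)
{
  /* SIZE is the register's size per the target description; the slot
     itself is 8 bytes wide in this biarch layout.  */
  memcpy (value, buf + i386_regmap[regno], size);
}
#endif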
/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,		/* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
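
/* Illustrative sketch (not part of the original file):
   PTRACE_GET_THREAD_AREA fills a `struct user_desc'-shaped block of
   four unsigned ints for GDT slot IDX; word 1 is the segment base,
   which is why the code above takes desc[1].  A hedged sketch with
   error handling elided; TID is assumed stopped and attached.  */
#if 0
static int
example_tls_base (int tid, int idx, unsigned long *base)
{
  unsigned int desc[4];		/* entry_number, base_addr, limit, flags */

  if (ptrace (PTRACE_GET_THREAD_AREA, tid,
	      (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
    return -1;

  *base = desc[1];		/* the segment's base address */
  return 0;
}
#endif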
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
}
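
/* Illustrative sketch (not part of the original file): the in-place
   widening above relies on the low half of the 8-byte slot holding
   the 32-bit value on a little-endian host.  The same idea in
   isolation:  */
#if 0
#include <stdint.h>

static void
example_sign_extend_slot (void *slot)
{
  /* Read the 32-bit value and rewrite the full 8-byte slot, so e.g. a
     negative EAX (a pending syscall-restart value) stays negative
     when the kernel reads it back as 64-bit RAX.  */
  *(int64_t *) slot = *(int32_t *) slot;
}
#endif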
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
#ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
#endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */
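
/* Illustrative sketch (not part of the original file): the general
   shape of such a fixup.  The leading int fields happen to have the
   same layout in both ABIs; only the union members carrying pointers
   or longs need real translation.  The struct below is a hypothetical
   stand-in for the machinery in nat/amd64-linux-siginfo.h.  */
#if 0
struct example_siginfo32
{
  int si_signo, si_errno, si_code;
  /* ... 32-bit layout of the union follows ...  */
};

static void
example_fixup (siginfo_t *native, struct example_siginfo32 *inf,
	       int direction)
{
  if (direction == 1)		/* inferior -> ptrace */
    {
      native->si_signo = inf->si_signo;
      native->si_errno = inf->si_errno;
      native->si_code = inf->si_code;
    }
  else				/* ptrace -> inferior */
    {
      inf->si_signo = native->si_signo;
      inf->si_errno = native->si_errno;
      inf->si_code = native->si_code;
    }
}
#endif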
static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
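
/* Illustrative sketch (not part of the original file): pulling the
   XCR0 mask out of an XSAVE buffer fetched with PTRACE_GETREGSET /
   NT_X86_XSTATE, as the detection code below does with an array
   index.  */
#if 0
#include <stdint.h>
#include <string.h>

static uint64_t
example_xcr0_from_xsave (const void *xsave_buf)
{
  uint64_t xcr0;

  /* The OS-enabled feature mask lives in the sw_usable_bytes area,
     starting at byte 464.  */
  memcpy (&xcr0,
	  (const char *) xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	  sizeof xcr0);
  return xcr0;
}
#endif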
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
      x86_xcr0 = X86_XSTATE_SSE_MASK;

#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  the_x86_target.update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;
1106 /* First, do tracepoint data collection. Save registers. */
1108 /* Need to ensure stack pointer saved first. */
1109 buf
[i
++] = 0x54; /* push %rsp */
1110 buf
[i
++] = 0x55; /* push %rbp */
1111 buf
[i
++] = 0x57; /* push %rdi */
1112 buf
[i
++] = 0x56; /* push %rsi */
1113 buf
[i
++] = 0x52; /* push %rdx */
1114 buf
[i
++] = 0x51; /* push %rcx */
1115 buf
[i
++] = 0x53; /* push %rbx */
1116 buf
[i
++] = 0x50; /* push %rax */
1117 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1118 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1119 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1120 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1121 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1122 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1123 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1124 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1125 buf
[i
++] = 0x9c; /* pushfq */
1126 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1128 memcpy (buf
+ i
, &tpaddr
, 8);
1130 buf
[i
++] = 0x57; /* push %rdi */
1131 append_insns (&buildaddr
, i
, buf
);
  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);
  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be
     more than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);
  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);
  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);
  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
#endif /* __x86_64__ */
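
/* Illustrative sketch (not part of the original file): both jump pads
   wire in jumps using the usual x86 rel32 rule -- the displacement is
   measured from the end of the jump instruction -- and must be
   range-checked before truncating to 32 bits, as the code above does
   with LOFFSET.  */
#if 0
#include <limits.h>
#include <stdint.h>

static int
example_rel32 (uint64_t insn_addr, uint64_t insn_len, uint64_t target,
	       int32_t *disp)
{
  int64_t d = (int64_t) target - (int64_t) (insn_addr + insn_len);

  if (d > INT_MAX || d < INT_MIN)
    return -1;			/* too far for a rel32 jump */
  *disp = (int32_t) d;
  return 0;
}
#endif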
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;
  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);
  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);
  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);
  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);
  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken CPUs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);
  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);
  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);
  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
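
/* Illustrative sketch (not part of the original file): what an
   EMIT_ASM use roughly boils down to.  The inline asm jumps over the
   emitted instructions so they are never executed inside gdbserver
   itself; the start_/end_ labels only exist so add_insns can copy the
   raw bytes into the inferior's jump pad.  */
#if 0
static void
example_emit_use (void)
{
  /* EMIT_ASM (example, "xor %rax,%rax") roughly expands to: */
  do
    {
      extern unsigned char start_example, end_example;
      add_insns (&start_example, &end_example - &start_example);
      __asm__ ("jmp end_example\n"
	       "\t" "start_example:\n"
	       "\t" "xor %rax,%rax\n"
	       "\t" "end_example:");
    }
  while (0);
}
#endif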
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto,
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx");
}
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto,
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}