1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
/* A 5-byte relative jump (0xe9 rel32); the 32-bit displacement is
   patched in before each use.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* Backward compatibility for gdb without XML support.  These fixed
   target descriptions are sent when the connected GDB cannot accept
   an XML target description.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
61 #include <sys/procfs.h>
62 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests that older kernel headers
   may lack.  The numeric values match the Linux UAPI.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
92 /* Per-process arch-specific data we want to keep. */
94 struct arch_process_info
96 struct i386_debug_reg_state debug_reg_state
;
99 /* Per-thread arch-specific data we want to keep. */
103 /* Non-zero if our copy differs from what's recorded in the thread. */
104 int debug_registers_changed
;
109 /* Mapping between the general-purpose registers in `struct user'
110 format and GDB's register array layout.
111 Note that the transfer layout uses 64-bit regs. */
112 static /*const*/ int i386_regmap
[] =
114 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
115 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
116 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
117 DS
* 8, ES
* 8, FS
* 8, GS
* 8
120 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
122 /* So code below doesn't have to care, i386 or amd64. */
123 #define ORIG_EAX ORIG_RAX
125 static const int x86_64_regmap
[] =
127 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
128 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
129 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
130 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
131 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
132 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 -1, -1, -1, -1, -1, -1, -1, -1,
136 -1, -1, -1, -1, -1, -1, -1, -1, -1,
140 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
142 #else /* ! __x86_64__ */
144 /* Mapping between the general-purpose registers in `struct user'
145 format and GDB's register array layout. */
146 static /*const*/ int i386_regmap
[] =
148 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
149 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
150 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
151 DS
* 4, ES
* 4, FS
* 4, GS
* 4
154 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
158 /* Called by libthread_db. */
161 ps_get_thread_area (const struct ps_prochandle
*ph
,
162 lwpid_t lwpid
, int idx
, void **base
)
165 int use_64bit
= register_size (0) == 8;
172 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
176 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
187 unsigned int desc
[4];
189 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
190 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
193 *(int *)base
= desc
[1];
198 /* Get the thread area address. This is used to recognize which
199 thread is which when tracing with the in-process agent library. We
200 don't read anything from the address, and treat it as opaque; it's
201 the address itself that we assume is unique per-thread. */
204 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
207 int use_64bit
= register_size (0) == 8;
212 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
214 *addr
= (CORE_ADDR
) (uintptr_t) base
;
223 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
224 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
225 unsigned int desc
[4];
227 const int reg_thread_area
= 3; /* bits to scale down register value. */
230 collect_register_by_name (regcache
, "gs", &gs
);
232 idx
= gs
>> reg_thread_area
;
234 if (ptrace (PTRACE_GET_THREAD_AREA
,
236 (void *) (long) idx
, (unsigned long) &desc
) < 0)
247 i386_cannot_store_register (int regno
)
249 return regno
>= I386_NUM_REGS
;
253 i386_cannot_fetch_register (int regno
)
255 return regno
>= I386_NUM_REGS
;
259 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
264 if (register_size (0) == 8)
266 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
267 if (x86_64_regmap
[i
] != -1)
268 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
273 for (i
= 0; i
< I386_NUM_REGS
; i
++)
274 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
276 collect_register_by_name (regcache
, "orig_eax",
277 ((char *) buf
) + ORIG_EAX
* 4);
281 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
286 if (register_size (0) == 8)
288 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
289 if (x86_64_regmap
[i
] != -1)
290 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
295 for (i
= 0; i
< I386_NUM_REGS
; i
++)
296 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
298 supply_register_by_name (regcache
, "orig_eax",
299 ((char *) buf
) + ORIG_EAX
* 4);
/* Convert the regcache's FP state into the ptrace FP buffer BUF
   (fxsave layout on 64-bit, fsave layout on 32-bit).  */
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Convert the ptrace FP buffer BUF into the regcache's FP state
   (fxsave layout on 64-bit, fsave layout on 32-bit).  */
static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Convert the regcache's FP/SSE state into the fxsave-format buffer
   BUF (32-bit PTRACE_SETFPXREGS path).  */
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Convert the fxsave-format buffer BUF into the regcache's FP/SSE
   state (32-bit PTRACE_GETFPXREGS path).  */
static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Convert the regcache's extended state into the xsave-format buffer
   BUF (PTRACE_SETREGSET/NT_X86_XSTATE path).  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Convert the xsave-format buffer BUF into the regcache's extended
   state (PTRACE_GETREGSET/NT_X86_XSTATE path).  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
350 /* ??? The non-biarch i386 case stores all the i387 regs twice.
351 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
352 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
353 doesn't work. IWBN to avoid the duplication in the case where it
354 does work. Maybe the arch_setup routine could check whether it works
355 and update target_regsets accordingly, maybe by moving target_regsets
356 to linux_target_ops and set the right one there, rather than having to
357 modify the target_regsets global. */
359 struct regset_info target_regsets
[] =
361 #ifdef HAVE_PTRACE_GETREGS
362 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
364 x86_fill_gregset
, x86_store_gregset
},
365 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
366 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
368 # ifdef HAVE_PTRACE_GETFPXREGS
369 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
371 x86_fill_fpxregset
, x86_store_fpxregset
},
374 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
376 x86_fill_fpregset
, x86_store_fpregset
},
377 #endif /* HAVE_PTRACE_GETREGS */
378 { 0, 0, 0, -1, -1, NULL
, NULL
}
382 x86_get_pc (struct regcache
*regcache
)
384 int use_64bit
= register_size (0) == 8;
389 collect_register_by_name (regcache
, "rip", &pc
);
390 return (CORE_ADDR
) pc
;
395 collect_register_by_name (regcache
, "eip", &pc
);
396 return (CORE_ADDR
) pc
;
401 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
403 int use_64bit
= register_size (0) == 8;
407 unsigned long newpc
= pc
;
408 supply_register_by_name (regcache
, "rip", &newpc
);
412 unsigned int newpc
= pc
;
413 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction: int3 (one byte, 0xCC).  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
421 x86_breakpoint_at (CORE_ADDR pc
)
425 (*the_target
->read_memory
) (pc
, &c
, 1);
432 /* Support for debug registers. */
435 x86_linux_dr_get (ptid_t ptid
, int regnum
)
440 tid
= ptid_get_lwp (ptid
);
443 value
= ptrace (PTRACE_PEEKUSER
, tid
,
444 offsetof (struct user
, u_debugreg
[regnum
]), 0);
446 error ("Couldn't read debug register");
452 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
456 tid
= ptid_get_lwp (ptid
);
459 ptrace (PTRACE_POKEUSER
, tid
,
460 offsetof (struct user
, u_debugreg
[regnum
]), value
);
462 error ("Couldn't write debug register");
466 update_debug_registers_callback (struct inferior_list_entry
*entry
,
469 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
470 int pid
= *(int *) pid_p
;
472 /* Only update the threads of this process. */
473 if (pid_of (lwp
) == pid
)
475 /* The actual update is done later just before resuming the lwp,
476 we just mark that the registers need updating. */
477 lwp
->arch_private
->debug_registers_changed
= 1;
479 /* If the lwp isn't stopped, force it to momentarily pause, so
480 we can update its debug registers. */
482 linux_stop_lwp (lwp
);
488 /* Update the inferior's debug register REGNUM from STATE. */
491 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
493 /* Only update the threads of this process. */
494 int pid
= pid_of (get_thread_lwp (current_inferior
));
496 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
497 fatal ("Invalid debug register %d", regnum
);
499 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
502 /* Return the inferior's debug register REGNUM. */
505 i386_dr_low_get_addr (int regnum
)
507 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
508 ptid_t ptid
= ptid_of (lwp
);
510 /* DR6 and DR7 are retrieved with some other way. */
511 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
513 return x86_linux_dr_get (ptid
, regnum
);
516 /* Update the inferior's DR7 debug control register from STATE. */
519 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
521 /* Only update the threads of this process. */
522 int pid
= pid_of (get_thread_lwp (current_inferior
));
524 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
527 /* Return the inferior's DR7 debug control register. */
530 i386_dr_low_get_control (void)
532 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
533 ptid_t ptid
= ptid_of (lwp
);
535 return x86_linux_dr_get (ptid
, DR_CONTROL
);
538 /* Get the value of the DR6 debug status register from the inferior
539 and record it in STATE. */
542 i386_dr_low_get_status (void)
544 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
545 ptid_t ptid
= ptid_of (lwp
);
547 return x86_linux_dr_get (ptid
, DR_STATUS
);
550 /* Breakpoint/Watchpoint support. */
553 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
555 struct process_info
*proc
= current_process ();
562 ret
= prepare_to_access_memory ();
565 ret
= set_gdb_breakpoint_at (addr
);
566 done_accessing_memory ();
572 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
581 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
583 struct process_info
*proc
= current_process ();
590 ret
= prepare_to_access_memory ();
593 ret
= delete_gdb_breakpoint_at (addr
);
594 done_accessing_memory ();
600 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
609 x86_stopped_by_watchpoint (void)
611 struct process_info
*proc
= current_process ();
612 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
616 x86_stopped_data_address (void)
618 struct process_info
*proc
= current_process ();
620 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
626 /* Called when a new process is created. */
628 static struct arch_process_info
*
629 x86_linux_new_process (void)
631 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
633 i386_low_init_dregs (&info
->debug_reg_state
);
638 /* Called when a new thread is detected. */
640 static struct arch_lwp_info
*
641 x86_linux_new_thread (void)
643 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
645 info
->debug_registers_changed
= 1;
650 /* Called when resuming a thread.
651 If the debug regs have changed, update the thread's copies. */
654 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
656 ptid_t ptid
= ptid_of (lwp
);
658 if (lwp
->arch_private
->debug_registers_changed
)
661 int pid
= ptid_get_pid (ptid
);
662 struct process_info
*proc
= find_process_pid (pid
);
663 struct i386_debug_reg_state
*state
664 = &proc
->private->arch_private
->debug_reg_state
;
666 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
667 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
669 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
671 lwp
->arch_private
->debug_registers_changed
= 0;
674 if (lwp
->stopped_by_watchpoint
)
675 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* Pad the union out to the kernel's fixed 128-byte siginfo size
       (minus the three leading ints).  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
789 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
791 memset (to
, 0, sizeof (*to
));
793 to
->si_signo
= from
->si_signo
;
794 to
->si_errno
= from
->si_errno
;
795 to
->si_code
= from
->si_code
;
797 if (to
->si_code
== SI_TIMER
)
799 to
->cpt_si_timerid
= from
->si_timerid
;
800 to
->cpt_si_overrun
= from
->si_overrun
;
801 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
803 else if (to
->si_code
== SI_USER
)
805 to
->cpt_si_pid
= from
->si_pid
;
806 to
->cpt_si_uid
= from
->si_uid
;
808 else if (to
->si_code
< 0)
810 to
->cpt_si_pid
= from
->si_pid
;
811 to
->cpt_si_uid
= from
->si_uid
;
812 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
816 switch (to
->si_signo
)
819 to
->cpt_si_pid
= from
->si_pid
;
820 to
->cpt_si_uid
= from
->si_uid
;
821 to
->cpt_si_status
= from
->si_status
;
822 to
->cpt_si_utime
= from
->si_utime
;
823 to
->cpt_si_stime
= from
->si_stime
;
829 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
832 to
->cpt_si_band
= from
->si_band
;
833 to
->cpt_si_fd
= from
->si_fd
;
836 to
->cpt_si_pid
= from
->si_pid
;
837 to
->cpt_si_uid
= from
->si_uid
;
838 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
845 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
847 memset (to
, 0, sizeof (*to
));
849 to
->si_signo
= from
->si_signo
;
850 to
->si_errno
= from
->si_errno
;
851 to
->si_code
= from
->si_code
;
853 if (to
->si_code
== SI_TIMER
)
855 to
->si_timerid
= from
->cpt_si_timerid
;
856 to
->si_overrun
= from
->cpt_si_overrun
;
857 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
859 else if (to
->si_code
== SI_USER
)
861 to
->si_pid
= from
->cpt_si_pid
;
862 to
->si_uid
= from
->cpt_si_uid
;
864 else if (to
->si_code
< 0)
866 to
->si_pid
= from
->cpt_si_pid
;
867 to
->si_uid
= from
->cpt_si_uid
;
868 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
872 switch (to
->si_signo
)
875 to
->si_pid
= from
->cpt_si_pid
;
876 to
->si_uid
= from
->cpt_si_uid
;
877 to
->si_status
= from
->cpt_si_status
;
878 to
->si_utime
= from
->cpt_si_utime
;
879 to
->si_stime
= from
->cpt_si_stime
;
885 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
888 to
->si_band
= from
->cpt_si_band
;
889 to
->si_fd
= from
->cpt_si_fd
;
892 to
->si_pid
= from
->cpt_si_pid
;
893 to
->si_uid
= from
->cpt_si_uid
;
894 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
900 #endif /* __x86_64__ */
902 /* Convert a native/host siginfo object, into/from the siginfo in the
903 layout of the inferiors' architecture. Returns true if any
904 conversion was done; false otherwise. If DIRECTION is 1, then copy
905 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
909 x86_siginfo_fixup (struct siginfo
*native
, void *inf
, int direction
)
912 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
913 if (register_size (0) == 4)
915 if (sizeof (struct siginfo
) != sizeof (compat_siginfo_t
))
916 fatal ("unexpected difference in siginfo");
919 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
921 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
932 /* Update gdbserver_xmltarget. */
935 x86_linux_update_xmltarget (void)
938 struct regset_info
*regset
;
939 static unsigned long long xcr0
;
940 static int have_ptrace_getregset
= -1;
941 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
942 static int have_ptrace_getfpxregs
= -1;
945 if (!current_inferior
)
948 /* Before changing the register cache internal layout or the target
949 regsets, flush the contents of the current valid caches back to
951 regcache_invalidate ();
953 pid
= pid_of (get_thread_lwp (current_inferior
));
955 if (num_xmm_registers
== 8)
956 init_registers_i386_linux ();
958 init_registers_amd64_linux ();
961 # ifdef HAVE_PTRACE_GETFPXREGS
962 if (have_ptrace_getfpxregs
== -1)
964 elf_fpxregset_t fpxregs
;
966 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
968 have_ptrace_getfpxregs
= 0;
969 x86_xcr0
= I386_XSTATE_X87_MASK
;
971 /* Disable PTRACE_GETFPXREGS. */
972 for (regset
= target_regsets
;
973 regset
->fill_function
!= NULL
; regset
++)
974 if (regset
->get_request
== PTRACE_GETFPXREGS
)
981 have_ptrace_getfpxregs
= 1;
984 if (!have_ptrace_getfpxregs
)
986 init_registers_i386_mmx_linux ();
990 init_registers_i386_linux ();
998 if (num_xmm_registers
== 8)
999 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1001 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1003 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1006 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1011 /* Check if XSAVE extended state is supported. */
1012 if (have_ptrace_getregset
== -1)
1014 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1017 iov
.iov_base
= xstateregs
;
1018 iov
.iov_len
= sizeof (xstateregs
);
1020 /* Check if PTRACE_GETREGSET works. */
1021 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1024 have_ptrace_getregset
= 0;
1028 have_ptrace_getregset
= 1;
1030 /* Get XCR0 from XSAVE extended state at byte 464. */
1031 xcr0
= xstateregs
[464 / sizeof (long long)];
1033 /* Use PTRACE_GETREGSET if it is available. */
1034 for (regset
= target_regsets
;
1035 regset
->fill_function
!= NULL
; regset
++)
1036 if (regset
->get_request
== PTRACE_GETREGSET
)
1037 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1038 else if (regset
->type
!= GENERAL_REGS
)
1042 if (have_ptrace_getregset
)
1044 /* AVX is the highest feature we support. */
1045 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1050 /* I386 has 8 xmm regs. */
1051 if (num_xmm_registers
== 8)
1052 init_registers_i386_avx_linux ();
1054 init_registers_amd64_avx_linux ();
1056 init_registers_i386_avx_linux ();
1062 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1063 PTRACE_GETREGSET. */
1066 x86_linux_process_qsupported (const char *query
)
1068 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1069 with "i386" in qSupported query, it supports x86 XML target
1072 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1074 char *copy
= xstrdup (query
+ 13);
1077 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1079 if (strcmp (p
, "i386") == 0)
1089 x86_linux_update_xmltarget ();
1092 /* Initialize gdbserver for the architecture of the inferior. */
1095 x86_arch_setup (void)
1098 int pid
= pid_of (get_thread_lwp (current_inferior
));
1099 char *file
= linux_child_pid_to_exec_file (pid
);
1100 int use_64bit
= elf_64_file_p (file
);
1106 /* This can only happen if /proc/<pid>/exe is unreadable,
1107 but "that can't happen" if we've gotten this far.
1108 Fall through and assume this is a 32-bit program. */
1112 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1113 the_low_target
.num_regs
= -1;
1114 the_low_target
.regmap
= NULL
;
1115 the_low_target
.cannot_fetch_register
= NULL
;
1116 the_low_target
.cannot_store_register
= NULL
;
1118 /* Amd64 has 16 xmm regs. */
1119 num_xmm_registers
= 16;
1121 x86_linux_update_xmltarget ();
1126 /* Ok we have a 32-bit inferior. */
1128 the_low_target
.num_regs
= I386_NUM_REGS
;
1129 the_low_target
.regmap
= i386_regmap
;
1130 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1131 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1133 /* I386 has 8 xmm regs. */
1134 num_xmm_registers
= 8;
1136 x86_linux_update_xmltarget ();
1140 x86_supports_tracepoints (void)
1146 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1148 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of space-separated hex byte values (e.g.
   "48 89 e6"), into BUF.  Returns the number of bytes written.  */
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul consumed nothing: end of the opcode string.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1174 /* Build a jump pad that saves registers and calls a collection
1175 function. Writes a jump instruction to the jump pad to
1176 JJUMPAD_INSN. The caller is responsible to write it in at the
1177 tracepoint address. */
1180 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1181 CORE_ADDR collector
,
1184 CORE_ADDR
*jump_entry
,
1185 unsigned char *jjump_pad_insn
,
1186 ULONGEST
*jjump_pad_insn_size
,
1187 CORE_ADDR
*adjusted_insn_addr
,
1188 CORE_ADDR
*adjusted_insn_addr_end
)
1190 unsigned char buf
[40];
1192 CORE_ADDR buildaddr
= *jump_entry
;
1194 /* Build the jump pad. */
1196 /* First, do tracepoint data collection. Save registers. */
1198 /* Need to ensure stack pointer saved first. */
1199 buf
[i
++] = 0x54; /* push %rsp */
1200 buf
[i
++] = 0x55; /* push %rbp */
1201 buf
[i
++] = 0x57; /* push %rdi */
1202 buf
[i
++] = 0x56; /* push %rsi */
1203 buf
[i
++] = 0x52; /* push %rdx */
1204 buf
[i
++] = 0x51; /* push %rcx */
1205 buf
[i
++] = 0x53; /* push %rbx */
1206 buf
[i
++] = 0x50; /* push %rax */
1207 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1208 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1209 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1210 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1211 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1212 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1213 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1214 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1215 buf
[i
++] = 0x9c; /* pushfq */
1216 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1218 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1219 i
+= sizeof (unsigned long);
1220 buf
[i
++] = 0x57; /* push %rdi */
1221 append_insns (&buildaddr
, i
, buf
);
1223 /* Stack space for the collecting_t object. */
1225 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1226 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1227 memcpy (buf
+ i
, &tpoint
, 8);
1229 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1230 i
+= push_opcode (&buf
[i
],
1231 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1232 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1233 append_insns (&buildaddr
, i
, buf
);
1237 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1238 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1240 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1241 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1242 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1243 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1244 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1245 append_insns (&buildaddr
, i
, buf
);
1247 /* Set up the gdb_collect call. */
1248 /* At this point, (stack pointer + 0x18) is the base of our saved
1252 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1253 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1255 /* tpoint address may be 64-bit wide. */
1256 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1257 memcpy (buf
+ i
, &tpoint
, 8);
1259 append_insns (&buildaddr
, i
, buf
);
1261 /* The collector function being in the shared library, may be
1262 >31-bits away off the jump pad. */
1264 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1265 memcpy (buf
+ i
, &collector
, 8);
1267 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1268 append_insns (&buildaddr
, i
, buf
);
1270 /* Clear the spin-lock. */
1272 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1273 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1274 memcpy (buf
+ i
, &lockaddr
, 8);
1276 append_insns (&buildaddr
, i
, buf
);
1278 /* Remove stack that had been used for the collect_t object. */
1280 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1281 append_insns (&buildaddr
, i
, buf
);
1283 /* Restore register state. */
1285 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1289 buf
[i
++] = 0x9d; /* popfq */
1290 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1291 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1292 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1293 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1294 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1295 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1296 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1297 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1298 buf
[i
++] = 0x58; /* pop %rax */
1299 buf
[i
++] = 0x5b; /* pop %rbx */
1300 buf
[i
++] = 0x59; /* pop %rcx */
1301 buf
[i
++] = 0x5a; /* pop %rdx */
1302 buf
[i
++] = 0x5e; /* pop %rsi */
1303 buf
[i
++] = 0x5f; /* pop %rdi */
1304 buf
[i
++] = 0x5d; /* pop %rbp */
1305 buf
[i
++] = 0x5c; /* pop %rsp */
1306 append_insns (&buildaddr
, i
, buf
);
1308 /* Now, adjust the original instruction to execute in the jump
1310 *adjusted_insn_addr
= buildaddr
;
1311 relocate_instruction (&buildaddr
, tpaddr
);
1312 *adjusted_insn_addr_end
= buildaddr
;
1314 /* Finally, write a jump back to the program. */
1315 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1316 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1317 memcpy (buf
+ 1, &offset
, 4);
1318 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1320 /* The jump pad is now built. Wire in a jump to our jump pad. This
1321 is always done last (by our caller actually), so that we can
1322 install fast tracepoints with threads running. This relies on
1323 the agent's atomic write support. */
1324 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1325 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1326 memcpy (buf
+ 1, &offset
, 4);
1327 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1328 *jjump_pad_insn_size
= sizeof (jump_insn
);
1330 /* Return the end address of our pad. */
1331 *jump_entry
= buildaddr
;
1336 #endif /* __x86_64__ */
1338 /* Build a jump pad that saves registers and calls a collection
1339 function. Writes a jump instruction to the jump pad to
1340 JJUMPAD_INSN. The caller is responsible to write it in at the
1341 tracepoint address. */
1344 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1345 CORE_ADDR collector
,
1348 CORE_ADDR
*jump_entry
,
1349 unsigned char *jjump_pad_insn
,
1350 ULONGEST
*jjump_pad_insn_size
,
1351 CORE_ADDR
*adjusted_insn_addr
,
1352 CORE_ADDR
*adjusted_insn_addr_end
)
1354 unsigned char buf
[0x100];
1356 CORE_ADDR buildaddr
= *jump_entry
;
1358 /* Build the jump pad. */
1360 /* First, do tracepoint data collection. Save registers. */
1362 buf
[i
++] = 0x60; /* pushad */
1363 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1364 *((int *)(buf
+ i
)) = (int) tpaddr
;
1366 buf
[i
++] = 0x9c; /* pushf */
1367 buf
[i
++] = 0x1e; /* push %ds */
1368 buf
[i
++] = 0x06; /* push %es */
1369 buf
[i
++] = 0x0f; /* push %fs */
1371 buf
[i
++] = 0x0f; /* push %gs */
1373 buf
[i
++] = 0x16; /* push %ss */
1374 buf
[i
++] = 0x0e; /* push %cs */
1375 append_insns (&buildaddr
, i
, buf
);
1377 /* Stack space for the collecting_t object. */
1379 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1381 /* Build the object. */
1382 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1383 memcpy (buf
+ i
, &tpoint
, 4);
1385 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1387 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1388 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1389 append_insns (&buildaddr
, i
, buf
);
1391 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1392 If we cared for it, this could be using xchg alternatively. */
1395 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1396 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1398 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1400 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1401 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1402 append_insns (&buildaddr
, i
, buf
);
1405 /* Set up arguments to the gdb_collect call. */
1407 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1408 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1409 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1410 append_insns (&buildaddr
, i
, buf
);
1413 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1414 append_insns (&buildaddr
, i
, buf
);
1417 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1418 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1420 append_insns (&buildaddr
, i
, buf
);
1422 buf
[0] = 0xe8; /* call <reladdr> */
1423 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1424 memcpy (buf
+ 1, &offset
, 4);
1425 append_insns (&buildaddr
, 5, buf
);
1426 /* Clean up after the call. */
1427 buf
[0] = 0x83; /* add $0x8,%esp */
1430 append_insns (&buildaddr
, 3, buf
);
1433 /* Clear the spin-lock. This would need the LOCK prefix on older
1436 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1437 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1438 memcpy (buf
+ i
, &lockaddr
, 4);
1440 append_insns (&buildaddr
, i
, buf
);
1443 /* Remove stack that had been used for the collect_t object. */
1445 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1446 append_insns (&buildaddr
, i
, buf
);
1449 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1452 buf
[i
++] = 0x17; /* pop %ss */
1453 buf
[i
++] = 0x0f; /* pop %gs */
1455 buf
[i
++] = 0x0f; /* pop %fs */
1457 buf
[i
++] = 0x07; /* pop %es */
1458 buf
[i
++] = 0x1f; /* pop %de */
1459 buf
[i
++] = 0x9d; /* popf */
1460 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1463 buf
[i
++] = 0x61; /* popad */
1464 append_insns (&buildaddr
, i
, buf
);
1466 /* Now, adjust the original instruction to execute in the jump
1468 *adjusted_insn_addr
= buildaddr
;
1469 relocate_instruction (&buildaddr
, tpaddr
);
1470 *adjusted_insn_addr_end
= buildaddr
;
1472 /* Write the jump back to the program. */
1473 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1474 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1475 memcpy (buf
+ 1, &offset
, 4);
1476 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1478 /* The jump pad is now built. Wire in a jump to our jump pad. This
1479 is always done last (by our caller actually), so that we can
1480 install fast tracepoints with threads running. This relies on
1481 the agent's atomic write support. */
1482 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1483 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1484 memcpy (buf
+ 1, &offset
, 4);
1485 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1486 *jjump_pad_insn_size
= sizeof (jump_insn
);
1488 /* Return the end address of our pad. */
1489 *jump_entry
= buildaddr
;
1495 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1496 CORE_ADDR collector
,
1499 CORE_ADDR
*jump_entry
,
1500 unsigned char *jjump_pad_insn
,
1501 ULONGEST
*jjump_pad_insn_size
,
1502 CORE_ADDR
*adjusted_insn_addr
,
1503 CORE_ADDR
*adjusted_insn_addr_end
)
1506 if (register_size (0) == 8)
1507 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1508 collector
, lockaddr
,
1509 orig_size
, jump_entry
,
1511 jjump_pad_insn_size
,
1513 adjusted_insn_addr_end
);
1516 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1517 collector
, lockaddr
,
1518 orig_size
, jump_entry
,
1520 jjump_pad_insn_size
,
1522 adjusted_insn_addr_end
);
1526 add_insns (unsigned char *start
, int len
)
1528 CORE_ADDR buildaddr
= current_insn_ptr
;
1531 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1532 len
, paddress (buildaddr
));
1534 append_insns (&buildaddr
, len
, start
);
1535 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit templates emitted from a 64-bit build must be assembled in
   32-bit mode, hence the .code32/.code64 bracketing.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1579 amd64_emit_prologue (void)
1581 EMIT_ASM (amd64_prologue
,
1583 "movq %rsp,%rbp\n\t"
1584 "sub $0x20,%rsp\n\t"
1585 "movq %rdi,-8(%rbp)\n\t"
1586 "movq %rsi,-16(%rbp)");
1591 amd64_emit_epilogue (void)
1593 EMIT_ASM (amd64_epilogue
,
1594 "movq -16(%rbp),%rdi\n\t"
1595 "movq %rax,(%rdi)\n\t"
1602 amd64_emit_add (void)
1604 EMIT_ASM (amd64_add
,
1605 "add (%rsp),%rax\n\t"
1606 "lea 0x8(%rsp),%rsp");
1610 amd64_emit_sub (void)
1612 EMIT_ASM (amd64_sub
,
1613 "sub %rax,(%rsp)\n\t"
1618 amd64_emit_mul (void)
1624 amd64_emit_lsh (void)
1630 amd64_emit_rsh_signed (void)
1636 amd64_emit_rsh_unsigned (void)
1642 amd64_emit_ext (int arg
)
1647 EMIT_ASM (amd64_ext_8
,
1653 EMIT_ASM (amd64_ext_16
,
1658 EMIT_ASM (amd64_ext_32
,
1667 amd64_emit_log_not (void)
1669 EMIT_ASM (amd64_log_not
,
1670 "test %rax,%rax\n\t"
1676 amd64_emit_bit_and (void)
1678 EMIT_ASM (amd64_and
,
1679 "and (%rsp),%rax\n\t"
1680 "lea 0x8(%rsp),%rsp");
1684 amd64_emit_bit_or (void)
1687 "or (%rsp),%rax\n\t"
1688 "lea 0x8(%rsp),%rsp");
1692 amd64_emit_bit_xor (void)
1694 EMIT_ASM (amd64_xor
,
1695 "xor (%rsp),%rax\n\t"
1696 "lea 0x8(%rsp),%rsp");
1700 amd64_emit_bit_not (void)
1702 EMIT_ASM (amd64_bit_not
,
1703 "xorq $0xffffffffffffffff,%rax");
1707 amd64_emit_equal (void)
1709 EMIT_ASM (amd64_equal
,
1710 "cmp %rax,(%rsp)\n\t"
1711 "je .Lamd64_equal_true\n\t"
1713 "jmp .Lamd64_equal_end\n\t"
1714 ".Lamd64_equal_true:\n\t"
1716 ".Lamd64_equal_end:\n\t"
1717 "lea 0x8(%rsp),%rsp");
1721 amd64_emit_less_signed (void)
1723 EMIT_ASM (amd64_less_signed
,
1724 "cmp %rax,(%rsp)\n\t"
1725 "jl .Lamd64_less_signed_true\n\t"
1727 "jmp .Lamd64_less_signed_end\n\t"
1728 ".Lamd64_less_signed_true:\n\t"
1730 ".Lamd64_less_signed_end:\n\t"
1731 "lea 0x8(%rsp),%rsp");
1735 amd64_emit_less_unsigned (void)
1737 EMIT_ASM (amd64_less_unsigned
,
1738 "cmp %rax,(%rsp)\n\t"
1739 "jb .Lamd64_less_unsigned_true\n\t"
1741 "jmp .Lamd64_less_unsigned_end\n\t"
1742 ".Lamd64_less_unsigned_true:\n\t"
1744 ".Lamd64_less_unsigned_end:\n\t"
1745 "lea 0x8(%rsp),%rsp");
1749 amd64_emit_ref (int size
)
1754 EMIT_ASM (amd64_ref1
,
1758 EMIT_ASM (amd64_ref2
,
1762 EMIT_ASM (amd64_ref4
,
1763 "movl (%rax),%eax");
1766 EMIT_ASM (amd64_ref8
,
1767 "movq (%rax),%rax");
1773 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1775 EMIT_ASM (amd64_if_goto
,
1779 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1787 amd64_emit_goto (int *offset_p
, int *size_p
)
1789 EMIT_ASM (amd64_goto
,
1790 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1798 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1800 int diff
= (to
- (from
+ size
));
1801 unsigned char buf
[sizeof (int)];
1809 memcpy (buf
, &diff
, sizeof (int));
1810 write_inferior_memory (from
, buf
, sizeof (int));
1814 amd64_emit_const (LONGEST num
)
1816 unsigned char buf
[16];
1818 CORE_ADDR buildaddr
= current_insn_ptr
;
1821 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1822 memcpy (&buf
[i
], &num
, sizeof (num
));
1824 append_insns (&buildaddr
, i
, buf
);
1825 current_insn_ptr
= buildaddr
;
1829 amd64_emit_call (CORE_ADDR fn
)
1831 unsigned char buf
[16];
1833 CORE_ADDR buildaddr
;
1836 /* The destination function being in the shared library, may be
1837 >31-bits away off the compiled code pad. */
1839 buildaddr
= current_insn_ptr
;
1841 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1845 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1847 /* Offset is too large for a call. Use callq, but that requires
1848 a register, so avoid it if possible. Use r10, since it is
1849 call-clobbered, we don't have to push/pop it. */
1850 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1852 memcpy (buf
+ i
, &fn
, 8);
1854 buf
[i
++] = 0xff; /* callq *%r10 */
1859 int offset32
= offset64
; /* we know we can't overflow here. */
1860 memcpy (buf
+ i
, &offset32
, 4);
1864 append_insns (&buildaddr
, i
, buf
);
1865 current_insn_ptr
= buildaddr
;
1869 amd64_emit_reg (int reg
)
1871 unsigned char buf
[16];
1873 CORE_ADDR buildaddr
;
1875 /* Assume raw_regs is still in %rdi. */
1876 buildaddr
= current_insn_ptr
;
1878 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1879 memcpy (&buf
[i
], ®
, sizeof (reg
));
1881 append_insns (&buildaddr
, i
, buf
);
1882 current_insn_ptr
= buildaddr
;
1883 amd64_emit_call (get_raw_reg_func_addr ());
1887 amd64_emit_pop (void)
1889 EMIT_ASM (amd64_pop
,
1894 amd64_emit_stack_flush (void)
1896 EMIT_ASM (amd64_stack_flush
,
1901 amd64_emit_zero_ext (int arg
)
1906 EMIT_ASM (amd64_zero_ext_8
,
1910 EMIT_ASM (amd64_zero_ext_16
,
1911 "and $0xffff,%rax");
1914 EMIT_ASM (amd64_zero_ext_32
,
1915 "mov $0xffffffff,%rcx\n\t"
1924 amd64_emit_swap (void)
1926 EMIT_ASM (amd64_swap
,
1933 amd64_emit_stack_adjust (int n
)
1935 unsigned char buf
[16];
1937 CORE_ADDR buildaddr
= current_insn_ptr
;
1940 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1944 /* This only handles adjustments up to 16, but we don't expect any more. */
1946 append_insns (&buildaddr
, i
, buf
);
1947 current_insn_ptr
= buildaddr
;
1950 /* FN's prototype is `LONGEST(*fn)(int)'. */
1953 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1955 unsigned char buf
[16];
1957 CORE_ADDR buildaddr
;
1959 buildaddr
= current_insn_ptr
;
1961 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1962 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1964 append_insns (&buildaddr
, i
, buf
);
1965 current_insn_ptr
= buildaddr
;
1966 amd64_emit_call (fn
);
1969 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1972 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
1974 unsigned char buf
[16];
1976 CORE_ADDR buildaddr
;
1978 buildaddr
= current_insn_ptr
;
1980 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1981 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1983 append_insns (&buildaddr
, i
, buf
);
1984 current_insn_ptr
= buildaddr
;
1985 EMIT_ASM (amd64_void_call_2_a
,
1986 /* Save away a copy of the stack top. */
1988 /* Also pass top as the second argument. */
1990 amd64_emit_call (fn
);
1991 EMIT_ASM (amd64_void_call_2_b
,
1992 /* Restore the stack top, %rax may have been trashed. */
1996 struct emit_ops amd64_emit_ops
=
1998 amd64_emit_prologue
,
1999 amd64_emit_epilogue
,
2004 amd64_emit_rsh_signed
,
2005 amd64_emit_rsh_unsigned
,
2013 amd64_emit_less_signed
,
2014 amd64_emit_less_unsigned
,
2018 amd64_write_goto_address
,
2023 amd64_emit_stack_flush
,
2024 amd64_emit_zero_ext
,
2026 amd64_emit_stack_adjust
,
2027 amd64_emit_int_call_1
,
2028 amd64_emit_void_call_2
2031 #endif /* __x86_64__ */
2034 i386_emit_prologue (void)
2036 EMIT_ASM32 (i386_prologue
,
2039 /* At this point, the raw regs base address is at 8(%ebp), and the
2040 value pointer is at 12(%ebp). */
2044 i386_emit_epilogue (void)
2046 EMIT_ASM32 (i386_epilogue
,
2047 "mov 12(%ebp),%ecx\n\t"
2048 "mov %eax,(%ecx)\n\t"
2049 "mov %ebx,0x4(%ecx)\n\t"
2056 i386_emit_add (void)
2058 EMIT_ASM32 (i386_add
,
2059 "add (%esp),%eax\n\t"
2060 "adc 0x4(%esp),%ebx\n\t"
2061 "lea 0x8(%esp),%esp");
2065 i386_emit_sub (void)
2067 EMIT_ASM32 (i386_sub
,
2068 "subl %eax,(%esp)\n\t"
2069 "sbbl %ebx,4(%esp)\n\t"
2075 i386_emit_mul (void)
2081 i386_emit_lsh (void)
2087 i386_emit_rsh_signed (void)
2093 i386_emit_rsh_unsigned (void)
2099 i386_emit_ext (int arg
)
2104 EMIT_ASM32 (i386_ext_8
,
2107 "movl %eax,%ebx\n\t"
2111 EMIT_ASM32 (i386_ext_16
,
2113 "movl %eax,%ebx\n\t"
2117 EMIT_ASM32 (i386_ext_32
,
2118 "movl %eax,%ebx\n\t"
2127 i386_emit_log_not (void)
2129 EMIT_ASM32 (i386_log_not
,
2131 "test %eax,%eax\n\t"
2138 i386_emit_bit_and (void)
2140 EMIT_ASM32 (i386_and
,
2141 "and (%esp),%eax\n\t"
2142 "and 0x4(%esp),%ebx\n\t"
2143 "lea 0x8(%esp),%esp");
2147 i386_emit_bit_or (void)
2149 EMIT_ASM32 (i386_or
,
2150 "or (%esp),%eax\n\t"
2151 "or 0x4(%esp),%ebx\n\t"
2152 "lea 0x8(%esp),%esp");
2156 i386_emit_bit_xor (void)
2158 EMIT_ASM32 (i386_xor
,
2159 "xor (%esp),%eax\n\t"
2160 "xor 0x4(%esp),%ebx\n\t"
2161 "lea 0x8(%esp),%esp");
2165 i386_emit_bit_not (void)
2167 EMIT_ASM32 (i386_bit_not
,
2168 "xor $0xffffffff,%eax\n\t"
2169 "xor $0xffffffff,%ebx\n\t");
2173 i386_emit_equal (void)
2175 EMIT_ASM32 (i386_equal
,
2176 "cmpl %ebx,4(%esp)\n\t"
2177 "jne .Li386_equal_false\n\t"
2178 "cmpl %eax,(%esp)\n\t"
2179 "je .Li386_equal_true\n\t"
2180 ".Li386_equal_false:\n\t"
2182 "jmp .Li386_equal_end\n\t"
2183 ".Li386_equal_true:\n\t"
2185 ".Li386_equal_end:\n\t"
2187 "lea 0x8(%esp),%esp");
2191 i386_emit_less_signed (void)
2193 EMIT_ASM32 (i386_less_signed
,
2194 "cmpl %ebx,4(%esp)\n\t"
2195 "jl .Li386_less_signed_true\n\t"
2196 "jne .Li386_less_signed_false\n\t"
2197 "cmpl %eax,(%esp)\n\t"
2198 "jl .Li386_less_signed_true\n\t"
2199 ".Li386_less_signed_false:\n\t"
2201 "jmp .Li386_less_signed_end\n\t"
2202 ".Li386_less_signed_true:\n\t"
2204 ".Li386_less_signed_end:\n\t"
2206 "lea 0x8(%esp),%esp");
2210 i386_emit_less_unsigned (void)
2212 EMIT_ASM32 (i386_less_unsigned
,
2213 "cmpl %ebx,4(%esp)\n\t"
2214 "jb .Li386_less_unsigned_true\n\t"
2215 "jne .Li386_less_unsigned_false\n\t"
2216 "cmpl %eax,(%esp)\n\t"
2217 "jb .Li386_less_unsigned_true\n\t"
2218 ".Li386_less_unsigned_false:\n\t"
2220 "jmp .Li386_less_unsigned_end\n\t"
2221 ".Li386_less_unsigned_true:\n\t"
2223 ".Li386_less_unsigned_end:\n\t"
2225 "lea 0x8(%esp),%esp");
2229 i386_emit_ref (int size
)
2234 EMIT_ASM32 (i386_ref1
,
2238 EMIT_ASM32 (i386_ref2
,
2242 EMIT_ASM32 (i386_ref4
,
2243 "movl (%eax),%eax");
2246 EMIT_ASM32 (i386_ref8
,
2247 "movl 4(%eax),%ebx\n\t"
2248 "movl (%eax),%eax");
2254 i386_emit_if_goto (int *offset_p
, int *size_p
)
2256 EMIT_ASM32 (i386_if_goto
,
2262 /* Don't trust the assembler to choose the right jump */
2263 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2266 *offset_p
= 11; /* be sure that this matches the sequence above */
2272 i386_emit_goto (int *offset_p
, int *size_p
)
2274 EMIT_ASM32 (i386_goto
,
2275 /* Don't trust the assembler to choose the right jump */
2276 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2284 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2286 int diff
= (to
- (from
+ size
));
2287 unsigned char buf
[sizeof (int)];
2289 /* We're only doing 4-byte sizes at the moment. */
2296 memcpy (buf
, &diff
, sizeof (int));
2297 write_inferior_memory (from
, buf
, sizeof (int));
2301 i386_emit_const (LONGEST num
)
2303 unsigned char buf
[16];
2305 CORE_ADDR buildaddr
= current_insn_ptr
;
2308 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2309 lo
= num
& 0xffffffff;
2310 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2312 hi
= ((num
>> 32) & 0xffffffff);
2315 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2316 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2321 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2323 append_insns (&buildaddr
, i
, buf
);
2324 current_insn_ptr
= buildaddr
;
2328 i386_emit_call (CORE_ADDR fn
)
2330 unsigned char buf
[16];
2332 CORE_ADDR buildaddr
;
2334 buildaddr
= current_insn_ptr
;
2336 buf
[i
++] = 0xe8; /* call <reladdr> */
2337 offset
= ((int) fn
) - (buildaddr
+ 5);
2338 memcpy (buf
+ 1, &offset
, 4);
2339 append_insns (&buildaddr
, 5, buf
);
2340 current_insn_ptr
= buildaddr
;
2344 i386_emit_reg (int reg
)
2346 unsigned char buf
[16];
2348 CORE_ADDR buildaddr
;
2350 EMIT_ASM32 (i386_reg_a
,
2352 buildaddr
= current_insn_ptr
;
2354 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2355 memcpy (&buf
[i
], ®
, sizeof (reg
));
2357 append_insns (&buildaddr
, i
, buf
);
2358 current_insn_ptr
= buildaddr
;
2359 EMIT_ASM32 (i386_reg_b
,
2360 "mov %eax,4(%esp)\n\t"
2361 "mov 8(%ebp),%eax\n\t"
2363 i386_emit_call (get_raw_reg_func_addr ());
2364 EMIT_ASM32 (i386_reg_c
,
2366 "lea 0x8(%esp),%esp");
2370 i386_emit_pop (void)
2372 EMIT_ASM32 (i386_pop
,
2378 i386_emit_stack_flush (void)
2380 EMIT_ASM32 (i386_stack_flush
,
2386 i386_emit_zero_ext (int arg
)
2391 EMIT_ASM32 (i386_zero_ext_8
,
2392 "and $0xff,%eax\n\t"
2396 EMIT_ASM32 (i386_zero_ext_16
,
2397 "and $0xffff,%eax\n\t"
2401 EMIT_ASM32 (i386_zero_ext_32
,
2410 i386_emit_swap (void)
2412 EMIT_ASM32 (i386_swap
,
2422 i386_emit_stack_adjust (int n
)
2424 unsigned char buf
[16];
2426 CORE_ADDR buildaddr
= current_insn_ptr
;
2429 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2433 append_insns (&buildaddr
, i
, buf
);
2434 current_insn_ptr
= buildaddr
;
2437 /* FN's prototype is `LONGEST(*fn)(int)'. */
2440 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2442 unsigned char buf
[16];
2444 CORE_ADDR buildaddr
;
2446 EMIT_ASM32 (i386_int_call_1_a
,
2447 /* Reserve a bit of stack space. */
2449 /* Put the one argument on the stack. */
2450 buildaddr
= current_insn_ptr
;
2452 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2455 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2457 append_insns (&buildaddr
, i
, buf
);
2458 current_insn_ptr
= buildaddr
;
2459 i386_emit_call (fn
);
2460 EMIT_ASM32 (i386_int_call_1_c
,
2462 "lea 0x8(%esp),%esp");
2465 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2468 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2470 unsigned char buf
[16];
2472 CORE_ADDR buildaddr
;
2474 EMIT_ASM32 (i386_void_call_2_a
,
2475 /* Preserve %eax only; we don't have to worry about %ebx. */
2477 /* Reserve a bit of stack space for arguments. */
2478 "sub $0x10,%esp\n\t"
2479 /* Copy "top" to the second argument position. (Note that
2480 we can't assume function won't scribble on its
2481 arguments, so don't try to restore from this.) */
2482 "mov %eax,4(%esp)\n\t"
2483 "mov %ebx,8(%esp)");
2484 /* Put the first argument on the stack. */
2485 buildaddr
= current_insn_ptr
;
2487 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2490 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2492 append_insns (&buildaddr
, i
, buf
);
2493 current_insn_ptr
= buildaddr
;
2494 i386_emit_call (fn
);
2495 EMIT_ASM32 (i386_void_call_2_b
,
2496 "lea 0x10(%esp),%esp\n\t"
2497 /* Restore original stack top. */
2501 struct emit_ops i386_emit_ops
=
2509 i386_emit_rsh_signed
,
2510 i386_emit_rsh_unsigned
,
2518 i386_emit_less_signed
,
2519 i386_emit_less_unsigned
,
2523 i386_write_goto_address
,
2528 i386_emit_stack_flush
,
2531 i386_emit_stack_adjust
,
2532 i386_emit_int_call_1
,
2533 i386_emit_void_call_2
2537 static struct emit_ops
*
2541 int use_64bit
= register_size (0) == 8;
2544 return &amd64_emit_ops
;
2547 return &i386_emit_ops
;
2550 /* This is initialized assuming an amd64 target.
2551 x86_arch_setup will correct it for i386 or amd64 targets. */
2553 struct linux_target_ops the_low_target
=
2569 x86_stopped_by_watchpoint
,
2570 x86_stopped_data_address
,
2571 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2572 native i386 case (no registers smaller than an xfer unit), and are not
2573 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2576 /* need to fix up i386 siginfo if host is amd64 */
2578 x86_linux_new_process
,
2579 x86_linux_new_thread
,
2580 x86_linux_prepare_to_resume
,
2581 x86_linux_process_qsupported
,
2582 x86_supports_tracepoints
,
2583 x86_get_thread_area
,
2584 x86_install_fast_tracepoint_jump_pad
,