X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=gdbserver%2Flinux-x86-low.cc;h=d2b55f6f0d2b9f027f192601087c90cda6afa461;hb=0d02e70b197c786f26175b9a73f94e01d14abdab;hp=8c9ab733ea88c7a684705a5355e892cf3eab3aa8;hpb=daca57a7de50f97a4e8df917447561617a0298b2;p=binutils-gdb.git diff --git a/gdbserver/linux-x86-low.cc b/gdbserver/linux-x86-low.cc index 8c9ab733ea8..d2b55f6f0d2 100644 --- a/gdbserver/linux-x86-low.cc +++ b/gdbserver/linux-x86-low.cc @@ -1,6 +1,6 @@ /* GNU/Linux/x86-64 specific low level interface, for the remote server for GDB. - Copyright (C) 2002-2020 Free Software Foundation, Inc. + Copyright (C) 2002-2022 Free Software Foundation, Inc. This file is part of GDB. @@ -48,9 +48,9 @@ #include "linux-x86-tdesc.h" #ifdef __x86_64__ -static struct target_desc *tdesc_amd64_linux_no_xml; +static target_desc_up tdesc_amd64_linux_no_xml; #endif -static struct target_desc *tdesc_i386_linux_no_xml; +static target_desc_up tdesc_i386_linux_no_xml; static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 }; @@ -58,13 +58,13 @@ static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 }; /* Backward compatibility for gdb without XML support. */ -static const char *xmltarget_i386_linux_no_xml = "@\ +static const char xmltarget_i386_linux_no_xml[] = "@\ i386\ GNU/Linux\ "; #ifdef __x86_64__ -static const char *xmltarget_amd64_linux_no_xml = "@\ +static const char xmltarget_amd64_linux_no_xml[] = "@\ i386:x86-64\ GNU/Linux\ "; @@ -100,12 +100,32 @@ class x86_target : public linux_process_target { public: - /* Update all the target description of all processes; a new GDB - connected, and it may or not support xml target descriptions. */ - void update_xmltarget (); - const regs_info *get_regs_info () override; + const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override; + + bool supports_z_point_type (char z_type) override; + + void process_qsupported (gdb::array_view features) override; + + bool supports_tracepoints () override; + + bool supports_fast_tracepoints () override; + + int install_fast_tracepoint_jump_pad + (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector, + CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry, + CORE_ADDR *trampoline, ULONGEST *trampoline_size, + unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size, + CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end, + char *err) override; + + int get_min_fast_tracepoint_insn_len () override; + + struct emit_ops *emit_ops () override; + + int get_ipa_tdesc_idx () override; + protected: void low_arch_setup () override; @@ -113,6 +133,60 @@ protected: bool low_cannot_fetch_register (int regno) override; bool low_cannot_store_register (int regno) override; + + bool low_supports_breakpoints () override; + + CORE_ADDR low_get_pc (regcache *regcache) override; + + void low_set_pc (regcache *regcache, CORE_ADDR newpc) override; + + int low_decr_pc_after_break () override; + + bool low_breakpoint_at (CORE_ADDR pc) override; + + int low_insert_point (raw_bkpt_type type, CORE_ADDR addr, + int size, raw_breakpoint *bp) override; + + int low_remove_point (raw_bkpt_type type, CORE_ADDR addr, + int size, raw_breakpoint *bp) override; + + bool low_stopped_by_watchpoint () override; + + CORE_ADDR low_stopped_data_address () override; + + /* collect_ptrace_register/supply_ptrace_register are not needed in the + native i386 case (no registers smaller than an xfer unit), and are not + used in the biarch case (HAVE_LINUX_USRREGS is not defined). 
*/ + + /* Need to fix up i386 siginfo if host is amd64. */ + bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf, + int direction) override; + + arch_process_info *low_new_process () override; + + void low_delete_process (arch_process_info *info) override; + + void low_new_thread (lwp_info *) override; + + void low_delete_thread (arch_lwp_info *) override; + + void low_new_fork (process_info *parent, process_info *child) override; + + void low_prepare_to_resume (lwp_info *lwp) override; + + int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override; + + bool low_supports_range_stepping () override; + + bool low_supports_catch_syscall () override; + + void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override; + +private: + + /* Update all the target description of all processes; a new GDB + connected, and it may or not support xml target descriptions. */ + void update_xmltarget (); }; /* The singleton target ops object. */ @@ -159,11 +233,7 @@ static const int x86_64_regmap[] = -1, -1, -1, -1, -1, -1, -1, -1, -1, ORIG_RAX * 8, -#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE 21 * 8, 22 * 8, -#else - -1, -1, -#endif -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */ -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */ -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */ @@ -261,8 +331,8 @@ ps_get_thread_area (struct ps_prochandle *ph, don't read anything from the address, and treat it as opaque; it's the address itself that we assume is unique per-thread. */ -static int -x86_get_thread_area (int lwpid, CORE_ADDR *addr) +int +x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr) { #ifdef __x86_64__ int use_64bit = is_64bit_tdesc (); @@ -327,6 +397,35 @@ x86_target::low_cannot_fetch_register (int regno) return regno >= I386_NUM_REGS; } +static void +collect_register_i386 (struct regcache *regcache, int regno, void *buf) +{ + collect_register (regcache, regno, buf); + +#ifdef __x86_64__ + /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the + space reserved in buf for the register is 8 bytes. Make sure the entire + reserved space is initialized. */ + + gdb_assert (register_size (regcache->tdesc, regno) == 4); + + if (regno == RAX) + { + /* Sign extend EAX value to avoid potential syscall restart + problems. + + See amd64_linux_collect_native_gregset() in + gdb/amd64-linux-nat.c for a detailed explanation. */ + *(int64_t *) buf = *(int32_t *) buf; + } + else + { + /* Zero-extend. */ + *(uint64_t *) buf = *(uint32_t *) buf; + } +#endif +} + static void x86_fill_gregset (struct regcache *regcache, void *buf) { @@ -339,47 +438,16 @@ x86_fill_gregset (struct regcache *regcache, void *buf) if (x86_64_regmap[i] != -1) collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]); -#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE - { - unsigned long base; - int lwpid = lwpid_of (current_thread); - - collect_register_by_name (regcache, "fs_base", &base); - ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS); - - collect_register_by_name (regcache, "gs_base", &base); - ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS); - } -#endif - return; } - - /* 32-bit inferior registers need to be zero-extended. - Callers would read uninitialized memory otherwise. 
*/ - memset (buf, 0x00, X86_64_USER_REGS * 8); #endif for (i = 0; i < I386_NUM_REGS; i++) - collect_register (regcache, i, ((char *) buf) + i386_regmap[i]); + collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]); - collect_register_by_name (regcache, "orig_eax", - ((char *) buf) + ORIG_EAX * REGSIZE); - -#ifdef __x86_64__ - /* Sign extend EAX value to avoid potential syscall restart - problems. - - See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c - for a detailed explanation. */ - if (register_size (regcache->tdesc, 0) == 4) - { - void *ptr = ((gdb_byte *) buf - + i386_regmap[find_regno (regcache->tdesc, "eax")]); - - *(int64_t *) ptr = *(int32_t *) ptr; - } -#endif + /* Handle ORIG_EAX, which is not in i386_regmap. */ + collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"), + ((char *) buf) + ORIG_EAX * REGSIZE); } static void @@ -394,18 +462,6 @@ x86_store_gregset (struct regcache *regcache, const void *buf) if (x86_64_regmap[i] != -1) supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]); -#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE - { - unsigned long base; - int lwpid = lwpid_of (current_thread); - - if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0) - supply_register_by_name (regcache, "fs_base", &base); - - if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0) - supply_register_by_name (regcache, "gs_base", &base); - } -#endif return; } #endif @@ -494,8 +550,14 @@ static struct regset_info x86_regsets[] = NULL_REGSET }; -static CORE_ADDR -x86_get_pc (struct regcache *regcache) +bool +x86_target::low_supports_breakpoints () +{ + return true; +} + +CORE_ADDR +x86_target::low_get_pc (regcache *regcache) { int use_64bit = register_size (regcache->tdesc, 0) == 8; @@ -515,8 +577,8 @@ x86_get_pc (struct regcache *regcache) } } -static void -x86_set_pc (struct regcache *regcache, CORE_ADDR pc) +void +x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc) { int use_64bit = register_size (regcache->tdesc, 0) == 8; @@ -533,20 +595,27 @@ x86_set_pc (struct regcache *regcache, CORE_ADDR pc) supply_register_by_name (regcache, "eip", &newpc); } } + +int +x86_target::low_decr_pc_after_break () +{ + return 1; +} + static const gdb_byte x86_breakpoint[] = { 0xCC }; #define x86_breakpoint_len 1 -static int -x86_breakpoint_at (CORE_ADDR pc) +bool +x86_target::low_breakpoint_at (CORE_ADDR pc) { unsigned char c; - the_target->read_memory (pc, &c, 1); + read_memory (pc, &c, 1); if (c == 0xCC) - return 1; + return true; - return 0; + return false; } /* Low-level function vector. */ @@ -562,8 +631,8 @@ struct x86_dr_low_type x86_dr_low = /* Breakpoint/Watchpoint support. 
*/ -static int -x86_supports_z_point_type (char z_type) +bool +x86_target::supports_z_point_type (char z_type) { switch (z_type) { @@ -571,15 +640,15 @@ x86_supports_z_point_type (char z_type) case Z_PACKET_HW_BP: case Z_PACKET_WRITE_WP: case Z_PACKET_ACCESS_WP: - return 1; + return true; default: - return 0; + return false; } } -static int -x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr, - int size, struct raw_breakpoint *bp) +int +x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr, + int size, raw_breakpoint *bp) { struct process_info *proc = current_process (); @@ -603,9 +672,9 @@ x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr, } } -static int -x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr, - int size, struct raw_breakpoint *bp) +int +x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr, + int size, raw_breakpoint *bp) { struct process_info *proc = current_process (); @@ -628,15 +697,15 @@ x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr, } } -static int -x86_stopped_by_watchpoint (void) +bool +x86_target::low_stopped_by_watchpoint () { struct process_info *proc = current_process (); return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state); } -static CORE_ADDR -x86_stopped_data_address (void) +CORE_ADDR +x86_target::low_stopped_data_address () { struct process_info *proc = current_process (); CORE_ADDR addr; @@ -648,8 +717,8 @@ x86_stopped_data_address (void) /* Called when a new process is created. */ -static struct arch_process_info * -x86_linux_new_process (void) +arch_process_info * +x86_target::low_new_process () { struct arch_process_info *info = XCNEW (struct arch_process_info); @@ -660,16 +729,30 @@ x86_linux_new_process (void) /* Called when a process is being deleted. */ -static void -x86_linux_delete_process (struct arch_process_info *info) +void +x86_target::low_delete_process (arch_process_info *info) { xfree (info); } -/* Target routine for linux_new_fork. */ +void +x86_target::low_new_thread (lwp_info *lwp) +{ + /* This comes from nat/. */ + x86_linux_new_thread (lwp); +} -static void -x86_linux_new_fork (struct process_info *parent, struct process_info *child) +void +x86_target::low_delete_thread (arch_lwp_info *alwp) +{ + /* This comes from nat/. */ + x86_linux_delete_thread (alwp); +} + +/* Target routine for new_fork. */ + +void +x86_target::low_new_fork (process_info *parent, process_info *child) { /* These are allocated by linux_add_process. */ gdb_assert (parent->priv != NULL @@ -694,6 +777,13 @@ x86_linux_new_fork (struct process_info *parent, struct process_info *child) *child->priv->arch_private = *parent->priv->arch_private; } +void +x86_target::low_prepare_to_resume (lwp_info *lwp) +{ + /* This comes from nat/. */ + x86_linux_prepare_to_resume (lwp); +} + /* See nat/x86-dregs.h. */ struct x86_debug_reg_state * @@ -716,8 +806,8 @@ x86_debug_reg_state (pid_t pid) from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to INF. */ -static int -x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction) +bool +x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction) { #ifdef __x86_64__ unsigned int machine; @@ -734,7 +824,7 @@ x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction) FIXUP_X32); #endif - return 0; + return false; } static int use_xml; @@ -820,10 +910,10 @@ x86_linux_read_description (void) /* Don't use XML. 
*/ #ifdef __x86_64__ if (machine == EM_X86_64) - return tdesc_amd64_linux_no_xml; + return tdesc_amd64_linux_no_xml.get (); else #endif - return tdesc_i386_linux_no_xml; + return tdesc_i386_linux_no_xml.get (); } if (have_ptrace_getregset == -1) @@ -901,7 +991,7 @@ x86_linux_read_description (void) void x86_target::update_xmltarget () { - struct thread_info *saved_thread = current_thread; + scoped_restore_current_thread restore_thread; /* Before changing the register cache's internal layout, flush the contents of the current valid caches back to the threads, and @@ -912,30 +1002,25 @@ x86_target::update_xmltarget () int pid = proc->pid; /* Look up any thread of this process. */ - current_thread = find_any_thread_of_pid (pid); + switch_to_thread (find_any_thread_of_pid (pid)); low_arch_setup (); }); - - current_thread = saved_thread; } /* Process qSupported query, "xmlRegisters=". Update the buffer size for PTRACE_GETREGSET. */ -static void -x86_linux_process_qsupported (char **features, int count) +void +x86_target::process_qsupported (gdb::array_view features) { - int i; - /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters=" with "i386" in qSupported query, it supports x86 XML target descriptions. */ use_xml = 0; - for (i = 0; i < count; i++) - { - const char *feature = features[i]; + for (const char *feature : features) + { if (startswith (feature, "xmlRegisters=")) { char *copy = xstrdup (feature + 13); @@ -955,7 +1040,8 @@ x86_linux_process_qsupported (char **features, int count) free (copy); } } - the_x86_target.update_xmltarget (); + + update_xmltarget (); } /* Common for x86/x86-64. */ @@ -1008,11 +1094,17 @@ x86_target::low_arch_setup () current_process ()->tdesc = x86_linux_read_description (); } +bool +x86_target::low_supports_catch_syscall () +{ + return true; +} + /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return code. This should only be called if LWP got a SYSCALL_SIGTRAP. 
*/ -static void -x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno) +void +x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno) { int use_64bit = register_size (regcache->tdesc, 0) == 8; @@ -1027,10 +1119,10 @@ x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno) collect_register_by_name (regcache, "orig_eax", sysno); } -static int -x86_supports_tracepoints (void) +bool +x86_target::supports_tracepoints () { - return 1; + return true; } static void @@ -1439,19 +1531,26 @@ i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr, return 0; } -static int -x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr, - CORE_ADDR collector, - CORE_ADDR lockaddr, - ULONGEST orig_size, - CORE_ADDR *jump_entry, - CORE_ADDR *trampoline, - ULONGEST *trampoline_size, - unsigned char *jjump_pad_insn, - ULONGEST *jjump_pad_insn_size, - CORE_ADDR *adjusted_insn_addr, - CORE_ADDR *adjusted_insn_addr_end, - char *err) +bool +x86_target::supports_fast_tracepoints () +{ + return true; +} + +int +x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, + CORE_ADDR tpaddr, + CORE_ADDR collector, + CORE_ADDR lockaddr, + ULONGEST orig_size, + CORE_ADDR *jump_entry, + CORE_ADDR *trampoline, + ULONGEST *trampoline_size, + unsigned char *jjump_pad_insn, + ULONGEST *jjump_pad_insn_size, + CORE_ADDR *adjusted_insn_addr, + CORE_ADDR *adjusted_insn_addr_end, + char *err) { #ifdef __x86_64__ if (is_64bit_tdesc ()) @@ -1480,8 +1579,8 @@ x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr, /* Return the minimum instruction length for fast tracepoints on x86/x86-64 architectures. */ -static int -x86_get_min_fast_tracepoint_insn_len (void) +int +x86_target::get_min_fast_tracepoint_insn_len () { static int warned_about_fast_tracepoints = 0; @@ -1529,9 +1628,8 @@ add_insns (unsigned char *start, int len) { CORE_ADDR buildaddr = current_insn_ptr; - if (debug_threads) - debug_printf ("Adding %d bytes of insn at %s\n", - len, paddress (buildaddr)); + threads_debug_printf ("Adding %d bytes of insn at %s", + len, paddress (buildaddr)); append_insns (&buildaddr, len, start); current_insn_ptr = buildaddr; @@ -2118,7 +2216,7 @@ amd64_emit_ge_goto (int *offset_p, int *size_p) *size_p = 4; } -struct emit_ops amd64_emit_ops = +static emit_ops amd64_emit_ops = { amd64_emit_prologue, amd64_emit_epilogue, @@ -2787,7 +2885,7 @@ i386_emit_ge_goto (int *offset_p, int *size_p) *size_p = 4; } -struct emit_ops i386_emit_ops = +static emit_ops i386_emit_ops = { i386_emit_prologue, i386_emit_epilogue, @@ -2829,8 +2927,8 @@ struct emit_ops i386_emit_ops = }; -static struct emit_ops * -x86_emit_ops (void) +emit_ops * +x86_target::emit_ops () { #ifdef __x86_64__ if (is_64bit_tdesc ()) @@ -2840,32 +2938,23 @@ x86_emit_ops (void) return &i386_emit_ops; } -/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */ +/* Implementation of target ops method "sw_breakpoint_from_kind". */ -static const gdb_byte * -x86_sw_breakpoint_from_kind (int kind, int *size) +const gdb_byte * +x86_target::sw_breakpoint_from_kind (int kind, int *size) { *size = x86_breakpoint_len; return x86_breakpoint; } -static int -x86_supports_range_stepping (void) -{ - return 1; -} - -/* Implementation of linux_target_ops method "supports_hardware_single_step". 
- */ - -static int -x86_supports_hardware_single_step (void) +bool +x86_target::low_supports_range_stepping () { - return 1; + return true; } -static int -x86_get_ipa_tdesc_idx (void) +int +x86_target::get_ipa_tdesc_idx () { struct regcache *regcache = get_thread_regcache (current_thread, 0); const struct target_desc *tdesc = regcache->tdesc; @@ -2874,56 +2963,12 @@ x86_get_ipa_tdesc_idx (void) return amd64_get_ipa_tdesc_idx (tdesc); #endif - if (tdesc == tdesc_i386_linux_no_xml) + if (tdesc == tdesc_i386_linux_no_xml.get ()) return X86_TDESC_SSE; return i386_get_ipa_tdesc_idx (tdesc); } -/* This is initialized assuming an amd64 target. - x86_arch_setup will correct it for i386 or amd64 targets. */ - -struct linux_target_ops the_low_target = -{ - NULL, /* fetch_register */ - x86_get_pc, - x86_set_pc, - NULL, /* breakpoint_kind_from_pc */ - x86_sw_breakpoint_from_kind, - NULL, - 1, - x86_breakpoint_at, - x86_supports_z_point_type, - x86_insert_point, - x86_remove_point, - x86_stopped_by_watchpoint, - x86_stopped_data_address, - /* collect_ptrace_register/supply_ptrace_register are not needed in the - native i386 case (no registers smaller than an xfer unit), and are not - used in the biarch case (HAVE_LINUX_USRREGS is not defined). */ - NULL, - NULL, - /* need to fix up i386 siginfo if host is amd64 */ - x86_siginfo_fixup, - x86_linux_new_process, - x86_linux_delete_process, - x86_linux_new_thread, - x86_linux_delete_thread, - x86_linux_new_fork, - x86_linux_prepare_to_resume, - x86_linux_process_qsupported, - x86_supports_tracepoints, - x86_get_thread_area, - x86_install_fast_tracepoint_jump_pad, - x86_emit_ops, - x86_get_min_fast_tracepoint_insn_len, - x86_supports_range_stepping, - NULL, /* breakpoint_kind_from_current_state */ - x86_supports_hardware_single_step, - x86_get_syscall_trapinfo, - x86_get_ipa_tdesc_idx, -}; - /* The linux target ops object. */ linux_process_target *the_linux_target = &the_x86_target; @@ -2934,14 +2979,14 @@ initialize_low_arch (void) /* Initialize the Linux target descriptions. */ #ifdef __x86_64__ tdesc_amd64_linux_no_xml = allocate_target_description (); - copy_target_description (tdesc_amd64_linux_no_xml, + copy_target_description (tdesc_amd64_linux_no_xml.get (), amd64_linux_read_description (X86_XSTATE_SSE_MASK, false)); tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml; #endif tdesc_i386_linux_no_xml = allocate_target_description (); - copy_target_description (tdesc_i386_linux_no_xml, + copy_target_description (tdesc_i386_linux_no_xml.get (), i386_linux_read_description (X86_XSTATE_SSE_MASK)); tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
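
Note on the overall shape of this change: the patch retires the per-architecture function-pointer table (the removed struct linux_target_ops the_low_target at the bottom of the diff) in favour of virtual methods that x86_target overrides on linux_process_target, so hooks such as low_get_pc, low_breakpoint_at and low_insert_point become ordinary member functions. The sketch below shows only that pattern; every type and member name in it is invented for illustration and is not a gdbserver class.

/* Illustrative only -- invented names, not gdbserver types.  Shows the
   move from a function-pointer table to virtual overrides.  */

#include <cstdio>

/* Old shape: one slot per hook, filled (or left NULL) per architecture.  */
struct low_ops_table
{
  int decr_pc_after_break;
  int (*breakpoint_at) (unsigned long pc);
};

static int
demo_breakpoint_at (unsigned long pc)
{
  return pc == 0x1000;
}

static low_ops_table the_demo_low_target = { 1, demo_breakpoint_at };

/* New shape: hooks are virtual members, overridden by the target class.  */
class demo_process_target
{
public:
  virtual ~demo_process_target () = default;
  virtual int low_decr_pc_after_break () { return 0; }
  virtual bool low_breakpoint_at (unsigned long pc) = 0;
};

class demo_x86_target : public demo_process_target
{
public:
  int low_decr_pc_after_break () override { return 1; }
  bool low_breakpoint_at (unsigned long pc) override { return pc == 0x1000; }
};

static demo_x86_target the_demo_target;

int
main ()
{
  std::printf ("table: %d %d  virtual: %d %d\n",
               the_demo_low_target.decr_pc_after_break,
               the_demo_low_target.breakpoint_at (0x1000),
               the_demo_target.low_decr_pc_after_break (),
               (int) the_demo_target.low_breakpoint_at (0x1000));
  return 0;
}

One practical gain visible in the diff itself: overrides that merely return a constant (low_supports_breakpoints, low_decr_pc_after_break, low_supports_range_stepping) replace NULL and magic-number slots in the old table.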
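
A second detail worth calling out: the new collect_register_i386 helper widens every 32-bit register image to the full 8-byte slot of the 64-bit ptrace buffer when an x86-64 gdbserver debugs an -m32 inferior. EAX is sign-extended so a negative in-kernel syscall-restart value survives the round trip (see the comment pointing at amd64_linux_collect_native_gregset); every other register is zero-extended. The standalone sketch below demonstrates only that widening rule; widen_reg_slot and the sample values are illustrative and are not gdbserver APIs.

/* Standalone illustration of the widening rule used by collect_register_i386.
   Builds with any C++11 compiler; a sketch, not gdbserver code.  memcpy is
   used deliberately to avoid aliasing/alignment concerns in the demo.  */

#include <cassert>
#include <cstdint>
#include <cstring>

/* Widen a 4-byte register image in place inside an 8-byte buffer slot.
   IS_EAX selects sign extension (syscall-restart safety); everything
   else is zero-extended.  */
static void
widen_reg_slot (unsigned char *slot, bool is_eax)
{
  uint32_t lo;
  std::memcpy (&lo, slot, sizeof lo);

  uint64_t wide = is_eax
    ? (uint64_t) (int64_t) (int32_t) lo   /* sign-extend EAX */
    : (uint64_t) lo;                      /* zero-extend other registers */

  std::memcpy (slot, &wide, sizeof wide);
}

int
main ()
{
  unsigned char slot[8];

  /* A negative EAX value, as left behind by an interrupted syscall,
     must still read back as negative from the full 8-byte slot.  */
  uint32_t eax = (uint32_t) -514;
  std::memcpy (slot, &eax, sizeof eax);
  widen_reg_slot (slot, true);
  int64_t wide_eax;
  std::memcpy (&wide_eax, slot, sizeof wide_eax);
  assert (wide_eax == -514);

  /* A large unsigned value in any other register must not be
     misread as negative: zero-extend, don't sign-extend.  */
  uint32_t ebx = 0x80000000u;
  std::memcpy (slot, &ebx, sizeof ebx);
  widen_reg_slot (slot, false);
  uint64_t wide_ebx;
  std::memcpy (&wide_ebx, slot, sizeof wide_ebx);
  assert (wide_ebx == 0x80000000u);

  return 0;
}

This is why x86_fill_gregset now routes both the regular i386 registers and orig_eax through collect_register_i386, instead of sign-extending only EAX after the fill loop as the removed code did.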