X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=gdb%2Faarch64-linux-tdep.c;h=15773c75da8375f4ab4744e5712930dd9f6c7d6e;hb=b3d5660a7adf2e1e3846976ff4346c6a9b323978;hp=5a126b0c83c03b28da7feecbf63065c1a368897d;hpb=d55e5aa6b29906346c51ad00e6a9b112590aa294;p=binutils-gdb.git diff --git a/gdb/aarch64-linux-tdep.c b/gdb/aarch64-linux-tdep.c index 5a126b0c83c..15773c75da8 100644 --- a/gdb/aarch64-linux-tdep.c +++ b/gdb/aarch64-linux-tdep.c @@ -1,6 +1,6 @@ /* Target-dependent code for GNU/Linux AArch64. - Copyright (C) 2009-2019 Free Software Foundation, Inc. + Copyright (C) 2009-2022 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is part of GDB. @@ -20,33 +20,41 @@ #include "defs.h" -/* Standard C includes. */ -#include - -/* Local non-gdb includes. */ -#include "aarch64-linux-tdep.h" -#include "aarch64-tdep.h" -#include "arch-utils.h" -#include "auxv.h" -#include "cli/cli-utils.h" -#include "elf/common.h" #include "gdbarch.h" #include "glibc-tdep.h" -#include "inferior.h" -#include "linux-record.h" #include "linux-tdep.h" +#include "aarch64-tdep.h" +#include "aarch64-linux-tdep.h" #include "osabi.h" -#include "parser-defs.h" -#include "record-full.h" -#include "regcache.h" -#include "regset.h" #include "solib-svr4.h" -#include "stap-probe.h" #include "symtab.h" -#include "trad-frame.h" #include "tramp-frame.h" +#include "trad-frame.h" +#include "target.h" +#include "target/target.h" +#include "expop.h" + +#include "regcache.h" +#include "regset.h" + +#include "stap-probe.h" +#include "parser-defs.h" #include "user-regs.h" #include "xml-syscall.h" +#include + +#include "record-full.h" +#include "linux-record.h" + +#include "arch/aarch64-mte-linux.h" + +#include "arch-utils.h" +#include "value.h" + +#include "gdbsupport/selftest.h" + +#include "elf/common.h" +#include "elf/aarch64.h" /* Signal frame handling. @@ -184,6 +192,93 @@ read_aarch64_ctx (CORE_ADDR ctx_addr, enum bfd_endian byte_order, return magic; } +/* Given CACHE, use the trad_frame* functions to restore the FPSIMD + registers from a signal frame. + + VREG_NUM is the number of the V register being restored, OFFSET is the + address containing the register value, BYTE_ORDER is the endianness and + HAS_SVE tells us if we have a valid SVE context or not. */ + +static void +aarch64_linux_restore_vreg (struct trad_frame_cache *cache, int num_regs, + int vreg_num, CORE_ADDR offset, + enum bfd_endian byte_order, bool has_sve) +{ + /* WARNING: SIMD state is laid out in memory in target-endian format. + + So we have a couple cases to consider: + + 1 - If the target is big endian, then SIMD state is big endian, + requiring a byteswap. + + 2 - If the target is little endian, then SIMD state is little endian, so + no byteswap is needed. */ + + if (byte_order == BFD_ENDIAN_BIG) + { + gdb_byte buf[V_REGISTER_SIZE]; + + if (target_read_memory (offset, buf, V_REGISTER_SIZE) != 0) + { + size_t size = V_REGISTER_SIZE/2; + + /* Read the two halves of the V register in reverse byte order. */ + CORE_ADDR u64 = extract_unsigned_integer (buf, size, + byte_order); + CORE_ADDR l64 = extract_unsigned_integer (buf + size, size, + byte_order); + + /* Copy the reversed bytes to the buffer. */ + store_unsigned_integer (buf, size, BFD_ENDIAN_LITTLE, l64); + store_unsigned_integer (buf + size , size, BFD_ENDIAN_LITTLE, u64); + + /* Now we can store the correct bytes for the V register. 
*/ + trad_frame_set_reg_value_bytes (cache, AARCH64_V0_REGNUM + vreg_num, + {buf, V_REGISTER_SIZE}); + trad_frame_set_reg_value_bytes (cache, + num_regs + AARCH64_Q0_REGNUM + + vreg_num, {buf, Q_REGISTER_SIZE}); + trad_frame_set_reg_value_bytes (cache, + num_regs + AARCH64_D0_REGNUM + + vreg_num, {buf, D_REGISTER_SIZE}); + trad_frame_set_reg_value_bytes (cache, + num_regs + AARCH64_S0_REGNUM + + vreg_num, {buf, S_REGISTER_SIZE}); + trad_frame_set_reg_value_bytes (cache, + num_regs + AARCH64_H0_REGNUM + + vreg_num, {buf, H_REGISTER_SIZE}); + trad_frame_set_reg_value_bytes (cache, + num_regs + AARCH64_B0_REGNUM + + vreg_num, {buf, B_REGISTER_SIZE}); + + if (has_sve) + trad_frame_set_reg_value_bytes (cache, + num_regs + AARCH64_SVE_V0_REGNUM + + vreg_num, {buf, V_REGISTER_SIZE}); + } + return; + } + + /* Little endian, just point at the address containing the register + value. */ + trad_frame_set_reg_addr (cache, AARCH64_V0_REGNUM + vreg_num, offset); + trad_frame_set_reg_addr (cache, num_regs + AARCH64_Q0_REGNUM + vreg_num, + offset); + trad_frame_set_reg_addr (cache, num_regs + AARCH64_D0_REGNUM + vreg_num, + offset); + trad_frame_set_reg_addr (cache, num_regs + AARCH64_S0_REGNUM + vreg_num, + offset); + trad_frame_set_reg_addr (cache, num_regs + AARCH64_H0_REGNUM + vreg_num, + offset); + trad_frame_set_reg_addr (cache, num_regs + AARCH64_B0_REGNUM + vreg_num, + offset); + + if (has_sve) + trad_frame_set_reg_addr (cache, num_regs + AARCH64_SVE_V0_REGNUM + + vreg_num, offset); + +} + /* Implement the "init" method of struct tramp_frame. */ static void @@ -194,7 +289,7 @@ aarch64_linux_sigframe_init (const struct tramp_frame *self, { struct gdbarch *gdbarch = get_frame_arch (this_frame); enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); - struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); + aarch64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); CORE_ADDR sp = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM); CORE_ADDR sigcontext_addr = (sp + AARCH64_RT_SIGFRAME_UCONTEXT_OFFSET + AARCH64_UCONTEXT_SIGCONTEXT_OFFSET ); @@ -336,27 +431,16 @@ aarch64_linux_sigframe_init (const struct tramp_frame *self, /* If there was no SVE section then set up the V registers. */ if (sve_regs == 0) - for (int i = 0; i < 32; i++) - { - CORE_ADDR offset = (fpsimd + AARCH64_FPSIMD_V0_OFFSET + { + for (int i = 0; i < 32; i++) + { + CORE_ADDR offset = (fpsimd + AARCH64_FPSIMD_V0_OFFSET + (i * AARCH64_FPSIMD_VREG_SIZE)); - trad_frame_set_reg_addr (this_cache, AARCH64_V0_REGNUM + i, offset); - trad_frame_set_reg_addr (this_cache, - num_regs + AARCH64_Q0_REGNUM + i, offset); - trad_frame_set_reg_addr (this_cache, - num_regs + AARCH64_D0_REGNUM + i, offset); - trad_frame_set_reg_addr (this_cache, - num_regs + AARCH64_S0_REGNUM + i, offset); - trad_frame_set_reg_addr (this_cache, - num_regs + AARCH64_H0_REGNUM + i, offset); - trad_frame_set_reg_addr (this_cache, - num_regs + AARCH64_B0_REGNUM + i, offset); - if (tdep->has_sve ()) - trad_frame_set_reg_addr (this_cache, - num_regs + AARCH64_SVE_V0_REGNUM + i, - offset); - } + aarch64_linux_restore_vreg (this_cache, num_regs, i, offset, + byte_order, tdep->has_sve ()); + } + } } trad_frame_set_id (this_cache, frame_id_build (sp, func)); @@ -452,7 +536,7 @@ aarch64_linux_core_read_vq (struct gdbarch *gdbarch, bfd *abfd) return 0; } - size_t size = bfd_section_size (abfd, sve_section); + size_t size = bfd_section_size (sve_section); /* Check extended state size. 
*/ if (size < SVE_HEADER_SIZE) @@ -559,7 +643,8 @@ aarch64_linux_collect_sve_regset (const struct regset *regset, gdb_byte *header = (gdb_byte *) buf; struct gdbarch *gdbarch = regcache->arch (); enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); - uint64_t vq = gdbarch_tdep (gdbarch)->vq; + aarch64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); + uint64_t vq = tdep->vq; gdb_assert (buf != NULL); gdb_assert (size > SVE_HEADER_SIZE); @@ -586,7 +671,7 @@ aarch64_linux_collect_sve_regset (const struct regset *regset, size - SVE_HEADER_SIZE); } -/* Implement the "regset_from_core_section" gdbarch method. */ +/* Implement the "iterate_over_regset_sections" gdbarch method. */ static void aarch64_linux_iterate_over_regset_sections (struct gdbarch *gdbarch, @@ -594,7 +679,7 @@ aarch64_linux_iterate_over_regset_sections (struct gdbarch *gdbarch, void *cb_data, const struct regcache *regcache) { - struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); + aarch64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); cb (".reg", AARCH64_LINUX_SIZEOF_GREGSET, AARCH64_LINUX_SIZEOF_GREGSET, &aarch64_linux_gregset, NULL, cb_data); @@ -606,7 +691,7 @@ aarch64_linux_iterate_over_regset_sections (struct gdbarch *gdbarch, { { 32, AARCH64_SVE_Z0_REGNUM, (int) (tdep->vq * 16) }, { 16, AARCH64_SVE_P0_REGNUM, (int) (tdep->vq * 16 / 8) }, - { 1, AARCH64_SVE_FFR_REGNUM, 4 }, + { 1, AARCH64_SVE_FFR_REGNUM, (int) (tdep->vq * 16 / 8) }, { 1, AARCH64_FPSR_REGNUM, 4 }, { 1, AARCH64_FPCR_REGNUM, 4 }, { 0 } @@ -647,6 +732,44 @@ aarch64_linux_iterate_over_regset_sections (struct gdbarch *gdbarch, AARCH64_LINUX_SIZEOF_PAUTH, &aarch64_linux_pauth_regset, "pauth registers", cb_data); } + + /* Handle MTE registers. */ + if (tdep->has_mte ()) + { + /* Create this on the fly in order to handle the variable location. */ + const struct regcache_map_entry mte_regmap[] = + { + { 1, tdep->mte_reg_base, 8}, + { 0 } + }; + + const struct regset aarch64_linux_mte_regset = + { + mte_regmap, regcache_supply_regset, regcache_collect_regset + }; + + cb (".reg-aarch-mte", AARCH64_LINUX_SIZEOF_MTE_REGSET, + AARCH64_LINUX_SIZEOF_MTE_REGSET, &aarch64_linux_mte_regset, + "MTE registers", cb_data); + } + + if (tdep->has_tls ()) + { + const struct regcache_map_entry tls_regmap[] = + { + { 1, tdep->tls_regnum, 8 }, + { 0 } + }; + + const struct regset aarch64_linux_tls_regset = + { + tls_regmap, regcache_supply_regset, regcache_collect_regset + }; + + cb (".reg-aarch-tls", AARCH64_LINUX_SIZEOF_TLSREGSET, + AARCH64_LINUX_SIZEOF_TLSREGSET, &aarch64_linux_tls_regset, + "TLS register", cb_data); + } } /* Implement the "core_read_description" gdbarch method. 
*/ @@ -655,10 +778,17 @@ static const struct target_desc * aarch64_linux_core_read_description (struct gdbarch *gdbarch, struct target_ops *target, bfd *abfd) { + asection *tls = bfd_get_section_by_name (abfd, ".reg-aarch-tls"); CORE_ADDR hwcap = linux_get_hwcap (target); + CORE_ADDR hwcap2 = linux_get_hwcap2 (target); + + aarch64_features features; + features.vq = aarch64_linux_core_read_vq (gdbarch, abfd); + features.pauth = hwcap & AARCH64_HWCAP_PACA; + features.mte = hwcap2 & HWCAP2_MTE; + features.tls = tls != nullptr; - return aarch64_read_description (aarch64_linux_core_read_vq (gdbarch, abfd), - hwcap & AARCH64_HWCAP_PACA); + return aarch64_read_description (features); } /* Implementation of `gdbarch_stap_is_single_operand', as defined in @@ -681,7 +811,7 @@ aarch64_stap_is_single_operand (struct gdbarch *gdbarch, const char *s) It returns one if the special token has been parsed successfully, or zero if the current token is not considered special. */ -static int +static expr::operation_up aarch64_stap_parse_special_token (struct gdbarch *gdbarch, struct stap_parse_info *p) { @@ -692,11 +822,9 @@ aarch64_stap_parse_special_token (struct gdbarch *gdbarch, char *endp; /* Used to save the register name. */ const char *start; - char *regname; int len; int got_minus = 0; long displacement; - struct stoken str; ++tmp; start = tmp; @@ -706,17 +834,14 @@ aarch64_stap_parse_special_token (struct gdbarch *gdbarch, ++tmp; if (*tmp != ',') - return 0; + return {}; len = tmp - start; - regname = (char *) alloca (len + 2); - - strncpy (regname, start, len); - regname[len] = '\0'; + std::string regname (start, len); - if (user_reg_map_name_to_regnum (gdbarch, regname, len) == -1) + if (user_reg_map_name_to_regnum (gdbarch, regname.c_str (), len) == -1) error (_("Invalid register name `%s' on expression `%s'."), - regname, p->saved_arg); + regname.c_str (), p->saved_arg); ++tmp; tmp = skip_spaces (tmp); @@ -734,50 +859,44 @@ aarch64_stap_parse_special_token (struct gdbarch *gdbarch, ++tmp; if (!isdigit (*tmp)) - return 0; + return {}; displacement = strtol (tmp, &endp, 10); tmp = endp; /* Skipping last `]'. */ if (*tmp++ != ']') - return 0; + return {}; + p->arg = tmp; + + using namespace expr; /* The displacement. */ - write_exp_elt_opcode (&p->pstate, OP_LONG); - write_exp_elt_type (&p->pstate, builtin_type (gdbarch)->builtin_long); - write_exp_elt_longcst (&p->pstate, displacement); - write_exp_elt_opcode (&p->pstate, OP_LONG); + struct type *long_type = builtin_type (gdbarch)->builtin_long; if (got_minus) - write_exp_elt_opcode (&p->pstate, UNOP_NEG); + displacement = -displacement; + operation_up disp = make_operation (long_type, + displacement); /* The register name. */ - write_exp_elt_opcode (&p->pstate, OP_REGISTER); - str.ptr = regname; - str.length = len; - write_exp_string (&p->pstate, str); - write_exp_elt_opcode (&p->pstate, OP_REGISTER); + operation_up reg + = make_operation (std::move (regname)); - write_exp_elt_opcode (&p->pstate, BINOP_ADD); + operation_up sum + = make_operation (std::move (reg), std::move (disp)); /* Casting to the expected type. 
*/ - write_exp_elt_opcode (&p->pstate, UNOP_CAST); - write_exp_elt_type (&p->pstate, lookup_pointer_type (p->arg_type)); - write_exp_elt_opcode (&p->pstate, UNOP_CAST); - - write_exp_elt_opcode (&p->pstate, UNOP_IND); - - p->arg = tmp; + struct type *arg_ptr_type = lookup_pointer_type (p->arg_type); + sum = make_operation (std::move (sum), + arg_ptr_type); + return make_operation (std::move (sum)); } - else - return 0; - - return 1; + return {}; } /* AArch64 process record-replay constructs: syscall, signal etc. */ -struct linux_record_tdep aarch64_linux_record_tdep; +static linux_record_tdep aarch64_linux_record_tdep; /* Enum that defines the AArch64 linux specific syscall identifiers used for process record/replay. */ @@ -1043,6 +1162,7 @@ enum aarch64_syscall { aarch64_sys_finit_module = 273, aarch64_sys_sched_setattr = 274, aarch64_sys_sched_getattr = 275, + aarch64_sys_getrandom = 278 }; /* aarch64_canonicalize_syscall maps syscall ids from the native AArch64 @@ -1325,6 +1445,7 @@ aarch64_canonicalize_syscall (enum aarch64_syscall syscall_number) UNSUPPORTED_SYSCALL_MAP (finit_module); UNSUPPORTED_SYSCALL_MAP (sched_setattr); UNSUPPORTED_SYSCALL_MAP (sched_getattr); + SYSCALL_MAP (getrandom); default: return gdb_sys_no_syscall; } @@ -1347,7 +1468,7 @@ aarch64_linux_get_syscall_number (struct gdbarch *gdbarch, thread_info *thread) This function will only ever get called when stopped at the entry or exit of a syscall, so by checking for 0 in x0 (arg0/retval), x1 (arg1), x8 (syscall), x29 (FP) and x30 (LR) we can infer: - 1) Either inferior is at exit from sucessful execve. + 1) Either inferior is at exit from successful execve. 2) Or inferior is at entry to a call to io_setup with invalid arguments and a corrupted FP and LR. It should be safe enough to assume case 1. */ @@ -1395,9 +1516,10 @@ aarch64_linux_syscall_record (struct regcache *regcache, if (syscall_gdb < 0) { - printf_unfiltered (_("Process record and replay target doesn't " - "support syscall number %s\n"), - plongest (svc_number)); + gdb_printf (gdb_stderr, + _("Process record and replay target doesn't " + "support syscall number %s\n"), + plongest (svc_number)); return -1; } @@ -1429,11 +1551,414 @@ aarch64_linux_syscall_record (struct regcache *regcache, /* Implement the "gcc_target_options" gdbarch method. */ -static char * +static std::string aarch64_linux_gcc_target_options (struct gdbarch *gdbarch) { /* GCC doesn't know "-m64". */ - return NULL; + return {}; +} + +/* Helper to get the allocation tag from a 64-bit ADDRESS. + + Return the allocation tag if successful and nullopt otherwise. */ + +static gdb::optional +aarch64_mte_get_atag (CORE_ADDR address) +{ + gdb::byte_vector tags; + + /* Attempt to fetch the allocation tag. */ + if (!target_fetch_memtags (address, 1, tags, + static_cast (memtag_type::allocation))) + return {}; + + /* Only one tag should've been returned. Make sure we got exactly that. */ + if (tags.size () != 1) + error (_("Target returned an unexpected number of tags.")); + + /* Although our tags are 4 bits in size, they are stored in a + byte. */ + return tags[0]; +} + +/* Implement the tagged_address_p gdbarch method. */ + +static bool +aarch64_linux_tagged_address_p (struct gdbarch *gdbarch, struct value *address) +{ + gdb_assert (address != nullptr); + + CORE_ADDR addr = value_as_address (address); + + /* Remove the top byte for the memory range check. */ + addr = address_significant (gdbarch, addr); + + /* Check if the page that contains ADDRESS is mapped with PROT_MTE. 
*/ + if (!linux_address_in_memtag_page (addr)) + return false; + + /* We have a valid tag in the top byte of the 64-bit address. */ + return true; +} + +/* Implement the memtag_matches_p gdbarch method. */ + +static bool +aarch64_linux_memtag_matches_p (struct gdbarch *gdbarch, + struct value *address) +{ + gdb_assert (address != nullptr); + + /* Make sure we are dealing with a tagged address to begin with. */ + if (!aarch64_linux_tagged_address_p (gdbarch, address)) + return true; + + CORE_ADDR addr = value_as_address (address); + + /* Fetch the allocation tag for ADDRESS. */ + gdb::optional atag + = aarch64_mte_get_atag (address_significant (gdbarch, addr)); + + if (!atag.has_value ()) + return true; + + /* Fetch the logical tag for ADDRESS. */ + gdb_byte ltag = aarch64_mte_get_ltag (addr); + + /* Are the tags the same? */ + return ltag == *atag; +} + +/* Implement the set_memtags gdbarch method. */ + +static bool +aarch64_linux_set_memtags (struct gdbarch *gdbarch, struct value *address, + size_t length, const gdb::byte_vector &tags, + memtag_type tag_type) +{ + gdb_assert (!tags.empty ()); + gdb_assert (address != nullptr); + + CORE_ADDR addr = value_as_address (address); + + /* Set the logical tag or the allocation tag. */ + if (tag_type == memtag_type::logical) + { + /* When setting logical tags, we don't care about the length, since + we are only setting a single logical tag. */ + addr = aarch64_mte_set_ltag (addr, tags[0]); + + /* Update the value's content with the tag. */ + enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); + gdb_byte *srcbuf = value_contents_raw (address).data (); + store_unsigned_integer (srcbuf, sizeof (addr), byte_order, addr); + } + else + { + /* Remove the top byte. */ + addr = address_significant (gdbarch, addr); + + /* Make sure we are dealing with a tagged address to begin with. */ + if (!aarch64_linux_tagged_address_p (gdbarch, address)) + return false; + + /* With G being the number of tag granules and N the number of tags + passed in, we can have the following cases: + + 1 - G == N: Store all the N tags to memory. + + 2 - G < N : Warn about having more tags than granules, but write G + tags. + + 3 - G > N : This is a "fill tags" operation. We should use the tags + as a pattern to fill the granules repeatedly until we have + written G tags to memory. + */ + + size_t g = aarch64_mte_get_tag_granules (addr, length, + AARCH64_MTE_GRANULE_SIZE); + size_t n = tags.size (); + + if (g < n) + warning (_("Got more tags than memory granules. Tags will be " + "truncated.")); + else if (g > n) + warning (_("Using tag pattern to fill memory range.")); + + if (!target_store_memtags (addr, length, tags, + static_cast (memtag_type::allocation))) + return false; + } + return true; +} + +/* Implement the get_memtag gdbarch method. */ + +static struct value * +aarch64_linux_get_memtag (struct gdbarch *gdbarch, struct value *address, + memtag_type tag_type) +{ + gdb_assert (address != nullptr); + + CORE_ADDR addr = value_as_address (address); + CORE_ADDR tag = 0; + + /* Get the logical tag or the allocation tag. */ + if (tag_type == memtag_type::logical) + tag = aarch64_mte_get_ltag (addr); + else + { + /* Make sure we are dealing with a tagged address to begin with. */ + if (!aarch64_linux_tagged_address_p (gdbarch, address)) + return nullptr; + + /* Remove the top byte. 
*/ + addr = address_significant (gdbarch, addr); + gdb::optional atag = aarch64_mte_get_atag (addr); + + if (!atag.has_value ()) + return nullptr; + + tag = *atag; + } + + /* Convert the tag to a value. */ + return value_from_ulongest (builtin_type (gdbarch)->builtin_unsigned_int, + tag); +} + +/* Implement the memtag_to_string gdbarch method. */ + +static std::string +aarch64_linux_memtag_to_string (struct gdbarch *gdbarch, struct value *tag_value) +{ + if (tag_value == nullptr) + return ""; + + CORE_ADDR tag = value_as_address (tag_value); + + return string_printf ("0x%s", phex_nz (tag, sizeof (tag))); +} + +/* AArch64 Linux implementation of the report_signal_info gdbarch + hook. Displays information about possible memory tag violations. */ + +static void +aarch64_linux_report_signal_info (struct gdbarch *gdbarch, + struct ui_out *uiout, + enum gdb_signal siggnal) +{ + aarch64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); + + if (!tdep->has_mte () || siggnal != GDB_SIGNAL_SEGV) + return; + + CORE_ADDR fault_addr = 0; + long si_code = 0; + + try + { + /* Sigcode tells us if the segfault is actually a memory tag + violation. */ + si_code = parse_and_eval_long ("$_siginfo.si_code"); + + fault_addr + = parse_and_eval_long ("$_siginfo._sifields._sigfault.si_addr"); + } + catch (const gdb_exception_error &exception) + { + exception_print (gdb_stderr, exception); + return; + } + + /* If this is not a memory tag violation, just return. */ + if (si_code != SEGV_MTEAERR && si_code != SEGV_MTESERR) + return; + + uiout->text ("\n"); + + uiout->field_string ("sigcode-meaning", _("Memory tag violation")); + + /* For synchronous faults, show additional information. */ + if (si_code == SEGV_MTESERR) + { + uiout->text (_(" while accessing address ")); + uiout->field_core_addr ("fault-addr", gdbarch, fault_addr); + uiout->text ("\n"); + + gdb::optional atag + = aarch64_mte_get_atag (address_significant (gdbarch, fault_addr)); + gdb_byte ltag = aarch64_mte_get_ltag (fault_addr); + + if (!atag.has_value ()) + uiout->text (_("Allocation tag unavailable")); + else + { + uiout->text (_("Allocation tag ")); + uiout->field_string ("allocation-tag", hex_string (*atag)); + uiout->text ("\n"); + uiout->text (_("Logical tag ")); + uiout->field_string ("logical-tag", hex_string (ltag)); + } + } + else + { + uiout->text ("\n"); + uiout->text (_("Fault address unavailable")); + } +} + +/* AArch64 Linux implementation of the gdbarch_create_memtag_section hook. */ + +static asection * +aarch64_linux_create_memtag_section (struct gdbarch *gdbarch, bfd *obfd, + CORE_ADDR address, size_t size) +{ + gdb_assert (obfd != nullptr); + gdb_assert (size > 0); + + /* Create the section and associated program header. + + Make sure the section's flags has SEC_HAS_CONTENTS, otherwise BFD will + refuse to write data to this section. */ + asection *mte_section + = bfd_make_section_anyway_with_flags (obfd, "memtag", SEC_HAS_CONTENTS); + + if (mte_section == nullptr) + return nullptr; + + bfd_set_section_vma (mte_section, address); + /* The size of the memory range covered by the memory tags. We reuse the + section's rawsize field for this purpose. */ + mte_section->rawsize = size; + + /* Fetch the number of tags we need to save. */ + size_t tags_count + = aarch64_mte_get_tag_granules (address, size, AARCH64_MTE_GRANULE_SIZE); + /* Tags are stored packed as 2 tags per byte. */ + bfd_set_section_size (mte_section, (tags_count + 1) >> 1); + /* Store program header information. 
*/ + bfd_record_phdr (obfd, PT_AARCH64_MEMTAG_MTE, 1, 0, 0, 0, 0, 0, 1, + &mte_section); + + return mte_section; +} + +/* Maximum number of tags to request. */ +#define MAX_TAGS_TO_TRANSFER 1024 + +/* AArch64 Linux implementation of the gdbarch_fill_memtag_section hook. */ + +static bool +aarch64_linux_fill_memtag_section (struct gdbarch *gdbarch, asection *osec) +{ + /* We only handle MTE tags for now. */ + + size_t segment_size = osec->rawsize; + CORE_ADDR start_address = bfd_section_vma (osec); + CORE_ADDR end_address = start_address + segment_size; + + /* Figure out how many tags we need to store in this memory range. */ + size_t granules = aarch64_mte_get_tag_granules (start_address, segment_size, + AARCH64_MTE_GRANULE_SIZE); + + /* If there are no tag granules to fetch, just return. */ + if (granules == 0) + return true; + + CORE_ADDR address = start_address; + + /* Vector of tags. */ + gdb::byte_vector tags; + + while (granules > 0) + { + /* Transfer tags in chunks. */ + gdb::byte_vector tags_read; + size_t xfer_len + = ((granules >= MAX_TAGS_TO_TRANSFER) + ? MAX_TAGS_TO_TRANSFER * AARCH64_MTE_GRANULE_SIZE + : granules * AARCH64_MTE_GRANULE_SIZE); + + if (!target_fetch_memtags (address, xfer_len, tags_read, + static_cast (memtag_type::allocation))) + { + warning (_("Failed to read MTE tags from memory range [%s,%s)."), + phex_nz (start_address, sizeof (start_address)), + phex_nz (end_address, sizeof (end_address))); + return false; + } + + /* Transfer over the tags that have been read. */ + tags.insert (tags.end (), tags_read.begin (), tags_read.end ()); + + /* Adjust the remaining granules and starting address. */ + granules -= tags_read.size (); + address += tags_read.size () * AARCH64_MTE_GRANULE_SIZE; + } + + /* Pack the MTE tag bits. */ + aarch64_mte_pack_tags (tags); + + if (!bfd_set_section_contents (osec->owner, osec, tags.data (), + 0, tags.size ())) + { + warning (_("Failed to write %s bytes of corefile memory " + "tag content (%s)."), + pulongest (tags.size ()), + bfd_errmsg (bfd_get_error ())); + } + return true; +} + +/* AArch64 Linux implementation of the gdbarch_decode_memtag_section + hook. Decode a memory tag section and return the requested tags. + + The section is guaranteed to cover the [ADDRESS, ADDRESS + length) + range. */ + +static gdb::byte_vector +aarch64_linux_decode_memtag_section (struct gdbarch *gdbarch, + bfd_section *section, + int type, + CORE_ADDR address, size_t length) +{ + gdb_assert (section != nullptr); + + /* The requested address must not be less than section->vma. */ + gdb_assert (section->vma <= address); + + /* Figure out how many tags we need to fetch in this memory range. */ + size_t granules = aarch64_mte_get_tag_granules (address, length, + AARCH64_MTE_GRANULE_SIZE); + /* Sanity check. */ + gdb_assert (granules > 0); + + /* Fetch the total number of tags in the range [VMA, address + length). */ + size_t granules_from_vma + = aarch64_mte_get_tag_granules (section->vma, + address - section->vma + length, + AARCH64_MTE_GRANULE_SIZE); + + /* Adjust the tags vector to contain the exact number of packed bytes. */ + gdb::byte_vector tags (((granules - 1) >> 1) + 1); + + /* Figure out the starting offset into the packed tags data. */ + file_ptr offset = ((granules_from_vma - granules) >> 1); + + if (!bfd_get_section_contents (section->owner, section, tags.data (), + offset, tags.size ())) + error (_("Couldn't read contents from memtag section.")); + + /* At this point, the tags are packed 2 per byte. Unpack them before + returning. 
*/ + bool skip_first = ((granules_from_vma - granules) % 2) != 0; + aarch64_mte_unpack_tags (tags, skip_first); + + /* Resize to the exact number of tags that was requested. */ + tags.resize (granules); + + return tags; } static void @@ -1445,18 +1970,18 @@ aarch64_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) NULL }; static const char *const stap_register_indirection_suffixes[] = { "]", NULL }; - struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); + aarch64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); tdep->lowest_pc = 0x8000; - linux_init_abi (info, gdbarch); + linux_init_abi (info, gdbarch, 1); set_solib_svr4_fetch_link_map_offsets (gdbarch, - svr4_lp64_fetch_link_map_offsets); + linux_lp64_fetch_link_map_offsets); /* Enable TLS support. */ set_gdbarch_fetch_tls_load_module_address (gdbarch, - svr4_fetch_objfile_link_map); + svr4_fetch_objfile_link_map); /* Shared library handling. */ set_gdbarch_skip_trampoline_code (gdbarch, find_solib_trampoline_target); @@ -1493,6 +2018,49 @@ aarch64_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) data associated with the address. */ set_gdbarch_significant_addr_bit (gdbarch, 56); + /* MTE-specific settings and hooks. */ + if (tdep->has_mte ()) + { + /* Register a hook for checking if an address is tagged or not. */ + set_gdbarch_tagged_address_p (gdbarch, aarch64_linux_tagged_address_p); + + /* Register a hook for checking if there is a memory tag match. */ + set_gdbarch_memtag_matches_p (gdbarch, + aarch64_linux_memtag_matches_p); + + /* Register a hook for setting the logical/allocation tags for + a range of addresses. */ + set_gdbarch_set_memtags (gdbarch, aarch64_linux_set_memtags); + + /* Register a hook for extracting the logical/allocation tag from an + address. */ + set_gdbarch_get_memtag (gdbarch, aarch64_linux_get_memtag); + + /* Set the allocation tag granule size to 16 bytes. */ + set_gdbarch_memtag_granule_size (gdbarch, AARCH64_MTE_GRANULE_SIZE); + + /* Register a hook for converting a memory tag to a string. */ + set_gdbarch_memtag_to_string (gdbarch, aarch64_linux_memtag_to_string); + + set_gdbarch_report_signal_info (gdbarch, + aarch64_linux_report_signal_info); + + /* Core file helpers. */ + + /* Core file helper to create a memory tag section for a particular + PT_LOAD segment. */ + set_gdbarch_create_memtag_section + (gdbarch, aarch64_linux_create_memtag_section); + + /* Core file helper to fill a memory tag section with tag data. */ + set_gdbarch_fill_memtag_section + (gdbarch, aarch64_linux_fill_memtag_section); + + /* Core file helper to decode a memory tag section. */ + set_gdbarch_decode_memtag_section (gdbarch, + aarch64_linux_decode_memtag_section); + } + /* Initialize the aarch64_linux_record_tdep. */ /* These values are the size of the type that will be used in a system call. They are obtained from Linux Kernel source. */ @@ -1658,20 +2226,49 @@ aarch64_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) set_gdbarch_get_syscall_number (gdbarch, aarch64_linux_get_syscall_number); /* Displaced stepping. 
*/
-  set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
+  set_gdbarch_max_insn_length (gdbarch, 4 * AARCH64_DISPLACED_MODIFIED_INSNS);
   set_gdbarch_displaced_step_copy_insn (gdbarch,
 					aarch64_displaced_step_copy_insn);
   set_gdbarch_displaced_step_fixup (gdbarch, aarch64_displaced_step_fixup);
-  set_gdbarch_displaced_step_location (gdbarch, linux_displaced_step_location);
   set_gdbarch_displaced_step_hw_singlestep (gdbarch,
 					    aarch64_displaced_step_hw_singlestep);
 
   set_gdbarch_gcc_target_options (gdbarch, aarch64_linux_gcc_target_options);
 }
+
+#if GDB_SELF_TEST
+
+namespace selftests {
+
+/* Verify functions to read and write logical tags. */
+
+static void
+aarch64_linux_ltag_tests (void)
+{
+  /* We have 4 bits of tags, but we test writing all the bits of the top
+     byte of address. */
+  for (int i = 0; i < 1 << 8; i++)
+    {
+      CORE_ADDR addr = ((CORE_ADDR) i << 56) | 0xdeadbeef;
+      SELF_CHECK (aarch64_mte_get_ltag (addr) == (i & 0xf));
+
+      addr = aarch64_mte_set_ltag (0xdeadbeef, i);
+      SELF_CHECK (addr == (((CORE_ADDR) (i & 0xf) << 56) | 0xdeadbeef));
+    }
+}
+
+} // namespace selftests
+#endif /* GDB_SELF_TEST */
+
+void _initialize_aarch64_linux_tdep ();
 void
-_initialize_aarch64_linux_tdep (void)
+_initialize_aarch64_linux_tdep ()
 {
   gdbarch_register_osabi (bfd_arch_aarch64, 0, GDB_OSABI_LINUX,
 			  aarch64_linux_init_abi);
+
+#if GDB_SELF_TEST
+  selftests::register_test ("aarch64-linux-tagged-address",
+			    selftests::aarch64_linux_ltag_tests);
+#endif
 }
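
Illustration of the MTE tag layout the diff above relies on: a 4-bit logical
tag in address bits [56,59] (what the selftest exercises), allocation tags
covering 16-byte granules (AARCH64_MTE_GRANULE_SIZE), and tags packed two per
byte in the core-file "memtag" section.  This is a minimal standalone sketch,
not GDB code; the helpers ltag_get, ltag_set and tag_granules are hypothetical
stand-ins for aarch64_mte_get_ltag, aarch64_mte_set_ltag and
aarch64_mte_get_tag_granules.

#include <cassert>
#include <cstdint>
#include <cstddef>

/* Allocation tag granule size, mirroring AARCH64_MTE_GRANULE_SIZE.  */
constexpr uint64_t granule_size = 16;

/* Extract the 4-bit logical tag from bits [56,59] of ADDR
   (stand-in for aarch64_mte_get_ltag).  */
static uint8_t
ltag_get (uint64_t addr)
{
  return (addr >> 56) & 0xf;
}

/* Insert TAG into bits [56,59] of ADDR, leaving the other bits untouched
   (stand-in for aarch64_mte_set_ltag).  */
static uint64_t
ltag_set (uint64_t addr, uint8_t tag)
{
  return (addr & ~(0xfULL << 56)) | ((uint64_t) (tag & 0xf) << 56);
}

/* Number of 16-byte granules needed to cover [ADDR, ADDR + LEN)
   (stand-in for aarch64_mte_get_tag_granules).  */
static size_t
tag_granules (uint64_t addr, size_t len)
{
  if (len == 0)
    return 0;
  uint64_t start = addr & ~(granule_size - 1);                  /* Align down.  */
  uint64_t end = (addr + len + granule_size - 1) & ~(granule_size - 1);
  return (end - start) / granule_size;
}

int
main ()
{
  /* Logical tag round trip, as exercised by the selftest above.  */
  uint64_t addr = ltag_set (0xdeadbeef, 0x3);
  assert (ltag_get (addr) == 0x3);

  /* 32 bytes starting mid-granule span 3 granules; packed two tags per
     byte, that is (3 + 1) >> 1 == 2 bytes of "memtag" section data.  */
  assert (tag_granules (0x1008, 32) == 3);
  assert (((tag_granules (0x1008, 32) + 1) >> 1) == 2);
  return 0;
}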