name can be changed. The only requirement is the %s be present. */
#define STUB_ENTRY_NAME "__%s_veneer"
+/* Stub name for a BTI landing stub. */
+#define BTI_STUB_ENTRY_NAME "__%s_bti_veneer"
+
/* The name of the dynamic interpreter. This is put in the .interp
section. */
#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
0x00000000,
};
+/* A BTI landing pad ("bti c") followed by a direct branch to the final
+   target.  This stub is placed close to the branch target so that an
+   indirect-branch stub (ending in BR x16) can target it instead of the
+   real destination without violating BTI.  */
+static const uint32_t aarch64_bti_direct_branch_stub[] =
+{
+ 0xd503245f, /* bti c */
+ 0x14000000, /* b <label> */
+};
+
static const uint32_t aarch64_erratum_835769_stub[] =
{
0x00000000, /* Placeholder for multiply accumulate. */
aarch64_stub_none,
aarch64_stub_adrp_branch,
aarch64_stub_long_branch,
+ aarch64_stub_bti_direct_branch,
aarch64_stub_erratum_835769_veneer,
aarch64_stub_erratum_843419_veneer,
};
/* Destination symbol type */
unsigned char st_type;
+ /* The target is also a stub. */
+ bool double_stub;
+
/* Where this stub is being called from, or, in the case of combined
stub sections, the first input section in the group. */
asection *id_sec;
unsigned int top_index;
asection **input_list;
+ /* True when two stubs are added where one targets the other, happens
+ when BTI stubs are inserted and then the stub layout must not change
+ during elfNN_aarch64_build_stubs. */
+ bool has_double_stub;
+
/* JUMP_SLOT relocs for variant PCS symbols may be present. */
int variant_pcs;
/* Initialize the local fields. */
eh = (struct elf_aarch64_stub_hash_entry *) entry;
- eh->adrp_offset = 0;
- eh->stub_sec = NULL;
- eh->stub_offset = 0;
- eh->target_value = 0;
- eh->target_section = NULL;
- eh->stub_type = aarch64_stub_none;
- eh->h = NULL;
- eh->id_sec = NULL;
+ memset (&eh->stub_sec, 0,
+ (sizeof (struct elf_aarch64_stub_hash_entry)
+ - offsetof (struct elf_aarch64_stub_hash_entry, stub_sec)));
}
return entry;
howto, value) == bfd_reloc_ok;
}
-static enum elf_aarch64_stub_type
-aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
-{
- if (aarch64_valid_for_adrp_p (value, place))
- return aarch64_stub_adrp_branch;
- return aarch64_stub_long_branch;
-}
-
/* Determine the type of stub needed, if any, for a call. */
static enum elf_aarch64_stub_type
bfd_vma veneer_entry_loc;
bfd_signed_vma branch_offset = 0;
unsigned int template_size;
+ unsigned int pad_size = 0;
const uint32_t *template;
unsigned int i;
struct bfd_link_info *info;
+ struct elf_aarch64_link_hash_table *htab;
/* Massage our args to the form they really have. */
stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
info = (struct bfd_link_info *) in_arg;
+ htab = elf_aarch64_hash_table (info);
/* Fail if the target section could not be assigned to an output
section. The user should fix his linker script. */
stub_sec = stub_entry->stub_sec;
+ /* The layout must not change when a stub may be the target of another. */
+ if (htab->has_double_stub)
+ BFD_ASSERT (stub_entry->stub_offset == stub_sec->size);
+
/* Make a note of the offset within the stubs for this entry. */
stub_entry->stub_offset = stub_sec->size;
loc = stub_sec->contents + stub_entry->stub_offset;
/* See if we can relax the stub. */
if (aarch64_valid_for_adrp_p (sym_value, place))
- stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
+ {
+ stub_entry->stub_type = aarch64_stub_adrp_branch;
+
+ /* Avoid the relaxation changing the layout. */
+ if (htab->has_double_stub)
+ pad_size = sizeof (aarch64_long_branch_stub)
+ - sizeof (aarch64_adrp_branch_stub);
+ }
}
switch (stub_entry->stub_type)
template = aarch64_long_branch_stub;
template_size = sizeof (aarch64_long_branch_stub);
break;
+ case aarch64_stub_bti_direct_branch:
+ template = aarch64_bti_direct_branch_stub;
+ template_size = sizeof (aarch64_bti_direct_branch_stub);
+ break;
case aarch64_stub_erratum_835769_veneer:
template = aarch64_erratum_835769_stub;
template_size = sizeof (aarch64_erratum_835769_stub);
loc += 4;
}
+ template_size += pad_size;
template_size = (template_size + 7) & ~7;
stub_sec->size += template_size;
BFD_FAIL ();
break;
+ case aarch64_stub_bti_direct_branch:
+ if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
+ stub_entry->stub_offset + 4, sym_value))
+ BFD_FAIL ();
+ break;
+
case aarch64_stub_erratum_835769_veneer:
veneered_insn_loc = stub_entry->target_section->output_section->vma
+ stub_entry->target_section->output_offset
}
/* As above, but don't actually build the stub. Just bump offset so
- we know stub section sizes. */
+ we know stub section sizes and record the offset for each stub so
+ a stub can target another stub (needed for BTI direct branch stub). */
static bool
aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
case aarch64_stub_long_branch:
size = sizeof (aarch64_long_branch_stub);
break;
+ case aarch64_stub_bti_direct_branch:
+ size = sizeof (aarch64_bti_direct_branch_stub);
+ break;
case aarch64_stub_erratum_835769_veneer:
size = sizeof (aarch64_erratum_835769_stub);
break;
}
size = (size + 7) & ~7;
+ stub_entry->stub_offset = stub_entry->stub_sec->size;
stub_entry->stub_sec->size += size;
return true;
}
+/* Return true if the output is BTI compatible, i.e. the merged GNU
+   property of OUTPUT_BFD has GNU_PROPERTY_AARCH64_FEATURE_1_BTI set,
+   meaning indirect branch targets are expected to start with a BTI
+   landing pad instruction.  */
+
+static bool
+elf_aarch64_bti_p (bfd *output_bfd)
+{
+ uint32_t prop = elf_aarch64_tdata (output_bfd)->gnu_and_prop;
+ return prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
+}
+
/* External entry points for sizing and building linker stubs. */
/* Set up various things so that we can make a list of input sections
#undef PREV_SEC
#undef PREV_SEC
+/* AARCH64_HINT matches any instruction in the A64 HINT space (which
+   includes NOP, BTI and the pointer-authentication hints; HINT-space
+   instructions execute as NOPs on cores without the extension).  The
+   remaining macros are the exact encodings of the hint instructions
+   that act as valid landing pads for an indirect branch (BR) when BTI
+   is enforced: the BTI variants and PACI{A,B}SP.  */
+#define AARCH64_HINT(insn) (((insn) & 0xfffff01f) == 0xd503201f)
+#define AARCH64_PACIASP 0xd503233f
+#define AARCH64_PACIBSP 0xd503237f
+#define AARCH64_BTI_C 0xd503245f
+#define AARCH64_BTI_J 0xd503249f
+#define AARCH64_BTI_JC 0xd50324df
+
+/* True if the inserted stub does not break BTI compatibility.
+
+   A stub that ends in an indirect branch (BR x16) requires its target
+   to begin with a BTI-compatible landing pad when BTI is enforced.
+   Read the first instruction at the stub's target in INPUT_BFD and
+   check whether it is such a landing pad.  On any read failure we
+   conservatively report the stub as incompatible (return false), which
+   at worst inserts an unnecessary BTI stub.  */
+
+static bool
+aarch64_bti_stub_p (bfd *input_bfd,
+ struct elf_aarch64_stub_hash_entry *stub_entry)
+{
+ /* Stubs without indirect branch are BTI compatible. */
+ if (stub_entry->stub_type != aarch64_stub_adrp_branch
+ && stub_entry->stub_type != aarch64_stub_long_branch)
+ return true;
+
+ /* Return true if the target instruction is compatible with BR x16. */
+
+ asection *section = stub_entry->target_section;
+ bfd_byte loc[4];
+ file_ptr off = stub_entry->target_value;
+ bfd_size_type count = sizeof (loc);
+
+ if (!bfd_get_section_contents (input_bfd, section, loc, off, count))
+ return false;
+
+ /* A64 instruction encodings are little-endian even on big-endian
+    targets, hence the unconditional bfd_getl32.  */
+ uint32_t insn = bfd_getl32 (loc);
+ /* Cheap mask test first: anything outside the HINT space cannot be a
+    landing pad.  */
+ if (!AARCH64_HINT (insn))
+ return false;
+ /* BTI C/J/JC and PACI{A,B}SP are the valid BR landing pads.  */
+ return insn == AARCH64_BTI_C
+ || insn == AARCH64_PACIASP
+ || insn == AARCH64_BTI_JC
+ || insn == AARCH64_BTI_J
+ || insn == AARCH64_PACIBSP;
+}
+
#define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
#define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
/* Ignore non-stub sections. */
if (!strstr (section->name, STUB_SUFFIX))
continue;
- section->size = 0;
+
+ /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
+ as long branch stubs contain a 64-bit address. */
+ section->size = 8;
}
bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
if (!strstr (section->name, STUB_SUFFIX))
continue;
- /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
- as long branch stubs contain a 64-bit address. */
- if (section->size)
- section->size += 8;
+ /* Empty stub section. */
+ if (section->size == 8)
+ section->size = 0;
/* Ensure all stub sections have a size which is a multiple of
4096. This is important in order to ensure that the insertion
struct bfd_link_info *info)
{
struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+ bool need_bti = elf_aarch64_bti_p (output_bfd);
bfd *input_bfd;
for (input_bfd = info->input_bfds; input_bfd != NULL;
unsigned int r_type, r_indx;
enum elf_aarch64_stub_type stub_type;
struct elf_aarch64_stub_hash_entry *stub_entry;
+ struct elf_aarch64_stub_hash_entry *stub_entry_bti;
asection *sym_sec;
bfd_vma sym_value;
bfd_vma destination;
struct elf_aarch64_link_hash_entry *hash;
const char *sym_name;
char *stub_name;
+ char *stub_name_bti;
const asection *id_sec;
+ const asection *id_sec_bti;
unsigned char st_type;
bfd_size_type len;
/* Always update this stub's target since it may have
changed after layout. */
stub_entry->target_value = sym_value + irela->r_addend;
+
+ if (stub_entry->double_stub)
+ {
+ /* Update the target of both stubs. */
+
+ id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
+ stub_name_bti =
+ elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash,
+ irela);
+ if (!stub_name_bti)
+ goto error_ret_free_internal;
+ stub_entry_bti =
+ aarch64_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name_bti, false, false);
+ BFD_ASSERT (stub_entry_bti != NULL);
+ free (stub_name_bti);
+ stub_entry_bti->target_value = stub_entry->target_value;
+ stub_entry->target_value = stub_entry_bti->stub_offset;
+ }
continue;
}
snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
sym_name);
+ /* A stub with indirect jump may break BTI compatibility, so
+ insert another stub with direct jump near the target then. */
+ if (need_bti && !aarch64_bti_stub_p (input_bfd, stub_entry))
+ {
+ stub_entry->double_stub = true;
+ htab->has_double_stub = true;
+ id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
+ stub_name_bti =
+ elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash, irela);
+ if (!stub_name_bti)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ stub_entry_bti =
+ aarch64_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name_bti, false, false);
+ if (stub_entry_bti == NULL)
+ stub_entry_bti =
+ _bfd_aarch64_add_stub_entry_in_group (stub_name_bti,
+ sym_sec, htab);
+ if (stub_entry_bti == NULL)
+ {
+ free (stub_name);
+ free (stub_name_bti);
+ goto error_ret_free_internal;
+ }
+
+ stub_entry_bti->target_value = sym_value + irela->r_addend;
+ stub_entry_bti->target_section = sym_sec;
+ stub_entry_bti->stub_type = aarch64_stub_bti_direct_branch;
+ stub_entry_bti->h = hash;
+ stub_entry_bti->st_type = st_type;
+
+ len = sizeof (BTI_STUB_ENTRY_NAME) + strlen (sym_name);
+ stub_entry_bti->output_name = bfd_alloc (htab->stub_bfd, len);
+ if (stub_entry_bti->output_name == NULL)
+ {
+ free (stub_name);
+ free (stub_name_bti);
+ goto error_ret_free_internal;
+ }
+ snprintf (stub_entry_bti->output_name, len,
+ BTI_STUB_ENTRY_NAME, sym_name);
+
+ /* Update the indirect call stub to target the BTI stub. */
+ stub_entry->target_value = 0;
+ stub_entry->target_section = stub_entry_bti->stub_sec;
+ stub_entry->stub_type = stub_type;
+ stub_entry->h = NULL;
+ stub_entry->st_type = STT_FUNC;
+ }
+
*stub_changed = true;
}
/* An object file might have a reference to a local
undefined symbol. This is a daft object file, but we
- should at least do something about it. */
+ should at least do something about it. NONE and NULL
+ relocations do not use the symbol and are explicitly
+ allowed to use an undefined one, so allow those.
+ Likewise for relocations against STN_UNDEF. */
if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
+ && r_symndx != STN_UNDEF
&& bfd_is_und_section (sec)
&& ELF_ST_BIND (sym->st_info) != STB_WEAK)
(*info->callbacks->undefined_symbol)
if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
return false;
break;
+ case aarch64_stub_bti_direct_branch:
+ if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
+ sizeof (aarch64_bti_direct_branch_stub)))
+ return false;
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
+ return false;
+ break;
case aarch64_stub_erratum_835769_veneer:
if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
sizeof (aarch64_erratum_835769_stub)))