Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
MA 02110-1301, USA. */
-#include "sysdep.h"
-#include "bfd.h"
-#include "bfdlink.h"
-#include "libbfd.h"
-#include "elf-bfd.h"
+#include "elfxx-x86.h"
#include "elf-nacl.h"
-#include "bfd_stdint.h"
-#include "objalloc.h"
-#include "hashtab.h"
#include "dwarf2.h"
#include "libiberty.h"
#define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
#define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
-/* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
- copying dynamic variables from a shared lib into an app's dynbss
- section, and instead use a dynamic relocation to point into the
- shared lib. */
-#define ELIMINATE_COPY_RELOCS 1
-
/* The size in bytes of an entry in the global offset table. */
#define GOT_ENTRY_SIZE 8
0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
};
+/* The first entry in the IBT-enabled lazy procedure linkage table is
+   the same as the lazy PLT with BND prefix so that bound registers are
+   preserved when control is passed to dynamic linker.  Subsequent
+   entries for an IBT-enabled lazy procedure linkage table look like
+   this.  */
+
+static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0x68, 0, 0, 0, 0, /* pushq immediate */
+ 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
+ 0x90 /* nop */
+};
+
+/* The first entry in the x32 IBT-enabled lazy procedure linkage table
+ is the same as the normal lazy PLT. Subsequent entries for an
+ x32 IBT-enabled lazy procedure linkage table look like this. */
+
+static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0x68, 0, 0, 0, 0, /* pushq immediate */
+ 0xe9, 0, 0, 0, 0, /* jmpq relative */
+ 0x66, 0x90 /* xchg %ax,%ax */
+};
+
/* Entries in the non-lazey procedure linkage table look like this. */
static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
0x90 /* nop */
};
+/* Entries for branches with IBT-enabled in the non-lazy procedure
+   linkage table look like this.  They have the same size as the lazy
+   PLT entry.  */
+
+static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
+};
+
+/* Entries for branches with IBT-enabled in the x32 non-lazy procedure
+   linkage table look like this.  They have the same size as the lazy
+   PLT entry.  */
+
+static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
+{
+ 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
+ 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
+ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
+ 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
+};
+
/* .eh_frame covering the lazy .plt section. */
static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
+/* .eh_frame covering the lazy .plt section with IBT-enabled. */
+
+static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
+{
+ PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
+ 0, 0, 0, 0, /* CIE ID */
+ 1, /* CIE version */
+ 'z', 'R', 0, /* Augmentation string */
+ 1, /* Code alignment factor */
+ 0x78, /* Data alignment factor */
+ 16, /* Return address column */
+ 1, /* Augmentation size */
+ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
+ DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
+ DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
+ DW_CFA_nop, DW_CFA_nop,
+
+ PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
+ PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
+ 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
+ 0, 0, 0, 0, /* .plt size goes here */
+ 0, /* Augmentation size */
+ DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
+ DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
+ DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
+ DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
+ DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
+ 11, /* Block length */
+ DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
+ DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
+ DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
+ DW_OP_lit3, DW_OP_shl, DW_OP_plus,
+ DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
+};
+
+/* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
+
+static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
+{
+ PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
+ 0, 0, 0, 0, /* CIE ID */
+ 1, /* CIE version */
+ 'z', 'R', 0, /* Augmentation string */
+ 1, /* Code alignment factor */
+ 0x78, /* Data alignment factor */
+ 16, /* Return address column */
+ 1, /* Augmentation size */
+ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
+ DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
+ DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
+ DW_CFA_nop, DW_CFA_nop,
+
+ PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
+ PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
+ 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
+ 0, 0, 0, 0, /* .plt size goes here */
+ 0, /* Augmentation size */
+ DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
+ DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
+ DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
+ DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
+ DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
+ 11, /* Block length */
+ DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
+ DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
+ DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
+ DW_OP_lit3, DW_OP_shl, DW_OP_plus,
+ DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
+};
+
/* .eh_frame covering the non-lazy .plt section. */
static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
+static const struct elf_x86_64_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
+ {
+ elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
+ elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 2, /* plt0_got1_offset */
+ 1+8, /* plt0_got2_offset */
+ 1+12, /* plt0_got2_insn_end */
+ 4+1+2, /* plt_got_offset */
+ 4+1, /* plt_reloc_offset */
+ 4+1+6, /* plt_plt_offset */
+ 4+1+6, /* plt_got_insn_size */
+ 4+1+5+5, /* plt_plt_insn_end */
+ 0, /* plt_lazy_offset */
+ elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_lazy_plt_layout elf_x32_lazy_ibt_plt =
+ {
+ elf_x86_64_lazy_plt0_entry, /* plt0_entry */
+ elf_x32_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 2, /* plt0_got1_offset */
+ 8, /* plt0_got2_offset */
+ 12, /* plt0_got2_insn_end */
+ 4+2, /* plt_got_offset */
+ 4+1, /* plt_reloc_offset */
+ 4+6, /* plt_plt_offset */
+ 4+6, /* plt_got_insn_size */
+ 4+5+5, /* plt_plt_insn_end */
+ 0, /* plt_lazy_offset */
+ elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
+ sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
+ {
+ elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 4+1+2, /* plt_got_offset */
+ 4+1+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
+ };
+
+static const struct elf_x86_64_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
+ {
+ elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
+ LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
+ 4+2, /* plt_got_offset */
+ 4+6, /* plt_got_insn_size */
+ elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
+ sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
+ };
+
static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
{
is_normal /* os */
#define elf_backend_arch_data &elf_x86_64_arch_bed
-/* Is a undefined weak symbol which is resolved to 0. Reference to an
- undefined weak symbol is resolved to 0 when building executable if
- it isn't dynamic and
- 1. Has non-GOT/non-PLT relocations in text section. Or
- 2. Has no GOT/PLT relocation.
- */
-#define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
- ((EH)->elf.root.type == bfd_link_hash_undefweak \
- && bfd_link_executable (INFO) \
- && (elf_x86_64_hash_table (INFO)->interp == NULL \
- || !(GOT_RELOC) \
- || (EH)->has_non_got_reloc \
- || !(INFO)->dynamic_undefined_weak))
-
-/* x86-64 ELF linker hash entry. */
-
-struct elf_x86_64_link_hash_entry
-{
- struct elf_link_hash_entry elf;
-
- /* Track dynamic relocs copied for this symbol. */
- struct elf_dyn_relocs *dyn_relocs;
-
-#define GOT_UNKNOWN 0
-#define GOT_NORMAL 1
-#define GOT_TLS_GD 2
+/* Values in tls_type of x86 ELF linker hash entry. */
#define GOT_TLS_IE 3
#define GOT_TLS_GDESC 4
#define GOT_TLS_GD_BOTH_P(type) \
((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
#define GOT_TLS_GD_ANY_P(type) \
(GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
- unsigned char tls_type;
-
- /* TRUE if a weak symbol with a real definition needs a copy reloc.
- When there is a weak symbol with a real definition, the processor
- independent code will have arranged for us to see the real
- definition first. We need to copy the needs_copy bit from the
- real definition and check it when allowing copy reloc in PIE. */
- unsigned int needs_copy : 1;
-
- /* TRUE if symbol has GOT or PLT relocations. */
- unsigned int has_got_reloc : 1;
-
- /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */
- unsigned int has_non_got_reloc : 1;
-
- /* Don't call finish_dynamic_symbol on this symbol. */
- unsigned int no_finish_dynamic_symbol : 1;
-
- /* 0: symbol isn't __tls_get_addr.
- 1: symbol is __tls_get_addr.
- 2: symbol is unknown. */
- unsigned int tls_get_addr : 2;
-
- /* Reference count of C/C++ function pointer relocations in read-write
- section which can be resolved at run-time. */
- bfd_signed_vma func_pointer_refcount;
-
- /* Information about the GOT PLT entry. Filled when there are both
- GOT and PLT relocations against the same function. */
- union gotplt_union plt_got;
-
- /* Information about the second PLT entry. */
- union gotplt_union plt_second;
-
- /* Offset of the GOTPLT entry reserved for the TLS descriptor,
- starting at the end of the jump table. */
- bfd_vma tlsdesc_got;
-};
-
-#define elf_x86_64_hash_entry(ent) \
- ((struct elf_x86_64_link_hash_entry *)(ent))
-
-struct elf_x86_64_obj_tdata
-{
- struct elf_obj_tdata root;
-
- /* tls_type for each local got entry. */
- char *local_got_tls_type;
-
- /* GOTPLT entries for TLS descriptors. */
- bfd_vma *local_tlsdesc_gotent;
-};
-
-#define elf_x86_64_tdata(abfd) \
- ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
-
-#define elf_x86_64_local_got_tls_type(abfd) \
- (elf_x86_64_tdata (abfd)->local_got_tls_type)
-
-#define elf_x86_64_local_tlsdesc_gotent(abfd) \
- (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
#define is_x86_64_elf(bfd) \
(bfd_get_flavour (bfd) == bfd_target_elf_flavour \
static bfd_boolean
elf_x86_64_mkobject (bfd *abfd)
{
- return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
+ return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_obj_tdata),
X86_64_ELF_DATA);
}
struct elf_x86_64_link_hash_table
{
- struct elf_link_hash_table elf;
-
- /* Short-cuts to get to dynamic linker sections. */
- asection *interp;
- asection *plt_eh_frame;
- asection *plt_second;
- asection *plt_second_eh_frame;
- asection *plt_got;
- asection *plt_got_eh_frame;
+ struct elf_x86_link_hash_table x86;
/* Parameters describing PLT generation, lazy or non-lazy. */
struct elf_x86_64_plt_layout plt;
/* Parameters describing non-lazy PLT generation. */
const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt;
-
- union
- {
- bfd_signed_vma refcount;
- bfd_vma offset;
- } tls_ld_got;
-
- /* The amount of space used by the jump slots in the GOT. */
- bfd_vma sgotplt_jump_table_size;
-
- /* Small local sym cache. */
- struct sym_cache sym_cache;
-
- bfd_vma (*r_info) (bfd_vma, bfd_vma);
- bfd_vma (*r_sym) (bfd_vma);
- unsigned int pointer_r_type;
- const char *dynamic_interpreter;
- int dynamic_interpreter_size;
-
- /* _TLS_MODULE_BASE_ symbol. */
- struct bfd_link_hash_entry *tls_module_base;
-
- /* Used by local STT_GNU_IFUNC symbols. */
- htab_t loc_hash_table;
- void * loc_hash_memory;
-
- /* The offset into splt of the PLT entry for the TLS descriptor
- resolver. Special values are 0, if not necessary (or not found
- to be necessary yet), and -1 if needed but not determined
- yet. */
- bfd_vma tlsdesc_plt;
- /* The offset into sgot of the GOT entry used by the PLT entry
- above. */
- bfd_vma tlsdesc_got;
-
- /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
- bfd_vma next_jump_slot_index;
- /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
- bfd_vma next_irelative_index;
-
- /* TRUE if there are dynamic relocs against IFUNC symbols that apply
- to read-only sections. */
- bfd_boolean readonly_dynrelocs_against_ifunc;
};
-/* Get the x86-64 ELF linker hash table from a link_info structure. */
+#define elf_x86_64_plt(htab) \
+ ((struct elf_x86_64_link_hash_table *) (htab))->plt
-#define elf_x86_64_hash_table(p) \
- (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
- == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
+#define elf_x86_64_lazy_plt(htab) \
+ ((struct elf_x86_64_link_hash_table *) (htab))->lazy_plt
+
+#define elf_x86_64_non_lazy_plt(htab) \
+ ((struct elf_x86_64_link_hash_table *) (htab))->non_lazy_plt
#define elf_x86_64_compute_jump_table_size(htab) \
((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
-/* Create an entry in an x86-64 ELF linker hash table. */
-
-static struct bfd_hash_entry *
-elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
- struct bfd_hash_table *table,
- const char *string)
-{
- /* Allocate the structure if it has not already been allocated by a
- subclass. */
- if (entry == NULL)
- {
- entry = (struct bfd_hash_entry *)
- bfd_hash_allocate (table,
- sizeof (struct elf_x86_64_link_hash_entry));
- if (entry == NULL)
- return entry;
- }
-
- /* Call the allocation method of the superclass. */
- entry = _bfd_elf_link_hash_newfunc (entry, table, string);
- if (entry != NULL)
- {
- struct elf_x86_64_link_hash_entry *eh;
-
- eh = (struct elf_x86_64_link_hash_entry *) entry;
- eh->dyn_relocs = NULL;
- eh->tls_type = GOT_UNKNOWN;
- eh->needs_copy = 0;
- eh->has_got_reloc = 0;
- eh->has_non_got_reloc = 0;
- eh->no_finish_dynamic_symbol = 0;
- eh->tls_get_addr = 2;
- eh->func_pointer_refcount = 0;
- eh->plt_second.offset = (bfd_vma) -1;
- eh->plt_got.offset = (bfd_vma) -1;
- eh->tlsdesc_got = (bfd_vma) -1;
- }
-
- return entry;
-}
-
-/* Compute a hash of a local hash entry. We use elf_link_hash_entry
- for local symbol so that we can handle local STT_GNU_IFUNC symbols
- as global symbol. We reuse indx and dynstr_index for local symbol
- hash since they aren't used by global symbols in this backend. */
-
-static hashval_t
-elf_x86_64_local_htab_hash (const void *ptr)
-{
- struct elf_link_hash_entry *h
- = (struct elf_link_hash_entry *) ptr;
- return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
-}
-
-/* Compare local hash entries. */
-
-static int
-elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
-{
- struct elf_link_hash_entry *h1
- = (struct elf_link_hash_entry *) ptr1;
- struct elf_link_hash_entry *h2
- = (struct elf_link_hash_entry *) ptr2;
-
- return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
-}
-
-/* Find and/or create a hash entry for local symbol. */
-
-static struct elf_link_hash_entry *
-elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
- bfd *abfd, const Elf_Internal_Rela *rel,
- bfd_boolean create)
-{
- struct elf_x86_64_link_hash_entry e, *ret;
- asection *sec = abfd->sections;
- hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
- htab->r_sym (rel->r_info));
- void **slot;
-
- e.elf.indx = sec->id;
- e.elf.dynstr_index = htab->r_sym (rel->r_info);
- slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
- create ? INSERT : NO_INSERT);
-
- if (!slot)
- return NULL;
-
- if (*slot)
- {
- ret = (struct elf_x86_64_link_hash_entry *) *slot;
- return &ret->elf;
- }
-
- ret = (struct elf_x86_64_link_hash_entry *)
- objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
- sizeof (struct elf_x86_64_link_hash_entry));
- if (ret)
- {
- memset (ret, 0, sizeof (*ret));
- ret->elf.indx = sec->id;
- ret->elf.dynstr_index = htab->r_sym (rel->r_info);
- ret->elf.dynindx = -1;
- ret->func_pointer_refcount = 0;
- ret->plt_got.offset = (bfd_vma) -1;
- *slot = ret;
- }
- return &ret->elf;
-}
-
-/* Destroy an X86-64 ELF linker hash table. */
-
-static void
-elf_x86_64_link_hash_table_free (bfd *obfd)
-{
- struct elf_x86_64_link_hash_table *htab
- = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
-
- if (htab->loc_hash_table)
- htab_delete (htab->loc_hash_table);
- if (htab->loc_hash_memory)
- objalloc_free ((struct objalloc *) htab->loc_hash_memory);
- _bfd_elf_link_hash_table_free (obfd);
-}
-
/* Create an X86-64 ELF linker hash table. */
static struct bfd_link_hash_table *
elf_x86_64_link_hash_table_create (bfd *abfd)
{
- struct elf_x86_64_link_hash_table *ret;
+ struct elf_x86_link_hash_table *ret;
bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
- ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
+ ret = (struct elf_x86_link_hash_table *) bfd_zmalloc (amt);
if (ret == NULL)
return NULL;
if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
- elf_x86_64_link_hash_newfunc,
- sizeof (struct elf_x86_64_link_hash_entry),
+ _bfd_x86_elf_link_hash_newfunc,
+ sizeof (struct elf_x86_link_hash_entry),
X86_64_ELF_DATA))
{
free (ret);
ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
}
+ ret->tls_get_addr = "__tls_get_addr";
ret->loc_hash_table = htab_try_create (1024,
- elf_x86_64_local_htab_hash,
- elf_x86_64_local_htab_eq,
+ _bfd_x86_elf_local_htab_hash,
+ _bfd_x86_elf_local_htab_eq,
NULL);
ret->loc_hash_memory = objalloc_create ();
if (!ret->loc_hash_table || !ret->loc_hash_memory)
{
- elf_x86_64_link_hash_table_free (abfd);
+ _bfd_x86_elf_link_hash_table_free (abfd);
return NULL;
}
- ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
+ ret->elf.root.hash_table_free = _bfd_x86_elf_link_hash_table_free;
return &ret->elf.root;
}
-/* Copy the extra info we tack onto an elf_link_hash_entry. */
-
-static void
-elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
- struct elf_link_hash_entry *dir,
- struct elf_link_hash_entry *ind)
-{
- struct elf_x86_64_link_hash_entry *edir, *eind;
-
- edir = (struct elf_x86_64_link_hash_entry *) dir;
- eind = (struct elf_x86_64_link_hash_entry *) ind;
-
- edir->has_got_reloc |= eind->has_got_reloc;
- edir->has_non_got_reloc |= eind->has_non_got_reloc;
-
- if (eind->dyn_relocs != NULL)
- {
- if (edir->dyn_relocs != NULL)
- {
- struct elf_dyn_relocs **pp;
- struct elf_dyn_relocs *p;
-
- /* Add reloc counts against the indirect sym to the direct sym
- list. Merge any entries against the same section. */
- for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
- {
- struct elf_dyn_relocs *q;
-
- for (q = edir->dyn_relocs; q != NULL; q = q->next)
- if (q->sec == p->sec)
- {
- q->pc_count += p->pc_count;
- q->count += p->count;
- *pp = p->next;
- break;
- }
- if (q == NULL)
- pp = &p->next;
- }
- *pp = edir->dyn_relocs;
- }
-
- edir->dyn_relocs = eind->dyn_relocs;
- eind->dyn_relocs = NULL;
- }
-
- if (ind->root.type == bfd_link_hash_indirect
- && dir->got.refcount <= 0)
- {
- edir->tls_type = eind->tls_type;
- eind->tls_type = GOT_UNKNOWN;
- }
-
- if (ELIMINATE_COPY_RELOCS
- && ind->root.type != bfd_link_hash_indirect
- && dir->dynamic_adjusted)
- {
- /* If called to transfer flags for a weakdef during processing
- of elf_adjust_dynamic_symbol, don't copy non_got_ref.
- We clear it ourselves for ELIMINATE_COPY_RELOCS. */
- if (dir->versioned != versioned_hidden)
- dir->ref_dynamic |= ind->ref_dynamic;
- dir->ref_regular |= ind->ref_regular;
- dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
- dir->needs_plt |= ind->needs_plt;
- dir->pointer_equality_needed |= ind->pointer_equality_needed;
- }
- else
- {
- if (eind->func_pointer_refcount > 0)
- {
- edir->func_pointer_refcount += eind->func_pointer_refcount;
- eind->func_pointer_refcount = 0;
- }
-
- _bfd_elf_link_hash_copy_indirect (info, dir, ind);
- }
-}
-
static bfd_boolean
elf64_x86_64_elf_object_p (bfd *abfd)
{
bfd_boolean largepic = FALSE;
struct elf_link_hash_entry *h;
bfd_vma offset;
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
bfd_byte *call;
- bfd_boolean indirect_call, tls_get_addr;
+ bfd_boolean indirect_call;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
offset = rel->r_offset;
switch (r_type)
{
if (r_symndx < symtab_hdr->sh_info)
return FALSE;
- tls_get_addr = FALSE;
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
- if (h != NULL && h->root.root.string != NULL)
- {
- struct elf_x86_64_link_hash_entry *eh
- = (struct elf_x86_64_link_hash_entry *) h;
- tls_get_addr = eh->tls_get_addr == 1;
- if (eh->tls_get_addr > 1)
- {
- /* Use strncmp to check __tls_get_addr since
- __tls_get_addr may be versioned. */
- if (strncmp (h->root.root.string, "__tls_get_addr", 14)
- == 0)
- {
- eh->tls_get_addr = 1;
- tls_get_addr = TRUE;
- }
- else
- eh->tls_get_addr = 0;
- }
- }
-
- if (!tls_get_addr)
+ if (h == NULL
+ || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
return FALSE;
else if (largepic)
return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
name = h->root.root.string;
else
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
name = "*unknown*";
else
_bfd_error_handler
/* xgettext:c-format */
- (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
+ (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
"in section `%A' failed"),
- abfd, from->name, to->name, name,
- (unsigned long) rel->r_offset, sec);
+ abfd, from->name, to->name, name, rel->r_offset, sec);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
#define check_relocs_failed sec_flg1
static bfd_boolean
-elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
+elf_x86_64_need_pic (struct bfd_link_info *info,
+ bfd *input_bfd, asection *sec,
struct elf_link_hash_entry *h,
Elf_Internal_Shdr *symtab_hdr,
Elf_Internal_Sym *isym,
const char *v = "";
const char *und = "";
const char *pic = "";
+ const char *object;
const char *name;
if (h)
v = _("protected symbol ");
break;
default:
- v = _("symbol ");
+ if (((struct elf_x86_link_hash_entry *) h)->def_protected)
+ v = _("protected symbol ");
+ else
+ v = _("symbol ");
pic = _("; recompile with -fPIC");
break;
}
pic = _("; recompile with -fPIC");
}
+ if (bfd_link_dll (info))
+ object = _("a shared object");
+ else if (bfd_link_pie (info))
+ object = _("a PIE object");
+ else
+ object = _("a PDE object");
+
/* xgettext:c-format */
_bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
- "not be used when making a shared object%s"),
- input_bfd, howto->name, und, v, name, pic);
+ "not be used when making %s%s"),
+ input_bfd, howto->name, und, v, name,
+ object, pic);
bfd_set_error (bfd_error_bad_value);
sec->check_relocs_failed = 1;
return FALSE;
bfd_boolean *converted,
struct bfd_link_info *link_info)
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
bfd_boolean is_pic;
bfd_boolean require_reloc_pc32;
bfd_boolean relocx;
if (raddend != -4)
return TRUE;
- htab = elf_x86_64_hash_table (link_info);
+ htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
is_pic = bfd_link_pic (link_info);
relocx = (r_type == R_X86_64_GOTPCRELX
R_X86_64_PC32. */
if ((relocx || opcode == 0x8b)
&& UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
+ X86_64_ELF_DATA,
TRUE,
- elf_x86_64_hash_entry (h)))
+ elf_x86_hash_entry (h)))
{
if (opcode == 0xff)
{
}
/* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since
ld.so may use its link-time address. */
- else if ((h->def_regular
- || h->root.type == bfd_link_hash_defined
- || h->root.type == bfd_link_hash_defweak)
- && h != htab->elf.hdynamic
- && SYMBOL_REFERENCES_LOCAL (link_info, h))
+ else if (h->start_stop
+ || ((h->def_regular
+ || h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && h != htab->elf.hdynamic
+ && SYMBOL_REFERENCES_LOCAL (link_info, h)))
{
/* bfd_link_hash_new or bfd_link_hash_undefined is
set by an assignment in a linker script in
- bfd_elf_record_link_assignment. */
- if (h->def_regular
- && (h->root.type == bfd_link_hash_new
- || h->root.type == bfd_link_hash_undefined
- || ((h->root.type == bfd_link_hash_defined
- || h->root.type == bfd_link_hash_defweak)
- && h->root.u.def.section == bfd_und_section_ptr)))
+ bfd_elf_record_link_assignment. start_stop is set
+ on __start_SECNAME/__stop_SECNAME which mark section
+ SECNAME. */
+ if (h->start_stop
+ || (h->def_regular
+ && (h->root.type == bfd_link_hash_new
+ || h->root.type == bfd_link_hash_undefined
+ || ((h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && h->root.u.def.section == bfd_und_section_ptr))))
{
/* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
if (require_reloc_pc32)
}
else
{
- struct elf_x86_64_link_hash_entry *eh
- = (struct elf_x86_64_link_hash_entry *) h;
+ struct elf_x86_link_hash_entry *eh
+ = (struct elf_x86_link_hash_entry *) h;
/* Convert to "nop call foo". ADDR_PREFIX_OPCODE
is a nop prefix. */
modrm = 0xe8;
/* To support TLS optimization, always use addr32 prefix for
"call *__tls_get_addr@GOTPCREL(%rip)". */
- if (eh && eh->tls_get_addr == 1)
+ if (eh && eh->tls_get_addr)
{
nop = 0x67;
nop_offset = irel->r_offset - 2;
asection *sec,
const Elf_Internal_Rela *relocs)
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
const Elf_Internal_Rela *rel;
BFD_ASSERT (is_x86_64_elf (abfd));
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
{
sec->check_relocs_failed = 1;
for (rel = relocs; rel < rel_end; rel++)
{
unsigned int r_type;
- unsigned long r_symndx;
+ unsigned int r_symndx;
struct elf_link_hash_entry *h;
- struct elf_x86_64_link_hash_entry *eh;
+ struct elf_x86_link_hash_entry *eh;
Elf_Internal_Sym *isym;
const char *name;
bfd_boolean size_reloc;
/* Check relocation against local STT_GNU_IFUNC symbol. */
if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
{
- h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
- TRUE);
+ h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
+ TRUE);
if (h == NULL)
goto error_return;
rel, rel_end, h, r_symndx, FALSE))
goto error_return;
- eh = (struct elf_x86_64_link_hash_entry *) h;
+ eh = (struct elf_x86_link_hash_entry *) h;
switch (r_type)
{
case R_X86_64_TLSLD:
- htab->tls_ld_got.refcount += 1;
+ htab->tls_ld_or_ldm_got.refcount += 1;
goto create_got;
case R_X86_64_TPOFF32:
if (!bfd_link_executable (info) && ABI_64_P (abfd))
- return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
+ return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
&x86_64_elf_howto_table[r_type]);
if (eh != NULL)
eh->has_got_reloc = 1;
if (local_got_refcounts == NULL)
goto error_return;
elf_local_got_refcounts (abfd) = local_got_refcounts;
- elf_x86_64_local_tlsdesc_gotent (abfd)
+ elf_x86_local_tlsdesc_gotent (abfd)
= (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
- elf_x86_64_local_got_tls_type (abfd)
+ elf_x86_local_got_tls_type (abfd)
= (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
}
local_got_refcounts[r_symndx] += 1;
old_tls_type
- = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
+ = elf_x86_local_got_tls_type (abfd) [r_symndx];
}
/* If a TLS symbol is accessed using IE at least once,
if (eh != NULL)
eh->tls_type = tls_type;
else
- elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
+ elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
}
}
/* Fall through */
&& !h->def_regular
&& h->def_dynamic
&& (sec->flags & SEC_READONLY) == 0)))
- return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
+ return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
&x86_64_elf_howto_table[r_type]);
/* Fall through. */
return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
}
-/* Remove undefined weak symbol from the dynamic symbol table if it
- is resolved to 0. */
-
-static bfd_boolean
-elf_x86_64_fixup_symbol (struct bfd_link_info *info,
- struct elf_link_hash_entry *h)
-{
- if (h->dynindx != -1
- && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
- elf_x86_64_hash_entry (h)->has_got_reloc,
- elf_x86_64_hash_entry (h)))
- {
- h->dynindx = -1;
- _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
- h->dynstr_index);
- }
- return TRUE;
-}
-
/* Adjust a symbol defined by a dynamic object and referenced by a
regular object. The current definition is in some section of the
dynamic object, but we're not including those sections. We have to
elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
struct elf_link_hash_entry *h)
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
asection *s, *srel;
- struct elf_x86_64_link_hash_entry *eh;
+ struct elf_x86_link_hash_entry *eh;
struct elf_dyn_relocs *p;
/* STT_GNU_IFUNC symbol must go through PLT. */
bfd_size_type pc_count = 0, count = 0;
struct elf_dyn_relocs **pp;
- eh = (struct elf_x86_64_link_hash_entry *) h;
+ eh = (struct elf_x86_link_hash_entry *) h;
for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
{
pc_count += p->pc_count;
the link may change h->type. So fix it now. */
h->plt.offset = (bfd_vma) -1;
+ eh = (struct elf_x86_link_hash_entry *) h;
+
/* If this is a weak symbol, and there is a real definition, the
processor independent code will have arranged for us to see the
real definition first, and we can just use the same value. */
|| h->u.weakdef->root.type == bfd_link_hash_defweak);
h->root.u.def.section = h->u.weakdef->root.u.def.section;
h->root.u.def.value = h->u.weakdef->root.u.def.value;
- if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
+ if (ELIMINATE_COPY_RELOCS
+ || info->nocopyreloc
+ || SYMBOL_NO_COPYRELOC (info, eh))
{
- eh = (struct elf_x86_64_link_hash_entry *) h;
h->non_got_ref = h->u.weakdef->non_got_ref;
eh->needs_copy = h->u.weakdef->needs_copy;
}
return TRUE;
/* If -z nocopyreloc was given, we won't generate them either. */
- if (info->nocopyreloc)
+ if (info->nocopyreloc || SYMBOL_NO_COPYRELOC (info, eh))
{
h->non_got_ref = 0;
return TRUE;
if (ELIMINATE_COPY_RELOCS)
{
- eh = (struct elf_x86_64_link_hash_entry *) h;
+ eh = (struct elf_x86_link_hash_entry *) h;
for (p = eh->dyn_relocs; p != NULL; p = p->next)
{
s = p->sec->output_section;
both the dynamic object and the regular object will refer to the
same memory location for the variable. */
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return FALSE;
elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
{
struct bfd_link_info *info;
- struct elf_x86_64_link_hash_table *htab;
- struct elf_x86_64_link_hash_entry *eh;
+ struct elf_x86_link_hash_table *htab;
+ struct elf_x86_link_hash_entry *eh;
struct elf_dyn_relocs *p;
const struct elf_backend_data *bed;
unsigned int plt_entry_size;
bfd_boolean resolved_to_zero;
+ const struct elf_x86_64_plt_layout *plt_layout;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt_layout;
if (h->root.type == bfd_link_hash_indirect)
return TRUE;
- eh = (struct elf_x86_64_link_hash_entry *) h;
+ eh = (struct elf_x86_link_hash_entry *) h;
info = (struct bfd_link_info *) inf;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return FALSE;
bed = get_elf_backend_data (info->output_bfd);
- plt_entry_size = htab->plt.plt_entry_size;
+
+ plt_layout = &elf_x86_64_plt (htab);
+ non_lazy_plt_layout = elf_x86_64_non_lazy_plt (htab);
+ plt_entry_size = plt_layout->plt_entry_size;
resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
+ X86_64_ELF_DATA,
eh->has_got_reloc,
eh);
&eh->dyn_relocs,
&htab->readonly_dynrelocs_against_ifunc,
plt_entry_size,
- (htab->plt.has_plt0
+ (plt_layout->has_plt0
* plt_entry_size),
GOT_ENTRY_SIZE, TRUE))
{
eh->plt_second.offset = s->size;
/* Make room for this entry in the second PLT section. */
- s->size += htab->non_lazy_plt->plt_entry_size;
+ s->size += non_lazy_plt_layout->plt_entry_size;
}
return TRUE;
first entry. The .plt section is used by prelink to undo
prelinking for dynamic relocations. */
if (s->size == 0)
- s->size = htab->plt.has_plt0 * plt_entry_size;
+ s->size = plt_layout->has_plt0 * plt_entry_size;
if (use_plt_got)
eh->plt_got.offset = got_s->size;
/* Make room for this entry. */
if (use_plt_got)
- got_s->size += htab->non_lazy_plt->plt_entry_size;
+ got_s->size += non_lazy_plt_layout->plt_entry_size;
else
{
s->size += plt_entry_size;
if (second_s)
- second_s->size += htab->non_lazy_plt->plt_entry_size;
+ second_s->size += non_lazy_plt_layout->plt_entry_size;
/* We also need to make an entry in the .got.plt section,
which will be placed in the .got section by the linker
if (h->got.refcount > 0
&& bfd_link_executable (info)
&& h->dynindx == -1
- && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
+ && elf_x86_hash_entry (h)->tls_type == GOT_TLS_IE)
{
h->got.offset = (bfd_vma) -1;
}
{
asection *s;
bfd_boolean dyn;
- int tls_type = elf_x86_64_hash_entry (h)->tls_type;
+ int tls_type = elf_x86_hash_entry (h)->tls_type;
/* Make sure this symbol is output as a dynamic symbol.
Undefined weak syms won't yet be marked as dynamic. */
return elf_x86_64_allocate_dynrelocs (h, inf);
}
-/* Find any dynamic relocs that apply to read-only sections. */
-
-static bfd_boolean
-elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
- void * inf)
-{
- struct elf_x86_64_link_hash_entry *eh;
- struct elf_dyn_relocs *p;
-
- /* Skip local IFUNC symbols. */
- if (h->forced_local && h->type == STT_GNU_IFUNC)
- return TRUE;
-
- eh = (struct elf_x86_64_link_hash_entry *) h;
- for (p = eh->dyn_relocs; p != NULL; p = p->next)
- {
- asection *s = p->sec->output_section;
-
- if (s != NULL && (s->flags & SEC_READONLY) != 0)
- {
- struct bfd_link_info *info = (struct bfd_link_info *) inf;
-
- info->flags |= DF_TEXTREL;
-
- if ((info->warn_shared_textrel && bfd_link_pic (info))
- || info->error_textrel)
- /* xgettext:c-format */
- info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
- p->sec->owner, h->root.root.string,
- p->sec);
-
- /* Not an error, just cut short the traversal. */
- return FALSE;
- }
- }
- return TRUE;
-}
-
/* Convert load via the GOT slot to load immediate. */
static bfd_boolean
Elf_Internal_Rela *internal_relocs;
Elf_Internal_Rela *irel, *irelend;
bfd_byte *contents;
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
bfd_boolean changed;
bfd_signed_vma *local_got_refcounts;
return FALSE;
changed = FALSE;
- htab = elf_x86_64_hash_table (link_info);
+ htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
local_got_refcounts = elf_local_got_refcounts (abfd);
/* Get the section contents. */
r_symndx = htab->r_sym (irel->r_info);
if (r_symndx < symtab_hdr->sh_info)
- h = elf_x86_64_get_local_sym_hash (htab, sec->owner,
- (const Elf_Internal_Rela *) irel,
- FALSE);
+ h = _bfd_elf_x86_get_local_sym_hash (htab, sec->owner,
+ (const Elf_Internal_Rela *) irel,
+ FALSE);
else
{
h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info];
elf_x86_64_size_dynamic_sections (bfd *output_bfd,
struct bfd_link_info *info)
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
bfd *dynobj;
asection *s;
bfd_boolean relocs;
bfd *ibfd;
const struct elf_backend_data *bed;
+ const struct elf_x86_64_plt_layout *plt_layout;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt_layout;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return FALSE;
bed = get_elf_backend_data (output_bfd);
if (dynobj == NULL)
abort ();
+ plt_layout = &elf_x86_64_plt (htab);
+ non_lazy_plt_layout = elf_x86_64_non_lazy_plt (htab);
+
/* Set up .got offsets for local syms, and space for local dynamic
relocs. */
for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
symtab_hdr = &elf_symtab_hdr (ibfd);
locsymcount = symtab_hdr->sh_info;
end_local_got = local_got + locsymcount;
- local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
- local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
+ local_tls_type = elf_x86_local_got_tls_type (ibfd);
+ local_tlsdesc_gotent = elf_x86_local_tlsdesc_gotent (ibfd);
s = htab->elf.sgot;
srel = htab->elf.srelgot;
for (; local_got < end_local_got;
}
}
- if (htab->tls_ld_got.refcount > 0)
+ if (htab->tls_ld_or_ldm_got.refcount > 0)
{
/* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
relocs. */
- htab->tls_ld_got.offset = htab->elf.sgot->size;
+ htab->tls_ld_or_ldm_got.offset = htab->elf.sgot->size;
htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
htab->elf.srelgot->size += bed->s->sizeof_rela;
}
else
- htab->tls_ld_got.offset = -1;
+ htab->tls_ld_or_ldm_got.offset = -1;
/* Allocate global sym .plt and .got entries, and space for global
sym dynamic relocs. */
/* Reserve room for the initial entry.
FIXME: we could probably do away with it in this case. */
if (htab->elf.splt->size == 0)
- htab->elf.splt->size = htab->plt.plt_entry_size;
+ htab->elf.splt->size = plt_layout->plt_entry_size;
htab->tlsdesc_plt = htab->elf.splt->size;
- htab->elf.splt->size += htab->plt.plt_entry_size;
+ htab->elf.splt->size += plt_layout->plt_entry_size;
}
}
&& htab->elf.splt != NULL
&& htab->elf.splt->size != 0
&& !bfd_is_abs_section (htab->elf.splt->output_section))
- htab->plt_eh_frame->size = htab->plt.eh_frame_plt_size;
+ htab->plt_eh_frame->size = plt_layout->eh_frame_plt_size;
if (htab->plt_got_eh_frame != NULL
&& htab->plt_got != NULL
&& htab->plt_got->size != 0
&& !bfd_is_abs_section (htab->plt_got->output_section))
htab->plt_got_eh_frame->size
- = htab->non_lazy_plt->eh_frame_plt_size;
+ = non_lazy_plt_layout->eh_frame_plt_size;
/* Unwind info for the second PLT and .plt.got sections are
identical. */
&& htab->plt_second->size != 0
&& !bfd_is_abs_section (htab->plt_second->output_section))
htab->plt_second_eh_frame->size
- = htab->non_lazy_plt->eh_frame_plt_size;
+ = non_lazy_plt_layout->eh_frame_plt_size;
}
/* We now have determined the sizes of the various dynamic sections.
&& htab->plt_eh_frame->contents != NULL)
{
memcpy (htab->plt_eh_frame->contents,
- htab->plt.eh_frame_plt, htab->plt_eh_frame->size);
+ plt_layout->eh_frame_plt, htab->plt_eh_frame->size);
bfd_put_32 (dynobj, htab->elf.splt->size,
htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
}
&& htab->plt_got_eh_frame->contents != NULL)
{
memcpy (htab->plt_got_eh_frame->contents,
- htab->non_lazy_plt->eh_frame_plt,
+ non_lazy_plt_layout->eh_frame_plt,
htab->plt_got_eh_frame->size);
bfd_put_32 (dynobj, htab->plt_got->size,
(htab->plt_got_eh_frame->contents
&& htab->plt_second_eh_frame->contents != NULL)
{
memcpy (htab->plt_second_eh_frame->contents,
- htab->non_lazy_plt->eh_frame_plt,
+ non_lazy_plt_layout->eh_frame_plt,
htab->plt_second_eh_frame->size);
bfd_put_32 (dynobj, htab->plt_second->size,
(htab->plt_second_eh_frame->contents
then we need a DT_TEXTREL entry. */
if ((info->flags & DF_TEXTREL) == 0)
elf_link_hash_traverse (&htab->elf,
- elf_x86_64_readonly_dynrelocs,
+ _bfd_x86_elf_readonly_dynrelocs,
info);
if ((info->flags & DF_TEXTREL) != 0)
return TRUE;
}
-static bfd_boolean
-elf_x86_64_always_size_sections (bfd *output_bfd,
- struct bfd_link_info *info)
-{
- asection *tls_sec = elf_hash_table (info)->tls_sec;
-
- if (tls_sec)
- {
- struct elf_link_hash_entry *tlsbase;
-
- tlsbase = elf_link_hash_lookup (elf_hash_table (info),
- "_TLS_MODULE_BASE_",
- FALSE, FALSE, FALSE);
-
- if (tlsbase && tlsbase->type == STT_TLS)
- {
- struct elf_x86_64_link_hash_table *htab;
- struct bfd_link_hash_entry *bh = NULL;
- const struct elf_backend_data *bed
- = get_elf_backend_data (output_bfd);
-
- htab = elf_x86_64_hash_table (info);
- if (htab == NULL)
- return FALSE;
-
- if (!(_bfd_generic_link_add_one_symbol
- (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
- tls_sec, 0, NULL, FALSE,
- bed->collect, &bh)))
- return FALSE;
-
- htab->tls_module_base = bh;
-
- tlsbase = (struct elf_link_hash_entry *)bh;
- tlsbase->def_regular = 1;
- tlsbase->other = STV_HIDDEN;
- tlsbase->root.linker_def = 1;
- (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
- }
- }
-
- return TRUE;
-}
-
-/* _TLS_MODULE_BASE_ needs to be treated especially when linking
- executables. Rather than setting it to the beginning of the TLS
- section, we have to set it to the end. This function may be called
- multiple times, it is idempotent. */
-
-static void
-elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
-{
- struct elf_x86_64_link_hash_table *htab;
- struct bfd_link_hash_entry *base;
-
- if (!bfd_link_executable (info))
- return;
-
- htab = elf_x86_64_hash_table (info);
- if (htab == NULL)
- return;
-
- base = htab->tls_module_base;
- if (base == NULL)
- return;
-
- base->u.def.value = htab->elf.tls_size;
-}
-
-/* Return the base VMA address which should be subtracted from real addresses
- when resolving @dtpoff relocation.
- This is PT_TLS segment p_vaddr. */
-
-static bfd_vma
-elf_x86_64_dtpoff_base (struct bfd_link_info *info)
-{
- /* If tls_sec is NULL, we should have signalled an error already. */
- if (elf_hash_table (info)->tls_sec == NULL)
- return 0;
- return elf_hash_table (info)->tls_sec->vma;
-}
-
/* Return the relocation value for @tpoff relocation
if STT_TLS virtual address is ADDRESS. */
Elf_Internal_Sym *local_syms,
asection **local_sections)
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
bfd_vma *local_got_offsets;
if (input_section->check_relocs_failed)
return FALSE;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return FALSE;
- plt_entry_size = htab->plt.plt_entry_size;
+ plt_entry_size = elf_x86_64_plt (htab).plt_entry_size;
symtab_hdr = &elf_symtab_hdr (input_bfd);
sym_hashes = elf_sym_hashes (input_bfd);
local_got_offsets = elf_local_got_offsets (input_bfd);
- local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
+ local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
- elf_x86_64_set_tls_module_base (info);
+ _bfd_x86_elf_set_tls_module_base (info);
rel = wrel = relocs;
relend = relocs + input_section->reloc_count;
reloc_howto_type *howto;
unsigned long r_symndx;
struct elf_link_hash_entry *h;
- struct elf_x86_64_link_hash_entry *eh;
+ struct elf_x86_link_hash_entry *eh;
Elf_Internal_Sym *sym;
asection *sec;
bfd_vma off, offplt, plt_offset;
}
if (r_type >= (int) R_X86_64_standard)
- {
- _bfd_error_handler
- /* xgettext:c-format */
- (_("%B: unrecognized relocation (0x%x) in section `%A'"),
- input_bfd, r_type, input_section);
- bfd_set_error (bfd_error_bad_value);
- return FALSE;
- }
+ return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
if (r_type != (int) R_X86_64_32
|| ABI_64_P (output_bfd))
if (!bfd_link_relocatable (info)
&& ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
{
- h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
- rel, FALSE);
+ h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
+ rel, FALSE);
if (h == NULL)
abort ();
}
}
- eh = (struct elf_x86_64_link_hash_entry *) h;
+ eh = (struct elf_x86_link_hash_entry *) h;
/* Since STT_GNU_IFUNC symbol must go through PLT, we handle
it here if it is defined in a non-shared object. */
if (htab->elf.splt != NULL)
{
plt_index = (h->plt.offset / plt_entry_size
- - htab->plt.has_plt0);
+ - elf_x86_64_plt (htab).has_plt0);
off = (plt_index + 3) * GOT_ENTRY_SIZE;
base_got = htab->elf.sgotplt;
}
_bfd_error_handler
/* xgettext:c-format */
(_("%B: relocation %s against STT_GNU_IFUNC "
- "symbol `%s' has non-zero addend: %d"),
+ "symbol `%s' has non-zero addend: %Ld"),
input_bfd, howto->name, name, rel->r_addend);
bfd_set_error (bfd_error_bad_value);
return FALSE;
resolved_to_zero = (eh != NULL
&& UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
+ X86_64_ELF_DATA,
eh->has_got_reloc,
eh));
case R_X86_64_GOTPCREL64:
/* Use global offset table entry as symbol value. */
case R_X86_64_GOTPLT64:
- /* This is obsolete and treated the the same as GOT64. */
+ /* This is obsolete and treated the same as GOT64. */
base_got = htab->elf.sgot;
if (htab->elf.sgot == NULL)
finish_dynamic_symbol would use that as offset into
.got. */
bfd_vma plt_index = (h->plt.offset / plt_entry_size
- - htab->plt.has_plt0);
+ - elf_x86_64_plt (htab).has_plt0);
off = (plt_index + 3) * GOT_ENTRY_SIZE;
base_got = htab->elf.sgotplt;
}
case R_X86_64_PC32:
case R_X86_64_PC32_BND:
/* Don't complain about -fPIC if the symbol is undefined when
- building executable unless it is unresolved weak symbol. */
+ building executable unless it is unresolved weak symbol or
+ -z nocopyreloc is used. */
if ((input_section->flags & SEC_ALLOC) != 0
&& (input_section->flags & SEC_READONLY) != 0
&& h != NULL
&& ((bfd_link_executable (info)
- && h->root.type == bfd_link_hash_undefweak
- && !resolved_to_zero)
- || (bfd_link_pic (info)
- && !(bfd_link_pie (info)
- && h->root.type == bfd_link_hash_undefined))))
+ && ((h->root.type == bfd_link_hash_undefweak
+ && !resolved_to_zero)
+ || ((info->nocopyreloc
+ || (eh->def_protected
+ && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
+ && h->def_dynamic
+ && !(h->root.u.def.section->flags & SEC_CODE))))
+ || bfd_link_dll (info)))
{
bfd_boolean fail = FALSE;
bfd_boolean branch
}
if (fail)
- return elf_x86_64_need_pic (input_bfd, input_section,
+ return elf_x86_64_need_pic (info, input_bfd, input_section,
h, NULL, NULL, howto);
}
/* Fall through. */
else
name = bfd_elf_sym_name (input_bfd, symtab_hdr,
sym, NULL);
- if (addend < 0)
- _bfd_error_handler
- /* xgettext:c-format */
- (_("%B: addend -0x%x in relocation %s against "
- "symbol `%s' at 0x%lx in section `%A' is "
- "out of range"),
- input_bfd, addend, howto->name, name,
- (unsigned long) rel->r_offset, input_section);
- else
- _bfd_error_handler
- /* xgettext:c-format */
- (_("%B: addend 0x%x in relocation %s against "
- "symbol `%s' at 0x%lx in section `%A' is "
- "out of range"),
- input_bfd, addend, howto->name, name,
- (unsigned long) rel->r_offset, input_section);
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B: addend %s%#x in relocation %s against "
+ "symbol `%s' at %#Lx in section `%A' is "
+ "out of range"),
+ input_bfd, addend < 0 ? "-" : "", addend,
+ howto->name, name, rel->r_offset, input_section);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
case R_X86_64_GOTTPOFF:
tls_type = GOT_UNKNOWN;
if (h == NULL && local_got_offsets)
- tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
+ tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
else if (h != NULL)
- tls_type = elf_x86_64_hash_entry (h)->tls_type;
+ tls_type = elf_x86_hash_entry (h)->tls_type;
if (! elf_x86_64_tls_transition (info, input_bfd,
input_section, contents,
if (h != NULL)
{
off = h->got.offset;
- offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
+ offplt = elf_x86_hash_entry (h)->tlsdesc_got;
}
else
{
+ htab->sgotplt_jump_table_size);
sreloc = htab->elf.srelplt;
if (indx == 0)
- outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
+ outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
else
outrel.r_addend = 0;
elf_append_rela (output_bfd, sreloc, &outrel);
outrel.r_addend = 0;
if ((dr_type == R_X86_64_TPOFF64
|| dr_type == R_X86_64_TLSDESC) && indx == 0)
- outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
+ outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
outrel.r_info = htab->r_info (indx, dr_type);
elf_append_rela (output_bfd, sreloc, &outrel);
{
BFD_ASSERT (! unresolved_reloc);
bfd_put_64 (output_bfd,
- relocation - elf_x86_64_dtpoff_base (info),
+ relocation - _bfd_x86_elf_dtpoff_base (info),
htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
}
else
if (htab->elf.sgot == NULL)
abort ();
- off = htab->tls_ld_got.offset;
+ off = htab->tls_ld_or_ldm_got.offset;
if (off & 1)
off &= ~1;
else
outrel.r_addend = 0;
elf_append_rela (output_bfd, htab->elf.srelgot,
&outrel);
- htab->tls_ld_got.offset |= 1;
+ htab->tls_ld_or_ldm_got.offset |= 1;
}
relocation = htab->elf.sgot->output_section->vma
+ htab->elf.sgot->output_offset + off;
case R_X86_64_DTPOFF32:
if (!bfd_link_executable (info)
|| (input_section->flags & SEC_CODE) == 0)
- relocation -= elf_x86_64_dtpoff_base (info);
+ relocation -= _bfd_x86_elf_dtpoff_base (info);
else
relocation = elf_x86_64_tpoff (info, relocation);
break;
case R_X86_64_DTPOFF64:
BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
- relocation -= elf_x86_64_dtpoff_base (info);
+ relocation -= _bfd_x86_elf_dtpoff_base (info);
break;
default:
&& _bfd_elf_section_offset (output_bfd, info, input_section,
rel->r_offset) != (bfd_vma) -1)
{
- _bfd_error_handler
- /* xgettext:c-format */
- (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
- input_bfd,
- input_section,
- (long) rel->r_offset,
- howto->name,
- h->root.root.string);
- return FALSE;
+ switch (r_type)
+ {
+ case R_X86_64_32S:
+ sec = h->root.u.def.section;
+ if ((info->nocopyreloc
+ || (eh->def_protected
+ && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
+ && !(h->root.u.def.section->flags & SEC_CODE))
+ return elf_x86_64_need_pic (info, input_bfd, input_section,
+ h, NULL, NULL, howto);
+ /* Fall through. */
+
+ default:
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
+ input_bfd,
+ input_section,
+ rel->r_offset,
+ howto->name,
+ h->root.root.string);
+ return FALSE;
+ }
}
do_relocation:
{
_bfd_error_handler
/* xgettext:c-format */
- (_("%B(%A+0x%lx): reloc against `%s': error %d"),
+ (_("%B(%A+%#Lx): reloc against `%s': error %d"),
input_bfd, input_section,
- (long) rel->r_offset, name, (int) r);
+ rel->r_offset, name, (int) r);
return FALSE;
}
}
struct elf_link_hash_entry *h,
Elf_Internal_Sym *sym)
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
bfd_boolean use_plt_second;
- struct elf_x86_64_link_hash_entry *eh;
+ struct elf_x86_link_hash_entry *eh;
bfd_boolean local_undefweak;
+ const struct elf_x86_64_plt_layout *plt_layout;
+ const struct elf_x86_64_lazy_plt_layout *lazy_plt_layout;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt_layout;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return FALSE;
/* Use the second PLT section only if there is .plt section. */
use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
- eh = (struct elf_x86_64_link_hash_entry *) h;
+ eh = (struct elf_x86_link_hash_entry *) h;
if (eh->no_finish_dynamic_symbol)
abort ();
+ plt_layout = &elf_x86_64_plt (htab);
+ lazy_plt_layout = elf_x86_64_lazy_plt (htab);
+ non_lazy_plt_layout = elf_x86_64_non_lazy_plt (htab);
+
/* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
resolved undefined weak symbols in executable so that their
references have value 0 at run-time. */
local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
+ X86_64_ELF_DATA,
eh->has_got_reloc,
eh);
if (plt == htab->elf.splt)
{
- got_offset = (h->plt.offset / htab->plt.plt_entry_size
- - htab->plt.has_plt0);
+ got_offset = (h->plt.offset / plt_layout->plt_entry_size
+ - plt_layout->has_plt0);
got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
}
else
{
- got_offset = h->plt.offset / htab->plt.plt_entry_size;
+ got_offset = h->plt.offset / plt_layout->plt_entry_size;
got_offset = got_offset * GOT_ENTRY_SIZE;
}
/* Fill in the entry in the procedure linkage table. */
- memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
- htab->plt.plt_entry_size);
+ memcpy (plt->contents + h->plt.offset, plt_layout->plt_entry,
+ plt_layout->plt_entry_size);
if (use_plt_second)
{
memcpy (htab->plt_second->contents + eh->plt_second.offset,
- htab->non_lazy_plt->plt_entry,
- htab->non_lazy_plt->plt_entry_size);
+ non_lazy_plt_layout->plt_entry,
+ non_lazy_plt_layout->plt_entry_size);
resolved_plt = htab->plt_second;
plt_offset = eh->plt_second.offset;
- resolved_plt->output_section->vma
- resolved_plt->output_offset
- plt_offset
- - htab->plt.plt_got_insn_size);
+ - plt_layout->plt_got_insn_size);
/* Check PC-relative offset overflow in PLT entry. */
if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
bfd_put_32 (output_bfd, plt_got_pcrel_offset,
(resolved_plt->contents + plt_offset
- + htab->plt.plt_got_offset));
+ + plt_layout->plt_got_offset));
/* Fill in the entry in the global offset table, initially this
points to the second part of the PLT entry. Leave the entry
against undefined weak symbol in PIE. */
if (!local_undefweak)
{
- if (htab->plt.has_plt0)
+ if (plt_layout->has_plt0)
bfd_put_64 (output_bfd, (plt->output_section->vma
+ plt->output_offset
+ h->plt.offset
- + htab->lazy_plt->plt_lazy_offset),
+ + lazy_plt_layout->plt_lazy_offset),
gotplt->contents + got_offset);
/* Fill in the entry in the .rela.plt section. */
/* Don't fill the second and third slots in PLT entry for
static executables nor without PLT0. */
- if (plt == htab->elf.splt && htab->plt.has_plt0)
+ if (plt == htab->elf.splt && plt_layout->has_plt0)
{
bfd_vma plt0_offset
- = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
+ = h->plt.offset + lazy_plt_layout->plt_plt_insn_end;
/* Put relocation index. */
bfd_put_32 (output_bfd, plt_index,
(plt->contents + h->plt.offset
- + htab->lazy_plt->plt_reloc_offset));
+ + lazy_plt_layout->plt_reloc_offset));
/* Put offset for jmp .PLT0 and check for overflow. We don't
check relocation index for overflow since branch displacement
output_bfd, h->root.root.string);
bfd_put_32 (output_bfd, - plt0_offset,
(plt->contents + h->plt.offset
- + htab->lazy_plt->plt_plt_offset));
+ + lazy_plt_layout->plt_plt_offset));
}
bed = get_elf_backend_data (output_bfd);
/* Fill in the entry in the GOT procedure linkage table. */
plt_offset = eh->plt_got.offset;
memcpy (plt->contents + plt_offset,
- htab->non_lazy_plt->plt_entry,
- htab->non_lazy_plt->plt_entry_size);
+ non_lazy_plt_layout->plt_entry,
+ non_lazy_plt_layout->plt_entry_size);
/* Put offset the PC-relative instruction referring to the GOT
entry, subtracting the size of that instruction. */
- plt->output_section->vma
- plt->output_offset
- plt_offset
- - htab->non_lazy_plt->plt_got_insn_size);
+ - non_lazy_plt_layout->plt_got_insn_size);
/* Check PC-relative offset overflow in GOT PLT entry. */
got_after_plt = got->output_section->vma > plt->output_section->vma;
bfd_put_32 (output_bfd, got_pcrel_offset,
(plt->contents + plt_offset
- + htab->non_lazy_plt->plt_got_offset));
+ + non_lazy_plt_layout->plt_got_offset));
}
if (!local_undefweak
/* Don't generate dynamic GOT relocation against undefined weak
symbol in executable. */
if (h->got.offset != (bfd_vma) -1
- && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
- && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE
+ && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
+ && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
&& !local_undefweak)
{
Elf_Internal_Rela rela;
= (struct bfd_link_info *) inf;
return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
- info, h, NULL);
+ info, h, NULL);
}
/* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
return TRUE;
return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
- info, h, NULL);
+ info, h, NULL);
}
/* Used to decide how to sort relocs in an optimal manner for the
{
bfd *abfd = info->output_bfd;
const struct elf_backend_data *bed = get_elf_backend_data (abfd);
- struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
+ struct elf_x86_link_hash_table *htab
+ = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab->elf.dynsym != NULL
&& htab->elf.dynsym->contents != NULL)
elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
struct bfd_link_info *info)
{
- struct elf_x86_64_link_hash_table *htab;
+ struct elf_x86_link_hash_table *htab;
bfd *dynobj;
asection *sdyn;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return FALSE;
bfd_byte *dyncon, *dynconend;
const struct elf_backend_data *bed;
bfd_size_type sizeof_dyn;
+ const struct elf_x86_64_plt_layout *plt_layout;
+ const struct elf_x86_64_lazy_plt_layout *lazy_plt_layout;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt_layout;
if (sdyn == NULL || htab->elf.sgot == NULL)
abort ();
(*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
}
+ plt_layout = &elf_x86_64_plt (htab);
+ lazy_plt_layout = elf_x86_64_lazy_plt (htab);
+ non_lazy_plt_layout = elf_x86_64_non_lazy_plt (htab);
+
if (htab->elf.splt && htab->elf.splt->size > 0)
{
elf_section_data (htab->elf.splt->output_section)
- ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
+ ->this_hdr.sh_entsize = plt_layout->plt_entry_size;
- if (htab->plt.has_plt0)
+ if (plt_layout->has_plt0)
{
/* Fill in the special first entry in the procedure linkage
table. */
memcpy (htab->elf.splt->contents,
- htab->lazy_plt->plt0_entry,
- htab->lazy_plt->plt_entry_size);
+ lazy_plt_layout->plt0_entry,
+ lazy_plt_layout->plt_entry_size);
/* Add offset for pushq GOT+8(%rip), since the instruction
uses 6 bytes subtract this value. */
bfd_put_32 (output_bfd,
- htab->elf.splt->output_offset
- 6),
(htab->elf.splt->contents
- + htab->lazy_plt->plt0_got1_offset));
+ + lazy_plt_layout->plt0_got1_offset));
/* Add offset for the PC-relative instruction accessing
GOT+16, subtracting the offset to the end of that
instruction. */
+ 16
- htab->elf.splt->output_section->vma
- htab->elf.splt->output_offset
- - htab->lazy_plt->plt0_got2_insn_end),
+ - lazy_plt_layout->plt0_got2_insn_end),
(htab->elf.splt->contents
- + htab->lazy_plt->plt0_got2_offset));
+ + lazy_plt_layout->plt0_got2_offset));
if (htab->tlsdesc_plt)
{
htab->elf.sgot->contents + htab->tlsdesc_got);
memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
- htab->lazy_plt->plt0_entry,
- htab->lazy_plt->plt_entry_size);
+ lazy_plt_layout->plt0_entry,
+ lazy_plt_layout->plt_entry_size);
/* Add offset for pushq GOT+8(%rip), since the
instruction uses 6 bytes subtract this value. */
- 6),
(htab->elf.splt->contents
+ htab->tlsdesc_plt
- + htab->lazy_plt->plt0_got1_offset));
+ + lazy_plt_layout->plt0_got1_offset));
/* Add offset for the PC-relative instruction accessing
GOT+TDG, where TDG stands for htab->tlsdesc_got,
subtracting the offset to the end of that
- htab->elf.splt->output_section->vma
- htab->elf.splt->output_offset
- htab->tlsdesc_plt
- - htab->lazy_plt->plt0_got2_insn_end),
+ - lazy_plt_layout->plt0_got2_insn_end),
(htab->elf.splt->contents
+ htab->tlsdesc_plt
- + htab->lazy_plt->plt0_got2_offset));
+ + lazy_plt_layout->plt0_got2_offset));
}
}
}
- }
- if (htab->plt_got != NULL && htab->plt_got->size > 0)
- elf_section_data (htab->plt_got->output_section)
- ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
+ if (htab->plt_got != NULL && htab->plt_got->size > 0)
+ elf_section_data (htab->plt_got->output_section)
+ ->this_hdr.sh_entsize = non_lazy_plt_layout->plt_entry_size;
- if (htab->plt_second != NULL && htab->plt_second->size > 0)
- elf_section_data (htab->plt_second->output_section)
- ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
+ if (htab->plt_second != NULL && htab->plt_second->size > 0)
+ elf_section_data (htab->plt_second->output_section)
+ ->this_hdr.sh_entsize = non_lazy_plt_layout->plt_entry_size;
+ }
/* GOT is always created in setup_gnu_properties. But it may not be
needed. */
asection *,
struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
{
- struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
+ struct elf_x86_link_hash_table *htab
+ = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return FALSE;
return TRUE;
}
-/* Sort relocs into address order. */
-
-static int
-compare_relocs (const void *ap, const void *bp)
-{
- const arelent *a = * (const arelent **) ap;
- const arelent *b = * (const arelent **) bp;
-
- if (a->address > b->address)
- return 1;
- else if (a->address < b->address)
- return -1;
- else
- return 0;
-}
-
enum elf_x86_64_plt_type
{
plt_non_lazy = 0,
asymbol **dynsyms,
asymbol **ret)
{
- long size, count, i, n;
+ long size, count, i, n, len;
int j;
unsigned int plt_got_offset, plt_entry_size, plt_got_insn_size;
asymbol *s;
bfd_byte *plt_contents;
long dynrelcount, relsize;
- arelent **dynrelbuf;
+ arelent **dynrelbuf, *p;
const struct elf_x86_64_lazy_plt_layout *lazy_plt;
const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt;
const struct elf_x86_64_lazy_plt_layout *lazy_bnd_plt;
const struct elf_x86_64_non_lazy_plt_layout *non_lazy_bnd_plt;
+ const struct elf_x86_64_lazy_plt_layout *lazy_ibt_plt;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_ibt_plt;
asection *plt;
char *names;
enum elf_x86_64_plt_type plt_type;
if (relsize <= 0)
return -1;
- dynrelbuf = (arelent **) bfd_malloc (relsize);
- if (dynrelbuf == NULL)
- return -1;
-
- dynrelcount = bfd_canonicalize_dynamic_reloc (abfd, dynrelbuf,
- dynsyms);
-
- /* Sort the relocs by address. */
- qsort (dynrelbuf, dynrelcount, sizeof (arelent *), compare_relocs);
-
if (get_elf_x86_64_backend_data (abfd)->os == is_normal)
{
lazy_plt = &elf_x86_64_lazy_plt;
non_lazy_plt = &elf_x86_64_non_lazy_plt;
lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
+ if (ABI_64_P (abfd))
+ {
+ lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
+ non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
+ }
+ else
+ {
+ lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
+ non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
+ }
}
else
{
non_lazy_plt = NULL;
lazy_bnd_plt = NULL;
non_lazy_bnd_plt = NULL;
+ lazy_ibt_plt = NULL;
+ non_lazy_ibt_plt = NULL;
}
count = 0;
for (j = 0; plts[j].name != NULL; j++)
{
plt = bfd_get_section_by_name (abfd, plts[j].name);
- if (plt == NULL)
+ if (plt == NULL || plt->size == 0)
continue;
/* Get the PLT section contents. */
/* Check what kind of PLT it is. */
plt_type = plt_unknown;
- if (plts[j].type == plt_unknown)
+ if (plts[j].type == plt_unknown
+ && (plt->size >= (lazy_plt->plt_entry_size
+ + lazy_plt->plt_entry_size)))
{
/* Match lazy PLT first. Need to check the first two
instructions. */
lazy_bnd_plt->plt0_entry + 6, 3) == 0))
{
plt_type = plt_lazy | plt_second;
- lazy_plt = lazy_bnd_plt;
+ /* The first entry in the lazy IBT PLT is the same as the
+ lazy BND PLT. */
+ if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
+ lazy_ibt_plt->plt_entry,
+ lazy_ibt_plt->plt_got_offset) == 0))
+ lazy_plt = lazy_ibt_plt;
+ else
+ lazy_plt = lazy_bnd_plt;
}
}
if (non_lazy_plt != NULL
- && (plt_type == plt_unknown || plt_type == plt_non_lazy))
+ && (plt_type == plt_unknown || plt_type == plt_non_lazy)
+ && plt->size >= non_lazy_plt->plt_entry_size)
{
/* Match non-lazy PLT. */
if (memcmp (plt_contents, non_lazy_plt->plt_entry,
plt_type = plt_non_lazy;
}
- if (non_lazy_bnd_plt != NULL
- && (plt_type == plt_unknown || plt_type == plt_second))
+ if (plt_type == plt_unknown || plt_type == plt_second)
{
- /* Match BND PLT. */
- if (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
- non_lazy_bnd_plt->plt_got_offset) == 0)
+ if (non_lazy_bnd_plt != NULL
+ && plt->size >= non_lazy_bnd_plt->plt_entry_size
+ && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
+ non_lazy_bnd_plt->plt_got_offset) == 0))
{
+ /* Match BND PLT. */
plt_type = plt_second;
non_lazy_plt = non_lazy_bnd_plt;
}
+ else if (non_lazy_ibt_plt != NULL
+ && plt->size >= non_lazy_ibt_plt->plt_entry_size
+ && (memcmp (plt_contents,
+ non_lazy_ibt_plt->plt_entry,
+ non_lazy_ibt_plt->plt_got_offset) == 0))
+ {
+ /* Match IBT PLT. */
+ plt_type = plt_second;
+ non_lazy_plt = non_lazy_ibt_plt;
+ }
}
if (plt_type == plt_unknown)
- continue;
+ {
+ free (plt_contents);
+ continue;
+ }
plts[j].sec = plt;
plts[j].type = plt_type;
plts[j].contents = plt_contents;
}
+ if (count == 0)
+ return -1;
+
+ dynrelbuf = (arelent **) bfd_malloc (relsize);
+ if (dynrelbuf == NULL)
+ return -1;
+
+ dynrelcount = bfd_canonicalize_dynamic_reloc (abfd, dynrelbuf,
+ dynsyms);
+
+ /* Sort the relocs by address. */
+ qsort (dynrelbuf, dynrelcount, sizeof (arelent *),
+ _bfd_x86_elf_compare_relocs);
+
size = count * sizeof (asymbol);
- s = *ret = (asymbol *) bfd_zmalloc (size);
- if (s == NULL)
+
+ /* Allocate space for @plt suffixes. */
+ n = 0;
+ for (i = 0; i < dynrelcount; i++)
{
-bad_return:
- for (j = 0; plts[j].name != NULL; j++)
- if (plts[j].contents != NULL)
- free (plts[j].contents);
- free (dynrelbuf);
- return -1;
+ p = dynrelbuf[i];
+ size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
+ if (p->addend != 0)
+ size += sizeof ("+0x") - 1 + 8 + 8 * ABI_64_P (abfd);
}
+ s = *ret = (asymbol *) bfd_zmalloc (size);
+ if (s == NULL)
+ goto bad_return;
+
/* Check for each PLT section. */
+ names = (char *) (s + count);
size = 0;
n = 0;
for (j = 0; plts[j].name != NULL; j++)
int off;
bfd_vma got_vma;
long min, max, mid;
- arelent *p;
/* Get the PC-relative offset, a signed 32-bit integer. */
off = H_GET_32 (abfd, (plt_contents + offset
s->section = plt;
s->the_bfd = plt->owner;
s->value = offset;
- /* Store relocation for later use. */
- s->udata.p = p;
- /* Add @plt to function name later. */
- size += strlen (s->name) + sizeof ("@plt");
+ s->udata.p = NULL;
+ s->name = names;
+ len = strlen ((*p->sym_ptr_ptr)->name);
+ memcpy (names, (*p->sym_ptr_ptr)->name, len);
+ names += len;
if (p->addend != 0)
- size += sizeof ("+0x") - 1 + 8 + 8 * ABI_64_P (abfd);
+ {
+ char buf[30], *a;
+
+ memcpy (names, "+0x", sizeof ("+0x") - 1);
+ names += sizeof ("+0x") - 1;
+ bfd_sprintf_vma (abfd, buf, p->addend);
+ for (a = buf; *a == '0'; ++a)
+ ;
+ size = strlen (a);
+ memcpy (names, a, size);
+ names += size;
+ }
+ memcpy (names, "@plt", sizeof ("@plt"));
+ names += sizeof ("@plt");
n++;
s++;
}
/* PLT entries with R_X86_64_TLSDESC relocations are skipped. */
if (n == 0)
- goto bad_return;
-
- count = n;
-
- /* Allocate space for @plt suffixes. */
- names = (char *) bfd_malloc (size);
- if (s == NULL)
- goto bad_return;
-
- s = *ret;
- for (i = 0; i < count; i++)
{
- /* Add @plt to function name. */
- arelent *p = (arelent *) s->udata.p;
- /* Clear it now. */
- s->udata.p = NULL;
- size = strlen (s->name);
- memcpy (names, s->name, size);
- s->name = names;
- names += size;
- if (p->addend != 0)
- {
- char buf[30], *a;
-
- memcpy (names, "+0x", sizeof ("+0x") - 1);
- names += sizeof ("+0x") - 1;
- bfd_sprintf_vma (abfd, buf, p->addend);
- for (a = buf; *a == '0'; ++a)
- ;
- size = strlen (a);
- memcpy (names, a, size);
- names += size;
- }
- memcpy (names, "@plt", sizeof ("@plt"));
- names += sizeof ("@plt");
- s++;
+bad_return:
+ count = -1;
}
+ else
+ count = n;
for (j = 0; plts[j].name != NULL; j++)
if (plts[j].contents != NULL)
return count;
}
-/* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
-
-static bfd_boolean
-elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
-{
- if (h->plt.offset != (bfd_vma) -1
- && !h->def_regular
- && !h->pointer_equality_needed)
- return FALSE;
-
- return _bfd_elf_hash_symbol (h);
-}
-
/* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
static bfd_boolean
&& _bfd_elf_relocs_compatible (input, output));
}
-/* Parse x86-64 GNU properties. */
+/* Set up x86-64 GNU properties. Return the first relocatable ELF input
+ with GNU properties if found. Otherwise, return NULL. */
-static enum elf_property_kind
-elf_x86_64_parse_gnu_properties (bfd *abfd, unsigned int type,
- bfd_byte *ptr, unsigned int datasz)
+static bfd *
+elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
{
+ bfd_boolean normal_target;
+ bfd_boolean lazy_plt;
+ asection *sec, *pltsec;
+ bfd *dynobj;
+ bfd_boolean use_ibt_plt;
+ unsigned int plt_alignment, features;
+ struct elf_x86_link_hash_table *htab;
+ bfd *pbfd;
+ bfd *ebfd = NULL;
elf_property *prop;
+ struct elf_x86_64_plt_layout *plt_layout;
+ const struct elf_x86_64_lazy_plt_layout *lazy_plt_layout;
+ const struct elf_x86_64_non_lazy_plt_layout *non_lazy_plt_layout;
+
+ features = 0;
+ if (info->ibt)
+ features = GNU_PROPERTY_X86_FEATURE_1_IBT;
+ if (info->shstk)
+ features |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;
+
+ /* Find a normal input file with GNU property note. */
+ for (pbfd = info->input_bfds;
+ pbfd != NULL;
+ pbfd = pbfd->link.next)
+ if (bfd_get_flavour (pbfd) == bfd_target_elf_flavour
+ && bfd_count_sections (pbfd) != 0)
+ {
+ ebfd = pbfd;
- switch (type)
+ if (elf_properties (pbfd) != NULL)
+ break;
+ }
+
+ if (ebfd != NULL && features)
{
- case GNU_PROPERTY_X86_ISA_1_USED:
- case GNU_PROPERTY_X86_ISA_1_NEEDED:
- if (datasz != 4)
- {
- _bfd_error_handler
- ((type == GNU_PROPERTY_X86_ISA_1_USED
- ? _("error: %B: <corrupt x86 ISA used size: 0x%x>")
- : _("error: %B: <corrupt x86 ISA needed size: 0x%x>")),
- abfd, datasz);
- return property_corrupt;
- }
- prop = _bfd_elf_get_property (abfd, type, datasz);
- /* Combine properties of the same type. */
- prop->u.number |= bfd_h_get_32 (abfd, ptr);
+ /* If features is set, add GNU_PROPERTY_X86_FEATURE_1_IBT and
+ GNU_PROPERTY_X86_FEATURE_1_SHSTK. */
+ prop = _bfd_elf_get_property (ebfd,
+ GNU_PROPERTY_X86_FEATURE_1_AND,
+ 4);
+ prop->u.number |= features;
prop->pr_kind = property_number;
- break;
-
- default:
- return property_ignored;
- }
-
- return property_number;
-}
-/* Merge x86-64 GNU property BPROP with APROP. If APROP isn't NULL,
- return TRUE if APROP is updated. Otherwise, return TRUE if BPROP
- should be merged with ABFD. */
+ /* Create the GNU property note section if needed. */
+ if (pbfd == NULL)
+ {
+ sec = bfd_make_section_with_flags (ebfd,
+ NOTE_GNU_PROPERTY_SECTION_NAME,
+ (SEC_ALLOC
+ | SEC_LOAD
+ | SEC_IN_MEMORY
+ | SEC_READONLY
+ | SEC_HAS_CONTENTS
+ | SEC_DATA));
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create GNU property section\n"));
-static bfd_boolean
-elf_x86_64_merge_gnu_properties (bfd *abfd ATTRIBUTE_UNUSED,
- elf_property *aprop,
- elf_property *bprop)
-{
- unsigned int number;
- bfd_boolean updated = FALSE;
- unsigned int pr_type = aprop != NULL ? aprop->pr_type : bprop->pr_type;
+ if (!bfd_set_section_alignment (ebfd, sec,
+ ABI_64_P (ebfd) ? 3 : 2))
+ {
+error_alignment:
+ info->callbacks->einfo (_("%F%A: failed to align section\n"),
+ sec);
+ }
- switch (pr_type)
- {
- case GNU_PROPERTY_X86_ISA_1_USED:
- case GNU_PROPERTY_X86_ISA_1_NEEDED:
- if (aprop != NULL && bprop != NULL)
- {
- number = aprop->u.number;
- aprop->u.number = number | bprop->u.number;
- updated = number != (unsigned int) aprop->u.number;
- }
- else
- {
- /* Return TRUE if APROP is NULL to indicate that BPROP should
- be added to ABFD. */
- updated = aprop == NULL;
+ elf_section_type (sec) = SHT_NOTE;
}
- break;
-
- default:
- /* Never should happen. */
- abort ();
}
- return updated;
-}
-
-/* Set up x86-64 GNU properties. Return the first relocatable ELF input
- with GNU properties if found. Otherwise, return NULL. */
-
-static bfd *
-elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
-{
- bfd_boolean normal_target;
- bfd_boolean lazy_plt;
- asection *sec, *pltsec;
- bfd *dynobj;
- unsigned int plt_alignment;
- struct elf_x86_64_link_hash_table *htab;
- bfd *pbfd = _bfd_elf_link_setup_gnu_properties (info);
+ pbfd = _bfd_elf_link_setup_gnu_properties (info);
if (bfd_link_relocatable (info))
return pbfd;
- htab = elf_x86_64_hash_table (info);
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
if (htab == NULL)
return pbfd;
+ use_ibt_plt = info->ibtplt || info->ibt;
+ if (!use_ibt_plt && pbfd != NULL)
+ {
+ /* Check if GNU_PROPERTY_X86_FEATURE_1_IBT is on. */
+ elf_property_list *p;
+
+ /* The property list is sorted in order of type. */
+ for (p = elf_properties (pbfd); p; p = p->next)
+ {
+ if (GNU_PROPERTY_X86_FEATURE_1_AND == p->property.pr_type)
+ {
+ use_ibt_plt = !!(p->property.u.number
+ & GNU_PROPERTY_X86_FEATURE_1_IBT);
+ break;
+ }
+ else if (GNU_PROPERTY_X86_FEATURE_1_AND < p->property.pr_type)
+ break;
+ }
+ }
+
dynobj = htab->elf.dynobj;
/* Set htab->elf.dynobj here so that there is no need to check and
set it in check_relocs. */
if (dynobj == NULL)
{
- bfd *abfd;
-
- /* Find a normal input file to hold linker created
- sections. */
- for (abfd = info->input_bfds;
- abfd != NULL;
- abfd = abfd->link.next)
- if ((abfd->flags
- & (DYNAMIC | BFD_LINKER_CREATED | BFD_PLUGIN)) == 0)
- {
- htab->elf.dynobj = abfd;
- dynobj = abfd;
- break;
- }
+ if (pbfd != NULL)
+ {
+ htab->elf.dynobj = pbfd;
+ dynobj = pbfd;
+ }
+ else
+ {
+ bfd *abfd;
+
+ /* Find a normal input file to hold linker created
+ sections. */
+ for (abfd = info->input_bfds;
+ abfd != NULL;
+ abfd = abfd->link.next)
+ if (bfd_get_flavour (abfd) == bfd_target_elf_flavour
+ && (abfd->flags
+ & (DYNAMIC | BFD_LINKER_CREATED | BFD_PLUGIN)) == 0)
+ {
+ htab->elf.dynobj = abfd;
+ dynobj = abfd;
+ break;
+ }
+ }
}
+ plt_layout = &elf_x86_64_plt (htab);
+
/* Even when lazy binding is disabled by "-z now", the PLT0 entry may
still be used with LD_AUDIT or LD_PROFILE if PLT entry is used for
canonical function address. */
- htab->plt.has_plt0 = 1;
+ plt_layout->has_plt0 = 1;
if (get_elf_x86_64_backend_data (info->output_bfd)->os
== is_normal)
{
- if (info->bndplt)
+ if (use_ibt_plt)
+ {
+ if (ABI_64_P (dynobj))
+ {
+ elf_x86_64_lazy_plt (htab)
+ = &elf_x86_64_lazy_ibt_plt;
+ elf_x86_64_non_lazy_plt (htab)
+ = &elf_x86_64_non_lazy_ibt_plt;
+ }
+ else
+ {
+ elf_x86_64_lazy_plt (htab)
+ = &elf_x32_lazy_ibt_plt;
+ elf_x86_64_non_lazy_plt (htab)
+ = &elf_x32_non_lazy_ibt_plt;
+ }
+ }
+ else if (info->bndplt)
{
- htab->lazy_plt = &elf_x86_64_lazy_bnd_plt;
- htab->non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
+ elf_x86_64_lazy_plt (htab) = &elf_x86_64_lazy_bnd_plt;
+ elf_x86_64_non_lazy_plt (htab) = &elf_x86_64_non_lazy_bnd_plt;
}
else
{
- htab->lazy_plt = &elf_x86_64_lazy_plt;
- htab->non_lazy_plt = &elf_x86_64_non_lazy_plt;
+ elf_x86_64_lazy_plt (htab) = &elf_x86_64_lazy_plt;
+ elf_x86_64_non_lazy_plt (htab) = &elf_x86_64_non_lazy_plt;
}
normal_target = TRUE;
}
else
{
- htab->lazy_plt = &elf_x86_64_nacl_plt;
- htab->non_lazy_plt = NULL;
+ elf_x86_64_lazy_plt (htab) = &elf_x86_64_nacl_plt;
+ elf_x86_64_non_lazy_plt (htab) = NULL;
normal_target = FALSE;
}
+ lazy_plt_layout = elf_x86_64_lazy_plt (htab);
+ non_lazy_plt_layout = elf_x86_64_non_lazy_plt (htab);
+
pltsec = htab->elf.splt;
/* If the non-lazy PLT is available, use it for all PLT entries if
there are no PLT0 or no .plt section. */
- if (htab->non_lazy_plt != NULL
- && (!htab->plt.has_plt0 || pltsec == NULL))
+ if (non_lazy_plt_layout != NULL
+ && (!plt_layout->has_plt0 || pltsec == NULL))
{
lazy_plt = FALSE;
- htab->plt.plt_entry
- = htab->non_lazy_plt->plt_entry;
- htab->plt.plt_entry_size
- = htab->non_lazy_plt->plt_entry_size;
- htab->plt.plt_got_offset
- = htab->non_lazy_plt->plt_got_offset;
- htab->plt.plt_got_insn_size
- = htab->non_lazy_plt->plt_got_insn_size;
- htab->plt.eh_frame_plt_size
- = htab->non_lazy_plt->eh_frame_plt_size;
- htab->plt.eh_frame_plt
- = htab->non_lazy_plt->eh_frame_plt;
+ plt_layout->plt_entry
+ = non_lazy_plt_layout->plt_entry;
+ plt_layout->plt_entry_size
+ = non_lazy_plt_layout->plt_entry_size;
+ plt_layout->plt_got_offset
+ = non_lazy_plt_layout->plt_got_offset;
+ plt_layout->plt_got_insn_size
+ = non_lazy_plt_layout->plt_got_insn_size;
+ plt_layout->eh_frame_plt_size
+ = non_lazy_plt_layout->eh_frame_plt_size;
+ plt_layout->eh_frame_plt
+ = non_lazy_plt_layout->eh_frame_plt;
}
else
{
lazy_plt = TRUE;
- htab->plt.plt_entry
- = htab->lazy_plt->plt_entry;
- htab->plt.plt_entry_size
- = htab->lazy_plt->plt_entry_size;
- htab->plt.plt_got_offset
- = htab->lazy_plt->plt_got_offset;
- htab->plt.plt_got_insn_size
- = htab->lazy_plt->plt_got_insn_size;
- htab->plt.eh_frame_plt_size
- = htab->lazy_plt->eh_frame_plt_size;
- htab->plt.eh_frame_plt
- = htab->lazy_plt->eh_frame_plt;
+ plt_layout->plt_entry
+ = lazy_plt_layout->plt_entry;
+ plt_layout->plt_entry_size
+ = lazy_plt_layout->plt_entry_size;
+ plt_layout->plt_got_offset
+ = lazy_plt_layout->plt_got_offset;
+ plt_layout->plt_got_insn_size
+ = lazy_plt_layout->plt_got_insn_size;
+ plt_layout->eh_frame_plt_size
+ = lazy_plt_layout->eh_frame_plt_size;
+ plt_layout->eh_frame_plt
+ = lazy_plt_layout->eh_frame_plt;
}
/* Return if there are no normal input files. */
properly aligned even if create_dynamic_sections isn't called. */
sec = htab->elf.sgot;
if (!bfd_set_section_alignment (dynobj, sec, 3))
- {
-error_alignment:
- info->callbacks->einfo (_("%F%A: failed to align section\n"),
- sec);
- }
+ goto error_alignment;
sec = htab->elf.sgotplt;
if (!bfd_set_section_alignment (dynobj, sec, 3))
if (!_bfd_elf_create_ifunc_sections (dynobj, info))
info->callbacks->einfo (_("%F: failed to create ifunc sections\n"));
- plt_alignment = bfd_log2 (htab->plt.plt_entry_size);
+ plt_alignment = bfd_log2 (plt_layout->plt_entry_size);
if (pltsec != NULL)
{
| SEC_LOAD
| SEC_READONLY);
unsigned int non_lazy_plt_alignment
- = bfd_log2 (htab->non_lazy_plt->plt_entry_size);
+ = bfd_log2 (non_lazy_plt_layout->plt_entry_size);
sec = pltsec;
if (!bfd_set_section_alignment (sec->owner, sec,
htab->plt_got = sec;
- /* MPX PLT is supported only for non-NaCl target in 64-bit
- mode and is needed only for lazy binding. */
- if (lazy_plt
- && info->bndplt && ABI_64_P (dynobj))
+ if (lazy_plt)
{
- /* Create the second PLT for Intel MPX support. */
- sec = bfd_make_section_anyway_with_flags (dynobj,
- ".plt.sec",
- pltflags);
- if (sec == NULL)
- info->callbacks->einfo (_("%F: failed to create BND PLT section\n"));
+ sec = NULL;
- if (!bfd_set_section_alignment (dynobj, sec,
- non_lazy_plt_alignment))
- goto error_alignment;
+ if (use_ibt_plt)
+ {
+ /* Create the second PLT for Intel IBT support. IBT
+ PLT is supported only for non-NaCl target and
+ is needed only for lazy binding. */
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".plt.sec",
+ pltflags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create IBT-enabled PLT section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec,
+ plt_alignment))
+ goto error_alignment;
+ }
+ else if (info->bndplt && ABI_64_P (dynobj))
+ {
+ /* Create the second PLT for Intel MPX support. MPX
+ PLT is supported only for non-NaCl target in 64-bit
+ mode and is needed only for lazy binding. */
+ sec = bfd_make_section_anyway_with_flags (dynobj,
+ ".plt.sec",
+ pltflags);
+ if (sec == NULL)
+ info->callbacks->einfo (_("%F: failed to create BND PLT section\n"));
+
+ if (!bfd_set_section_alignment (dynobj, sec,
+ non_lazy_plt_alignment))
+ goto error_alignment;
+ }
htab->plt_second = sec;
}
#define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
#define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
#define elf_backend_check_relocs elf_x86_64_check_relocs
-#define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
#define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
#define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
#define elf_backend_relocate_section elf_x86_64_relocate_section
#define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
-#define elf_backend_always_size_sections elf_x86_64_always_size_sections
#define elf_backend_init_index_section _bfd_elf_init_1_index_section
#define elf_backend_object_p elf64_x86_64_elf_object_p
#define bfd_elf64_mkobject elf_x86_64_mkobject
elf_x86_64_special_sections
#define elf_backend_additional_program_headers \
elf_x86_64_additional_program_headers
-#define elf_backend_hash_symbol \
- elf_x86_64_hash_symbol
-#define elf_backend_omit_section_dynsym \
- ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
-#define elf_backend_fixup_symbol \
- elf_x86_64_fixup_symbol
-#define elf_backend_parse_gnu_properties \
- elf_x86_64_parse_gnu_properties
-#define elf_backend_merge_gnu_properties \
- elf_x86_64_merge_gnu_properties
#define elf_backend_setup_gnu_properties \
elf_x86_64_link_setup_gnu_properties