X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=bfd%2Felf32-arm.c;h=e1ee67315f84f59ea11d62015e8ffa0e0530ebcf;hb=6bde4c52fb2d49572d365612f222a42b4d316f09;hp=8f888391cfa5bc973e9d671a03f7355dc55d9dc2;hpb=39f21624991525f1f937a796cf008654a8596240;p=binutils-gdb.git diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c index 8f888391cfa..e1ee67315f8 100644 --- a/bfd/elf32-arm.c +++ b/bfd/elf32-arm.c @@ -1,5 +1,5 @@ /* 32-bit ELF support for ARM - Copyright 1998-2013 Free Software Foundation, Inc. + Copyright (C) 1998-2016 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. @@ -79,7 +79,7 @@ static reloc_howto_type elf32_arm_howto_table_1[] = /* No relocation. */ HOWTO (R_ARM_NONE, /* type */ 0, /* rightshift */ - 0, /* size (0 = byte, 1 = short, 2 = long) */ + 3, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ @@ -1606,7 +1606,7 @@ static reloc_howto_type elf32_arm_howto_table_1[] = FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield,/* complain_on_overflow */ - bfd_elf_generic_reloc, /* special_function */ + NULL, /* special_function */ "R_ARM_TLS_LE32", /* name */ TRUE, /* partial_inplace */ 0xffffffff, /* src_mask */ @@ -1689,6 +1689,60 @@ static reloc_howto_type elf32_arm_howto_table_1[] = 0x00000000, /* src_mask */ 0x00000000, /* dst_mask */ FALSE), /* pcrel_offset */ + EMPTY_HOWTO (130), + EMPTY_HOWTO (131), + HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G0_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. */ + HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G1_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. */ + HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G2_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. */ + HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G3_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. 
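+
+	 (An illustrative sketch of how the generic HOWTO fields are
+	 consumed, ignoring ARM-specific special cases: a relocation is
+	 applied roughly as
+
+	   x = (S + A) >> rightshift;
+	   field = (old & ~dst_mask)
+		   | (((old & src_mask) + (x << bitpos)) & dst_mask);
+
+	 with the shifted value checked against bitsize according to
+	 complain_on_overflow.  The R_ARM_THM_ALU_ABS_G*_NC entries above
+	 carry zero masks, so the actual fix-up is presumably done by
+	 backend code rather than by this generic path.)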
*/ }; /* 160 onwards: */ @@ -1889,7 +1943,11 @@ static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] = {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0}, {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1}, {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2}, - {BFD_RELOC_ARM_V4BX, R_ARM_V4BX} + {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC} }; static reloc_howto_type * @@ -2040,9 +2098,9 @@ elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz, } } -#define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec +#define TARGET_LITTLE_SYM arm_elf32_le_vec #define TARGET_LITTLE_NAME "elf32-littlearm" -#define TARGET_BIG_SYM bfd_elf32_bigarm_vec +#define TARGET_BIG_SYM arm_elf32_be_vec #define TARGET_BIG_NAME "elf32-bigarm" #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus @@ -2072,6 +2130,9 @@ typedef unsigned short int insn16; #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer" #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x" +#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer" +#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x" + #define ARM_BX_GLUE_SECTION_NAME ".v4_bx" #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d" @@ -2125,7 +2186,7 @@ static const bfd_vma elf32_arm_plt_entry [] = 0x00000000, /* unused */ }; -#else +#else /* not FOUR_WORD_PLT */ /* The first entry in a procedure linkage table looks like this. It is set up so that any shared library function that is @@ -2140,16 +2201,55 @@ static const bfd_vma elf32_arm_plt0_entry [] = 0x00000000, /* &GOT[0] - . */ }; -/* Subsequent entries in a procedure linkage table look like - this. */ -static const bfd_vma elf32_arm_plt_entry [] = +/* By default subsequent entries in a procedure linkage table look like + this. Offsets that don't fit into 28 bits will cause link error. */ +static const bfd_vma elf32_arm_plt_entry_short [] = { 0xe28fc600, /* add ip, pc, #0xNN00000 */ 0xe28cca00, /* add ip, ip, #0xNN000 */ 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */ }; -#endif +/* When explicitly asked, we'll use this "long" entry format + which can cope with arbitrary displacements. */ +static const bfd_vma elf32_arm_plt_entry_long [] = +{ + 0xe28fc200, /* add ip, pc, #0xN0000000 */ + 0xe28cc600, /* add ip, ip, #0xNN00000 */ + 0xe28cca00, /* add ip, ip, #0xNN000 */ + 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */ +}; + +static bfd_boolean elf32_arm_use_long_plt_entry = FALSE; + +#endif /* not FOUR_WORD_PLT */ + +/* The first entry in a procedure linkage table looks like this. + It is set up so that any shared library function that is called before the + relocation has been set up calls the dynamic linker first. */ +static const bfd_vma elf32_thumb2_plt0_entry [] = +{ + /* NOTE: As this is a mixture of 16-bit and 32-bit instructions, + an instruction maybe encoded to one or two array elements. */ + 0xf8dfb500, /* push {lr} */ + 0x44fee008, /* ldr.w lr, [pc, #8] */ + /* add lr, pc */ + 0xff08f85e, /* ldr.w pc, [lr, #8]! */ + 0x00000000, /* &GOT[0] - . */ +}; + +/* Subsequent entries in a procedure linkage table for thumb only target + look like this. */ +static const bfd_vma elf32_thumb2_plt_entry [] = +{ + /* NOTE: As this is a mixture of 16-bit and 32-bit instructions, + an instruction maybe encoded to one or two array elements. 
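+
+     As a worked reading of the encoding (illustrative only): the first
+     element below, 0x0c00f240, holds the halfwords 0xf240 (low 16 bits)
+     and 0x0c00 (high 16 bits).  Emitted low halfword first they form
+     the 32-bit Thumb2 instruction `movw ip, #0', into which the real
+     GOT displacement bits are merged when the PLT entry is populated.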
*/ + 0x0c00f240, /* movw ip, #0xNNNN */ + 0x0c00f2c0, /* movt ip, #0xNNNN */ + 0xf8dc44fc, /* add ip, pc */ + 0xbf00f000 /* ldr.w pc, [ip] */ + /* nop */ +}; /* The format of the first entry in the procedure linkage table for a VxWorks executable. */ @@ -2244,6 +2344,8 @@ static const bfd_vma elf32_arm_nacl_plt_entry [] = #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4) #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4) +#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4) +#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4) enum stub_insn_type { @@ -2530,11 +2632,12 @@ enum elf32_arm_stub_type { arm_stub_none, DEF_STUBS - /* Note the first a8_veneer type */ - arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond }; #undef DEF_STUB +/* Note the first a8_veneer type. */ +const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond; + typedef struct { const insn_sequence* template_sequence; @@ -2564,8 +2667,12 @@ struct elf32_arm_stub_hash_entry bfd_vma target_value; asection *target_section; - /* Offset to apply to relocation referencing target_value. */ - bfd_vma target_addend; + /* Same as above but for the source of the branch to the stub. Used for + Cortex-A8 erratum workaround to patch it to branch to the stub. As + such, source section does not need to be recorded since Cortex-A8 erratum + workaround stubs are only generated when both source and target are in the + same section. */ + bfd_vma source_value; /* The instruction which caused this stub to be generated (only valid for Cortex-A8 erratum workaround stubs at present). */ @@ -2638,6 +2745,36 @@ typedef struct elf32_vfp11_erratum_list } elf32_vfp11_erratum_list; +/* Information about a STM32L4XX erratum veneer, or a branch to such a + veneer. */ +typedef enum +{ + STM32L4XX_ERRATUM_BRANCH_TO_VENEER, + STM32L4XX_ERRATUM_VENEER +} +elf32_stm32l4xx_erratum_type; + +typedef struct elf32_stm32l4xx_erratum_list +{ + struct elf32_stm32l4xx_erratum_list *next; + bfd_vma vma; + union + { + struct + { + struct elf32_stm32l4xx_erratum_list *veneer; + unsigned int insn; + } b; + struct + { + struct elf32_stm32l4xx_erratum_list *branch; + unsigned int id; + } v; + } u; + elf32_stm32l4xx_erratum_type type; +} +elf32_stm32l4xx_erratum_list; + typedef enum { DELETE_EXIDX_ENTRY, @@ -2668,6 +2805,9 @@ typedef struct _arm_elf_section_data /* Information about CPU errata. */ unsigned int erratumcount; elf32_vfp11_erratum_list *erratumlist; + unsigned int stm32l4xx_erratumcount; + elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist; + unsigned int additional_reloc_count; /* Information about unwind tables. */ union { @@ -2701,7 +2841,7 @@ struct a8_erratum_fix bfd *input_bfd; asection *section; bfd_vma offset; - bfd_vma addend; + bfd_vma target_offset; unsigned long orig_insn; char *stub_name; enum elf32_arm_stub_type stub_type; @@ -2901,6 +3041,10 @@ struct elf32_arm_link_hash_table veneers. */ bfd_size_type vfp11_erratum_glue_size; + /* The size in bytes of the section containing glue for STM32L4XX erratum + veneers. */ + bfd_size_type stm32l4xx_erratum_glue_size; + /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and elf32_arm_write_section(). */ @@ -2941,6 +3085,13 @@ struct elf32_arm_link_hash_table /* Global counter for the number of fixes we have emitted. */ int num_vfp11_fixes; + /* What sort of code sequences we should look for which may trigger the + STM32L4XX erratum. 
*/ + bfd_arm_stm32l4xx_fix stm32l4xx_fix; + + /* Global counter for the number of fixes we have emitted. */ + int num_stm32l4xx_fixes; + /* Nonzero to force PIC branch veneers. */ int pic_veneer; @@ -3012,7 +3163,8 @@ struct elf32_arm_link_hash_table bfd *stub_bfd; /* Linker call-backs. */ - asection * (*add_stub_section) (const char *, asection *, unsigned int); + asection * (*add_stub_section) (const char *, asection *, asection *, + unsigned int); void (*layout_sections_again) (void); /* Array to keep track of which stub sections have been created, and @@ -3020,14 +3172,50 @@ struct elf32_arm_link_hash_table struct map_stub *stub_group; /* Number of elements in stub_group. */ - int top_id; + unsigned int top_id; /* Assorted information used by elf32_arm_size_stubs. */ unsigned int bfd_count; - int top_index; + unsigned int top_index; asection **input_list; }; +static inline int +ctz (unsigned int mask) +{ +#if GCC_VERSION >= 3004 + return __builtin_ctz (mask); +#else + unsigned int i; + + for (i = 0; i < 8 * sizeof (mask); i++) + { + if (mask & 0x1) + break; + mask = (mask >> 1); + } + return i; +#endif +} + +static inline int +popcount (unsigned int mask) +{ +#if GCC_VERSION >= 3004 + return __builtin_popcount (mask); +#else + unsigned int i, sum = 0; + + for (i = 0; i < 8 * sizeof (mask); i++) + { + if (mask & 0x1) + sum++; + mask = (mask >> 1); + } + return sum; +#endif +} + /* Create an entry in an ARM ELF linker hash table. */ static struct bfd_hash_entry * @@ -3231,9 +3419,9 @@ stub_hash_newfunc (struct bfd_hash_entry *entry, eh = (struct elf32_arm_stub_hash_entry *) entry; eh->stub_sec = NULL; eh->stub_offset = 0; + eh->source_value = 0; eh->target_value = 0; eh->target_section = NULL; - eh->target_addend = 0; eh->orig_insn = 0; eh->stub_type = arm_stub_none; eh->stub_size = 0; @@ -3317,6 +3505,40 @@ create_ifunc_sections (struct bfd_link_info *info) return TRUE; } +/* Determine if we're dealing with a Thumb only architecture. */ + +static bfd_boolean +using_thumb_only (struct elf32_arm_link_hash_table *globals) +{ + int arch; + int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, + Tag_CPU_arch_profile); + + if (profile) + return profile == 'M'; + + arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch); + + if (arch == TAG_CPU_ARCH_V6_M + || arch == TAG_CPU_ARCH_V6S_M + || arch == TAG_CPU_ARCH_V7E_M + || arch == TAG_CPU_ARCH_V8M_BASE + || arch == TAG_CPU_ARCH_V8M_MAIN) + return TRUE; + + return FALSE; +} + +/* Determine if we're dealing with a Thumb-2 object. */ + +static bfd_boolean +using_thumb2 (struct elf32_arm_link_hash_table *globals) +{ + int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, + Tag_CPU_arch); + return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7; +} + /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our hash table. 
*/ @@ -3337,7 +3559,7 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) return FALSE; htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); - if (!info->shared) + if (!bfd_link_pic (info)) htab->srelbss = bfd_get_linker_section (dynobj, RELOC_SECTION (htab, ".bss")); @@ -3346,7 +3568,7 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2)) return FALSE; - if (info->shared) + if (bfd_link_pic (info)) { htab->plt_header_size = 0; htab->plt_entry_size @@ -3359,12 +3581,31 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry); } + + if (elf_elfheader (dynobj)) + elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32; + } + else + { + /* PR ld/16017 + Test for thumb only architectures. Note - we cannot just call + using_thumb_only() as the attributes in the output bfd have not been + initialised at this point, so instead we use the input bfd. */ + bfd * saved_obfd = htab->obfd; + + htab->obfd = dynobj; + if (using_thumb_only (htab)) + { + htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry); + htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry); + } + htab->obfd = saved_obfd; } if (!htab->root.splt || !htab->root.srelplt || !htab->sdynbss - || (!info->shared && !htab->srelbss)) + || (!bfd_link_pic (info) && !htab->srelbss)) abort (); return TRUE; @@ -3437,6 +3678,18 @@ elf32_arm_copy_indirect_symbol (struct bfd_link_info *info, _bfd_elf_link_hash_copy_indirect (info, dir, ind); } +/* Destroy an ARM elf linker hash table. */ + +static void +elf32_arm_link_hash_table_free (bfd *obfd) +{ + struct elf32_arm_link_hash_table *ret + = (struct elf32_arm_link_hash_table *) obfd->link.hash; + + bfd_hash_table_free (&ret->stub_hash_table); + _bfd_elf_link_hash_table_free (obfd); +} + /* Create an ARM elf linker hash table. */ static struct bfd_link_hash_table * @@ -3459,12 +3712,13 @@ elf32_arm_link_hash_table_create (bfd *abfd) } ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; + ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE; #ifdef FOUR_WORD_PLT ret->plt_header_size = 16; ret->plt_entry_size = 16; #else ret->plt_header_size = 20; - ret->plt_entry_size = 12; + ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12; #endif ret->use_rel = 1; ret->obfd = abfd; @@ -3472,56 +3726,14 @@ elf32_arm_link_hash_table_create (bfd *abfd) if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc, sizeof (struct elf32_arm_stub_hash_entry))) { - free (ret); + _bfd_elf_link_hash_table_free (abfd); return NULL; } + ret->root.root.hash_table_free = elf32_arm_link_hash_table_free; return &ret->root.root; } -/* Free the derived linker hash table. */ - -static void -elf32_arm_hash_table_free (struct bfd_link_hash_table *hash) -{ - struct elf32_arm_link_hash_table *ret - = (struct elf32_arm_link_hash_table *) hash; - - bfd_hash_table_free (&ret->stub_hash_table); - _bfd_elf_link_hash_table_free (hash); -} - -/* Determine if we're dealing with a Thumb only architecture. 
*/ - -static bfd_boolean -using_thumb_only (struct elf32_arm_link_hash_table *globals) -{ - int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, - Tag_CPU_arch); - int profile; - - if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M) - return TRUE; - - if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M) - return FALSE; - - profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, - Tag_CPU_arch_profile); - - return profile == 'M'; -} - -/* Determine if we're dealing with a Thumb-2 object. */ - -static bfd_boolean -using_thumb2 (struct elf32_arm_link_hash_table *globals) -{ - int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, - Tag_CPU_arch); - return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7; -} - /* Determine what kind of NOPs are available. */ static bfd_boolean @@ -3611,7 +3823,8 @@ arm_type_of_stub (struct bfd_link_info *info, /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we are considering a function call relocation. */ - if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24) + if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24 + || r_type == R_ARM_THM_JUMP19) && branch_type == ST_BRANCH_TO_ARM) branch_type = ST_BRANCH_TO_THUMB; @@ -3655,7 +3868,7 @@ arm_type_of_stub (struct bfd_link_info *info, branch_offset = (bfd_signed_vma)(destination - location); if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24 - || r_type == R_ARM_THM_TLS_CALL) + || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19) { /* Handle cases where: - this call goes too far (different Thumb/Thumb2 max @@ -3671,10 +3884,15 @@ arm_type_of_stub (struct bfd_link_info *info, || (thumb2 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET))) + || (thumb2 + && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET + || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET)) + && (r_type == R_ARM_THM_JUMP19)) || (branch_type == ST_BRANCH_TO_ARM && (((r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx) - || (r_type == R_ARM_THM_JUMP24)) + || (r_type == R_ARM_THM_JUMP24) + || (r_type == R_ARM_THM_JUMP19)) && !use_plt)) { if (branch_type == ST_BRANCH_TO_THUMB) @@ -3682,7 +3900,7 @@ arm_type_of_stub (struct bfd_link_info *info, /* Thumb to thumb. */ if (!thumb_only) { - stub_type = (info->shared | globals->pic_veneer) + stub_type = (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? ((globals->use_blx && (r_type == R_ARM_THM_CALL)) @@ -3704,7 +3922,7 @@ arm_type_of_stub (struct bfd_link_info *info, } else { - stub_type = (info->shared | globals->pic_veneer) + stub_type = (bfd_link_pic (info) | globals->pic_veneer) /* PIC stub. */ ? arm_stub_long_branch_thumb_only_pic /* non-PIC stub. */ @@ -3725,10 +3943,10 @@ arm_type_of_stub (struct bfd_link_info *info, } stub_type = - (info->shared | globals->pic_veneer) + (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? (r_type == R_ARM_THM_TLS_CALL - /* TLS PIC stubs */ + /* TLS PIC stubs. */ ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic : arm_stub_long_branch_v4t_thumb_tls_pic) : ((globals->use_blx && r_type == R_ARM_THM_CALL) @@ -3779,7 +3997,7 @@ arm_type_of_stub (struct bfd_link_info *info, || (r_type == R_ARM_JUMP24) || (r_type == R_ARM_PLT32)) { - stub_type = (info->shared | globals->pic_veneer) + stub_type = (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? ((globals->use_blx) /* V5T and above. 
*/ @@ -3802,10 +4020,10 @@ arm_type_of_stub (struct bfd_link_info *info, || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)) { stub_type = - (info->shared | globals->pic_veneer) + (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? (r_type == R_ARM_TLS_CALL - /* TLS PIC Stub */ + /* TLS PIC Stub. */ ? arm_stub_long_branch_any_tls_pic : (globals->nacl_p ? arm_stub_long_branch_arm_nacl_pic @@ -3927,6 +4145,7 @@ elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section, { asection *link_sec; asection *stub_sec; + asection *out_sec; link_sec = htab->stub_group[section->id].link_sec; BFD_ASSERT (link_sec != NULL); @@ -3949,7 +4168,8 @@ elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section, memcpy (s_name, link_sec->name, namelen); memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX)); - stub_sec = (*htab->add_stub_section) (s_name, link_sec, + out_sec = link_sec->output_section; + stub_sec = (*htab->add_stub_section) (s_name, out_sec, link_sec, htab->nacl_p ? 4 : 3); if (stub_sec == NULL) return NULL; @@ -3985,6 +4205,8 @@ elf32_arm_add_stub (const char *stub_name, TRUE, FALSE); if (stub_entry == NULL) { + if (section == NULL) + section = stub_sec; (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), section->owner, stub_name); @@ -4024,6 +4246,26 @@ put_thumb_insn (struct elf32_arm_link_hash_table * htab, bfd_putb16 (val, ptr); } +/* Store a Thumb2 insn into an output section not processed by + elf32_arm_write_section. */ + +static void +put_thumb2_insn (struct elf32_arm_link_hash_table * htab, + bfd * output_bfd, bfd_vma val, bfd_byte * ptr) +{ + /* T2 instructions are 16-bit streamed. */ + if (htab->byteswap_code != bfd_little_endian (output_bfd)) + { + bfd_putl16 ((val >> 16) & 0xffff, ptr); + bfd_putl16 ((val & 0xffff), ptr + 2); + } + else + { + bfd_putb16 ((val >> 16) & 0xffff, ptr); + bfd_putb16 ((val & 0xffff), ptr + 2); + } +} + /* If it's possible to change R_TYPE to a more efficient access model, return the new reloc type. */ @@ -4033,7 +4275,8 @@ elf32_arm_tls_transition (struct bfd_link_info *info, int r_type, { int is_local = (h == NULL); - if (info->shared || (h && h->root.type == bfd_link_hash_undefweak)) + if (bfd_link_pic (info) + || (h && h->root.type == bfd_link_hash_undefweak)) return r_type; /* We do not support relaxations for Old TLS models. */ @@ -4217,65 +4460,36 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS); for (i = 0; i < nrelocs; i++) - if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24 - || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19 - || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL - || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22) - { - Elf_Internal_Rela rel; - bfd_boolean unresolved_reloc; - char *error_message; - enum arm_st_branch_type branch_type - = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22 - ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM); - bfd_vma points_to = sym_value + stub_entry->target_addend; - - rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; - rel.r_info = ELF32_R_INFO (0, - template_sequence[stub_reloc_idx[i]].r_type); - rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend; - - if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0) - /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[] - template should refer back to the instruction after the original - branch. 
*/ - points_to = sym_value; - - /* There may be unintended consequences if this is not true. */ - BFD_ASSERT (stub_entry->h == NULL); - - /* Note: _bfd_final_link_relocate doesn't handle these relocations - properly. We should probably use this function unconditionally, - rather than only for certain relocations listed in the enclosing - conditional, for the sake of consistency. */ - elf32_arm_final_link_relocate (elf32_arm_howto_from_type - (template_sequence[stub_reloc_idx[i]].r_type), - stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, - points_to, info, stub_entry->target_section, "", STT_FUNC, - branch_type, (struct elf_link_hash_entry *) stub_entry->h, - &unresolved_reloc, &error_message); - } - else - { - Elf_Internal_Rela rel; - bfd_boolean unresolved_reloc; - char *error_message; - bfd_vma points_to = sym_value + stub_entry->target_addend - + template_sequence[stub_reloc_idx[i]].reloc_addend; - - rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; - rel.r_info = ELF32_R_INFO (0, - template_sequence[stub_reloc_idx[i]].r_type); - rel.r_addend = 0; - - elf32_arm_final_link_relocate (elf32_arm_howto_from_type - (template_sequence[stub_reloc_idx[i]].r_type), - stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, - points_to, info, stub_entry->target_section, "", STT_FUNC, - stub_entry->branch_type, - (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc, - &error_message); - } + { + Elf_Internal_Rela rel; + bfd_boolean unresolved_reloc; + char *error_message; + bfd_vma points_to = + sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend; + + rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; + rel.r_info = ELF32_R_INFO (0, + template_sequence[stub_reloc_idx[i]].r_type); + rel.r_addend = 0; + + if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0) + /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[] + template should refer back to the instruction after the original + branch. We use target_section as Cortex-A8 erratum workaround stubs + are only generated when both source and target are in the same + section. */ + points_to = stub_entry->target_section->output_section->vma + + stub_entry->target_section->output_offset + + stub_entry->source_value; + + elf32_arm_final_link_relocate (elf32_arm_howto_from_type + (template_sequence[stub_reloc_idx[i]].r_type), + stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, + points_to, info, stub_entry->target_section, "", STT_FUNC, + stub_entry->branch_type, + (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc, + &error_message); + } return TRUE; #undef MAXRELOCS @@ -4367,7 +4581,7 @@ elf32_arm_setup_section_lists (bfd *output_bfd, { bfd *input_bfd; unsigned int bfd_count; - int top_id, top_index; + unsigned int top_id, top_index; asection *section; asection **input_list, **list; bfd_size_type amt; @@ -4381,7 +4595,7 @@ elf32_arm_setup_section_lists (bfd *output_bfd, /* Count the number of input BFDs and find the top input section id. 
*/ for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0; input_bfd != NULL; - input_bfd = input_bfd->link_next) + input_bfd = input_bfd->link.next) { bfd_count += 1; for (section = input_bfd->sections; @@ -4868,7 +5082,8 @@ cortex_a8_erratum_scan (bfd *input_bfd, a8_fixes[num_a8_fixes].input_bfd = input_bfd; a8_fixes[num_a8_fixes].section = section; a8_fixes[num_a8_fixes].offset = i; - a8_fixes[num_a8_fixes].addend = offset; + a8_fixes[num_a8_fixes].target_offset = + target - base_vma; a8_fixes[num_a8_fixes].orig_insn = insn; a8_fixes[num_a8_fixes].stub_name = stub_name; a8_fixes[num_a8_fixes].stub_type = stub_type; @@ -4897,6 +5112,103 @@ cortex_a8_erratum_scan (bfd *input_bfd, return FALSE; } +/* Create or update a stub entry depending on whether the stub can already be + found in HTAB. The stub is identified by: + - its type STUB_TYPE + - its source branch (note that several can share the same stub) whose + section and relocation (if any) are given by SECTION and IRELA + respectively + - its target symbol whose input section, hash, name, value and branch type + are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE + respectively + + If found, the value of the stub's target symbol is updated from SYM_VALUE + and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to + TRUE and the stub entry is initialized. + + Returns whether the stub could be successfully created or updated, or FALSE + if an error occured. */ + +static bfd_boolean +elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab, + enum elf32_arm_stub_type stub_type, asection *section, + Elf_Internal_Rela *irela, asection *sym_sec, + struct elf32_arm_link_hash_entry *hash, char *sym_name, + bfd_vma sym_value, enum arm_st_branch_type branch_type, + bfd_boolean *new_stub) +{ + const asection *id_sec; + char *stub_name; + struct elf32_arm_stub_hash_entry *stub_entry; + unsigned int r_type; + + BFD_ASSERT (stub_type != arm_stub_none); + *new_stub = FALSE; + + BFD_ASSERT (irela); + BFD_ASSERT (section); + + /* Support for grouping stub sections. */ + id_sec = htab->stub_group[section->id].link_sec; + + /* Get the name of this stub. */ + stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela, stub_type); + if (!stub_name) + return FALSE; + + stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, + FALSE); + /* The proper stub has already been created, just update its value. */ + if (stub_entry != NULL) + { + free (stub_name); + stub_entry->target_value = sym_value; + return TRUE; + } + + stub_entry = elf32_arm_add_stub (stub_name, section, htab); + if (stub_entry == NULL) + { + free (stub_name); + return FALSE; + } + + stub_entry->target_value = sym_value; + stub_entry->target_section = sym_sec; + stub_entry->stub_type = stub_type; + stub_entry->h = hash; + stub_entry->branch_type = branch_type; + + if (sym_name == NULL) + sym_name = "unnamed"; + stub_entry->output_name = (char *) + bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME) + + strlen (sym_name)); + if (stub_entry->output_name == NULL) + { + free (stub_name); + return FALSE; + } + + /* For historical reasons, use the existing names for ARM-to-Thumb and + Thumb-to-ARM stubs. 
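+
+     For example, with the entry name formats defined earlier in this
+     file, a Thumb->ARM branch to a function `foo' gets a stub symbol
+     named `__foo_from_thumb', an ARM->Thumb branch gets
+     `__foo_from_arm', and any other stub gets `__foo_veneer'.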
*/ + r_type = ELF32_R_TYPE (irela->r_info); + if ((r_type == (unsigned int) R_ARM_THM_CALL + || r_type == (unsigned int) R_ARM_THM_JUMP24 + || r_type == (unsigned int) R_ARM_THM_JUMP19) + && branch_type == ST_BRANCH_TO_ARM) + sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name); + else if ((r_type == (unsigned int) R_ARM_CALL + || r_type == (unsigned int) R_ARM_JUMP24) + && branch_type == ST_BRANCH_TO_THUMB) + sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name); + else + sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name); + + *new_stub = TRUE; + return TRUE; +} + /* Determine and set the size of the stub section for a final link. The basic idea here is to examine all the relocations looking for @@ -4909,6 +5221,7 @@ elf32_arm_size_stubs (bfd *output_bfd, struct bfd_link_info *info, bfd_signed_vma group_size, asection * (*add_stub_section) (const char *, asection *, + asection *, unsigned int), void (*layout_sections_again) (void)) { @@ -4990,7 +5303,7 @@ elf32_arm_size_stubs (bfd *output_bfd, num_a8_fixes = 0; for (input_bfd = info->input_bfds, bfd_indx = 0; input_bfd != NULL; - input_bfd = input_bfd->link_next, bfd_indx++) + input_bfd = input_bfd->link.next, bfd_indx++) { Elf_Internal_Shdr *symtab_hdr; asection *section; @@ -5040,14 +5353,11 @@ elf32_arm_size_stubs (bfd *output_bfd, { unsigned int r_type, r_indx; enum elf32_arm_stub_type stub_type; - struct elf32_arm_stub_hash_entry *stub_entry; asection *sym_sec; bfd_vma sym_value; bfd_vma destination; struct elf32_arm_link_hash_entry *hash; const char *sym_name; - char *stub_name; - const asection *id_sec; unsigned char st_type; enum arm_st_branch_type branch_type; bfd_boolean created_stub = FALSE; @@ -5061,7 +5371,13 @@ elf32_arm_size_stubs (bfd *output_bfd, error_ret_free_internal: if (elf_section_data (section)->relocs == NULL) free (internal_relocs); - goto error_ret_free_local; + /* Fall through. */ + error_ret_free_local: + if (local_syms != NULL + && (symtab_hdr->contents + != (unsigned char *) local_syms)) + free (local_syms); + return FALSE; } hash = NULL; @@ -5140,7 +5456,7 @@ elf32_arm_size_stubs (bfd *output_bfd, if (!sym_sec) /* This is an undefined symbol. It can never - be resolved. */ + be resolved. */ continue; if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) @@ -5230,6 +5546,8 @@ elf32_arm_size_stubs (bfd *output_bfd, do { + bfd_boolean new_stub; + /* Determine what (if any) linker stub is needed. */ stub_type = arm_type_of_stub (info, section, irela, st_type, &branch_type, @@ -5238,73 +5556,20 @@ elf32_arm_size_stubs (bfd *output_bfd, if (stub_type == arm_stub_none) break; - /* Support for grouping stub sections. */ - id_sec = htab->stub_group[section->id].link_sec; - - /* Get the name of this stub. */ - stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, - irela, stub_type); - if (!stub_name) - goto error_ret_free_internal; - /* We've either created a stub for this reloc already, or we are about to. */ - created_stub = TRUE; - - stub_entry = arm_stub_hash_lookup - (&htab->stub_hash_table, stub_name, - FALSE, FALSE); - if (stub_entry != NULL) - { - /* The proper stub has already been created. 
*/ - free (stub_name); - stub_entry->target_value = sym_value; - break; - } - - stub_entry = elf32_arm_add_stub (stub_name, section, - htab); - if (stub_entry == NULL) - { - free (stub_name); - goto error_ret_free_internal; - } - - stub_entry->target_value = sym_value; - stub_entry->target_section = sym_sec; - stub_entry->stub_type = stub_type; - stub_entry->h = hash; - stub_entry->branch_type = branch_type; - - if (sym_name == NULL) - sym_name = "unnamed"; - stub_entry->output_name = (char *) - bfd_alloc (htab->stub_bfd, - sizeof (THUMB2ARM_GLUE_ENTRY_NAME) - + strlen (sym_name)); - if (stub_entry->output_name == NULL) - { - free (stub_name); - goto error_ret_free_internal; - } + created_stub = + elf32_arm_create_stub (htab, stub_type, section, irela, + sym_sec, hash, + (char *) sym_name, sym_value, + branch_type, &new_stub); - /* For historical reasons, use the existing names for - ARM-to-Thumb and Thumb-to-ARM stubs. */ - if ((r_type == (unsigned int) R_ARM_THM_CALL - || r_type == (unsigned int) R_ARM_THM_JUMP24) - && branch_type == ST_BRANCH_TO_ARM) - sprintf (stub_entry->output_name, - THUMB2ARM_GLUE_ENTRY_NAME, sym_name); - else if ((r_type == (unsigned int) R_ARM_CALL - || r_type == (unsigned int) R_ARM_JUMP24) - && branch_type == ST_BRANCH_TO_THUMB) - sprintf (stub_entry->output_name, - ARM2THUMB_GLUE_ENTRY_NAME, sym_name); + if (!created_stub) + goto error_ret_free_internal; + else if (!new_stub) + break; else - sprintf (stub_entry->output_name, STUB_ENTRY_NAME, - sym_name); - - stub_changed = TRUE; + stub_changed = TRUE; } while (0); @@ -5440,9 +5705,9 @@ elf32_arm_size_stubs (bfd *output_bfd, stub_entry->stub_offset = 0; stub_entry->id_sec = link_sec; stub_entry->stub_type = a8_fixes[i].stub_type; + stub_entry->source_value = a8_fixes[i].offset; stub_entry->target_section = a8_fixes[i].section; - stub_entry->target_value = a8_fixes[i].offset; - stub_entry->target_addend = a8_fixes[i].addend; + stub_entry->target_value = a8_fixes[i].target_offset; stub_entry->orig_insn = a8_fixes[i].orig_insn; stub_entry->branch_type = a8_fixes[i].branch_type; @@ -5466,9 +5731,6 @@ elf32_arm_size_stubs (bfd *output_bfd, htab->num_a8_erratum_fixes = 0; } return TRUE; - - error_ret_free_local: - return FALSE; } /* Build all the stubs associated with the current output file. 
The @@ -5652,6 +5914,8 @@ static const insn16 t2a2_noop_insn = 0x46c0; static const insn32 t2a3_b_insn = 0xea000000; #define VFP11_ERRATUM_VENEER_SIZE 8 +#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16 +#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24 #define ARM_BX_VENEER_SIZE 12 static const insn32 armbx1_tst_insn = 0xe3100001; @@ -5708,6 +5972,10 @@ bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info) globals->vfp11_erratum_glue_size, VFP11_ERRATUM_VENEER_SECTION_NAME); + arm_allocate_glue_section_space (globals->bfd_of_glue_owner, + globals->stm32l4xx_erratum_glue_size, + STM32L4XX_ERRATUM_VENEER_SECTION_NAME); + arm_allocate_glue_section_space (globals->bfd_of_glue_owner, globals->bx_glue_size, ARM_BX_GLUE_SECTION_NAME); @@ -5773,7 +6041,8 @@ record_arm_to_thumb_glue (struct bfd_link_info * link_info, free (tmp_name); - if (link_info->shared || globals->root.is_relocatable_executable + if (bfd_link_pic (link_info) + || globals->root.is_relocatable_executable || globals->pic_veneer) size = ARM2THUMB_PIC_GLUE_SIZE; else if (globals->use_blx) @@ -5998,51 +6267,190 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info, return val; } -#define ARM_GLUE_SECTION_FLAGS \ - (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \ - | SEC_READONLY | SEC_LINKER_CREATED) - -/* Create a fake section for use by the ARM backend of the linker. */ +/* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode + veneers need to be handled because used only in Cortex-M. */ -static bfd_boolean -arm_make_glue_section (bfd * abfd, const char * name) +static bfd_vma +record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info, + elf32_stm32l4xx_erratum_list *branch, + bfd *branch_bfd, + asection *branch_sec, + unsigned int offset, + bfd_size_type veneer_size) { - asection * sec; + asection *s; + struct elf32_arm_link_hash_table *hash_table; + char *tmp_name; + struct elf_link_hash_entry *myh; + struct bfd_link_hash_entry *bh; + bfd_vma val; + struct _arm_elf_section_data *sec_data; + elf32_stm32l4xx_erratum_list *newerr; - sec = bfd_get_linker_section (abfd, name); - if (sec != NULL) - /* Already made. */ - return TRUE; + hash_table = elf32_arm_hash_table (link_info); + BFD_ASSERT (hash_table != NULL); + BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL); - sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS); + s = bfd_get_linker_section + (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME); - if (sec == NULL - || !bfd_set_section_alignment (abfd, sec, 2)) - return FALSE; + BFD_ASSERT (s != NULL); - /* Set the gc mark to prevent the section from being removed by garbage - collection, despite the fact that no relocs refer to this section. */ - sec->gc_mark = 1; + sec_data = elf32_arm_section_data (s); - return TRUE; -} + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen + (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10); -/* Add the glue sections to ABFD. This function is called from the - linker scripts in ld/emultempl/{armelf}.em. 
*/ + BFD_ASSERT (tmp_name); -bfd_boolean -bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd, - struct bfd_link_info *info) + sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME, + hash_table->num_stm32l4xx_fixes); + + myh = elf_link_hash_lookup + (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); + + BFD_ASSERT (myh == NULL); + + bh = NULL; + val = hash_table->stm32l4xx_erratum_glue_size; + _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, + tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val, + NULL, TRUE, FALSE, &bh); + + myh = (struct elf_link_hash_entry *) bh; + myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); + myh->forced_local = 1; + + /* Link veneer back to calling location. */ + sec_data->stm32l4xx_erratumcount += 1; + newerr = (elf32_stm32l4xx_erratum_list *) + bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list)); + + newerr->type = STM32L4XX_ERRATUM_VENEER; + newerr->vma = -1; + newerr->u.v.branch = branch; + newerr->u.v.id = hash_table->num_stm32l4xx_fixes; + branch->u.b.veneer = newerr; + + newerr->next = sec_data->stm32l4xx_erratumlist; + sec_data->stm32l4xx_erratumlist = newerr; + + /* A symbol for the return from the veneer. */ + sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r", + hash_table->num_stm32l4xx_fixes); + + myh = elf_link_hash_lookup + (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); + + if (myh != NULL) + abort (); + + bh = NULL; + val = offset + 4; + _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL, + branch_sec, val, NULL, TRUE, FALSE, &bh); + + myh = (struct elf_link_hash_entry *) bh; + myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); + myh->forced_local = 1; + + free (tmp_name); + + /* Generate a mapping symbol for the veneer section, and explicitly add an + entry for that symbol to the code/data map for the section. */ + if (hash_table->stm32l4xx_erratum_glue_size == 0) + { + bh = NULL; + /* Creates a THUMB symbol since there is no other choice. */ + _bfd_generic_link_add_one_symbol (link_info, + hash_table->bfd_of_glue_owner, "$t", + BSF_LOCAL, s, 0, NULL, + TRUE, FALSE, &bh); + + myh = (struct elf_link_hash_entry *) bh; + myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); + myh->forced_local = 1; + + /* The elf32_arm_init_maps function only cares about symbols from input + BFDs. We must make a note of this generated mapping symbol + ourselves so that code byteswapping works properly in + elf32_arm_write_section. */ + elf32_arm_section_map_add (s, 't', 0); + } + + s->size += veneer_size; + hash_table->stm32l4xx_erratum_glue_size += veneer_size; + hash_table->num_stm32l4xx_fixes++; + + /* The offset of the veneer. */ + return val; +} + +#define ARM_GLUE_SECTION_FLAGS \ + (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \ + | SEC_READONLY | SEC_LINKER_CREATED) + +/* Create a fake section for use by the ARM backend of the linker. */ + +static bfd_boolean +arm_make_glue_section (bfd * abfd, const char * name) { + asection * sec; + + sec = bfd_get_linker_section (abfd, name); + if (sec != NULL) + /* Already made. */ + return TRUE; + + sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS); + + if (sec == NULL + || !bfd_set_section_alignment (abfd, sec, 2)) + return FALSE; + + /* Set the gc mark to prevent the section from being removed by garbage + collection, despite the fact that no relocs refer to this section. */ + sec->gc_mark = 1; + + return TRUE; +} + +/* Set size of .plt entries. 
This function is called from the + linker scripts in ld/emultempl/{armelf}.em. */ + +void +bfd_elf32_arm_use_long_plt (void) +{ + elf32_arm_use_long_plt_entry = TRUE; +} + +/* Add the glue sections to ABFD. This function is called from the + linker scripts in ld/emultempl/{armelf}.em. */ + +bfd_boolean +bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd, + struct bfd_link_info *info) +{ + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info); + bfd_boolean dostm32l4xx = globals + && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE; + bfd_boolean addglue; + /* If we are only performing a partial link do not bother adding the glue. */ - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; - return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) + addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME) && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME); + + if (!dostm32l4xx) + return addglue; + + return addglue + && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME); } /* Select a BFD to be used to hold the sections used by the glue code. @@ -6056,7 +6464,7 @@ bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info) /* If we are only performing a partial link do not bother getting a bfd to hold the glue. */ - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; /* Make sure we don't attach the glue sections to a dynamic object. */ @@ -6108,7 +6516,7 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd, /* If we are only performing a partial link do not bother to construct any glue. */ - if (link_info->relocatable) + if (bfd_link_relocatable (link_info)) return TRUE; /* Here we have a bfd that is to be included on the link. We have a @@ -6361,6 +6769,26 @@ bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info) globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; } +void +bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info) +{ + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); + obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); + + if (globals == NULL) + return; + + /* We assume only Cortex-M4 may require the fix. */ + if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M + || out_attr[Tag_CPU_arch_profile].i != 'M') + { + if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE) + /* Give a warning, but do as the user requests anyway. */ + (*_bfd_error_handler) + (_("%B: warning: selected STM32L4XX erratum " + "workaround is not necessary for target architecture"), obfd); + } +} enum bfd_arm_vfp11_pipe { @@ -6672,7 +7100,7 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) /* If we are only performing a partial link do not bother to construct any glue. */ - if (link_info->relocatable) + if (bfd_link_relocatable (link_info)) return TRUE; /* Skip if this bfd does not correspond to an ELF image. */ @@ -6858,7 +7286,7 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, struct elf32_arm_link_hash_table *globals; char *tmp_name; - if (link_info->relocatable) + if (bfd_link_relocatable (link_info)) return; /* Skip if this bfd does not correspond to an ELF image. 
*/ @@ -6933,6 +7361,352 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, free (tmp_name); } +/* Find virtual-memory addresses for STM32L4XX erratum veneers and + return locations after sections have been laid out, using + specially-named symbols. */ + +void +bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd, + struct bfd_link_info *link_info) +{ + asection *sec; + struct elf32_arm_link_hash_table *globals; + char *tmp_name; + + if (bfd_link_relocatable (link_info)) + return; + + /* Skip if this bfd does not correspond to an ELF image. */ + if (! is_arm_elf (abfd)) + return; + + globals = elf32_arm_hash_table (link_info); + if (globals == NULL) + return; + + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen + (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10); + + for (sec = abfd->sections; sec != NULL; sec = sec->next) + { + struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec); + elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist; + + for (; errnode != NULL; errnode = errnode->next) + { + struct elf_link_hash_entry *myh; + bfd_vma vma; + + switch (errnode->type) + { + case STM32L4XX_ERRATUM_BRANCH_TO_VENEER: + /* Find veneer symbol. */ + sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME, + errnode->u.b.veneer->u.v.id); + + myh = elf_link_hash_lookup + (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); + + if (myh == NULL) + (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer " + "`%s'"), abfd, tmp_name); + + vma = myh->root.u.def.section->output_section->vma + + myh->root.u.def.section->output_offset + + myh->root.u.def.value; + + errnode->u.b.veneer->vma = vma; + break; + + case STM32L4XX_ERRATUM_VENEER: + /* Find return location. */ + sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r", + errnode->u.v.id); + + myh = elf_link_hash_lookup + (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); + + if (myh == NULL) + (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer " + "`%s'"), abfd, tmp_name); + + vma = myh->root.u.def.section->output_section->vma + + myh->root.u.def.section->output_offset + + myh->root.u.def.value; + + errnode->u.v.branch->vma = vma; + break; + + default: + abort (); + } + } + } + + free (tmp_name); +} + +static inline bfd_boolean +is_thumb2_ldmia (const insn32 insn) +{ + /* Encoding T2: LDM.W {!}, + 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */ + return (insn & 0xffd02000) == 0xe8900000; +} + +static inline bfd_boolean +is_thumb2_ldmdb (const insn32 insn) +{ + /* Encoding T1: LDMDB {!}, + 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */ + return (insn & 0xffd02000) == 0xe9100000; +} + +static inline bfd_boolean +is_thumb2_vldm (const insn32 insn) +{ + /* A6.5 Extension register load or store instruction + A7.7.229 + We look for SP 32-bit and DP 64-bit registers. + Encoding T1 VLDM{mode} {!}, + is consecutive 64-bit registers + 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii + Encoding T2 VLDM{mode} {!}, + is consecutive 32-bit registers + 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii + if P==0 && U==1 && W==1 && Rn=1101 VPOP + if PUW=010 || PUW=011 || PUW=101 VLDM. */ + return + (((insn & 0xfe100f00) == 0xec100b00) || + ((insn & 0xfe100f00) == 0xec100a00)) + && /* (IA without !). */ + (((((insn << 7) >> 28) & 0xd) == 0x4) + /* (IA with !), includes VPOP (when reg number is SP). */ + || ((((insn << 7) >> 28) & 0xd) == 0x5) + /* (DB with !). 
*/ + || ((((insn << 7) >> 28) & 0xd) == 0x9)); +} + +/* STM STM32L4XX erratum : This function assumes that it receives an LDM or + VLDM opcode and: + - computes the number and the mode of memory accesses + - decides if the replacement should be done: + . replaces only if > 8-word accesses + . or (testing purposes only) replaces all accesses. */ + +static bfd_boolean +stm32l4xx_need_create_replacing_stub (const insn32 insn, + bfd_arm_stm32l4xx_fix stm32l4xx_fix) +{ + int nb_words = 0; + + /* The field encoding the register list is the same for both LDMIA + and LDMDB encodings. */ + if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn)) + nb_words = popcount (insn & 0x0000ffff); + else if (is_thumb2_vldm (insn)) + nb_words = (insn & 0xff); + + /* DEFAULT mode accounts for the real bug condition situation, + ALL mode inserts stubs for each LDM/VLDM instruction (testing). */ + return + (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 : + (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE; +} + +/* Look for potentially-troublesome code sequences which might trigger + the STM STM32L4XX erratum. */ + +bfd_boolean +bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd, + struct bfd_link_info *link_info) +{ + asection *sec; + bfd_byte *contents = NULL; + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); + + if (globals == NULL) + return FALSE; + + /* If we are only performing a partial link do not bother + to construct any glue. */ + if (bfd_link_relocatable (link_info)) + return TRUE; + + /* Skip if this bfd does not correspond to an ELF image. */ + if (! is_arm_elf (abfd)) + return TRUE; + + if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE) + return TRUE; + + /* Skip this BFD if it corresponds to an executable or dynamic object. */ + if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0) + return TRUE; + + for (sec = abfd->sections; sec != NULL; sec = sec->next) + { + unsigned int i, span; + struct _arm_elf_section_data *sec_data; + + /* If we don't have executable progbits, we're not interested in this + section. Also skip if section is to be excluded. */ + if (elf_section_type (sec) != SHT_PROGBITS + || (elf_section_flags (sec) & SHF_EXECINSTR) == 0 + || (sec->flags & SEC_EXCLUDE) != 0 + || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS + || sec->output_section == bfd_abs_section_ptr + || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0) + continue; + + sec_data = elf32_arm_section_data (sec); + + if (sec_data->mapcount == 0) + continue; + + if (elf_section_data (sec)->this_hdr.contents != NULL) + contents = elf_section_data (sec)->this_hdr.contents; + else if (! bfd_malloc_and_get_section (abfd, sec, &contents)) + goto error_return; + + qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map), + elf32_arm_compare_mapping); + + for (span = 0; span < sec_data->mapcount; span++) + { + unsigned int span_start = sec_data->map[span].vma; + unsigned int span_end = (span == sec_data->mapcount - 1) + ? sec->size : sec_data->map[span + 1].vma; + char span_type = sec_data->map[span].type; + int itblock_current_pos = 0; + + /* Only Thumb2 mode need be supported with this CM4 specific + code, we should not encounter any arm mode eg span_type + != 'a'. 
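+
+	     As a worked example of the test applied below (hypothetical
+	     instruction, for illustration): the 32-bit encoding of
+	     `ldmia.w r0!, {r1-r9}' has register-list bits 0x03fe, so
+	     popcount (insn & 0xffff) is 9; since 9 > 8 the DEFAULT fix
+	     mode routes it through a veneer.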
*/ + if (span_type != 't') + continue; + + for (i = span_start; i < span_end;) + { + unsigned int insn = bfd_get_16 (abfd, &contents[i]); + bfd_boolean insn_32bit = FALSE; + bfd_boolean is_ldm = FALSE; + bfd_boolean is_vldm = FALSE; + bfd_boolean is_not_last_in_it_block = FALSE; + + /* The first 16-bits of all 32-bit thumb2 instructions start + with opcode[15..13]=0b111 and the encoded op1 can be anything + except opcode[12..11]!=0b00. + See 32-bit Thumb instruction encoding. */ + if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000) + insn_32bit = TRUE; + + /* Compute the predicate that tells if the instruction + is concerned by the IT block + - Creates an error if there is a ldm that is not + last in the IT block thus cannot be replaced + - Otherwise we can create a branch at the end of the + IT block, it will be controlled naturally by IT + with the proper pseudo-predicate + - So the only interesting predicate is the one that + tells that we are not on the last item of an IT + block. */ + if (itblock_current_pos != 0) + is_not_last_in_it_block = !!--itblock_current_pos; + + if (insn_32bit) + { + /* Load the rest of the insn (in manual-friendly order). */ + insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]); + is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn); + is_vldm = is_thumb2_vldm (insn); + + /* Veneers are created for (v)ldm depending on + option flags and memory accesses conditions; but + if the instruction is not the last instruction of + an IT block, we cannot create a jump there, so we + bail out. */ + if ((is_ldm || is_vldm) && + stm32l4xx_need_create_replacing_stub + (insn, globals->stm32l4xx_fix)) + { + if (is_not_last_in_it_block) + { + (*_bfd_error_handler) + /* Note - overlong line used here to allow for translation. */ + (_("\ +%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n" + "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"), + abfd, sec, (long)i); + } + else + { + elf32_stm32l4xx_erratum_list *newerr = + (elf32_stm32l4xx_erratum_list *) + bfd_zmalloc + (sizeof (elf32_stm32l4xx_erratum_list)); + + elf32_arm_section_data (sec) + ->stm32l4xx_erratumcount += 1; + newerr->u.b.insn = insn; + /* We create only thumb branches. */ + newerr->type = + STM32L4XX_ERRATUM_BRANCH_TO_VENEER; + record_stm32l4xx_erratum_veneer + (link_info, newerr, abfd, sec, + i, + is_ldm ? + STM32L4XX_ERRATUM_LDM_VENEER_SIZE: + STM32L4XX_ERRATUM_VLDM_VENEER_SIZE); + newerr->vma = -1; + newerr->next = sec_data->stm32l4xx_erratumlist; + sec_data->stm32l4xx_erratumlist = newerr; + } + } + } + else + { + /* A7.7.37 IT p208 + IT blocks are only encoded in T1 + Encoding T1: IT{x{y{z}}} + 1 0 1 1 - 1 1 1 1 - firstcond - mask + if mask = '0000' then see 'related encodings' + We don't deal with UNPREDICTABLE, just ignore these. + There can be no nested IT blocks so an IT block + is naturally a new one for which it is worth + computing its size. */ + bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) && + ((insn & 0x000f) != 0x0000); + /* If we have a new IT block we compute its size. */ + if (is_newitblock) + { + /* Compute the number of instructions controlled + by the IT block, it will be used to decide + whether we are inside an IT block or not. */ + unsigned int mask = insn & 0x000f; + itblock_current_pos = 4 - ctz (mask); + } + } + + i += insn_32bit ? 
4 : 2; + } + } + + if (contents != NULL + && elf_section_data (sec)->this_hdr.contents != contents) + free (contents); + contents = NULL; + } + + return TRUE; + +error_return: + if (contents != NULL + && elf_section_data (sec)->this_hdr.contents != contents) + free (contents); + + return FALSE; +} /* Set target relocation values needed during linking. */ @@ -6944,6 +7718,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, int fix_v4bx, int use_blx, bfd_arm_vfp11_fix vfp11_fix, + bfd_arm_stm32l4xx_fix stm32l4xx_fix, int no_enum_warn, int no_wchar_warn, int pic_veneer, int fix_cortex_a8, int fix_arm1176) @@ -6969,6 +7744,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, globals->fix_v4bx = fix_v4bx; globals->use_blx |= use_blx; globals->vfp11_fix = vfp11_fix; + globals->stm32l4xx_fix = stm32l4xx_fix; globals->pic_veneer = pic_veneer; globals->fix_cortex_a8 = fix_cortex_a8; globals->fix_arm1176 = fix_arm1176; @@ -7146,7 +7922,8 @@ elf32_arm_create_thumb_stub (struct bfd_link_info * info, --my_offset; myh->root.u.def.value = my_offset; - if (info->shared || globals->root.is_relocatable_executable + if (bfd_link_pic (info) + || globals->root.is_relocatable_executable || globals->pic_veneer) { /* For relocatable objects we can't use absolute addresses, @@ -7457,6 +8234,8 @@ elf32_arm_allocate_plt_entry (struct bfd_link_info *info, first entry. */ if (splt->size == 0) splt->size += htab->plt_header_size; + + htab->next_tls_desc_index++; } /* Allocate the PLT entry itself, including any leading Thumb stub. */ @@ -7469,7 +8248,10 @@ elf32_arm_allocate_plt_entry (struct bfd_link_info *info, { /* We also need to make an entry in the .got.plt section, which will be placed in the .got section by the linker script. */ - arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc; + if (is_iplt_entry) + arm_plt->got_offset = sgotplt->size; + else + arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc; sgotplt->size += 4; } } @@ -7493,9 +8275,11 @@ arm_movt_immediate (bfd_vma value) ROOT_PLT points to the offset of the PLT entry from the start of its section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific - bookkeeping information. */ + bookkeeping information. -static void + Returns FALSE if there was a problem. */ + +static bfd_boolean elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, union gotplt_union *root_plt, struct arm_plt_info *arm_plt, @@ -7590,7 +8374,7 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, + root_plt->offset); ptr = splt->contents + root_plt->offset; - if (htab->vxworks_p && info->shared) + if (htab->vxworks_p && bfd_link_pic (info)) { unsigned int i; bfd_vma val; @@ -7685,6 +8469,46 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, | (tail_displacement & 0x00ffffff), ptr + 12); } + else if (using_thumb_only (htab)) + { + /* PR ld/16017: Generate thumb only PLT entries. */ + if (!using_thumb2 (htab)) + { + /* FIXME: We ought to be able to generate thumb-1 PLT + instructions... */ + _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"), + output_bfd); + return FALSE; + } + + /* Calculate the displacement between the PLT slot and the entry in + the GOT. The 12-byte offset accounts for the value produced by + adding to pc in the 3rd instruction of the PLT stub. */ + got_displacement = got_address - (plt_address + 12); + + /* As we are using 32 bit instructions we have to use 'put_arm_insn' + instead of 'put_thumb_insn'. 
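+
+	 As an illustration of the immediate scattering below, using a
+	 hypothetical displacement of 0x1234: imm8 = 0x34, imm3 = 0x2,
+	 i = 0 and imm4 = 0x1 are merged into the 0x0c00f240 MOVW
+	 template, giving `movw ip, #0x1234'; the MOVT element is filled
+	 the same way from the high halfword of the displacement.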
*/ + put_arm_insn (htab, output_bfd, + elf32_thumb2_plt_entry[0] + | ((got_displacement & 0x000000ff) << 16) + | ((got_displacement & 0x00000700) << 20) + | ((got_displacement & 0x00000800) >> 1) + | ((got_displacement & 0x0000f000) >> 12), + ptr + 0); + put_arm_insn (htab, output_bfd, + elf32_thumb2_plt_entry[1] + | ((got_displacement & 0x00ff0000) ) + | ((got_displacement & 0x07000000) << 4) + | ((got_displacement & 0x08000000) >> 17) + | ((got_displacement & 0xf0000000) >> 28), + ptr + 4); + put_arm_insn (htab, output_bfd, + elf32_thumb2_plt_entry[2], + ptr + 8); + put_arm_insn (htab, output_bfd, + elf32_thumb2_plt_entry[3], + ptr + 12); + } else { /* Calculate the displacement between the PLT slot and the @@ -7693,8 +8517,6 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, of the PLT stub. */ got_displacement = got_address - (plt_address + 8); - BFD_ASSERT ((got_displacement & 0xf0000000) == 0); - if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt)) { put_thumb_insn (htab, output_bfd, @@ -7703,21 +8525,45 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, elf32_arm_plt_thumb_stub[1], ptr - 2); } - put_arm_insn (htab, output_bfd, - elf32_arm_plt_entry[0] - | ((got_displacement & 0x0ff00000) >> 20), - ptr + 0); - put_arm_insn (htab, output_bfd, - elf32_arm_plt_entry[1] - | ((got_displacement & 0x000ff000) >> 12), - ptr+ 4); - put_arm_insn (htab, output_bfd, - elf32_arm_plt_entry[2] - | (got_displacement & 0x00000fff), - ptr + 8); + if (!elf32_arm_use_long_plt_entry) + { + BFD_ASSERT ((got_displacement & 0xf0000000) == 0); + + put_arm_insn (htab, output_bfd, + elf32_arm_plt_entry_short[0] + | ((got_displacement & 0x0ff00000) >> 20), + ptr + 0); + put_arm_insn (htab, output_bfd, + elf32_arm_plt_entry_short[1] + | ((got_displacement & 0x000ff000) >> 12), + ptr+ 4); + put_arm_insn (htab, output_bfd, + elf32_arm_plt_entry_short[2] + | (got_displacement & 0x00000fff), + ptr + 8); #ifdef FOUR_WORD_PLT - bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12); + bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12); #endif + } + else + { + put_arm_insn (htab, output_bfd, + elf32_arm_plt_entry_long[0] + | ((got_displacement & 0xf0000000) >> 28), + ptr + 0); + put_arm_insn (htab, output_bfd, + elf32_arm_plt_entry_long[1] + | ((got_displacement & 0x0ff00000) >> 20), + ptr + 4); + put_arm_insn (htab, output_bfd, + elf32_arm_plt_entry_long[2] + | ((got_displacement & 0x000ff000) >> 12), + ptr+ 8); + put_arm_insn (htab, output_bfd, + elf32_arm_plt_entry_long[3] + | (got_displacement & 0x00000fff), + ptr + 12); + } } /* Fill in the entry in the .rel(a).(i)plt section. */ @@ -7750,6 +8596,8 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, loc = srel->contents + plt_index * RELOC_SIZE (htab); SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc); } + + return TRUE; } /* Some relocations map to different relocations depending on the @@ -7945,7 +8793,7 @@ elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals, break; case R_ARM_THM_TLS_CALL: - /* GD->IE relaxation */ + /* GD->IE relaxation. */ if (!is_local) /* add r0,pc; ldr r0, [r0] */ insn = 0x44786800; @@ -8089,18 +8937,6 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, if (r_type != howto->type) howto = elf32_arm_howto_from_type (r_type); - /* If the start address has been set, then set the EF_ARM_HASENTRY - flag. Setting this more than once is redundant, but the cost is - not too high, and it keeps the code simple. 
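The immediate scattering in the Thumb-2 PLT entries above is easiest to trust with a round-trip check: each group of displacement bits lands in its own non-overlapping field of the template word, so gathering the fields back must reproduce the input. A standalone sketch mirroring the shifts applied to elf32_thumb2_plt_entry[0] above (word 1 uses the same pattern for the high half of the displacement); the helper names are mine.

#include <assert.h>
#include <stdint.h>

/* Scatter the low 16 bits of the GOT displacement exactly as the
   first template word above does.  */
static uint32_t
scatter_low16 (uint32_t d)
{
  return ((d & 0x000000ff) << 16)
         | ((d & 0x00000700) << 20)
         | ((d & 0x00000800) >> 1)
         | ((d & 0x0000f000) >> 12);
}

/* Inverse mapping: collect the four fields back into bits 0..15.  */
static uint32_t
gather_low16 (uint32_t w)
{
  return ((w >> 16) & 0x000000ff)
         | ((w >> 20) & 0x00000700)
         | ((w << 1) & 0x00000800)
         | ((w << 12) & 0x0000f000);
}

int
main (void)
{
  uint32_t d;
  /* The fields must not overlap, or the encoding would be lossy.  */
  for (d = 0; d <= 0xffff; d++)
    assert (gather_low16 (scatter_low16 (d)) == d);
  return 0;
}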
- - The test is done here, rather than somewhere else, because the - start address is only set just before the final link commences. - - Note - if the user deliberately sets a start address of 0, the - flag will not be set. */ - if (bfd_get_start_address (output_bfd) != 0) - elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY; - eh = (struct elf32_arm_link_hash_entry *) h; sgot = globals->root.sgot; local_got_offsets = elf_local_got_offsets (input_bfd); @@ -8165,9 +9001,11 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, plt_offset--; else { - elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt, - -1, dynreloc_value); - root_plt->offset |= 1; + if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt, + -1, dynreloc_value)) + root_plt->offset |= 1; + else + return bfd_reloc_notsupported; } /* Static relocations always resolve to the .iplt entry. */ @@ -8248,7 +9086,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* When generating a shared object or relocatable executable, these relocations are copied into the output file to be resolved at run time. */ - if ((info->shared || globals->root.is_relocatable_executable) + if ((bfd_link_pic (info) + || globals->root.is_relocatable_executable) && (input_section->flags & SEC_ALLOC) && !(globals->vxworks_p && strcmp (input_section->output_section->name, @@ -8269,6 +9108,21 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, Elf_Internal_Rela outrel; bfd_boolean skip, relocate; + if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI) + && !h->def_regular) + { + char *v = _("shared object"); + + if (bfd_link_executable (info)) + v = _("PIE executable"); + + (*_bfd_error_handler) + (_("%B: relocation %s against external or undefined symbol `%s'" + " can not be used when making a %s; recompile with -fPIC"), input_bfd, + elf32_arm_howto_table_1[r_type].name, h->root.root.string, v); + return bfd_reloc_notsupported; + } + *unresolved_reloc_p = FALSE; if (sreloc == NULL && globals->root.dynamic_sections_created) @@ -8298,8 +9152,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, memset (&outrel, 0, sizeof outrel); else if (h != NULL && h->dynindx != -1 && (!info->shared - || !info->symbolic + && (!bfd_link_pic (info) + || !SYMBOLIC_BIND (info, h) || !h->def_regular)) outrel.r_info = ELF32_R_INFO (h->dynindx, r_type); else @@ -8593,6 +9447,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, return bfd_reloc_ok; case R_ARM_ABS8: + /* PR 16202: Refetch the addend using the correct size. */ + if (globals->use_rel) + addend = bfd_get_8 (input_bfd, hit_data); value += addend; /* There is no way to tell whether the user intended to use a signed or @@ -8605,6 +9462,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, return bfd_reloc_ok; case R_ARM_ABS16: + /* PR 16202: Refetch the addend using the correct size. */ + if (globals->use_rel) + addend = bfd_get_16 (input_bfd, hit_data); value += addend; /* See comment for R_ARM_ABS8. */ @@ -8656,7 +9516,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + input_section->output_offset + rel->r_offset); - value = abs (relocation); + value = relocation; if (value >= 0x1000) return bfd_reloc_overflow; @@ -8691,7 +9551,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + input_section->output_offset + rel->r_offset); - value = abs (relocation); + value = relocation; /* We do not check for overflow of this reloc.
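The PR 16202 refetch above matters because REL-format ARM relocations keep the addend in the field being relocated: an R_ARM_ABS8 target is one byte wide, so fetching the addend with a wider read picks up unrelated neighbouring bytes. A hedged illustration of the failure mode, with plain memory reads standing in for the bfd_get_* accessors.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* Little-endian image: the byte at offset 0 carries the R_ARM_ABS8
     addend 0x05; offsets 1..3 belong to unrelated data.  */
  uint8_t section[4] = { 0x05, 0xaa, 0xbb, 0xcc };

  /* A 32-bit fetch drags in the neighbouring bytes...  */
  uint32_t wrong = (uint32_t) section[0]
                   | ((uint32_t) section[1] << 8)
                   | ((uint32_t) section[2] << 16)
                   | ((uint32_t) section[3] << 24);

  /* ...while a fetch of the reloc's own width yields just the addend.  */
  uint32_t right = section[0];

  assert (wrong == 0xccbbaa05 && right == 0x05);
  return 0;
}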
Although strictly speaking this is incorrect, it appears to be necessary in order @@ -8728,7 +9588,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + input_section->output_offset + rel->r_offset); - value = abs (relocation); + value = relocation; if (value >= 0x1000) return bfd_reloc_overflow; @@ -8896,7 +9756,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + splt->output_offset + plt_offset); - if (globals->use_blx && r_type == R_ARM_THM_CALL) + if (globals->use_blx + && r_type == R_ARM_THM_CALL + && ! using_thumb_only (globals)) { /* If the Thumb BLX instruction is available, convert the BL to a BLX instruction to call the ARM-mode @@ -8906,8 +9768,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, } else { - /* Target the Thumb stub before the ARM PLT entry. */ - value -= PLT_THUMB_STUB_SIZE; + if (! using_thumb_only (globals)) + /* Target the Thumb stub before the ARM PLT entry. */ + value -= PLT_THUMB_STUB_SIZE; branch_type = ST_BRANCH_TO_THUMB; } *unresolved_reloc_p = FALSE; @@ -8978,6 +9841,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, bfd_signed_vma reloc_signed_max = 0xffffe; bfd_signed_vma reloc_signed_min = -0x100000; bfd_signed_vma signed_check; + enum elf32_arm_stub_type stub_type = arm_stub_none; + struct elf32_arm_stub_hash_entry *stub_entry; + struct elf32_arm_link_hash_entry *hash; /* Need to refetch the addend, reconstruct the top three bits, and squish the two 11 bit pieces together. */ @@ -9009,8 +9875,25 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, *unresolved_reloc_p = FALSE; } - /* ??? Should handle interworking? GCC might someday try to - use this for tail calls. */ + hash = (struct elf32_arm_link_hash_entry *)h; + + stub_type = arm_type_of_stub (info, input_section, rel, + st_type, &branch_type, + hash, value, sym_sec, + input_bfd, sym_name); + if (stub_type != arm_stub_none) + { + stub_entry = elf32_arm_get_stub_entry (input_section, + sym_sec, h, + rel, globals, + stub_type); + if (stub_entry != NULL) + { + value = (stub_entry->stub_offset + + stub_entry->stub_sec->output_offset + + stub_entry->stub_sec->output_section->vma); + } + } relocation = value + signed_addend; relocation -= (input_section->output_section->vma @@ -9219,7 +10102,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, { if (dynreloc_st_type == STT_GNU_IFUNC) outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); - else if (info->shared && + else if (bfd_link_pic (info) && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT || h->root.type != bfd_link_hash_undefweak)) outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); @@ -9268,7 +10151,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, if (globals->use_rel) bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off); - if (info->shared || dynreloc_st_type == STT_GNU_IFUNC) + if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC) { Elf_Internal_Rela outrel; @@ -9317,7 +10200,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, { /* If we don't know the module number, create a relocation for it. 
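The R_ARM_THM_JUMP19 bounds above (reloc_signed_max 0xffffe, reloc_signed_min -0x100000) are the plus-or-minus 1 MiB reach of a Thumb-2 conditional branch, encoding T3, whose offset is SignExtend(S:J2:J1:imm6:imm11:0). A standalone decode sketch under that reading of the encoding; upper_insn/lower_insn follow the halfword split used above.

#include <assert.h>
#include <stdint.h>

/* Reassemble the branch offset of a Thumb-2 B<cond>.W (encoding T3)
   from its two halfwords.  */
static int32_t
thm_jump19_offset (uint16_t upper_insn, uint16_t lower_insn)
{
  uint32_t s = (upper_insn >> 10) & 1;
  uint32_t imm6 = upper_insn & 0x3f;
  uint32_t j1 = (lower_insn >> 13) & 1;
  uint32_t j2 = (lower_insn >> 11) & 1;
  uint32_t imm11 = lower_insn & 0x7ff;

  uint32_t raw = (s << 20) | (j2 << 19) | (j1 << 18) | (imm6 << 12)
                 | (imm11 << 1);
  /* Sign-extend from 21 bits.  */
  return (int32_t) (raw << 11) >> 11;
}

int
main (void)
{
  /* Every offset bit set with S=1: the most negative even step, -2.  */
  assert (thm_jump19_offset (0xf43f, 0xafff) == -2);
  /* S=1, all else clear: -0x100000, matching reloc_signed_min above.  */
  assert (thm_jump19_offset (0xf400, 0x8000) == -0x100000);
  return 0;
}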
*/ - if (info->shared) + if (bfd_link_pic (info)) { Elf_Internal_Rela outrel; @@ -9367,8 +10250,10 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, { bfd_boolean dyn; dyn = globals->root.dynamic_sections_created; - if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) - && (!info->shared + if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, + bfd_link_pic (info), + h) + && (!bfd_link_pic (info) || !SYMBOL_REFERENCES_LOCAL (info, h))) { *unresolved_reloc_p = FALSE; @@ -9405,7 +10290,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, now, and emit any relocations. If both an IE GOT and a GD GOT are necessary, we emit the GD first. */ - if ((info->shared || indx != 0) + if ((bfd_link_pic (info) || indx != 0) && (h == NULL || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT || h->root.type != bfd_link_hash_undefweak)) @@ -9421,7 +10306,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* We should have relaxed, unless this is an undefined weak symbol. */ BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak)) - || info->shared); + || bfd_link_pic (info)); BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8 <= globals->root.sgotplt->size); @@ -9602,7 +10487,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, else { lower_insn = 0xc000; - /* Round up the offset to a word boundary */ + /* Round up the offset to a word boundary. */ offset = (offset + 2) & ~2; } @@ -9621,7 +10506,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* These relocations needs special care, as besides the fact they point somewhere in .gotplt, the addend must be adjusted accordingly depending on the type of instruction - we refer to */ + we refer to. */ else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC)) { unsigned long data, insn; @@ -9696,7 +10581,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, } case R_ARM_TLS_LE32: - if (info->shared && !info->pie) + if (bfd_link_dll (info)) { (*_bfd_error_handler) (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"), @@ -9852,10 +10737,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, bfd_vma insn = bfd_get_32 (input_bfd, hit_data); bfd_vma pc = input_section->output_section->vma + input_section->output_offset + rel->r_offset; - /* sb should be the origin of the *segment* containing the symbol. - It is not clear how to obtain this OS-dependent value, so we - make an arbitrary choice of zero. */ - bfd_vma sb = 0; + /* sb is the origin of the *segment* containing the symbol. */ + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; bfd_vma residual; bfd_vma g_n; bfd_signed_vma signed_value; @@ -9942,8 +10825,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_n, in encoded constant-with-rotation format. */ - g_n = calculate_group_reloc_mask (abs (signed_value), group, - &residual); + g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group, &residual); /* Check for overflow if required. */ if ((r_type == R_ARM_ALU_PC_G0 @@ -9956,7 +10839,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + (long) rel->r_offset, signed_value < 0 ? 
- signed_value : signed_value, + howto->name); return bfd_reloc_overflow; } @@ -9988,7 +10872,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, bfd_vma insn = bfd_get_32 (input_bfd, hit_data); bfd_vma pc = input_section->output_section->vma + input_section->output_offset + rel->r_offset; - bfd_vma sb = 0; /* See note above. */ + /* sb is the origin of the *segment* containing the symbol. */ + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; bfd_vma residual; bfd_signed_vma signed_value; int group = 0; @@ -10035,15 +10920,16 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_{n-1} to obtain the residual at that stage. */ - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); + calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group - 1, &residual); /* Check for overflow. */ if (residual >= 0x1000) { (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), - input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + input_bfd, input_section, + (long) rel->r_offset, labs (signed_value), howto->name); return bfd_reloc_overflow; } @@ -10071,7 +10957,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, bfd_vma insn = bfd_get_32 (input_bfd, hit_data); bfd_vma pc = input_section->output_section->vma + input_section->output_offset + rel->r_offset; - bfd_vma sb = 0; /* See note above. */ + /* sb is the origin of the *segment* containing the symbol. */ + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; bfd_vma residual; bfd_signed_vma signed_value; int group = 0; @@ -10118,15 +11005,16 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_{n-1} to obtain the residual at that stage. */ - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); + calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group - 1, &residual); /* Check for overflow. */ if (residual >= 0x100) { (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), - input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + input_bfd, input_section, + (long) rel->r_offset, labs (signed_value), howto->name); return bfd_reloc_overflow; } @@ -10154,7 +11042,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, bfd_vma insn = bfd_get_32 (input_bfd, hit_data); bfd_vma pc = input_section->output_section->vma + input_section->output_offset + rel->r_offset; - bfd_vma sb = 0; /* See note above. */ + /* sb is the origin of the *segment* containing the symbol. */ + bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0; bfd_vma residual; bfd_signed_vma signed_value; int group = 0; @@ -10201,7 +11090,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_{n-1} to obtain the residual at that stage. */ - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); + calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group - 1, &residual); /* Check for overflow. 
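All of the group relocations above rest on one decomposition: the value is peeled into successive 8-bit chunks, each positioned at an even bit offset so that it fits an ARM rotated immediate, and whatever is left after group n is the residual that must fit the instruction field. A sketch of that decomposition as I read the AAELF rules; the real calculate_group_reloc_mask also returns the encoded constant, which is omitted here.

#include <assert.h>
#include <stdio.h>

/* Peel off G(0)..G(n): each group is the most significant remaining
   8-bit chunk, widened so it starts on an even bit position.  Returns
   the residual left over after group N.  */
static unsigned int
group_residual (unsigned int value, int n, unsigned int *groups)
{
  unsigned int residual = value;
  int i;

  for (i = 0; i <= n; i++)
    {
      unsigned int g = 0;
      if (residual != 0)
        {
          int msb = 31 - __builtin_clz (residual);
          int shift = msb - 7 < 0 ? 0 : msb - 7;
          shift = (shift + 1) & ~1;   /* round up to an even offset */
          g = residual & (0xffu << shift);
        }
      groups[i] = g;
      residual &= ~g;
    }
  return residual;
}

int
main (void)
{
  unsigned int groups[2];
  unsigned int residual = group_residual (0x12345678, 1, groups);

  /* 0x12345678 splits as 0x12000000 (G0) + 0x00344000 (G1) + residual.  */
  assert (groups[0] == 0x12000000 && groups[1] == 0x00344000);
  assert (residual == 0x00001678);
  printf ("G0=%#x G1=%#x residual=%#x\n", groups[0], groups[1], residual);
  return 0;
}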
(The absolute value to go in the place must be divisible by four and, after having been divided by four, must @@ -10211,7 +11101,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + (long) rel->r_offset, labs (signed_value), howto->name); return bfd_reloc_overflow; } @@ -10229,6 +11119,33 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, } return bfd_reloc_ok; + case R_ARM_THM_ALU_ABS_G0_NC: + case R_ARM_THM_ALU_ABS_G1_NC: + case R_ARM_THM_ALU_ABS_G2_NC: + case R_ARM_THM_ALU_ABS_G3_NC: + { + const int shift_array[4] = {0, 8, 16, 24}; + bfd_vma insn = bfd_get_16 (input_bfd, hit_data); + bfd_vma addr = value; + int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC]; + + /* Compute address. */ + if (globals->use_rel) + signed_addend = insn & 0xff; + addr += signed_addend; + if (branch_type == ST_BRANCH_TO_THUMB) + addr |= 1; + /* Clean imm8 insn. */ + insn &= 0xff00; + /* And update with correct part of address. */ + insn |= (addr >> shift) & 0xff; + /* Update insn. */ + bfd_put_16 (input_bfd, insn, hit_data); + } + + *unresolved_reloc_p = FALSE; + return bfd_reloc_ok; + default: return bfd_reloc_notsupported; } } @@ -10416,7 +11333,7 @@ elf32_arm_relocate_section (bfd * output_bfd, relocation = (sec->output_section->vma + sec->output_offset + sym->st_value); - if (!info->relocatable + if (!bfd_link_relocatable (info) && (sec->flags & SEC_MERGE) && ELF_ST_TYPE (sym->st_info) == STT_SECTION) { @@ -10509,12 +11426,12 @@ elf32_arm_relocate_section (bfd * output_bfd, } else { - bfd_boolean warned; + bfd_boolean warned, ignored; RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, r_symndx, symtab_hdr, sym_hashes, h, sec, relocation, - unresolved_reloc, warned); + unresolved_reloc, warned, ignored); sym_type = h->type; } @@ -10523,7 +11440,7 @@ elf32_arm_relocate_section (bfd * output_bfd, RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, rel, 1, relend, howto, 0, contents); - if (info->relocatable + if (bfd_link_relocatable (info)) { /* This is a relocatable link. We don't have to change anything, unless the reloc is against a section symbol, @@ -10572,7 +11489,7 @@ elf32_arm_relocate_section (bfd * output_bfd, done, i.e., the relaxation produced the final output we want, and we won't let anybody mess with it. Also, we have to do addend adjustments in case of a R_ARM_TLS_GOTDESC relocation - both in relaxed and non-relaxed cases */ + both in relaxed and non-relaxed cases. */ if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type) || (IS_ARM_TLS_GNU_RELOC (r_type) && !((h ? elf32_arm_hash_entry (h)->tls_type : @@ -10740,6 +11657,8 @@ insert_cantunwind_after(asection *text_sec, asection *exidx_sec) &exidx_arm_data->u.exidx.unwind_edit_tail, INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX); + exidx_arm_data->additional_reloc_count++; + adjust_exidx_size(exidx_sec, 8); } @@ -10769,7 +11688,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order, /* Walk over all EXIDX sections, and create backlinks from the corresponding text sections. */ - for (inp = info->input_bfds; inp != NULL; inp = inp->link_next) + for (inp = info->input_bfds; inp != NULL; inp = inp->link.next) { asection *sec; @@ -10855,6 +11774,18 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order, /* An error?
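The coverage fix above walks .ARM.exidx tables, which are 8-byte entries: a prel31 offset to the function, then either EXIDX_CANTUNWIND (0x1), inline unwind data (bit 31 set), or a prel31 pointer into .ARM.extab. A small decode sketch under that layout, with a made-up two-entry table; only the EXIDX_CANTUNWIND value is taken from the sources.

#include <stdint.h>
#include <stdio.h>

#define EXIDX_CANTUNWIND 0x1

/* Sign-extend a prel31 field (bit 31 is reserved) to a byte offset.  */
static int32_t
prel31 (uint32_t word)
{
  return (int32_t) (word << 1) >> 1;
}

/* Classify the second word of an 8-byte .ARM.exidx entry, the same
   distinction the elide/CANTUNWIND logic above draws.  */
static const char *
classify (uint32_t second_word)
{
  if (second_word == EXIDX_CANTUNWIND)
    return "cantunwind";
  if (second_word & 0x80000000u)
    return "inline unwind data";
  return "prel31 pointer into .ARM.extab";
}

int
main (void)
{
  /* Made-up table: one function with inline unwind data, then the
     kind of terminating entry insert_cantunwind_after appends.  */
  uint32_t exidx[4] = { 0x7ffffffc, 0x80b0b0b0, 0x7ffffffc, EXIDX_CANTUNWIND };
  unsigned int j;

  for (j = 0; j < 4; j += 2)
    printf ("fn at entry%+d bytes: %s\n",
            (int) prel31 (exidx[j]), classify (exidx[j + 1]));
  return 0;
}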
*/ continue; + if (last_unwind_type > 0) + { + unsigned int first_word = bfd_get_32 (ibfd, contents); + /* Add cantunwind if first unwind item does not match section + start. */ + if (first_word != sec->vma) + { + insert_cantunwind_after (last_text_sec, last_exidx_sec); + last_unwind_type = 0; + } + } + for (j = 0; j < hdr->sh_size; j += 8) { unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4); @@ -10882,7 +11813,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order, else unwind_type = 2; - if (elide) + if (elide && !bfd_link_relocatable (info)) { add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail, DELETE_EXIDX_ENTRY, NULL, j / 8); @@ -10909,7 +11840,8 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order, } /* Add terminating CANTUNWIND entry. */ - if (last_exidx_sec && last_unwind_type != 0) + if (!bfd_link_relocatable (info) && last_exidx_sec + && last_unwind_type != 0) insert_cantunwind_after(last_text_sec, last_exidx_sec); return TRUE; @@ -10951,7 +11883,7 @@ elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info) /* Process stub sections (eg BE8 encoding, ...). */ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); - int i; + unsigned int i; for (i=0; i<htab->top_id; i++) { sec = htab->stub_group[i].stub_sec; @@ -10985,6 +11917,11 @@ elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info) VFP11_ERRATUM_VENEER_SECTION_NAME)) return FALSE; + if (! elf32_arm_output_glue_section (info, abfd, + globals->bfd_of_glue_owner, + STM32L4XX_ERRATUM_VENEER_SECTION_NAME)) + return FALSE; + if (! elf32_arm_output_glue_section (info, abfd, globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME)) @@ -11141,14 +12078,7 @@ elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd) elf_elfheader (obfd)->e_flags = in_flags; elf_flags_init (obfd) = TRUE; - /* Also copy the EI_OSABI field. */ - elf_elfheader (obfd)->e_ident[EI_OSABI] = - elf_elfheader (ibfd)->e_ident[EI_OSABI]; - - /* Copy object attributes. */ - _bfd_elf_copy_obj_attributes (ibfd, obfd); - - return TRUE; + return _bfd_elf_copy_private_bfd_data (ibfd, obfd); } /* Values for Tag_ABI_PCS_R9_use. */ @@ -11394,6 +12324,47 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, T(V8), /* V7E_M. */ T(V8) /* V8. */ }; + const int v8m_baseline[] = + { + -1, /* PRE_V4. */ + -1, /* V4. */ + -1, /* V4T. */ + -1, /* V5T. */ + -1, /* V5TE. */ + -1, /* V5TEJ. */ + -1, /* V6. */ + -1, /* V6KZ. */ + -1, /* V6T2. */ + -1, /* V6K. */ + -1, /* V7. */ + T(V8M_BASE), /* V6_M. */ + T(V8M_BASE), /* V6S_M. */ + -1, /* V7E_M. */ + -1, /* V8. */ + -1, + T(V8M_BASE) /* V8-M BASELINE. */ + }; + const int v8m_mainline[] = + { + -1, /* PRE_V4. */ + -1, /* V4. */ + -1, /* V4T. */ + -1, /* V5T. */ + -1, /* V5TE. */ + -1, /* V5TEJ. */ + -1, /* V6. */ + -1, /* V6KZ. */ + -1, /* V6T2. */ + -1, /* V6K. */ + T(V8M_MAIN), /* V7. */ + T(V8M_MAIN), /* V6_M. */ + T(V8M_MAIN), /* V6S_M. */ + T(V8M_MAIN), /* V7E_M. */ + -1, /* V8. */ + -1, + T(V8M_MAIN), /* V8-M BASELINE. */ + T(V8M_MAIN) /* V8-M MAINLINE. */ + }; const int v4t_plus_v6_m[] = { -1, /* PRE_V4. */ @@ -11411,6 +12382,9 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, T(V6S_M), /* V6S_M. */ T(V7E_M), /* V7E_M. */ T(V8), /* V8. */ + -1, /* Unused. */ + T(V8M_BASE), /* V8-M BASELINE. */ + T(V8M_MAIN), /* V8-M MAINLINE. */ T(V4T_PLUS_V6_M) /* V4T plus V6_M.
*/ }; const int *comb[] = @@ -11422,6 +12396,9 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, v6s_m, v7e_m, v8, + NULL, + v8m_baseline, + v8m_mainline, /* Pseudo-architecture. */ v4t_plus_v6_m }; @@ -11454,7 +12431,7 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, if (tagh <= TAG_CPU_ARCH_V6KZ) return result; - result = comb[tagh - T(V6T2)][tagl]; + result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1; /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M) as the canonical version. */ @@ -11530,6 +12507,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) static const int order_021[3] = {0, 2, 1}; int i; bfd_boolean result = TRUE; + const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section; /* Skip the linker stubs file. This preserves previous behavior of accepting unknown attributes in the first input file - but @@ -11537,6 +12515,12 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) if (ibfd->flags & BFD_LINKER_CREATED) return TRUE; + /* Skip any input that hasn't attribute section. + This enables to link object files without attribute section with + any others. */ + if (bfd_get_section_by_name (ibfd, sec_name) == NULL) + return TRUE; + if (!elf_known_obj_attributes_proc (obfd)[0].i) { /* This is the first object. Copy the attributes. */ @@ -11576,10 +12560,14 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) /* This needs to happen before Tag_ABI_FP_number_model is merged. */ if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i) { - /* Ignore mismatches if the object doesn't use floating point. */ - if (out_attr[Tag_ABI_FP_number_model].i == 0) + /* Ignore mismatches if the object doesn't use floating point or is + floating point ABI independent. */ + if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none + || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none + && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible)) out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i; - else if (in_attr[Tag_ABI_FP_number_model].i != 0) + else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none + && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible) { _bfd_error_handler (_("error: %B uses VFP register arguments, %B does not"), @@ -11596,7 +12584,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) { case Tag_CPU_raw_name: case Tag_CPU_name: - /* These are merged after Tag_CPU_arch. */ + /* These are merged after Tag_CPU_arch. */ break; case Tag_ABI_optimization_goals: @@ -11608,7 +12596,9 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) { int secondary_compat = -1, secondary_compat_out = -1; unsigned int saved_out_attr = out_attr[i].i; - static const char *name_table[] = { + int arch_attr; + static const char *name_table[] = + { /* These aren't real CPU names, but we can't guess that from the architecture version alone. */ "Pre v4", @@ -11624,16 +12614,26 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) "ARM v7", "ARM v6-M", "ARM v6S-M", - "ARM v8" + "ARM v8", + "", + "ARM v8-M.baseline", + "ARM v8-M.mainline", }; /* Merge Tag_CPU_arch and Tag_also_compatible_with. 
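The comb[] lookup above resolves two Tag_CPU_arch values through per-architecture rows such as v8m_baseline, where -1 marks pairs with no common superset; with this patch such pairs now fail the merge instead of silently picking one side. A toy reduction of the scheme with a three-entry table (the real rows index from V6T2 onward and cover many more architectures).

#include <assert.h>

enum { ARCH_V6_M, ARCH_V7E_M, ARCH_V8M_BASE, NUM_ARCH };

/* comb[new][old]: merged architecture, -1 = incompatible (link error).
   Mirrors the shape of v8m_baseline above: v8-M.baseline merges with
   v6-M but has no common superset with v7E-M.  */
static const int comb[NUM_ARCH][NUM_ARCH] =
{
  /* old:          V6_M           V7E_M       V8M_BASE */
  /* V6_M */     { ARCH_V6_M,     ARCH_V7E_M, ARCH_V8M_BASE },
  /* V7E_M */    { ARCH_V7E_M,    ARCH_V7E_M, -1 },
  /* V8M_BASE */ { ARCH_V8M_BASE, -1,         ARCH_V8M_BASE },
};

static int
merge_arch (int out_arch, int in_arch)
{
  return comb[in_arch][out_arch];
}

int
main (void)
{
  assert (merge_arch (ARCH_V6_M, ARCH_V8M_BASE) == ARCH_V8M_BASE);
  assert (merge_arch (ARCH_V7E_M, ARCH_V8M_BASE) == -1);  /* now an error */
  return 0;
}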
*/ secondary_compat = get_secondary_compatible_arch (ibfd); secondary_compat_out = get_secondary_compatible_arch (obfd); - out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i, - &secondary_compat_out, - in_attr[i].i, - secondary_compat); + arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i, + &secondary_compat_out, + in_attr[i].i, + secondary_compat); + + /* Return with error if failed to merge. */ + if (arch_attr == -1) + return FALSE; + + out_attr[i].i = arch_attr; + set_secondary_compatible_arch (obfd, secondary_compat_out); /* Merge Tag_CPU_name and Tag_CPU_raw_name. */ @@ -11750,7 +12750,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) else if (in_attr[i].i == 0 || (in_attr[i].i == 'S' && (out_attr[i].i == 'A' || out_attr[i].i == 'R'))) - ; /* Do nothing. */ + ; /* Do nothing. */ else { _bfd_error_handler @@ -11762,14 +12762,39 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) } } break; + + case Tag_DSP_extension: + /* No need to change output value if any of: + - pre (<=) ARMv5T input architecture (do not have DSP) + - M input profile not ARMv7E-M and do not have DSP. */ + if (in_attr[Tag_CPU_arch].i <= 3 + || (in_attr[Tag_CPU_arch_profile].i == 'M' + && in_attr[Tag_CPU_arch].i != 13 + && in_attr[i].i == 0)) + ; /* Do nothing. */ + /* Output value should be 0 if DSP part of architecture, ie. + - post (>=) ARMv5te architecture output + - A, R or S profile output or ARMv7E-M output architecture. */ + else if (out_attr[Tag_CPU_arch].i >= 4 + && (out_attr[Tag_CPU_arch_profile].i == 'A' + || out_attr[Tag_CPU_arch_profile].i == 'R' + || out_attr[Tag_CPU_arch_profile].i == 'S' + || out_attr[Tag_CPU_arch].i == 13)) + out_attr[i].i = 0; + /* Otherwise, DSP instructions are added and not part of output + architecture. */ + else + out_attr[i].i = 1; + break; + case Tag_FP_arch: { /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch when it's 0. It might mean absence of FP hardware if - Tag_FP_arch is zero, otherwise it is effectively SP + DP. */ + Tag_FP_arch is zero. */ -#define VFP_VERSION_COUNT 8 +#define VFP_VERSION_COUNT 9 static const struct { int ver; @@ -11783,7 +12808,8 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) {3, 16}, {4, 32}, {4, 16}, - {8, 32} + {8, 32}, + {8, 16} }; int ver; int regs; @@ -11808,7 +12834,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) } /* Both the input and the output have nonzero Tag_FP_arch. - So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */ + So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */ /* If both the input and the output have zero Tag_ABI_HardFP_use, do nothing. */ @@ -11816,10 +12842,10 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) && out_attr[Tag_ABI_HardFP_use].i == 0) ; /* If the input and the output have different Tag_ABI_HardFP_use, - the combination of them is 3 (SP & DP). */ + the combination of them is 0 (implied by Tag_FP_arch). */ else if (in_attr[Tag_ABI_HardFP_use].i != out_attr[Tag_ABI_HardFP_use].i) - out_attr[Tag_ABI_HardFP_use].i = 3; + out_attr[Tag_ABI_HardFP_use].i = 0; /* Now we can handle Tag_FP_arch. 
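The vfp_versions table above reduces each Tag_FP_arch value to a {version, D-register count} pair, so two FP attributes can be merged by maximising each coordinate and mapping the pair back to an attribute value; index 8 is the new FP-ARMv8-with-16-regs row. A simplified sketch of that merge; the real code also folds Tag_ABI_HardFP_use into the decision.

#include <assert.h>

#define VFP_VERSION_COUNT 9

/* Tag_FP_arch values reduced to {version, D-register count}, matching
   the table above.  */
static const struct { int ver; int regs; } vfp_versions[VFP_VERSION_COUNT] =
{
  {0, 0}, {1, 16}, {2, 16}, {3, 32}, {3, 16},
  {4, 32}, {4, 16}, {8, 32}, {8, 16}
};

/* Merge two Tag_FP_arch values: take the larger version and the larger
   register bank, then map the pair back to an attribute value.  */
static int
merge_fp_arch (int in, int out)
{
  int ver = vfp_versions[in].ver > vfp_versions[out].ver
            ? vfp_versions[in].ver : vfp_versions[out].ver;
  int regs = vfp_versions[in].regs > vfp_versions[out].regs
             ? vfp_versions[in].regs : vfp_versions[out].regs;
  int i;

  for (i = 0; i < VFP_VERSION_COUNT; i++)
    if (vfp_versions[i].ver == ver && vfp_versions[i].regs == regs)
      return i;
  return -1;   /* no exact row; the real code handles this differently */
}

int
main (void)
{
  /* VFPv3-D16 (4) merged with VFPv4-D32 (5) needs v4 and 32 regs: 5.  */
  assert (merge_fp_arch (4, 5) == 5);
  /* VFPv4-D16 (6) with VFPv3-D32 (3) also lands on VFPv4-D32.  */
  assert (merge_fp_arch (6, 3) == 5);
  return 0;
}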
*/ @@ -12188,10 +13214,7 @@ elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr) if (flags & EF_ARM_RELEXEC) fprintf (file, _(" [relocatable executable]")); - if (flags & EF_ARM_HASENTRY) - fprintf (file, _(" [has entry point]")); - - flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY); + flags &= ~EF_ARM_RELEXEC; if (flags) fprintf (file, _("<Unrecognised flag bits set>")); @@ -12257,7 +13280,7 @@ elf32_arm_gc_sweep_hook (bfd * abfd, const Elf_Internal_Rela *rel, *relend; struct elf32_arm_link_hash_table * globals; - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; globals = elf32_arm_hash_table (info); @@ -12355,11 +13378,11 @@ elf32_arm_gc_sweep_hook (bfd * abfd, case R_ARM_THM_MOVW_PREL_NC: case R_ARM_THM_MOVT_PREL: /* Should the interworking branches be here also? */ - if ((info->shared || globals->root.is_relocatable_executable) + if ((bfd_link_pic (info) || globals->root.is_relocatable_executable) && (sec->flags & SEC_ALLOC) != 0) { if (h == NULL - && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)) + && elf32_arm_howto_from_type (r_type)->pc_relative) { call_reloc_p = TRUE; may_need_local_target_p = TRUE; @@ -12453,7 +13476,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, bfd_boolean may_need_local_target_p; unsigned long nsyms; - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; BFD_ASSERT (is_arm_elf (abfd)); @@ -12570,6 +13593,9 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, default: tls_type = GOT_NORMAL; break; } + if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE)) + info->flags |= DF_STATIC_TLS; + if (h != NULL) { h->got.refcount++; @@ -12600,7 +13626,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, /* If the symbol is accessed in both IE and GDESC method, we're able to relax. Turn off the GDESC flag, without messing up with any other kind of tls types - that may be involved */ + that may be involved. */ if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC)) tls_type &= ~GOT_TLS_GDESC; @@ -12646,13 +13672,15 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, may_need_local_target_p = TRUE; break; } + else goto jump_over; + /* Fall through. */ case R_ARM_MOVW_ABS_NC: case R_ARM_MOVT_ABS: case R_ARM_THM_MOVW_ABS_NC: case R_ARM_THM_MOVT_ABS: - if (info->shared) + if (bfd_link_pic (info)) { (*_bfd_error_handler) (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"), @@ -12665,6 +13693,12 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, /* Fall through. */ case R_ARM_ABS32: case R_ARM_ABS32_NOI: + jump_over: + if (h != NULL && bfd_link_executable (info)) + { + h->pointer_equality_needed = 1; + } + /* Fall through. */ case R_ARM_REL32: case R_ARM_REL32_NOI: case R_ARM_MOVW_PREL_NC: case R_ARM_MOVT_PREL: case R_ARM_THM_MOVW_PREL_NC: case R_ARM_THM_MOVT_PREL: /* Should the interworking branches be listed here?
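The DF_STATIC_TLS line added above records that a shared object contains initial-exec TLS references, so the dynamic loader knows the object needs space in the static TLS block and may legitimately refuse to dlopen it. For illustration, source like the following built into a shared object is what produces GOT_TLS_IE entries; the build flags shown are the usual GCC ones, not something this patch defines.

/* tls_ie.c -- illustrative only.  Built as
     gcc -fPIC -ftls-model=initial-exec -shared tls_ie.c -o libtls_ie.so
   the module takes IE-model GOT slots, and readelf -d on the result
   should show the STATIC_TLS flag that check_relocs above sets.  */
#include <stdio.h>

static __thread int counter;   /* IE model: offset fixed at load time */

int
bump (void)
{
  return ++counter;
}

int
main (void)
{
  /* Also runnable as a plain executable for a quick smoke test.  */
  printf ("%d\n", bump ());
  return 0;
}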
*/ - if ((info->shared || htab->root.is_relocatable_executable) + if ((bfd_link_pic (info) || htab->root.is_relocatable_executable) && (sec->flags & SEC_ALLOC) != 0) { if (h == NULL - && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)) + && elf32_arm_howto_from_type (r_type)->pc_relative) { /* In shared libraries and relocatable executables, we treat local relative references as calls; @@ -12823,7 +13857,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, p->pc_count = 0; } - if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI) + if (elf32_arm_howto_from_type (r_type)->pc_relative) p->pc_count += 1; p->count += 1; } @@ -12851,7 +13885,7 @@ elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info, while (again) { again = FALSE; - for (sub = info->input_bfds; sub != NULL; sub = sub->link_next) + for (sub = info->input_bfds; sub != NULL; sub = sub->link.next) { asection *o; @@ -12896,8 +13930,8 @@ elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym) static bfd_boolean arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED, - asection * section, asymbol ** symbols, + asection * section, bfd_vma offset, const char ** filename_ptr, const char ** functionname_ptr) @@ -12958,31 +13992,33 @@ arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED, static bfd_boolean elf32_arm_find_nearest_line (bfd * abfd, - asection * section, asymbol ** symbols, + asection * section, bfd_vma offset, const char ** filename_ptr, const char ** functionname_ptr, - unsigned int * line_ptr) + unsigned int * line_ptr, + unsigned int * discriminator_ptr) { bfd_boolean found = FALSE; - /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */ - - if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections, - section, symbols, offset, + if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset, filename_ptr, functionname_ptr, - line_ptr, NULL, 0, + line_ptr, discriminator_ptr, + dwarf_debug_sections, 0, & elf_tdata (abfd)->dwarf2_find_line_info)) { if (!*functionname_ptr) - arm_elf_find_function (abfd, section, symbols, offset, + arm_elf_find_function (abfd, symbols, section, offset, *filename_ptr ? NULL : filename_ptr, functionname_ptr); return TRUE; } + /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain + uses DWARF1. */ + if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset, & found, filename_ptr, functionname_ptr, line_ptr, @@ -12995,7 +14031,7 @@ elf32_arm_find_nearest_line (bfd * abfd, if (symbols == NULL) return FALSE; - if (! arm_elf_find_function (abfd, section, symbols, offset, + if (! arm_elf_find_function (abfd, symbols, section, offset, filename_ptr, functionname_ptr)) return FALSE; @@ -13114,7 +14150,7 @@ elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info, be handled correctly by relocate_section. Relocatable executables can reference data in shared objects directly, so we don't need to do anything here. */ - if (info->shared || globals->root.is_relocatable_executable) + if (bfd_link_pic (info) || globals->root.is_relocatable_executable) return TRUE; /* We must allocate the symbol in our .dynbss section, which will @@ -13129,11 +14165,13 @@ elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info, s = bfd_get_linker_section (dynobj, ".dynbss"); BFD_ASSERT (s != NULL); - /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to - copy the initial value out of the dynamic object and into the - runtime process image. 
We need to remember the offset into the + /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic + linker to copy the initial value out of the dynamic object and into + the runtime process image. We need to remember the offset into the .rel(a).bss section we are going to use. */ - if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) + if (info->nocopyreloc == 0 + && (h->root.u.def.section->flags & SEC_ALLOC) != 0 + && h->size != 0) { asection *srel; @@ -13142,7 +14180,7 @@ elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info, h->needs_copy = 1; } - return _bfd_elf_adjust_dynamic_copy (h, s); + return _bfd_elf_adjust_dynamic_copy (info, h, s); } /* Allocate space in .plt, .got and associated reloc sections for @@ -13196,7 +14234,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) h->got.refcount = 0; } - if (info->shared + if (bfd_link_pic (info) || eh->is_iplt || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) { @@ -13207,7 +14245,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) location in the .plt. This is required to make function pointers compare as equal between the normal executable and the shared library. */ - if (! info->shared + if (! bfd_link_pic (info) && !h->def_regular) { h->root.u.def.section = htab->root.splt; @@ -13219,12 +14257,10 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) h->target_internal = ST_BRANCH_TO_ARM; } - htab->next_tls_desc_index++; - /* VxWorks executables have a second set of relocations for each PLT entry. They go in a separate relocation section, which is processed by the kernel loader. */ - if (htab->vxworks_p && !info->shared) + if (htab->vxworks_p && !bfd_link_pic (info)) { /* There is a relocation for the initial PLT entry: an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */ @@ -13311,13 +14347,15 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) dyn = htab->root.dynamic_sections_created; indx = 0; - if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) - && (!info->shared + if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, + bfd_link_pic (info), + h) + && (!bfd_link_pic (info) || !SYMBOL_REFERENCES_LOCAL (info, h))) indx = h->dynindx; if (tls_type != GOT_NORMAL - && (info->shared || indx != 0) + && (bfd_link_pic (info) || indx != 0) && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT || h->root.type != bfd_link_hash_undefweak)) { @@ -13351,8 +14389,9 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) they all resolve dynamically instead. Reserve room for the GOT entry's R_ARM_IRELATIVE relocation. */ elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1); - else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT - || h->root.type != bfd_link_hash_undefweak)) + else if (bfd_link_pic (info) + && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak)) /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); } @@ -13402,14 +14441,14 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) space for pc-relative relocs that have become local due to symbol visibility changes. */ - if (info->shared || htab->root.is_relocatable_executable) + if (bfd_link_pic (info) || htab->root.is_relocatable_executable) { - /* The only relocs that use pc_count are R_ARM_REL32 and - R_ARM_REL32_NOI, which will appear on something like - ".long foo - .". 
We want calls to protected symbols to resolve - directly to the function rather than going via the plt. If people - want function pointer comparisons to work as expected then they - should avoid writing assembly like ".long foo - .". */ + /* Relocs that use pc_count are PC-relative forms, which will appear + on something like ".long foo - ." or "movw REG, foo - .". We want + calls to protected symbols to resolve directly to the function + rather than going via the plt. If people want function pointer + comparisons to work as expected then they should avoid writing + assembly like ".long foo - .". */ if (SYMBOL_CALLS_LOCAL (info, h)) { struct elf_dyn_relocs **pp; @@ -13578,7 +14617,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, if (elf_hash_table (info)->dynamic_sections_created) { /* Set the contents of the .interp section to the interpreter. */ - if (info->executable) + if (bfd_link_executable (info) && !info->nointerp) { s = bfd_get_linker_section (dynobj, ".interp"); BFD_ASSERT (s != NULL); @@ -13589,7 +14628,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, /* Set up .got offsets for local syms, and space for local dynamic relocs. */ - for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) + for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) { bfd_signed_vma *local_got; bfd_signed_vma *end_local_got; @@ -13730,13 +14769,13 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, && (local_iplt == NULL || local_iplt->arm.noncall_refcount == 0)) elf32_arm_allocate_irelocs (info, srel, 1); - else if (info->shared || output_bfd->flags & DYNAMIC) + else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC) { - if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC)) + if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)) || *local_tls_type & GOT_TLS_GD) elf32_arm_allocate_dynrelocs (info, srel, 1); - if (info->shared && *local_tls_type & GOT_TLS_GDESC) + if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC) { elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1); @@ -13755,7 +14794,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, for R_ARM_TLS_LDM32 relocations. */ htab->tls_ldm_got.offset = htab->root.sgot->size; htab->root.sgot->size += 8; - if (info->shared) + if (bfd_link_pic (info)) elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); } else @@ -13766,7 +14805,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info); /* Here we rummage through the found bfds to collect glue information. */ - for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) + for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) { if (! 
is_arm_elf (ibfd)) continue; @@ -13775,7 +14814,8 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, bfd_elf32_arm_init_maps (ibfd); if (!bfd_elf32_arm_process_before_allocation (ibfd, info) - || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)) + || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info) + || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info)) /* xgettext:c-format */ _bfd_error_handler (_("Errors encountered processing file %s"), ibfd->filename); @@ -13891,7 +14931,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, #define add_dynamic_entry(TAG, VAL) \ _bfd_elf_add_dynamic_entry (info, TAG, VAL) - if (info->executable) + if (bfd_link_executable (info)) { if (!add_dynamic_entry (DT_DEBUG, 0)) return FALSE; @@ -13959,7 +14999,7 @@ elf32_arm_always_size_sections (bfd *output_bfd, { asection *tls_sec; - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; tls_sec = elf_hash_table (info)->tls_sec; @@ -14016,20 +15056,25 @@ elf32_arm_finish_dynamic_symbol (bfd * output_bfd, if (!eh->is_iplt) { BFD_ASSERT (h->dynindx != -1); - elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt, - h->dynindx, 0); + if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt, + h->dynindx, 0)) + return FALSE; } if (!h->def_regular) { /* Mark the symbol as undefined, rather than as defined in - the .plt section. Leave the value alone. */ + the .plt section. */ sym->st_shndx = SHN_UNDEF; - /* If the symbol is weak, we do need to clear the value. + /* If the symbol is weak we need to clear the value. Otherwise, the PLT entry would provide a definition for the symbol even if the symbol wasn't defined anywhere, - and so the symbol would never be NULL. */ - if (!h->ref_regular_nonweak) + and so the symbol would never be NULL. Leave the value if + there were any relocations where pointer equality matters + (this is a clue for the dynamic linker, to make function + pointer comparisons work between an application and shared + library). */ + if (!h->ref_regular_nonweak || !h->pointer_equality_needed) sym->st_value = 0; } else if (eh->is_iplt && eh->plt.noncall_refcount != 0) @@ -14367,6 +15412,20 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info else if (htab->nacl_p) arm_nacl_put_plt0 (htab, output_bfd, splt, got_address + 8 - (plt_address + 16)); + else if (using_thumb_only (htab)) + { + got_displacement = got_address - (plt_address + 12); + + plt0_entry = elf32_thumb2_plt0_entry; + put_arm_insn (htab, output_bfd, plt0_entry[0], + splt->contents + 0); + put_arm_insn (htab, output_bfd, plt0_entry[1], + splt->contents + 4); + put_arm_insn (htab, output_bfd, plt0_entry[2], + splt->contents + 8); + + bfd_put_32 (output_bfd, got_displacement, splt->contents + 12); + } else { got_displacement = got_address - (plt_address + 16); @@ -14431,7 +15490,9 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info #endif } - if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0) + if (htab->vxworks_p + && !bfd_link_pic (info) + && htab->root.splt->size > 0) { /* Correct the .rel(a).plt.unloaded relocations. They will have incorrect symbol indexes. */ @@ -14489,13 +15550,14 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT { Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. 
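The Thumb-2 PLT header emitted above is 12 bytes of code followed at offset 12 by a literal word holding &GOT[0] relative to the header, which is why got_displacement is computed against plt_address + 12 and why the mapping symbols later in this patch are $t at 0, $d at 12 and $t at 16. A back-of-envelope check of that arithmetic with invented addresses.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* Invented addresses, for illustration only.  */
  uint32_t plt_address = 0x10000;   /* start of .plt */
  uint32_t got_address = 0x30000;   /* start of .got.plt */

  /* Rough layout of the Thumb-2 PLT header written above:
       +0   push {lr}
       +2   ldr.w lr, [pc, #8]
       +6   add lr, pc
       +8   ldr.w pc, [lr, #8]!
       +12  literal: &GOT[0] - .   ($d mapping symbol, $t again at +16)  */
  uint32_t got_displacement = got_address - (plt_address + 12);

  /* Anything recovering GOT[0] from the header (a loader, a debugger)
     needs only the header address and the literal word.  */
  assert (plt_address + 12 + got_displacement == got_address);
  return 0;
}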
*/ struct elf32_arm_link_hash_table *globals; + struct elf_segment_map *m; i_ehdrp = elf_elfheader (abfd); if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN) i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM; else - i_ehdrp->e_ident[EI_OSABI] = 0; + _bfd_elf_post_process_headers (abfd, link_info); i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION; if (link_info) @@ -14509,11 +15571,31 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC))) { int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args); - if (abi) + if (abi == AEABI_VFP_args_vfp) i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD; else i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT; } + + /* Scan segment to set p_flags attribute if it contains only sections with + SHF_ARM_NOREAD flag. */ + for (m = elf_seg_map (abfd); m != NULL; m = m->next) + { + unsigned int j; + + if (m->count == 0) + continue; + for (j = 0; j < m->count; j++) + { + if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD)) + break; + } + if (j == m->count) + { + m->p_flags = PF_X; + m->p_flags_valid = 1; + } + } } static enum elf_reloc_type_class @@ -14529,6 +15611,8 @@ elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED, return reloc_class_plt; case R_ARM_COPY: return reloc_class_copy; + case R_ARM_IRELATIVE: + return reloc_class_ifunc; default: return reloc_class_normal; } @@ -14565,6 +15649,10 @@ elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec) hdr->sh_type = SHT_ARM_EXIDX; hdr->sh_flags |= SHF_LINK_ORDER; } + + if (sec->flags & SEC_ELF_NOREAD) + hdr->sh_flags |= SHF_ARM_NOREAD; + return TRUE; } @@ -14705,6 +15793,11 @@ elf32_arm_output_plt_map_1 (output_arch_syminfo *osi, if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr)) return FALSE; } + else if (using_thumb_only (htab)) + { + if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr)) + return FALSE; + } else { bfd_boolean thumb_stub_p; @@ -14912,7 +16005,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd, mapping symbols. */ for (input_bfd = info->input_bfds; input_bfd != NULL; - input_bfd = input_bfd->link_next) + input_bfd = input_bfd->link.next) { if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS) for (osi.sec = input_bfd->sections; @@ -14945,7 +16038,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd, osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd, osi.sec->output_section); - if (info->shared || htab->root.is_relocatable_executable + if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->pic_veneer) size = ARM2THUMB_PIC_GLUE_SIZE; else if (htab->use_blx) @@ -15023,7 +16116,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd, if (htab->vxworks_p) { /* VxWorks shared libraries have no PLT header. 
*/ - if (!info->shared) + if (!bfd_link_pic (info)) { if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) return FALSE; @@ -15036,6 +16129,15 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd, if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) return FALSE; } + else if (using_thumb_only (htab)) + { + if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0)) + return FALSE; + if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12)) + return FALSE; + if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16)) + return FALSE; + } else if (!htab->symbian_p) { if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) @@ -15061,7 +16163,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd, elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi); for (input_bfd = info->input_bfds; input_bfd != NULL; - input_bfd = input_bfd->link_next) + input_bfd = input_bfd->link.next) { struct arm_local_iplt_info **local_iplt; unsigned int i, num_syms; @@ -15200,7 +16302,7 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry, bfd_vma veneered_insn_loc, veneer_entry_loc; bfd_signed_vma branch_offset; bfd *abfd; - unsigned int target; + unsigned int loc; stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; data = (struct a8_branch_to_stub_data *) in_arg; @@ -15211,9 +16313,11 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry, contents = data->contents; + /* We use target_section as Cortex-A8 erratum workaround stubs are only + generated when both source and target are in the same section. */ veneered_insn_loc = stub_entry->target_section->output_section->vma + stub_entry->target_section->output_offset - + stub_entry->target_value; + + stub_entry->source_value; veneer_entry_loc = stub_entry->stub_sec->output_section->vma + stub_entry->stub_sec->output_offset @@ -15222,74 +16326,840 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry, if (stub_entry->stub_type == arm_stub_a8_veneer_blx) veneered_insn_loc &= ~3u; - branch_offset = veneer_entry_loc - veneered_insn_loc - 4; + branch_offset = veneer_entry_loc - veneered_insn_loc - 4; + + abfd = stub_entry->target_section->owner; + loc = stub_entry->source_value; + + /* We attempt to avoid this condition by setting stubs_always_after_branch + in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround. + This check is just to be on the safe side... */ + if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff)) + { + (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is " + "allocated in unsafe location"), abfd); + return FALSE; + } + + switch (stub_entry->stub_type) + { + case arm_stub_a8_veneer_b: + case arm_stub_a8_veneer_b_cond: + branch_insn = 0xf0009000; + goto jump24; + + case arm_stub_a8_veneer_blx: + branch_insn = 0xf000e800; + goto jump24; + + case arm_stub_a8_veneer_bl: + { + unsigned int i1, j1, i2, j2, s; + + branch_insn = 0xf000d000; + + jump24: + if (branch_offset < -16777216 || branch_offset > 16777214) + { + /* There's not much we can do apart from complain if this + happens. */ + (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out " + "of range (input file too large)"), abfd); + return FALSE; + } + + /* i1 = not(j1 eor s), so: + not i1 = j1 eor s + j1 = (not i1) eor s. 
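The J1/J2 juggling above is the Thumb-2 wide-branch encoding T4: offset = SignExtend(S:I1:I2:imm10:imm11:0) with I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S), giving a reach of roughly plus-or-minus 16 MiB. A round-trip encode/decode sketch of the offset bits over the same 0xf0009000 template used above; the function names are mine.

#include <assert.h>
#include <stdint.h>

/* Pack an even, in-range branch offset into B.W encoding T4, following
   the same bit placement as the veneer code above.  */
static uint32_t
encode_t4 (int32_t off)
{
  uint32_t s = (off >> 24) & 1;
  uint32_t i1 = (off >> 23) & 1;
  uint32_t i2 = (off >> 22) & 1;
  uint32_t j1 = (i1 ^ 1) ^ s;          /* J1 = NOT(I1) EOR S */
  uint32_t j2 = (i2 ^ 1) ^ s;

  return 0xf0009000u | (s << 26) | ((((uint32_t) off >> 12) & 0x3ff) << 16)
         | (j1 << 13) | (j2 << 11) | (((uint32_t) off >> 1) & 0x7ff);
}

/* Inverse: recover the signed offset from the instruction word.  */
static int32_t
decode_t4 (uint32_t insn)
{
  uint32_t s = (insn >> 26) & 1;
  uint32_t j1 = (insn >> 13) & 1;
  uint32_t j2 = (insn >> 11) & 1;
  uint32_t i1 = (j1 ^ s) ^ 1;          /* I1 = NOT(J1 EOR S) */
  uint32_t i2 = (j2 ^ s) ^ 1;
  uint32_t raw = (s << 24) | (i1 << 23) | (i2 << 22)
                 | (((insn >> 16) & 0x3ff) << 12) | ((insn & 0x7ff) << 1);

  return (int32_t) (raw << 7) >> 7;    /* sign-extend from 25 bits */
}

int
main (void)
{
  int32_t off;
  for (off = -16777216; off <= 16777214; off += 9998)   /* even steps */
    assert (decode_t4 (encode_t4 (off)) == off);
  return 0;
}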
*/ + + branch_insn |= (branch_offset >> 1) & 0x7ff; + branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16; + i2 = (branch_offset >> 22) & 1; + i1 = (branch_offset >> 23) & 1; + s = (branch_offset >> 24) & 1; + j1 = (!i1) ^ s; + j2 = (!i2) ^ s; + branch_insn |= j2 << 11; + branch_insn |= j1 << 13; + branch_insn |= s << 26; + } + break; + + default: + BFD_FAIL (); + return FALSE; + } + + bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]); + bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]); + + return TRUE; +} + +/* Beginning of stm32l4xx work-around. */ + +/* Functions encoding instructions necessary for the emission of the + fix-stm32l4xx-629360. + Encoding is extracted from the + ARM (C) Architecture Reference Manual + ARMv7-A and ARMv7-R edition + ARM DDI 0406C.b (ID072512). */ + +static inline bfd_vma +create_instruction_branch_absolute (int branch_offset) +{ + /* A8.8.18 B (A8-334) + B target_address (Encoding T4). */ + /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */ + /* jump offset is: S:I1:I2:imm10:imm11:0. */ + /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */ + + int s = ((branch_offset & 0x1000000) >> 24); + int j1 = s ^ !((branch_offset & 0x800000) >> 23); + int j2 = s ^ !((branch_offset & 0x400000) >> 22); + + if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24)) + BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch."); + + bfd_vma patched_inst = 0xf0009000 + | s << 26 /* S. */ + | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */ + | j1 << 13 /* J1. */ + | j2 << 11 /* J2. */ + | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */ + + return patched_inst; +} + +static inline bfd_vma +create_instruction_ldmia (int base_reg, int wback, int reg_mask) +{ + /* A8.8.57 LDM/LDMIA/LDMFD (A8-396) + LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */ + bfd_vma patched_inst = 0xe8900000 + | (/*W=*/wback << 21) + | (base_reg << 16) + | (reg_mask & 0x0000ffff); + + return patched_inst; +} + +static inline bfd_vma +create_instruction_ldmdb (int base_reg, int wback, int reg_mask) +{ + /* A8.8.60 LDMDB/LDMEA (A8-402) + LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */ + bfd_vma patched_inst = 0xe9100000 + | (/*W=*/wback << 21) + | (base_reg << 16) + | (reg_mask & 0x0000ffff); + + return patched_inst; +} + +static inline bfd_vma +create_instruction_mov (int target_reg, int source_reg) +{ + /* A8.8.103 MOV (register) (A8-486) + MOV Rd, Rm (Encoding T1). */ + bfd_vma patched_inst = 0x4600 + | (target_reg & 0x7) + | ((target_reg & 0x8) >> 3) << 7 + | (source_reg << 3); + + return patched_inst; +} + +static inline bfd_vma +create_instruction_sub (int target_reg, int source_reg, int value) +{ + /* A8.8.221 SUB (immediate) (A8-708) + SUB Rd, Rn, #value (Encoding T3). */ + bfd_vma patched_inst = 0xf1a00000 + | (target_reg << 8) + | (source_reg << 16) + | (/*S=*/0 << 20) + | ((value & 0x800) >> 11) << 26 + | ((value & 0x700) >> 8) << 12 + | (value & 0x0ff); + + return patched_inst; +} + +static inline bfd_vma +create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words, + int first_reg) +{ + /* A8.8.332 VLDM (A8-922) + VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */ + bfd_vma patched_inst = (is_dp ? 
0xec900b00 : 0xec900a00) + | (/*W=*/wback << 21) + | (base_reg << 16) + | (num_words & 0x000000ff) + | (((unsigned)first_reg >> 1) & 0x0000000f) << 12 + | (first_reg & 0x00000001) << 22; + + return patched_inst; +} + +static inline bfd_vma +create_instruction_vldmdb (int base_reg, int is_dp, int num_words, + int first_reg) +{ + /* A8.8.332 VLDM (A8-922) + VLMD{MODE} Rn!, {} (Encoding T1 or T2). */ + bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00) + | (base_reg << 16) + | (num_words & 0x000000ff) + | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12 + | (first_reg & 0x00000001) << 22; + + return patched_inst; +} + +static inline bfd_vma +create_instruction_udf_w (int value) +{ + /* A8.8.247 UDF (A8-758) + Undefined (Encoding T2). */ + bfd_vma patched_inst = 0xf7f0a000 + | (value & 0x00000fff) + | (value & 0x000f0000) << 16; + + return patched_inst; +} + +static inline bfd_vma +create_instruction_udf (int value) +{ + /* A8.8.247 UDF (A8-758) + Undefined (Encoding T1). */ + bfd_vma patched_inst = 0xde00 + | (value & 0xff); + + return patched_inst; +} + +/* Functions writing an instruction in memory, returning the next + memory position to write to. */ + +static inline bfd_byte * +push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab, + bfd * output_bfd, bfd_byte *pt, insn32 insn) +{ + put_thumb2_insn (htab, output_bfd, insn, pt); + return pt + 4; +} + +static inline bfd_byte * +push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab, + bfd * output_bfd, bfd_byte *pt, insn32 insn) +{ + put_thumb_insn (htab, output_bfd, insn, pt); + return pt + 2; +} + +/* Function filling up a region in memory with T1 and T2 UDFs taking + care of alignment. */ + +static bfd_byte * +stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab, + bfd * output_bfd, + const bfd_byte * const base_stub_contents, + bfd_byte * const from_stub_contents, + const bfd_byte * const end_stub_contents) +{ + bfd_byte *current_stub_contents = from_stub_contents; + + /* Fill the remaining of the stub with deterministic contents : UDF + instructions. + Check if realignment is needed on modulo 4 frontier using T1, to + further use T2. */ + if ((current_stub_contents < end_stub_contents) + && !((current_stub_contents - base_stub_contents) % 2) + && ((current_stub_contents - base_stub_contents) % 4)) + current_stub_contents = + push_thumb2_insn16 (htab, output_bfd, current_stub_contents, + create_instruction_udf (0)); + + for (; current_stub_contents < end_stub_contents;) + current_stub_contents = + push_thumb2_insn32 (htab, output_bfd, current_stub_contents, + create_instruction_udf_w (0)); + + return current_stub_contents; +} + +/* Functions writing the stream of instructions equivalent to the + derived sequence for ldmia, ldmdb, vldm respectively. */ + +static void +stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab, + bfd * output_bfd, + const insn32 initial_insn, + const bfd_byte *const initial_insn_addr, + bfd_byte *const base_stub_contents) +{ + int wback = (initial_insn & 0x00200000) >> 21; + int ri, rn = (initial_insn & 0x000F0000) >> 16; + int insn_all_registers = initial_insn & 0x0000ffff; + int insn_low_registers, insn_high_registers; + int usable_register_mask; + int nb_registers = popcount (insn_all_registers); + int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0; + int restore_rn = (insn_all_registers & (1 << rn)) ? 
+/* Functions writing the stream of instructions equivalent to the
+   derived sequence, for ldmia, ldmdb and vldm respectively.  */
+
+static void
+stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
+				       bfd * output_bfd,
+				       const insn32 initial_insn,
+				       const bfd_byte *const initial_insn_addr,
+				       bfd_byte *const base_stub_contents)
+{
+  int wback = (initial_insn & 0x00200000) >> 21;
+  int ri, rn = (initial_insn & 0x000F0000) >> 16;
+  int insn_all_registers = initial_insn & 0x0000ffff;
+  int insn_low_registers, insn_high_registers;
+  int usable_register_mask;
+  int nb_registers = popcount (insn_all_registers);
+  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
+  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_ldmia (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of eight registers or fewer, which do not trigger the
+     hardware issue.  */
+  if (nb_registers <= 8)
+    {
+      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      if (!restore_pc)
+	current_stub_contents =
+	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			      create_instruction_branch_absolute
+			      (initial_insn_addr - current_stub_contents));
+
+      /* Fill the remainder of the stub with deterministic contents.  */
+      current_stub_contents =
+	stm32l4xx_fill_stub_udf (htab, output_bfd,
+				 base_stub_contents, current_stub_contents,
+				 base_stub_contents +
+				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+
+      return;
+    }
+
+  /* - reg_list[13] == 0.  */
+  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
+
+  /* - reg_list[14] and reg_list[15] are not both set.  */
+  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
+
+  /* - if (wback==1) reg_list[rn] == 0.  */
+  BFD_ASSERT (!wback || !restore_rn);
+
+  /* - nb_registers > 8.  */
+  BFD_ASSERT (popcount (insn_all_registers) > 8);
+
+  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
+
+  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
+     - One with the 7 lowest registers (register mask 0x007F)
+       This LDM will finally contain between 2 and 7 registers
+     - One with the 7 highest registers (register mask 0xDF80)
+       This LDM will finally contain between 2 and 7 registers.  */
+  insn_low_registers = insn_all_registers & 0x007F;
+  insn_high_registers = insn_all_registers & 0xDF80;
+
+  /* A spare register may be needed during this veneer to temporarily
+     handle the base register.  This register will be restored with the
+     last LDM operation.
+     The usable register may be any general purpose register (that
+     excludes PC, SP, LR : register mask is 0x1FFF).  */
+  usable_register_mask = 0x1FFF;
+
+  /* Generate the stub function.  */
+  if (wback)
+    {
+      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (rn, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (rn, /*wback=*/1, insn_high_registers));
+      if (!restore_pc)
+	{
+	  /* B initial_insn_addr+4.  */
+	  current_stub_contents =
+	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				create_instruction_branch_absolute
+				(initial_insn_addr - current_stub_contents));
+	}
+    }
+  else /* if (!wback).  */
+    {
+      ri = rn;
+
+      /* If Rn is not part of the high-register-list, move it there.  */
+      if (!(insn_high_registers & (1 << rn)))
+	{
+	  /* Choose a Ri in the high-register-list that will be restored.  */
+	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+	  /* MOV Ri, Rn.  */
+	  current_stub_contents =
+	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+				create_instruction_mov (ri, rn));
+	}
+
+      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+
+      if (!restore_pc)
+	{
+	  /* B initial_insn_addr+4.  */
+	  current_stub_contents =
+	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				create_instruction_branch_absolute
+				(initial_insn_addr - current_stub_contents));
+	}
+    }
+
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+			     base_stub_contents, current_stub_contents,
+			     base_stub_contents +
+			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+}
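+
+/* Illustrative expansion (editorial, not from the original patch): the
+   13-register load
+     ldmia.w  r12!, {r0-r11, lr}    (register mask 0x4fff)
+   is replaced by the veneer sequence
+     ldmia.w  r12!, {r0-r6}         (mask 0x4fff & 0x007f = 0x007f)
+     ldmia.w  r12!, {r7-r11, lr}    (mask 0x4fff & 0xdf80 = 0x4f80)
+     b.w      <initial_insn_addr+4>
+   so that each LDM transfers at most seven registers.  */
+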
+static void
+stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
+				       bfd * output_bfd,
+				       const insn32 initial_insn,
+				       const bfd_byte *const initial_insn_addr,
+				       bfd_byte *const base_stub_contents)
+{
+  int wback = (initial_insn & 0x00200000) >> 21;
+  int ri, rn = (initial_insn & 0x000f0000) >> 16;
+  int insn_all_registers = initial_insn & 0x0000ffff;
+  int insn_low_registers, insn_high_registers;
+  int usable_register_mask;
+  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
+  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
+  int nb_registers = popcount (insn_all_registers);
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of eight registers or fewer, which do not trigger the
+     hardware issue.  */
+  if (nb_registers <= 8)
+    {
+      /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+
+      /* Fill the remainder of the stub with deterministic contents.  */
+      current_stub_contents =
+	stm32l4xx_fill_stub_udf (htab, output_bfd,
+				 base_stub_contents, current_stub_contents,
+				 base_stub_contents +
+				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+
+      return;
+    }
+
+  /* - reg_list[13] == 0.  */
+  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
+
+  /* - reg_list[14] and reg_list[15] are not both set.  */
+  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
+
+  /* - if (wback==1) reg_list[rn] == 0.  */
+  BFD_ASSERT (!wback || !restore_rn);
+
+  /* - nb_registers > 8.  */
+  BFD_ASSERT (popcount (insn_all_registers) > 8);
+
+  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
+
+  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
+     - One with the 7 lowest registers (register mask 0x007F)
+       This LDM will finally contain between 2 and 7 registers
+     - One with the 7 highest registers (register mask 0xDF80)
+       This LDM will finally contain between 2 and 7 registers.  */
+  insn_low_registers = insn_all_registers & 0x007F;
+  insn_high_registers = insn_all_registers & 0xDF80;
+
+  /* A spare register may be needed during this veneer to temporarily
+     handle the base register.  This register will be restored with
+     the last LDM operation.
+     The usable register may be any general purpose register (that excludes
+     PC, SP, LR : register mask is 0x1FFF).  */
+  usable_register_mask = 0x1FFF;
+
+  /* Generate the stub function.  */
+  if (!wback && !restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the low-register-list that will be restored.  */
+      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
+
+      /* MOV Ri, Rn.  */
+      current_stub_contents =
+	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+			    create_instruction_mov (ri, rn));
+
+      /* LDMDB Ri!, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Ri, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/0, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else if (wback && !restore_pc && !restore_rn)
+    {
+      /* LDMDB Rn!, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (rn, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Rn!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (rn, /*wback=*/1, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else if (!wback && restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the high-register-list that will be restored.  */
+      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+      /* SUB Ri, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_sub (ri, rn, (4 * nb_registers)));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (wback && restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the high-register-list that will be restored.  */
+      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+      /* SUB Rn, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_sub (rn, rn, (4 * nb_registers)));
+
+      /* MOV Ri, Rn.  */
+      current_stub_contents =
+	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+			    create_instruction_mov (ri, rn));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (!wback && !restore_pc && restore_rn)
+    {
+      ri = rn;
+      if (!(insn_low_registers & (1 << rn)))
+	{
+	  /* Choose a Ri in the low-register-list that will be restored.  */
+	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
+
+	  /* MOV Ri, Rn.  */
+	  current_stub_contents =
+	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+				create_instruction_mov (ri, rn));
+	}
+
+      /* LDMDB Ri!, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Ri, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/0, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else if (!wback && restore_pc && restore_rn)
+    {
+      ri = rn;
+      if (!(insn_high_registers & (1 << rn)))
+	{
+	  /* Choose a Ri in the high-register-list that will be restored.  */
+	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+	}
+
+      /* SUB Ri, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_sub (ri, rn, (4 * nb_registers)));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (wback && restore_rn)
+    {
+      /* The assembler should not have accepted this encoding.  */
+      BFD_ASSERT (0 && "Cannot patch an instruction that has "
+		  "undefined behavior.");
+    }
+
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+			     base_stub_contents, current_stub_contents,
+			     base_stub_contents +
+			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+}
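+
+/* Illustrative expansion (editorial, not from the original patch): the
+   wback && restore_pc && !restore_rn case above turns the 9-register
+     ldmdb.w  r0!, {r4-r11, pc}     (mask 0x8ff0, 4 * 9 = 36 bytes)
+   into
+     sub      r0, r0, #36
+     mov      r7, r0
+     ldmia.w  r7!, {r4-r6}          (low mask 0x0070)
+     ldmia.w  r7, {r7-r11, pc}      (high mask 0x8f80)
+   r0 ends up with the value the write-back would have produced, and
+   loading PC last performs the return, so no trailing branch is
+   needed.  */
+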
+static void
+stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
+				      bfd * output_bfd,
+				      const insn32 initial_insn,
+				      const bfd_byte *const initial_insn_addr,
+				      bfd_byte *const base_stub_contents)
+{
+  int num_words = ((unsigned int) initial_insn << 24) >> 24;
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_vldm (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of eight words or fewer, which do not trigger the
+     hardware issue.  */
+  if (num_words <= 8)
+    {
+      /* Untouched instruction.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else
+    {
+      bfd_boolean is_dp = /* DP encoding.  */
+	(initial_insn & 0xfe100f00) == 0xec100b00;
+      bfd_boolean is_ia_nobang = /* (IA without !).  */
+	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
+      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
+	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
+      bfd_boolean is_db_bang = /* (DB with !).  */
+	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
+      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
+      /* d = UInt (Vd:D).  */
+      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
+	| (((unsigned int) initial_insn << 9) >> 31);
+
+      /* Compute the number of 8-word chunks needed for the split.  */
+      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
+      int chunk;
+
+      /* Test coverage was done under the hypothesis that exactly one
+	 of the is_ predicates above is true.  */
+      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang)
+		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
+
+      /* We split the words in one pass for all cases, then emit the
+	 adjustments:
+
+	 vldm   rx, {...}
+	   ->   vldm rx!, {8_words_or_less} for each needed 8_word
+	   ->   sub rx, rx, #size (list)
+
+	 vldm   rx!, {...}
+	   ->   vldm rx!, {8_words_or_less} for each needed 8_word
+	 This also handles the VPOP instruction (when rx is sp).
+
+	 vldmdb rx!, {...}
+	   ->   vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
+      for (chunk = 0; chunk < chunks; ++chunk)
+	{
+	  bfd_vma new_insn = 0;
+
+	  if (is_ia_nobang || is_ia_bang)
+	    {
+	      new_insn = create_instruction_vldmia
+		(base_reg,
+		 is_dp,
+		 /*wback=*/1,
+		 chunks - (chunk + 1) ?
+		 8 : num_words - chunk * 8,
+		 first_reg + chunk * 8);
+	    }
+	  else if (is_db_bang)
+	    {
+	      new_insn = create_instruction_vldmdb
+		(base_reg,
+		 is_dp,
+		 chunks - (chunk + 1) ?
+		 8 : num_words - chunk * 8,
+		 first_reg + chunk * 8);
+	    }
+
+	  if (new_insn)
+	    current_stub_contents =
+	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				  new_insn);
+	}
-  abfd = stub_entry->target_section->owner;
-  target = stub_entry->target_value;
+      /* Only this case requires the base register compensation
+	 subtract.  */
+      if (is_ia_nobang)
+	{
+	  current_stub_contents =
+	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				create_instruction_sub
+				(base_reg, base_reg, 4 * num_words));
+	}
-  /* We attempt to avoid this condition by setting stubs_always_after_branch
-     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
-     This check is just to be on the safe side...  */
-  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
-    {
-      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
-			       "allocated in unsafe location"), abfd);
-      return FALSE;
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
     }
 
-  switch (stub_entry->stub_type)
-    {
-    case arm_stub_a8_veneer_b:
-    case arm_stub_a8_veneer_b_cond:
-      branch_insn = 0xf0009000;
-      goto jump24;
-
-    case arm_stub_a8_veneer_blx:
-      branch_insn = 0xf000e800;
-      goto jump24;
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+			     base_stub_contents, current_stub_contents,
+			     base_stub_contents +
+			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
+}
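+
+/* Illustrative expansion (editorial, not from the original patch): the
+   12-word transfer
+     vldm  r1!, {d0-d5}
+   is split into
+     vldm  r1!, {d0-d3}     (8 words)
+     vldm  r1!, {d4-d5}     (4 words)
+     b.w   <initial_insn_addr+4>
+   For the no-writeback form "vldm r1, {d0-d5}" the same two chunks are
+   emitted, followed by "sub r1, r1, #48" (4 * num_words) to undo the
+   write-back introduced by the split.  */
+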
-    case arm_stub_a8_veneer_bl:
-      {
-	unsigned int i1, j1, i2, j2, s;
+static void
+stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
+				 bfd * output_bfd,
+				 const insn32 wrong_insn,
+				 const bfd_byte *const wrong_insn_addr,
+				 bfd_byte *const stub_contents)
+{
+  if (is_thumb2_ldmia (wrong_insn))
+    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
+					   wrong_insn, wrong_insn_addr,
+					   stub_contents);
+  else if (is_thumb2_ldmdb (wrong_insn))
+    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
+					   wrong_insn, wrong_insn_addr,
+					   stub_contents);
+  else if (is_thumb2_vldm (wrong_insn))
+    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
+					  wrong_insn, wrong_insn_addr,
+					  stub_contents);
+}
 
-	branch_insn = 0xf000d000;
+/* End of stm32l4xx work-around.  */
 
-      jump24:
-	if (branch_offset < -16777216 || branch_offset > 16777214)
-	  {
-	    /* There's not much we can do apart from complain if this
-	       happens.  */
-	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
-				     "of range (input file too large)"), abfd);
-	    return FALSE;
-	  }
-
-	/* i1 = not(j1 eor s), so:
-	   not i1 = j1 eor s
-	   j1 = (not i1) eor s.  */
+static void
+elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
+			  asection *output_sec, Elf_Internal_Rela *rel)
+{
+  struct bfd_elf_section_reloc_data *output_reldata;
+  struct elf32_arm_link_hash_table *htab;
+  struct bfd_elf_section_data *oesd;
+  Elf_Internal_Shdr *rel_hdr;
+  bfd_byte *erel;
+
+  BFD_ASSERT (output_sec && rel);
+
+  oesd = elf_section_data (output_sec);
 
-	branch_insn |= (branch_offset >> 1) & 0x7ff;
-	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
-	i2 = (branch_offset >> 22) & 1;
-	i1 = (branch_offset >> 23) & 1;
-	s = (branch_offset >> 24) & 1;
-	j1 = (!i1) ^ s;
-	j2 = (!i2) ^ s;
-	branch_insn |= j2 << 11;
-	branch_insn |= j1 << 13;
-	branch_insn |= s << 26;
-      }
-      break;
-
-    default:
-      BFD_FAIL ();
-      return FALSE;
+  if (oesd->rel.hdr)
+    {
+      rel_hdr = oesd->rel.hdr;
+      output_reldata = &(oesd->rel);
+    }
+  else if (oesd->rela.hdr)
+    {
+      rel_hdr = oesd->rela.hdr;
+      output_reldata = &(oesd->rela);
+    }
+  else
+    {
+      abort ();
     }
 
-  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
-  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
-
-  return TRUE;
+  erel = rel_hdr->contents;
+  erel += output_reldata->count * rel_hdr->sh_entsize;
+  htab = elf32_arm_hash_table (info);
+  SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
+  output_reldata->count++;
 }
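+
+/* Editorial usage sketch (this mirrors the EXIDX_CANTUNWIND code further
+   down; it is not an additional API): callers fill in an
+   Elf_Internal_Rela and hand it to elf32_arm_add_relocation, e.g.
+
+     Elf_Internal_Rela rel;
+     rel.r_addend = 0;
+     rel.r_offset = exidx_offset;
+     rel.r_info = ELF32_R_INFO (text_out->target_index, R_ARM_PREL31);
+     elf32_arm_add_relocation (output_bfd, link_info,
+			       sec->output_section, &rel);
+
+   The relocation is appended to the output section's .rel/.rela
+   contents and the count is bumped so the next one lands after it.  */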
 
 /* Do code byteswapping.  Return FALSE afterwards so that the section is
@@ -15306,6 +17176,7 @@ elf32_arm_write_section (bfd *output_bfd,
   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
   elf32_arm_section_map *map;
   elf32_vfp11_erratum_list *errnode;
+  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
   bfd_vma ptr;
   bfd_vma end;
   bfd_vma offset = sec->output_section->vma + sec->output_offset;
@@ -15400,6 +17271,89 @@ elf32_arm_write_section (bfd *output_bfd,
 	    }
 	}
 
+  if (arm_data->stm32l4xx_erratumcount != 0)
+    {
+      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
+	   stm32l4xx_errnode != 0;
+	   stm32l4xx_errnode = stm32l4xx_errnode->next)
+	{
+	  bfd_vma target = stm32l4xx_errnode->vma - offset;
+
+	  switch (stm32l4xx_errnode->type)
+	    {
+	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
+	      {
+		unsigned int insn;
+		bfd_vma branch_to_veneer =
+		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
+
+		if ((signed) branch_to_veneer < -(1 << 24)
+		    || (signed) branch_to_veneer >= (1 << 24))
+		  {
+		    bfd_vma out_of_range =
+		      ((signed) branch_to_veneer < -(1 << 24)) ?
+		      - branch_to_veneer - (1 << 24) :
+		      ((signed) branch_to_veneer >= (1 << 24)) ?
+		      branch_to_veneer - (1 << 24) : 0;
+
+		    (*_bfd_error_handler)
+		      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
+			 "Jump out of range by %ld bytes. "
+			 "Cannot encode branch instruction."),
+		       output_bfd,
+		       (long) (stm32l4xx_errnode->vma - 4),
+		       out_of_range);
+		    continue;
+		  }
+
+		insn = create_instruction_branch_absolute
+		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
+
+		/* The instruction is before the label.  */
+		target -= 4;
+
+		put_thumb2_insn (globals, output_bfd,
+				 (bfd_vma) insn, contents + target);
+	      }
+	      break;
+
+	    case STM32L4XX_ERRATUM_VENEER:
+	      {
+		bfd_byte * veneer;
+		bfd_byte * veneer_r;
+		unsigned int insn;
+
+		veneer = contents + target;
+		veneer_r = veneer
+		  + stm32l4xx_errnode->u.b.veneer->vma
+		  - stm32l4xx_errnode->vma - 4;
+
+		if ((signed) (veneer_r - veneer -
+			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
+			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
+			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
+			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
+		    || (signed) (veneer_r - veneer) >= (1 << 24))
+		  {
+		    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
+					     "veneer."), output_bfd);
+		    continue;
+		  }
+
+		/* Original instruction.  */
+		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
+
+		stm32l4xx_create_replacing_stub
+		  (globals, output_bfd, insn, (void *) veneer_r, (void *) veneer);
+	      }
+	      break;
+
+	    default:
+	      abort ();
+	    }
+	}
+    }
+
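+  /* Editorial note with a worked example (not part of the original
+     patch): encoding T4 reaches +/-16 MiB, i.e. offsets in
+     [-(1 << 24), (1 << 24) - 2].  A branch_to_veneer of 0x1000010
+     bytes is rejected above with
+       out_of_range = 0x1000010 - (1 << 24) = 16 bytes.  */
+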
   if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
     {
       arm_unwind_table_edit *edit_node
@@ -15452,6 +17406,26 @@ elf32_arm_write_section (bfd *output_bfd,
 		     usual BFD method.  */
 		  prel31_offset = (text_offset - exidx_offset)
 				  & 0x7ffffffful;
 
+		  if (bfd_link_relocatable (link_info))
+		    {
+		      /* The relocation for the new EXIDX_CANTUNWIND entry
+			 is created here, so there is no need to adjust the
+			 offset by hand.  */
+		      prel31_offset = text_sec->output_offset
+				      + text_sec->size;
+
+		      /* New relocation entity.  */
+		      asection *text_out = text_sec->output_section;
+		      Elf_Internal_Rela rel;
+		      rel.r_addend = 0;
+		      rel.r_offset = exidx_offset;
+		      rel.r_info = ELF32_R_INFO (text_out->target_index,
+						 R_ARM_PREL31);
+
+		      elf32_arm_add_relocation (output_bfd, link_info,
+						sec->output_section,
+						&rel);
+		    }
 
 	      /* First address we can't unwind.  */
 	      bfd_put_32 (output_bfd, prel31_offset,
@@ -15496,8 +17470,8 @@ elf32_arm_write_section (bfd *output_bfd,
       data.writing_section = sec;
       data.contents = contents;
 
-      bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
-			 &data);
+      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
+			 & data);
     }
 
   if (mapcount == 0)
@@ -15692,10 +17666,14 @@ elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
 			   Elf_Internal_Sym *sym, const char **namep,
 			   flagword *flagsp, asection **secp, bfd_vma *valp)
 {
-  if ((abfd->flags & DYNAMIC) == 0
-      && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
-	  || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
-    elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
+  if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
+       || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
+      && (abfd->flags & DYNAMIC) == 0
+      && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
+    elf_tdata (info->output_bfd)->has_gnu_symbols = elf_gnu_symbol_any;
+
+  if (elf32_arm_hash_table (info) == NULL)
+    return FALSE;
 
   if (elf32_arm_hash_table (info)->vxworks_p
       && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
@@ -15736,13 +17714,331 @@ const struct elf_size_info elf32_arm_size_info =
   bfd_elf32_swap_reloca_out
 };
 
+static bfd_vma
+read_code32 (const bfd *abfd, const bfd_byte *addr)
+{
+  /* V7 BE8 code is always little endian.  */
+  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
+    return bfd_getl32 (addr);
+
+  return bfd_get_32 (abfd, addr);
+}
+
+static bfd_vma
+read_code16 (const bfd *abfd, const bfd_byte *addr)
+{
+  /* V7 BE8 code is always little endian.  */
+  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
+    return bfd_getl16 (addr);
+
+  return bfd_get_16 (abfd, addr);
+}
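+
+/* Editorial note (not part of the original patch): in a BE8 image the
+   data sections are big-endian but instructions are always stored
+   little-endian, which is why the helpers above force bfd_getl32 /
+   bfd_getl16 whenever EF_ARM_BE8 is set instead of honouring the
+   BFD's target byte order.  */
+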
+/* Return size of plt0 entry starting at ADDR
+   or (bfd_vma) -1 if size cannot be determined.  */
+
+static bfd_vma
+elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
+{
+  bfd_vma first_word;
+  bfd_vma plt0_size;
+
+  first_word = read_code32 (abfd, addr);
+
+  if (first_word == elf32_arm_plt0_entry[0])
+    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
+  else if (first_word == elf32_thumb2_plt0_entry[0])
+    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
+  else
+    /* We don't yet handle this PLT format.  */
+    return (bfd_vma) -1;
+
+  return plt0_size;
+}
+
+/* Return size of plt entry starting at offset OFFSET
+   of plt section located at address START
+   or (bfd_vma) -1 if size cannot be determined.  */
+
+static bfd_vma
+elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
+{
+  bfd_vma first_insn;
+  bfd_vma plt_size = 0;
+  const bfd_byte *addr = start + offset;
+
+  /* PLT entry size is fixed on Thumb-only platforms.  */
+  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
+    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
+
+  /* Account for a preceding Thumb stub if present.  */
+  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
+    {
+      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
+    }
+
+  /* Strip immediate from first add.  */
+  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
+
+#ifdef FOUR_WORD_PLT
+  if (first_insn == elf32_arm_plt_entry[0])
+    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
+#else
+  if (first_insn == elf32_arm_plt_entry_long[0])
+    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
+  else if (first_insn == elf32_arm_plt_entry_short[0])
+    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
+#endif
+  else
+    /* We don't yet handle this PLT format.  */
+    return (bfd_vma) -1;
+
+  return plt_size;
+}
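+
+/* Worked example for illustration (editorial, not from the original
+   patch): when the short entry form is in use, an ARM PLT entry
+   preceded by a Thumb stub occupies
+       2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub)
+     + 4 * ARRAY_SIZE (elf32_arm_plt_entry_short)
+   bytes; e.g. with a two-halfword stub and a three-word short entry
+   that is 4 + 12 = 16 bytes.  */
+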
+/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
+
+static long
+elf32_arm_get_synthetic_symtab (bfd *abfd,
+				long symcount ATTRIBUTE_UNUSED,
+				asymbol **syms ATTRIBUTE_UNUSED,
+				long dynsymcount,
+				asymbol **dynsyms,
+				asymbol **ret)
+{
+  asection *relplt;
+  asymbol *s;
+  arelent *p;
+  long count, i, n;
+  size_t size;
+  Elf_Internal_Shdr *hdr;
+  char *names;
+  asection *plt;
+  bfd_vma offset;
+  bfd_byte *data;
+
+  *ret = NULL;
+
+  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
+    return 0;
+
+  if (dynsymcount <= 0)
+    return 0;
+
+  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
+  if (relplt == NULL)
+    return 0;
+
+  hdr = &elf_section_data (relplt)->this_hdr;
+  if (hdr->sh_link != elf_dynsymtab (abfd)
+      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
+    return 0;
+
+  plt = bfd_get_section_by_name (abfd, ".plt");
+  if (plt == NULL)
+    return 0;
+
+  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
+    return -1;
+
+  data = plt->contents;
+  if (data == NULL)
+    {
+      if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data)
+	  || data == NULL)
+	return -1;
+      bfd_cache_section_contents ((asection *) plt, data);
+    }
+
+  count = relplt->size / hdr->sh_entsize;
+  size = count * sizeof (asymbol);
+  p = relplt->relocation;
+  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
+    {
+      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
+      if (p->addend != 0)
+	size += sizeof ("+0x") - 1 + 8;
+    }
+
+  s = *ret = (asymbol *) bfd_malloc (size);
+  if (s == NULL)
+    return -1;
+
+  offset = elf32_arm_plt0_size (abfd, data);
+  if (offset == (bfd_vma) -1)
+    return -1;
+
+  names = (char *) (s + count);
+  p = relplt->relocation;
+  n = 0;
+  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
+    {
+      size_t len;
+
+      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
+      if (plt_size == (bfd_vma) -1)
+	break;
+
+      *s = **p->sym_ptr_ptr;
+      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
+	 we are defining a symbol, ensure one of them is set.  */
+      if ((s->flags & BSF_LOCAL) == 0)
+	s->flags |= BSF_GLOBAL;
+      s->flags |= BSF_SYNTHETIC;
+      s->section = plt;
+      s->value = offset;
+      s->name = names;
+      s->udata.p = NULL;
+      len = strlen ((*p->sym_ptr_ptr)->name);
+      memcpy (names, (*p->sym_ptr_ptr)->name, len);
+      names += len;
+      if (p->addend != 0)
+	{
+	  char buf[30], *a;
+
+	  memcpy (names, "+0x", sizeof ("+0x") - 1);
+	  names += sizeof ("+0x") - 1;
+	  bfd_sprintf_vma (abfd, buf, p->addend);
+	  for (a = buf; *a == '0'; ++a)
+	    ;
+	  len = strlen (a);
+	  memcpy (names, a, len);
+	  names += len;
+	}
+      memcpy (names, "@plt", sizeof ("@plt"));
+      names += sizeof ("@plt");
+      ++s, ++n;
+      offset += plt_size;
+    }
+
+  return n;
+}
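+
+/* Editorial sketch (not from the original patch) of the names
+   synthesized above: a PLT slot whose relocation refers to symbol "foo"
+   is named "foo@plt"; with a non-zero addend, say 0x10, it becomes
+   "foo+0x10@plt", with leading zeros of the hex addend stripped.  */
+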
+static bfd_boolean
+elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
+{
+  if (hdr->sh_flags & SHF_ARM_NOREAD)
+    *flags |= SEC_ELF_NOREAD;
+  return TRUE;
+}
+
+static flagword
+elf32_arm_lookup_section_flags (char *flag_name)
+{
+  if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
+    return SHF_ARM_NOREAD;
+
+  return SEC_NO_FLAGS;
+}
+
+static unsigned int
+elf32_arm_count_additional_relocs (asection *sec)
+{
+  struct _arm_elf_section_data *arm_data;
+  arm_data = get_arm_elf_section_data (sec);
+  return arm_data->additional_reloc_count;
+}
+
+/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
+   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
+   FALSE otherwise. 
ISECTION is the best guess matching section from the + input bfd IBFD, but it might be NULL. */ + +static bfd_boolean +elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED, + bfd *obfd ATTRIBUTE_UNUSED, + const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED, + Elf_Internal_Shdr *osection) +{ + switch (osection->sh_type) + { + case SHT_ARM_EXIDX: + { + Elf_Internal_Shdr **oheaders = elf_elfsections (obfd); + Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd); + unsigned i = 0; + + osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER; + osection->sh_info = 0; + + /* The sh_link field must be set to the text section associated with + this index section. Unfortunately the ARM EHABI does not specify + exactly how to determine this association. Our caller does try + to match up OSECTION with its corresponding input section however + so that is a good first guess. */ + if (isection != NULL + && osection->bfd_section != NULL + && isection->bfd_section != NULL + && isection->bfd_section->output_section != NULL + && isection->bfd_section->output_section == osection->bfd_section + && iheaders != NULL + && isection->sh_link > 0 + && isection->sh_link < elf_numsections (ibfd) + && iheaders[isection->sh_link]->bfd_section != NULL + && iheaders[isection->sh_link]->bfd_section->output_section != NULL + ) + { + for (i = elf_numsections (obfd); i-- > 0;) + if (oheaders[i]->bfd_section + == iheaders[isection->sh_link]->bfd_section->output_section) + break; + } + + if (i == 0) + { + /* Failing that we have to find a matching section ourselves. If + we had the output section name available we could compare that + with input section names. Unfortunately we don't. So instead + we use a simple heuristic and look for the nearest executable + section before this one. */ + for (i = elf_numsections (obfd); i-- > 0;) + if (oheaders[i] == osection) + break; + if (i == 0) + break; + + while (i-- > 0) + if (oheaders[i]->sh_type == SHT_PROGBITS + && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR)) + == (SHF_ALLOC | SHF_EXECINSTR)) + break; + } + + if (i) + { + osection->sh_link = i; + /* If the text section was part of a group + then the index section should be too. 
*/ + if (oheaders[i]->sh_flags & SHF_GROUP) + osection->sh_flags |= SHF_GROUP; + return TRUE; + } + } + break; + + case SHT_ARM_PREEMPTMAP: + osection->sh_flags = SHF_ALLOC; + break; + + case SHT_ARM_ATTRIBUTES: + case SHT_ARM_DEBUGOVERLAY: + case SHT_ARM_OVERLAYSECTION: + default: + break; + } + + return FALSE; +} + +#undef elf_backend_copy_special_section_fields +#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields + #define ELF_ARCH bfd_arch_arm #define ELF_TARGET_ID ARM_ELF_DATA #define ELF_MACHINE_CODE EM_ARM #ifdef __QNXTARGET__ #define ELF_MAXPAGESIZE 0x1000 #else -#define ELF_MAXPAGESIZE 0x8000 +#define ELF_MAXPAGESIZE 0x10000 #endif #define ELF_MINPAGESIZE 0x1000 #define ELF_COMMONPAGESIZE 0x1000 @@ -15754,7 +18050,6 @@ const struct elf_size_info elf32_arm_size_info = #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create -#define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line @@ -15762,6 +18057,7 @@ const struct elf_size_info elf32_arm_size_info = #define bfd_elf32_new_section_hook elf32_arm_new_section_hook #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol #define bfd_elf32_bfd_final_link elf32_arm_final_link +#define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab #define elf_backend_get_symbol_type elf32_arm_get_symbol_type #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook @@ -15790,6 +18086,7 @@ const struct elf_size_info elf32_arm_size_info = #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms #define elf_backend_begin_write_processing elf32_arm_begin_write_processing #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook +#define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs #define elf_backend_can_refcount 1 #define elf_backend_can_gc_sections 1 @@ -15801,6 +18098,7 @@ const struct elf_size_info elf32_arm_size_info = #define elf_backend_default_use_rela_p 0 #define elf_backend_got_header_size 12 +#define elf_backend_extern_protected_data 1 #undef elf_backend_obj_attrs_vendor #define elf_backend_obj_attrs_vendor "aeabi" @@ -15813,16 +18111,21 @@ const struct elf_size_info elf32_arm_size_info = #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown +#undef elf_backend_section_flags +#define elf_backend_section_flags elf32_arm_section_flags +#undef elf_backend_lookup_section_flags_hook +#define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags + #include "elf32-target.h" /* Native Client targets. 
*/ #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec +#define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf32-littlearm-nacl" #undef TARGET_BIG_SYM -#define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec +#define TARGET_BIG_SYM arm_elf32_nacl_be_vec #undef TARGET_BIG_NAME #define TARGET_BIG_NAME "elf32-bigarm-nacl" @@ -15859,20 +18162,43 @@ elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info) && nacl_modify_segment_map (abfd, info)); } +static void +elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker) +{ + elf32_arm_final_write_processing (abfd, linker); + nacl_final_write_processing (abfd, linker); +} + +static bfd_vma +elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt, + const arelent *rel ATTRIBUTE_UNUSED) +{ + return plt->vma + + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) + + i * ARRAY_SIZE (elf32_arm_nacl_plt_entry)); +} + #undef elf32_bed -#define elf32_bed elf32_arm_nacl_bed +#define elf32_bed elf32_arm_nacl_bed #undef bfd_elf32_bfd_link_hash_table_create #define bfd_elf32_bfd_link_hash_table_create \ elf32_arm_nacl_link_hash_table_create #undef elf_backend_plt_alignment -#define elf_backend_plt_alignment 4 +#define elf_backend_plt_alignment 4 #undef elf_backend_modify_segment_map #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map #undef elf_backend_modify_program_headers #define elf_backend_modify_program_headers nacl_modify_program_headers +#undef elf_backend_final_write_processing +#define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing +#undef bfd_elf32_get_synthetic_symtab +#undef elf_backend_plt_sym_val +#define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val +#undef elf_backend_copy_special_section_fields + +#undef ELF_MINPAGESIZE +#undef ELF_COMMONPAGESIZE -#undef ELF_MAXPAGESIZE -#define ELF_MAXPAGESIZE 0x10000 #include "elf32-target.h" @@ -15881,15 +18207,22 @@ elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info) #undef elf_backend_modify_segment_map #define elf_backend_modify_segment_map elf32_arm_modify_segment_map #undef elf_backend_modify_program_headers +#undef elf_backend_final_write_processing +#define elf_backend_final_write_processing elf32_arm_final_write_processing +#undef ELF_MINPAGESIZE +#define ELF_MINPAGESIZE 0x1000 +#undef ELF_COMMONPAGESIZE +#define ELF_COMMONPAGESIZE 0x1000 + /* VxWorks Targets. */ #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec +#define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks" #undef TARGET_BIG_SYM -#define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec +#define TARGET_BIG_SYM arm_elf32_vxworks_be_vec #undef TARGET_BIG_NAME #define TARGET_BIG_NAME "elf32-bigarm-vxworks" @@ -16171,11 +18504,11 @@ elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd) /* Symbian OS Targets. 
*/ #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec +#define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf32-littlearm-symbian" #undef TARGET_BIG_SYM -#define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec +#define TARGET_BIG_SYM arm_elf32_symbian_be_vec #undef TARGET_BIG_NAME #define TARGET_BIG_NAME "elf32-bigarm-symbian" @@ -16281,7 +18614,6 @@ elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt, return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i; } - #undef elf32_bed #define elf32_bed elf32_arm_symbian_bed