elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
/* pr_pid */
- elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
+ elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
/* pr_reg */
offset = 72;
blx to reach the stub if necessary. */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
- ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
+ ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
};
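
The PIC sequence above relies on a standard property of ARM state: a read of PC returns the address of the current instruction plus 8. A minimal worked sketch of the address arithmetic, assuming the usual S + A - P computation for R_ARM_REL32 (the helper and values below are illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* ldr ip, [pc]     at stub+0 loads the word stored at stub+8.
   add pc, pc, ip   at stub+4 reads PC as stub+12.
   The data word resolves to (X - 4) - (stub + 8) for destination X.  */
static uint32_t
pic_stub_target (uint32_t stub, uint32_t x)
{
  uint32_t word = (x - 4) - (stub + 8);  /* R_ARM_REL32 with addend -4.  */
  return (stub + 4 + 8) + word;          /* PC read by the add, plus ip.  */
}

int
main (void)
{
  assert (pic_stub_target (0x8000, 0x200000) == 0x200000);
  return 0;
}
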
ARMv7). */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
- ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
+ ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
ARM_INSN(0xe12fff1c), /* bx ip */
DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
struct a8_erratum_reloc {
bfd_vma from;
bfd_vma destination;
+ struct elf32_arm_link_hash_entry *hash;
+ const char *sym_name;
unsigned int r_type;
unsigned char st_type;
- const char *sym_name;
bfd_boolean non_a8_stub;
};
Tag_CPU_arch);
int profile;
+ if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
+ return TRUE;
+
if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
return FALSE;
/* If a stub is needed, record the actual destination type. */
if (stub_type != arm_stub_none)
- {
- *actual_st_type = st_type;
- }
+ *actual_st_type = st_type;
return stub_type;
}
Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
+/* Return the required alignment, in bytes, for stubs of type STUB_TYPE. */
+
+static unsigned int
+arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
+{
+ switch (stub_type)
+ {
+ case arm_stub_a8_veneer_b_cond:
+ case arm_stub_a8_veneer_b:
+ case arm_stub_a8_veneer_bl:
+ return 2;
+
+ case arm_stub_long_branch_any_any:
+ case arm_stub_long_branch_v4t_arm_thumb:
+ case arm_stub_long_branch_thumb_only:
+ case arm_stub_long_branch_v4t_thumb_thumb:
+ case arm_stub_long_branch_v4t_thumb_arm:
+ case arm_stub_short_branch_v4t_thumb_arm:
+ case arm_stub_long_branch_any_arm_pic:
+ case arm_stub_long_branch_any_thumb_pic:
+ case arm_stub_long_branch_v4t_thumb_thumb_pic:
+ case arm_stub_long_branch_v4t_arm_thumb_pic:
+ case arm_stub_long_branch_v4t_thumb_arm_pic:
+ case arm_stub_long_branch_thumb_only_pic:
+ case arm_stub_a8_veneer_blx:
+ return 4;
+
+ default:
+ abort (); /* Should be unreachable. */
+ }
+}
+
static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
void * in_arg)
struct bfd_link_info *info;
asection *stub_sec;
bfd *stub_bfd;
- bfd_vma stub_addr;
bfd_byte *loc;
bfd_vma sym_value;
int template_size;
stub_sec = stub_entry->stub_sec;
if ((globals->fix_cortex_a8 < 0)
- != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
- /* We have to do the a8 fixes last, as they are less aligned than
- the other veneers. */
+ != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
+ /* We have to do less-strictly-aligned fixes last. */
return TRUE;
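
The rewritten test is an exclusive-or of two booleans: stubs whose required alignment is 2 (the Cortex-A8 veneers) are skipped unless this is the pass that builds them, and vice versa for the 4-byte-aligned long-branch stubs. A small sketch of that predicate, assuming the stub table is walked twice with the Cortex-A8 pass last (names here are illustrative):

#include <assert.h>
#include <stdbool.h>

/* Return true if a stub with the given required alignment should be skipped
   on this traversal: the pass that builds the 2-byte-aligned Cortex-A8
   veneers must not build the 4-byte-aligned stubs, and vice versa.  */
static bool
skip_on_this_pass (bool a8_pass, unsigned int required_alignment)
{
  return a8_pass != (required_alignment == 2);
}

int
main (void)
{
  assert (skip_on_this_pass (false, 2));   /* Defer A8 veneers to the A8 pass.  */
  assert (!skip_on_this_pass (true, 2));
  assert (!skip_on_this_pass (false, 4));  /* Ordinary stubs go first.  */
  assert (skip_on_this_pass (true, 4));
  return 0;
}
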
/* Make a note of the offset within the stubs for this entry. */
stub_bfd = stub_sec->owner;
- /* This is the address of the start of the stub. */
- stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
- + stub_entry->stub_offset;
-
/* This is the address of the stub destination. */
sym_value = (stub_entry->target_value
+ stub_entry->target_section->output_offset
unsigned int size;
template_sequence = stub_definitions[stub_type].template_sequence;
+ if (stub_template)
+ *stub_template = template_sequence;
+
template_size = stub_definitions[stub_type].template_size;
+ if (stub_template_size)
+ *stub_template_size = template_size;
size = 0;
for (i = 0; i < template_size; i++)
default:
BFD_FAIL ();
- return FALSE;
+ return 0;
}
}
- if (stub_template)
- *stub_template = template_sequence;
-
- if (stub_template_size)
- *stub_template_size = template_size;
-
return size;
}
static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
- void * in_arg)
+ void * in_arg ATTRIBUTE_UNUSED)
{
struct elf32_arm_stub_hash_entry *stub_entry;
- struct elf32_arm_link_hash_table *htab;
const insn_sequence *template_sequence;
int template_size, size;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
- htab = (struct elf32_arm_link_hash_table *) in_arg;
BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
&& stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
{
char *error_message = NULL;
struct elf_link_hash_entry *entry;
+ bfd_boolean use_plt = FALSE;
/* We don't care about the error returned from this
function, only if there is glue or not. */
if (entry)
found->non_a8_stub = TRUE;
- if (found->r_type == R_ARM_THM_CALL
- && found->st_type != STT_ARM_TFUNC)
- force_target_arm = TRUE;
- else if (found->r_type == R_ARM_THM_CALL
- && found->st_type == STT_ARM_TFUNC)
- force_target_thumb = TRUE;
+ /* Keep the condition simple, for the sake of clarity. */
+ if (htab->splt != NULL && found->hash != NULL
+ && found->hash->root.plt.offset != (bfd_vma) -1)
+ use_plt = TRUE;
+
+ if (found->r_type == R_ARM_THM_CALL)
+ {
+ if (found->st_type != STT_ARM_TFUNC || use_plt)
+ force_target_arm = TRUE;
+ else
+ force_target_thumb = TRUE;
+ }
}
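
The intent is that an R_ARM_THM_CALL which will be redirected through the PLT reaches ARM code regardless of the target symbol's own state, so it must be counted as an ARM-destination branch by the Cortex-A8 erratum scan. A hedged restatement of the decision as a standalone predicate (names are illustrative, not the patch's):

#include <assert.h>
#include <stdbool.h>

/* For an R_ARM_THM_CALL: a call routed through the PLT is assumed to reach
   ARM state; otherwise the symbol's own state (STT_ARM_TFUNC marks a Thumb
   function) decides.  */
static bool
target_is_arm (bool sym_is_thumb_func, bool use_plt)
{
  return !sym_is_thumb_func || use_plt;
}

int
main (void)
{
  assert (target_is_arm (false, false));  /* ARM symbol, direct call.       */
  assert (target_is_arm (true, true));    /* Thumb symbol, but via the PLT. */
  assert (!target_is_arm (true, false));  /* Thumb symbol, direct call.     */
  return 0;
}
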
/* Check if we have an offending branch instruction. */
{
/* It's a local symbol. */
Elf_Internal_Sym *sym;
- Elf_Internal_Shdr *hdr;
if (local_syms == NULL)
{
}
sym = local_syms + r_indx;
- hdr = elf_elfsections (input_bfd)[sym->st_shndx];
- sym_sec = hdr->bfd_section;
+ if (sym->st_shndx == SHN_UNDEF)
+ sym_sec = bfd_und_section_ptr;
+ else if (sym->st_shndx == SHN_ABS)
+ sym_sec = bfd_abs_section_ptr;
+ else if (sym->st_shndx == SHN_COMMON)
+ sym_sec = bfd_com_section_ptr;
+ else
+ sym_sec =
+ bfd_section_from_elf_index (input_bfd, sym->st_shndx);
+
if (!sym_sec)
/* This is an undefined symbol. It can never
be resolved. */
a8_relocs[num_a8_relocs].st_type = st_type;
a8_relocs[num_a8_relocs].sym_name = sym_name;
a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
+ a8_relocs[num_a8_relocs].hash = hash;
num_a8_relocs++;
}
struct bfd_link_hash_entry *bh;
bfd_vma val;
struct _arm_elf_section_data *sec_data;
- int errcount;
elf32_vfp11_erratum_list *newerr;
hash_table = elf32_arm_hash_table (link_info);
myh->forced_local = 1;
/* Link veneer back to calling location. */
- errcount = ++(sec_data->erratumcount);
+ sec_data->erratumcount += 1;
newerr = (elf32_vfp11_erratum_list *)
bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
{
elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
- int errcount;
- errcount = ++(elf32_arm_section_data (sec)->erratumcount);
+ elf32_arm_section_data (sec)->erratumcount += 1;
newerr->u.b.vfp_insn = veneer_of_insn;
unsigned long r_symndx;
bfd_byte * hit_data = contents + rel->r_offset;
bfd * dynobj = NULL;
- Elf_Internal_Shdr * symtab_hdr;
- struct elf_link_hash_entry ** sym_hashes;
bfd_vma * local_got_offsets;
asection * sgot = NULL;
asection * splt = NULL;
sgot = bfd_get_section_by_name (dynobj, ".got");
splt = bfd_get_section_by_name (dynobj, ".plt");
}
- symtab_hdr = & elf_symtab_hdr (input_bfd);
- sym_hashes = elf_sym_hashes (input_bfd);
local_got_offsets = elf_local_got_offsets (input_bfd);
r_symndx = ELF32_R_SYM (rel->r_info);
/* A branch to an undefined weak symbol is turned into a jump to
the next instruction unless a PLT entry will be created.
- Do the same for local undefined symbols.
+ Do the same for local undefined symbols (but not for STN_UNDEF).
The jump to the next instruction is optimized as a NOP depending
on the architecture. */
if (h ? (h->root.type == bfd_link_hash_undefweak
&& !(splt != NULL && h->plt.offset != (bfd_vma) -1))
- : bfd_is_und_section (sym_sec))
+ : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
{
value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
undefined symbol. This is a daft object file, but we
should at least do something about it. V4BX & NONE
relocations do not use the symbol and are explicitly
- allowed to use the undefined symbol, so allow those. */
+ allowed to use the undefined symbol, so allow those.
+ Likewise for relocations against STN_UNDEF. */
if (r_type != R_ARM_V4BX
&& r_type != R_ARM_NONE
+ && r_symndx != STN_UNDEF
&& bfd_is_und_section (sec)
&& ELF_ST_BIND (sym->st_info) != STB_WEAK)
{
name = bfd_section_name (input_bfd, sec);
}
- if (r_symndx != 0
+ if (r_symndx != STN_UNDEF
&& r_type != R_ARM_NONE
&& (h == NULL
|| h->root.type == bfd_link_hash_defined
2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
codes which have been inlined into the index).
+ If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
+
The edits are applied when the tables are written
(in elf32_arm_write_section).
*/
bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
unsigned int num_text_sections,
- struct bfd_link_info *info)
+ struct bfd_link_info *info,
+ bfd_boolean merge_exidx_entries)
{
bfd *inp;
unsigned int last_second_word = 0, i;
/* Inlined unwinding data. Merge if equal to previous. */
else if ((second_word & 0x80000000) != 0)
{
- if (last_second_word == second_word && last_unwind_type == 1)
+ if (merge_exidx_entries
+ && last_second_word == second_word && last_unwind_type == 1)
elide = 1;
unwind_type = 1;
last_second_word = second_word;
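
An inlined entry (bit 31 of the second word set) can now only be elided when the new merge_exidx_entries flag allows it and the previous kept entry was an identical inlined entry. A standalone sketch of that rule (helper name and test value are illustrative; 0x80b0b0b0 is the common "finish" unwind encoding):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Decide whether an inlined .ARM.exidx entry may be dropped: only when
   merging is requested, the previous kept entry was also inlined unwind
   data (unwind_type 1 in the patch's terms), and the opcodes are equal.  */
static bool
can_elide_inlined_entry (bool merge_exidx_entries,
                         uint32_t second_word,
                         uint32_t last_second_word,
                         int last_unwind_type)
{
  if ((second_word & 0x80000000u) == 0)
    return false;               /* Not an inlined entry.  */
  return merge_exidx_entries
         && second_word == last_second_word
         && last_unwind_type == 1;
}

int
main (void)
{
  /* Two identical inlined entries: the second one can be elided.  */
  assert (can_elide_inlined_entry (true, 0x80b0b0b0u, 0x80b0b0b0u, 1));
  /* Same data, but merging disabled by the new parameter.  */
  assert (!can_elide_inlined_entry (false, 0x80b0b0b0u, 0x80b0b0b0u, 1));
  return 0;
}
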
/* Process stub sections (eg BE8 encoding, ...). */
struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
int i;
- for(i=0; i<htab->top_id; i++) {
- sec = htab->stub_group[i].stub_sec;
- if (sec) {
- osec = sec->output_section;
- elf32_arm_write_section (abfd, info, sec, sec->contents);
- if (! bfd_set_section_contents (abfd, osec, sec->contents,
- sec->output_offset, sec->size))
- return FALSE;
+ for (i = 0; i < htab->top_id; i++)
+ {
+ sec = htab->stub_group[i].stub_sec;
+ /* Only process it once, in its link_sec slot. */
+ if (sec && i == htab->stub_group[i].link_sec->id)
+ {
+ osec = sec->output_section;
+ elf32_arm_write_section (abfd, info, sec, sec->contents);
+ if (! bfd_set_section_contents (abfd, osec, sec->contents,
+ sec->output_offset, sec->size))
+ return FALSE;
+ }
}
- }
/* Write out any glue sections now that we have created all the
stubs. */
static int
elf32_arm_obj_attrs_order (int num)
{
- if (num == 4)
+ if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
return Tag_conformance;
- if (num == 5)
+ if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
return Tag_nodefaults;
if ((num - 2) < Tag_nodefaults)
return num - 2;
}
}
- for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
+ for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
{
/* Merge this attribute with existing attributes. */
switch (i)
case Tag_ABI_FP_exceptions:
case Tag_ABI_FP_user_exceptions:
case Tag_ABI_FP_number_model:
- case Tag_VFP_HP_extension:
+ case Tag_FP_HP_extension:
case Tag_CPU_unaligned_access:
case Tag_T2EE_use:
- case Tag_Virtualization_use:
case Tag_MPextension_use:
/* Use the largest value specified. */
if (in_attr[i].i > out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_ABI_align8_preserved:
+ case Tag_ABI_align_preserved:
case Tag_ABI_PCS_RO_data:
/* Use the smallest value specified. */
if (in_attr[i].i < out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_ABI_align8_needed:
+ case Tag_ABI_align_needed:
if ((in_attr[i].i > 0 || out_attr[i].i > 0)
- && (in_attr[Tag_ABI_align8_preserved].i == 0
- || out_attr[Tag_ABI_align8_preserved].i == 0))
+ && (in_attr[Tag_ABI_align_preserved].i == 0
+ || out_attr[Tag_ABI_align_preserved].i == 0))
{
/* This error message should be enabled once all non-conformant
binaries in the toolchain have had the attributes set
out_attr[i].i = in_attr[i].i;
break;
+ case Tag_Virtualization_use:
+ /* The virtualization tag effectively stores two bits of
+ information: the intended use of TrustZone (in bit 0), and the
+ intended use of Virtualization (in bit 1). */
+ if (out_attr[i].i == 0)
+ out_attr[i].i = in_attr[i].i;
+ else if (in_attr[i].i != 0
+ && in_attr[i].i != out_attr[i].i)
+ {
+ if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
+ out_attr[i].i = 3;
+ else
+ {
+ _bfd_error_handler
+ (_("error: %B: unable to merge virtualization attributes "
+ "with %B"),
+ obfd, ibfd);
+ result = FALSE;
+ }
+ }
+ break;
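
Treating the tag as the two-bit set {TrustZone, Virtualization} makes the merge rule easy to state: 0 yields to the other input, equal values pass through, any mix of the defined values 1-3 unions to 3, and only values above 3 are rejected. A sketch of that rule outside the real merge loop (illustrative helper, not the patch's code):

#include <assert.h>

/* Merge two Tag_Virtualization_use values as the patch describes: returns
   the merged value, or -1 where the real code reports an error (one of the
   inputs is an unknown value greater than 3).  */
static int
merge_virtualization_use (int in, int out)
{
  if (out == 0)
    return in;
  if (in == 0 || in == out)
    return out;
  if (in <= 3 && out <= 3)
    return 3;   /* Union of TrustZone (bit 0) and Virtualization (bit 1).  */
  return -1;    /* Unmergeable: unknown tag values.  */
}

int
main (void)
{
  assert (merge_virtualization_use (1, 2) == 3);  /* TrustZone + Virtualization.  */
  assert (merge_virtualization_use (0, 2) == 2);
  assert (merge_virtualization_use (3, 3) == 3);
  assert (merge_virtualization_use (4, 1) == -1);
  return 0;
}
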
case Tag_CPU_arch_profile:
if (out_attr[i].i != in_attr[i].i)
}
}
break;
- case Tag_VFP_arch:
+ case Tag_FP_arch:
{
+ /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
+ the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
+ when it's 0. It might mean absence of FP hardware if
+ Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
+
static const struct
{
int ver;
int regs;
int newval;
+ /* If the output has no requirement about FP hardware,
+ follow the requirement of the input. */
+ if (out_attr[i].i == 0)
+ {
+ BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
+ out_attr[i].i = in_attr[i].i;
+ out_attr[Tag_ABI_HardFP_use].i
+ = in_attr[Tag_ABI_HardFP_use].i;
+ break;
+ }
+ /* If the input has no requirement about FP hardware, do
+ nothing. */
+ else if (in_attr[i].i == 0)
+ {
+ BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
+ break;
+ }
+
+ /* Both the input and the output have nonzero Tag_FP_arch.
+ So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
+
+ /* If both the input and the output have zero Tag_ABI_HardFP_use,
+ do nothing. */
+ if (in_attr[Tag_ABI_HardFP_use].i == 0
+ && out_attr[Tag_ABI_HardFP_use].i == 0)
+ ;
+ /* If the input and the output have different Tag_ABI_HardFP_use,
+ the combination of them is 3 (SP & DP). */
+ else if (in_attr[Tag_ABI_HardFP_use].i
+ != out_attr[Tag_ABI_HardFP_use].i)
+ out_attr[Tag_ABI_HardFP_use].i = 3;
+
+ /* Now we can handle Tag_FP_arch. */
+
/* Values greater than 6 aren't defined, so just pick the
biggest */
if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
/* Merged in target-independent code. */
break;
case Tag_ABI_HardFP_use:
- /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
- if ((in_attr[i].i == 1 && out_attr[i].i == 2)
- || (in_attr[i].i == 2 && out_attr[i].i == 1))
- out_attr[i].i = 3;
- else if (in_attr[i].i > out_attr[i].i)
- out_attr[i].i = in_attr[i].i;
+ /* This is handled along with Tag_FP_arch. */
break;
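
Taken together, the rules above reduce to this: a side whose Tag_FP_arch is 0 contributes nothing, and once both sides have an FP architecture, a Tag_ABI_HardFP_use of 0 stands for SP + DP, so any disagreement combines to 3. A sketch of just the Tag_ABI_HardFP_use step, assuming both inputs already have nonzero Tag_FP_arch (illustrative helper, not the patch's code):

#include <assert.h>

/* Merge Tag_ABI_HardFP_use once both inputs have a nonzero Tag_FP_arch
   (the cases where one side's FP_arch is 0 are handled earlier by copying
   or ignoring that side wholesale).  */
static int
merge_hardfp_use (int in, int out)
{
  if (in == 0 && out == 0)
    return 0;           /* Both implicitly mean SP + DP; nothing to do.     */
  if (in != out)
    return 3;           /* Any disagreement combines to SP + DP explicitly. */
  return out;           /* Identical nonzero values pass through.           */
}

int
main (void)
{
  assert (merge_hardfp_use (0, 0) == 0);
  assert (merge_hardfp_use (1, 2) == 3);  /* SP-only vs DP-only -> SP + DP.      */
  assert (merge_hardfp_use (0, 2) == 3);  /* 0 means SP + DP once FP_arch != 0.  */
  assert (merge_hardfp_use (1, 1) == 1);
  return 0;
}
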
case Tag_ABI_FP_16bit_format:
if (in_attr[i].i != 0 && out_attr[i].i != 0)
const Elf_Internal_Rela *rel_end;
bfd *dynobj;
asection *sreloc;
- bfd_vma *local_got_offsets;
struct elf32_arm_link_hash_table *htab;
bfd_boolean needs_plt;
unsigned long nsyms;
}
dynobj = elf_hash_table (info)->dynobj;
- local_got_offsets = elf_local_got_offsets (abfd);
-
symtab_hdr = & elf_symtab_hdr (abfd);
sym_hashes = elf_sym_hashes (abfd);
nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
/* PR 9934: It is possible to have relocations that do not
refer to symbols, thus it is also possible to have an
object file containing relocations but no symbol table. */
- && (r_symndx > 0 || nsyms > 0))
+ && (r_symndx > STN_UNDEF || nsyms > 0))
{
(*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
r_symndx);
void * in_arg)
{
struct elf32_arm_stub_hash_entry *stub_entry;
- struct bfd_link_info *info;
asection *stub_sec;
bfd_vma addr;
char *stub_name;
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
osi = (output_arch_syminfo *) in_arg;
- info = osi->info;
-
stub_sec = stub_entry->stub_sec;
/* Ensure this stub is attached to the current section being
data = (struct a8_branch_to_stub_data *) in_arg;
if (stub_entry->target_section != data->writing_section
- || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
+ || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
return TRUE;
contents = data->contents;
};
#define ELF_ARCH bfd_arch_arm
+#define ELF_TARGET_ID ARM_ELF_DATA
#define ELF_MACHINE_CODE EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE 0x1000